Dataset columns (value statistics from the preview):

  query             string    lengths 9 – 9.05k
  document          string    lengths 10 – 222k
  metadata          dict
  negatives         sequence  length 30 (fixed)
  negative_scores   sequence  length 30 (fixed)
  document_score    string    lengths 4 – 10
  document_rank     string    2 distinct values
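A minimal sketch of loading and inspecting a row with this layout using the Hugging Face datasets library; the dataset id "user/code-search-triplets" is a placeholder, not the real identifier, and the score fields are assumed to arrive as strings, as the preview statistics suggest.

from datasets import load_dataset

# Placeholder Hub path: substitute the actual dataset id.
ds = load_dataset("user/code-search-triplets", split="train")

example = ds[0]
print(example["query"])              # natural-language description / docstring
print(example["document"])           # the matching code snippet
print(len(example["negatives"]))     # 30 hard-negative snippets per row
neg_scores = [float(s) for s in example["negative_scores"]]
print(float(example["document_score"]), example["document_rank"])

The preview rows below follow the same column order: query, document, metadata, negatives, negative_scores, document_score, document_rank.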
Save the anonymised c3d file.
def saveC3D(self, outputdir=None, condition=None, trialno=None):
    fpath = self.createFilePath(outputdir, condition, trialno)
    self.trialC3D.write(fpath)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_spi3d(self):\n lut = self.generate_lut()\n file_path = os.path.join(self.output, self.name)\n file_io.save_file(lut, file_path)", "def save(self, path):\n if isinstance(path, str) and path.startswith(\"s3://\"):\n raise NotImplementedError(\"TODO: Implement saving to s3\")\n path = Path(path).resolve()\n \n # Make a folder to save things into\n if path.suffix == \".zip\":\n folder = path.parent / f\"tmp_{path.stem}\"\n else:\n folder = path\n folder.mkdir(parents=True, exist_ok=False)\n \n # Save the trace - ugly hack needed due to multiindex not being supported yet\n ifd = self.ifd_.copy()\n ifd.constant_data = ifd.constant_data.reset_index('idx')\n ifd.observed_data = ifd.observed_data.reset_index('idx')\n ifd.prior_predictive = ifd.prior_predictive.reset_index('idx')\n ifd.posterior_predictive = ifd.posterior_predictive.reset_index('idx')\n ifd.log_likelihood = ifd.log_likelihood.reset_index('idx')\n az.to_netcdf(ifd, folder / \"ifd.nc\")\n \n # Save other data\n self.C_mean_.rename(\"C_mean_\").to_csv(folder / \"C_mean_.csv\")\n self.C_std_.rename(\"C_std_\").to_csv(folder / \"C_std_.csv\")\n \n if path.suffix == \".zip\":\n # Save to zip\n zf = ZipFile(path, mode='w')\n for file in folder.glob(\"*\"):\n zf.write(folder / file.name, arcname=file.name)\n zf.close()\n shutil.rmtree(folder)", "def save_file(self, filename):\n if self.t3data:\n np.savetxt(filename, self.t3data)\n else:\n self.export_to_ascii()", "def save(filename, points3, tris, metadata):\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', chip_ids=self.chip_ids, core_ids=self.core_ids, cx_ids=self.cx_ids)", "def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)", "def save_spi3d(self):\n for filename, colormap in colors.colormaps.items():\n if self.test:\n self.print_colormap(filename, colormap)\n lut = self.generate_spi3d_from_colormap(colormap)\n file_path = os.path.join(self.output, filename)\n file_io.save_file(lut, file_path)\n\n for filename, ev_colormap in colors.ev_colormaps.items():\n if self.test:\n self.print_colormap(filename, ev_colormap)\n lut = self.generate_spi3d_from_evs(ev_colormap)\n file_path = os.path.join(self.output, filename)\n file_io.save_file(lut, file_path)", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save(self, fname):\n lh_data = self.data[:len(self.lh_vertno)]\n rh_data = self.data[-len(self.rh_vertno):]\n\n print 'Writing STC to disk...',\n write_stc(fname + '-lh.stc', tmin=self.tmin, 
tstep=self.tstep,\n vertices=self.lh_vertno, data=lh_data)\n write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.rh_vertno, data=rh_data)\n print '[done]'", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', core_ids=self.core_ids, cx_ids=self.cx_ids)", "def write_gff3(self,gff3_file=None):\r\n # write the new gff3\r\n if gff3_file:\r\n outfile = open(gff3_file, 'w')\r\n else:\r\n outfile = sys.stdout\r\n for set in self.sets:\r\n if isinstance(set, GT_seq_location):\r\n outfile.write(set.compose())\r\n else:\r\n outfile.write(set)\r\n outfile.close()", "def Construct3DMolToFile(fileName,writeFile):\r\n # Writing sets of molecules\r\n \r\n\r\n w = Chem.SDWriter(writeFile)\r\n suppl = Chem.SDMolSupplier(fileName)\r\n mols = [x for x in suppl]\r\n for mol in mols:\r\n \t# print(mol.GetProp(\"Solvent\"))\r\n \t# print(mol.GetPropNames)\r\n \tsignal.signal(signal.SIGALRM, handler)\r\n \tsignal.alarm(100)\r\n \ttry:\r\n \t\tmol3d = GetMolFromMol(mol,dimension=3)\r\n \t\tw.write(mol3d)\r\n \texcept Exception:\r\n \t\tmol3d = mol\r\n \t\tw.write(mol3d)\r\n \t\t# print(mol.GetPropsAsDict())\r\n\r\n\r\n w.close()", "def save(self, _name):\r\n try:\r\n with open(_name, 'w+') as fout:\r\n fout.write(\".cube file generated from prt_esolv.py\\n\")\r\n fout.write(f\"{_name}\\n\")\r\n\r\n fout.write(\r\n f\"{int(self.n_atoms)} {float(self.origin[0])} {float(self.origin[1])} {float(self.origin[2])}\\n\")\r\n\r\n fout.write(f\"{int(self.n_x)} {float(self.x[0])} {float(self.x[1])} {float(self.x[2])}\\n\")\r\n fout.write(f\"{int(self.n_y)} {float(self.y[0])} {float(self.y[1])} {float(self.y[2])}\\n\")\r\n fout.write(f\"{int(self.n_z)} {float(self.z[0])} {float(self.z[1])} {float(self.z[2])}\\n\")\r\n\r\n for atom, xyz in zip(self.atoms, self.atoms_xyz):\r\n fout.write(f\"{atom} 0 {xyz[0]} {xyz[1]} {xyz[2]}\\n\")\r\n\r\n for ix in range(self.n_x):\r\n for iy in range(self.n_y):\r\n for iz in range(self.n_z):\r\n fout.write(f\"{self.data[ix][iy][iz]}\")\r\n if iz % 6 == 5:\r\n fout.write('\\n')\r\n fout.write(\"\\n\")\r\n except IOError:\r\n print(f\"Can't create {_name} file!!!\")\r\n raise\r\n\r\n return None", "def save_pca(self, filepath):\n mean_beam, principal_components, variances = self.pca()\n image_shape = np.array(self.image_shape)\n with open(filepath, 'wb') as f:\n np.save(f, image_shape)\n np.save(f, mean_beam)\n np.save(f, principal_components)\n np.save(f, variances)\n np.save(f, self.mask)", "def save(self,outPath=None):\n if (not self.canSave or self.skipObjRecords): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.changed = 1\n self.tes3.hedr.changed = 1\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Size Cell Records\n cntRecords = 0\n progress = self.progress\n progress.setMax(len(self.cells))\n progress(0.0,'Saving '+self.fileInfo.name)\n for record in self.cells:\n record.getSize()\n #--Progress\n cntRecords += 1\n progress(cntRecords)\n #--Other Records\n for record in self.records:\n record.getSize() #--Should already be done, but just in case.\n record.dump(out)\n out.close()", "def 
save_n3d_coords(file_path, coords_dict, seq_pos_dict): \n \n file_obj = open(file_path, 'w')\n write = file_obj.write\n \n for chromo in seq_pos_dict:\n chromo_coords = coords_dict[chromo]\n chromo_seq_pos = seq_pos_dict[chromo]\n \n num_models = len(chromo_coords)\n num_coords = len(chromo_seq_pos)\n \n if chromo[:3].lower() != 'chr':\n chromo_name = 'chr' + chromo\n else:\n chromo_name = chromo\n \n line = '%s\\t%d\\t%d\\n' % (chromo_name, num_coords, num_models)\n write(line)\n \n for j in range(num_coords):\n data = chromo_coords[:,j].ravel().tolist()\n data = '\\t'.join('%.8f' % d for d in data)\n \n line = '%d\\t%s\\n' % (chromo_seq_pos[j], data)\n write(line)\n\n file_obj.close()", "def save(self, path):\n np.savez_compressed(path, **self.model_dict)", "def create_e3d_file(self,path='./'):\n dt=0.606*self.model_parameters['dh']/np.max(self.velocity_model['vp']) # dt needs to satify the courant condition\n t=int(self.model_parameters['duration']/dt)\n \n # Check path exists, if not create one\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Create e3d parameter file\n f=open('%s%s_e3dmodel.txt'%(path,self.model_name),'w')\n f.write(\"grid x=%s z=%s dh=%s b=2 q=1\\ntime dt=%0.5f t=%s\\n\"%(self.model_parameters['xmax'],self.model_parameters['zmax'],self.model_parameters['dh'],dt,t))\n f.write(\"block p=%s s=%s r=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][0],self.velocity_model['vs'][0],self.velocity_model['rho'][0]))\n \n for i in range(1,len(self.velocity_model['vp'])-1):\n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][i],self.velocity_model['vs'][i],self.velocity_model['rho'][i],\n self.velocity_model['depth'][i],self.velocity_model['depth'][i+1]))\n \n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\\n\"%(self.velocity_model['vp'][i+1],self.velocity_model['vs'][i+1],self.velocity_model['rho'][i+1],\n self.velocity_model['depth'][i+1],self.model_parameters['zmax'])) # extend to the based of the model \n \n f.write(\"visual movie=5\\n\\n\")\n\n if self.source['src_type']!=4:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'])) \n else:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s Mxx=%s Myy=%s Mzz=%s Mxy=%s Mxz=%s Myz=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'],self.source['mt'][0],self.source['mt'][1],self.source['mt'][2],self.source['mt'][3],self.source['mt'][4],self.source['mt'][5])) \n\n for r in range(len(self.receivers['recxs'])):\n f.write('sac x=%0.3f z=%0.3f file=%s\\n'%(self.receivers['recxs'][r],self.receivers['reczs'][r],self.model_name))\n\n f.write(\"visual sample=0.1 movie=1 scale=10000000000/n\")\n f.close()\n \n print('File created: %s%s_e3dmodel.txt'%(path,self.model_name))", "def save(self, output, data):", "def save_state(self, file):\n np.savez(file, z_mn=self.z_mn, theta=self.theta, phi=self.phi,\n z_best=self.z_best, ll_best=self.ll_best, log=self.log)", "def save(self):\n # TODO: save the file", "def writeMCToGR3File(filename, mc):\n nodes = np.vstack((mc.x, mc.y)).T\n nodalValues = mc.data[:, 0, 0].squeeze()[:, None]\n connectivity = mc.connectivity\n openBndNodes = []\n landBndNodes = []\n writeGR3File(filename, '', nodes, nodalValues, connectivity, mc.boundaries)", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def save(self, path):\n 
save(self.actor_net.state_dict(), path + '_actor.pkl')\n save(self.critic_net.state_dict(), path + '_critic.pkl')", "def save_to_xyz(self, filename): \n with open( filename, 'a' ) as F:\n F = open( filename, 'a' )\n F.write( '%d\\n'%self.num_atoms )\n F.write( \"XYZ\\n\" )\n for num,row in enumerate(self.atoms):\n try:\n F.write('%s '%self.species[num])\n except:\n F.write('X%d '%num)\n F.write( mat2str( row, \"%16.10f\" ) )\n F.write( \"\\n\" )", "def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))", "def _save_model(self):\n with open(self.filepath, 'wb') as file:\n pickle.dump(self.cmodel, file)", "def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"" ]
[ "0.6890905", "0.6811769", "0.6598678", "0.6471373", "0.642383", "0.63084406", "0.6303359", "0.627521", "0.62555593", "0.6254733", "0.6240116", "0.6228313", "0.6198321", "0.6152307", "0.6072066", "0.60581666", "0.60330385", "0.6018503", "0.5994799", "0.5973119", "0.59553504", "0.5954806", "0.5917288", "0.5916417", "0.59065247", "0.5876068", "0.585212", "0.5850753", "0.5835718", "0.5822824" ]
0.7049525
0
Takes a parsed YAML node and translates it to a list of arguments that can be passed to any subprocess command. For dictionary values, the values can only be of type str or list; nested dictionaries are not supported.
def _flat_node_to_cmd_line_args(node):
    if isinstance(node, list):
        return node
    elif isinstance(node, dict):
        return list(itertools.chain(*[['--%s' % key, node[key]]
                                      if isinstance(node[key], basestring)
                                      else ['--%s' % key] + node[key]
                                      for key in node.keys()]))
    elif isinstance(node, basestring):
        return node.split()
    else:
        raise ValueError("%s node has an unsupported data type" % node)
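A short usage sketch of the function above; the input values are invented, and the snippet assumes Python 2 semantics since the function relies on basestring (under Python 3 one would substitute str).

_flat_node_to_cmd_line_args(['--verbose', '--level', '3'])
# -> ['--verbose', '--level', '3']

_flat_node_to_cmd_line_args({'output': 'out.txt', 'include': ['a.py', 'b.py']})
# -> ['--output', 'out.txt', '--include', 'a.py', 'b.py']   (key order may vary)

_flat_node_to_cmd_line_args('run --fast')
# -> ['run', '--fast']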
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def yaml_to_args( obj ):\n if isinstance( obj, list ):\n args = []\n for arg in obj:\n for key in arg:\n args.append((key,arg[key]))\n return args\n elif isinstance( obj, dict ):\n return [(key, obj[key]) for key in obj ]\n else:\n return obj", "def _args_to_params(self, args, tree):\n with tree.treeChangeBlocker():\n for key, val in args.items():\n if 'range' in key:\n _range = tree.child(key)\n if val is None:\n _range.child(\"Auto\").setValue(True)\n else:\n _range.child(\"Low\").setValue(val[0])\n _range.child(\"High\").setValue(val[1])\n _range.child(\"Auto\").setValue(False)\n elif key == 'polarization_factor':\n if val is None:\n tree.child('Apply polarization factor').setValue(True)\n else:\n tree.child('Apply polarization factor').setValue(True)\n tree.child(key).setValue(val)\n else:\n try:\n child = tree.child(key)\n except:\n # No specific error thrown for missing child\n child = None\n if child is not None:\n if val is None:\n child.setValue('None')\n else:\n child.setValue(val)", "def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)", "def _params_to_args(self, args, tree):\n for child in tree.children():\n if 'range' in child.name():\n if child.child(\"Auto\").value():\n args[child.name()] = None\n else:\n args[child.name()] = [child.child(\"Low\").value(),\n child.child(\"High\").value()]\n elif child.name() == 'polarization_factor':\n pass\n elif child.name() == 'Apply polarization factor':\n if child.value():\n args['polarization_factor'] = \\\n tree.child('polarization_factor').value()\n else:\n args['polarization_factor'] = None\n else:\n val = child.value()\n if val == 'None': \n args[child.name()] = None\n else:\n args[child.name()] = val", "def Args(parser):", "def _dict_to_args(self, arg_dict):\n if arg_dict:\n yield \"--{}=data:application/json;charset=utf-8,{}\".format(\n self._CONFIG_FLAG.name,\n urllib.parse.quote(json_encode(arg_dict, pretty=False), encoding=\"utf-8\")\n )", "def parse_arguments(args):", "def _parse_args(self, args : dict):\n result = {}\n for key, value in args.items():\n if key in self._subparsers:\n # if it's a list, it is because it's a preset\n if isinstance(value, list):\n result[key] = value[0]\n else:\n result[key] = self._subparsers[key]._parse_args(value)\n elif key in self._actions:\n result[key] = self._actions[key](value)\n else:\n raise ValueError(f\"Unknown argument {key}\")\n\n return result", "def args(hub, val: List[str] or str) -> Tuple[List[str], Dict[str, str]]:\n args = []\n kwargs = {}\n for v in hub.render.cli.render(val):\n if isinstance(v, dict):\n kwargs.update(v)\n else:\n args.append(v)\n\n return args, kwargs", "def _check_and_transform_args(cls, args: Sequence[Any]) -> Sequence['Node']:\n if len(args) != len(cls._arg_data_types):\n raise ValueError(f'{cls.__name__} expects {len(cls._arg_data_types)} '\n f'arguments but got {len(args)}.')\n transformed_args = []\n for arg, arg_data_type in zip(args, cls._arg_data_types):\n if (arg_data_type == DataType.ARTIFACT_MULTIMAP and\n isinstance(arg, dict)):\n arg = DictNode(arg)\n if not isinstance(arg, Node):\n raise ValueError('Cannot directly call ResolverOp with real values. 
'\n 'Use output of another operator as an argument.')\n if arg.output_data_type != arg_data_type:\n raise TypeError(\n f'{cls.__name__} takes {DataType.Name(arg_data_type)} type '\n f'but got {DataType.Name(arg.output_data_type)} instead.')\n transformed_args.append(arg)\n return transformed_args", "def toargs(context, schema, data):\n data = dict(data)\n args = {}\n for name, field in schema.namesAndDescriptions(True):\n field = field.bind(context)\n n = name\n if n.endswith('_') and iskeyword(n[:-1]):\n n = n[:-1]\n\n s = data.get(n, data)\n if s is not data:\n s = str(s)\n del data[n]\n\n try:\n args[str(name)] = field.from_unicode(s)\n except ValidationError as v:\n reraise(ConfigurationError('Invalid value for', n, str(v)),\n None, sys.exc_info()[2])\n elif field.required:\n # if the default is valid, we can use that:\n default = field.default\n try:\n field.validate(default)\n except ValidationError:\n raise ConfigurationError('Missing parameter:', n)\n args[str(name)] = default\n\n if data:\n # we had data left over\n try:\n keyword_arguments = schema.getTaggedValue('keyword_arguments')\n except KeyError:\n keyword_arguments = False\n if not keyword_arguments:\n raise ConfigurationError('Unrecognized parameters:', *data)\n\n for name in data:\n args[str(name)] = data[name]\n\n return args", "def configToCliArguments(config):\n if not isinstance(config, dict):\n raise TypeError(\"Expected dict for config\")\n\n args = []\n for key, value in config.items():\n if value == None:\n args.append(f\"--{key}\")\n continue\n\n if isinstance(value, list):\n value = \",\".join(value)\n args.append(f\"--{key}={value}\")\n\n return args", "def command_friendly_kv_pair(dict):\n # subprocess.run expects parameters to be in the foo=bar format. We build this format here and return a list\n output = []\n for key, value in dict.items():\n output.append('%s=%s' % (key, value))\n return output", "def epc_arg_transformer(arg):\n if type(arg) != list:\n return arg\n\n # NOTE: Empty list elisp can be treated as both empty python dict/list\n # Convert empty elisp list to empty python dict due to compatibility.\n\n # check if we can tranform arg to python dict instance\n type_dict_p = len(arg) % 2 == 0\n if type_dict_p:\n for v in arg[::2]:\n if type(v) != sexpdata.Symbol or not v.value().startswith(\":\"):\n type_dict_p = False\n break\n\n if type_dict_p:\n # transform [Symbol(\":a\"), 1, Symbol(\":b\"), 2] to dict(a=1, b=2)\n ret = dict()\n for i in range(0, len(arg), 2):\n ret[arg[i].value()[1:]] = epc_arg_transformer(arg[i + 1])\n return ret\n else:\n return list(map(epc_arg_transformer, arg))", "def config_to_args(config):\n result = []\n\n for key, value in iteritems(config):\n if value is False:\n continue\n\n key = '--{0}'.format(key.replace('_', '-'))\n\n if isinstance(value, (list, set, tuple)):\n for item in value:\n result.extend((key, smart_str(item)))\n elif value is not True:\n result.extend((key, smart_str(value)))\n else:\n result.append(key)\n\n return tuple(result)", "def parseArgs():\n\n def getInput(name: str, *, required=False):\n val = os.environ.get(f'INPUT_{name.replace(\" \", \"_\").upper()}', \"\")\n if not val and required:\n raise ValueError(f\"Missing required parameter: {name}\")\n return val\n\n certificate = getInput(\"certificate\", required=True)\n private_key = getInput(\"private_key\", required=True)\n connectorId = getInput(\"connector_id\", required=True)\n host = getInput(\"host\", required=True)\n body = yaml.load(getInput(\"args\", required=True), yaml.Loader)\n\n 
with string_to_tempfile(certificate) as cert_file, string_to_tempfile(\n private_key\n ) as key_file:\n yield ActionArgs(\n host=host,\n auth=AuthCert(cert=Path(cert_file.name), private_key=Path(key_file.name)),\n args=AddOrUpdateIncident2Args(**body, connectorId=connectorId),\n )", "def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value", "def yamlConfigForParsingPlugins():\n parameters = \"\"\"\njoinPaths: !joinPaths\n - a\n - b\n - \"c\"\nrunPageTemplates: !findRunPageTemplates\n - \"templates\"\nbcrypt: !bcrypt\n bcryptLogRounds: 12\n user: \"pass\"\nbcryptNoUser: !bcrypt\n bcryptLogRounds: 12\n null: null\nsecretKey: !secretKey 12345\nsecretKeyGen: !secretKey null\n \"\"\"\n # Load parameters\n parameters = yaml.load(parameters, Loader = yaml.SafeLoader)\n return parameters", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def cli(arg_dict):\n\n keys = list(arg_dict.keys())\n for key in keys:\n v = arg_dict[key]\n usr_args_ls = sys.argv\n for ind in range(len(usr_args_ls)):\n val = usr_args_ls[ind]\n if val == \"-\" + key[0] or val == \"--\" + key:\n if type(v).__name__ == \"bool\":\n v = not v\n else:\n v = usr_args_ls[ind + 1]\n\n arg_dict[key] = v", "def dump(arg):\n return yaml.safe_dump_all(\n arg,\n allow_unicode=True,\n default_flow_style=False,\n )", "def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to 
dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. 
If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments", "def parse_arguments(self):\n self.args = self.argparser.parse_args(self.template_args) # noqa: T484\n\n # get values from args or defaults\n for name, (categ, rest) in self.data.items():\n if categ not in '<>?':\n continue\n val = getattr(self.args, name)\n if rest.get('type') == 'flag':\n val = str(rest.get('val')) if val else ''\n else:\n val = val if val is not None else rest.get('default')\n self.variables[name] = val\n\n # possibly fill in substitutions in the template variables\n findreplace = re.compile(r'{{\\s*(\\w+)\\s*}}')\n for name, val in self.variables.items():\n if findreplace.search(val):\n t = jinja2.Template(val)\n self.variables[name] = t.render(self.variables)", "def add_args(self, parser):", "def arguments(self):\n return parse_arguments(self['data'])", "def _get_args(item):\n args = item.get('args')\n if args and not isinstance(args, collections.Sequence):\n args = (args,)\n return args", "def parse_list_args(args):\n\n args.image_transformers = parse_transformers(args.image_transformers)\n args.tensor_transformers = parse_transformers(args.tensor_transformers)\n args.test_image_transformers = parse_transformers(args.test_image_transformers)\n args.test_tensor_transformers = parse_transformers(args.test_tensor_transformers)\n\n args.block_layout = parse_block_layout(args.block_layout)", "def import_args_from_dict(value, args, config):\n if isinstance(value, six.string_types):\n for match in TOKEN_REGEX.finditer(str(value)):\n token = match.group(1)\n if token in args:\n actual_param = args[token]\n if isinstance(actual_param, six.string_types):\n value = value.replace(\"@\"+token, args[token])\n else:\n value = actual_param\n elif isinstance(value, list):\n return [import_args_from_dict(item, args, config) for item in value]\n elif isinstance(value, dict):\n return {\n key: import_args_from_dict(val, args, config)\n for key, val in value.items()\n }\n elif isinstance(value, tuple):\n return tuple(import_args_from_dict(val, args, config) for val in value)\n return value", "def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]':\n if not args:\n return {}\n result = {}\n for a in args:\n if not a:\n continue\n arg = Argument(a)\n result[arg.name] = arg\n return result", "def _get_normal_args(self, config_name: str, config_value: Union[list, dict, str, bool]) -> str:\n try:\n arg = self.args[config_name]\n except KeyError as e:\n incorrect_key = e.args[0]\n raise exceptions.IncorrectConfigException(config_name=self.config_name, incorrect_key=incorrect_key)\n arg_type, name = arg[\"type\"], arg[\"name\"]\n\n arg_is_dict_or_list = set(arg_type).intersection([list, dict])\n arg_is_str = isinstance(config_value, str)\n\n if arg_is_dict_or_list and not arg_is_str:\n args = self._convert_list_to_args(config_value, name)\n elif bool in arg_type:\n args = f\"{name} \"\n else:\n if isinstance(config_value, list) and str in arg_type:\n config_value = \" \".join(config_value)\n args = f\"{name} {config_value} \"\n\n return args" ]
[ "0.66765696", "0.6097904", "0.59122384", "0.5839684", "0.58242154", "0.5823643", "0.5762801", "0.567206", "0.56293094", "0.5592766", "0.5565872", "0.5493016", "0.54782856", "0.5477749", "0.5472146", "0.54652345", "0.5385457", "0.5366413", "0.5267159", "0.5258225", "0.5252986", "0.52494943", "0.5240564", "0.52373004", "0.5215189", "0.52123374", "0.520837", "0.52080935", "0.520454", "0.5199911" ]
0.6760684
0
Add Capacity via UI.
def add_capacity_ui(self):
    self.navigate_installed_operators_page()
    self.do_click(self.infra_loc["ocs_operator"])
    self.do_click(self.infra_loc["storage_cluster_tab"])
    self.do_click(self.infra_loc["kebab_storage_cluster"])
    self.do_click(self.infra_loc["add_capacity_button"])
    self.do_click(self.infra_loc["select_sc_add_capacity"])
    self.do_click(self.infra_loc[self.storage_class])
    self.do_click(self.infra_loc["confirm_add_capacity"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "def capacity(self, value: typing.Union[str, int, None]):\n self._properties[\"capacity\"] = _types.integer_or_string(value)", "def capacity(self) -> Capacity:\n raw = self._call('GET', 'capacity')\n return Capacity.parse_raw(raw)", "def set_capacity(self, cap):\n self._capacity.type = 'value'\n self._capacity._value = float(cap) # TODO getter/setter", "def set_capacity(self, capacity):\r\n params = {\r\n 'AutoScalingGroupName' : self.name,\r\n 'DesiredCapacity' : capacity,\r\n }\r\n req = self.connection.get_object('SetDesiredCapacity', params,\r\n Request)\r\n self.connection.last_request = req\r\n return req", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def capacity(self):\n raise NotImplementedError()", "def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))", "def Capacity(self) -> int:", "def capacitygroup_group():", "def capacity(self, capacity):\n\n self._capacity = capacity", "def add_subsystem(self):\n ss = self.subsystemComboBox.itemData(self.subsystemComboBox.currentIndex())\n\n # Create the subsystem view\n # Add it to the Tab\n #ssUI = subsystem_view.Ui_Subsystem()\n ssVM = subsystem_vm.SubsystemVM(self.tabSubsystem, self, ss, None)\n ss_label = \"[\" + str(ss) + \"] - \" + SS.ss_label(ss)\n self.tabSubsystem.addTab(ssVM, ss_label)\n\n # Add subsystem to CEPO\n self.cepo_list.append(ss)\n\n # Update the Burst ID\n self.updateBurstID()\n\n # Recalculate\n self.calculate()\n\n self.parent.statusBar().showMessage(ss_label + ' added to configuration.')", "def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_map[comp][cap_res] # production index of the governing resource\n # production is always lower than capacity\n ## NOTE get_capacity returns (data, meta) and data is dict\n ## TODO does this work with, e.g., ARMA-based capacities?\n ### -> \"time\" is stored on \"m\" and could be used to correctly evaluate the capacity\n cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing resource)\n rule = partial(self._capacity_rule, prod_name, r, cap)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)\n # minimum production\n print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())\n if comp.is_dispatchable() == 'fixed':\n minimum = cap\n var = getattr(m, prod_name)\n values = var.get_values()\n for k in values:\n values[k] = cap\n var.set_values(values)\n else:\n minimum = 0 # -> for now just use 0, but fix this! XXX\n print('DEBUGG ... 
min:', minimum)\n rule = partial(self._min_prod_rule, prod_name, r, cap, minimum)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_minprod_constr'.format(c=name, r=cap_res), constr)", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def capacitygroup_add_partition(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_add_partition(\n cmd_ctx, cpc, capacitygroup, options))", "def capacity(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"capacity\")", "def __init__(__self__, *,\n capacity: Optional[int] = None,\n name: Optional[str] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def capacity_used(self):\n raise NotImplementedError()", "def test_rest_v20_dd_systems_systemid_stats_capacity_get(self):\n pass", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def capacity(self):\n return str(int(self._properties.get('capacity')) * 1073741824)", "def get_capacity_param(self):\n intr = self.get_interaction()\n return intr.get_capacity(None, None, None, None, raw=True)", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def show_board(self):\n print(self.capacity_list)", "def scale_plant_capacity(self, resource, zone_name=None, plant_id=None):\n self._add_plant_entries(resource, resource, zone_name, plant_id)", "def add_storage_capacity(self, info):\n if not isinstance(info, list):\n raise TypeError(\"Argument enclosing new storage(s) must be a list\")\n\n info = copy.deepcopy(info)\n new_storages = []\n required = {\"bus_id\", \"capacity\"}\n optional = {\n \"duration\",\n \"min_stor\",\n \"max_stor\",\n \"energy_value\",\n \"InEff\",\n \"OutEff\",\n \"LossFactor\",\n \"terminal_min\",\n \"terminal_max\",\n }\n anticipated_bus = self._get_df_with_new_elements(\"bus\")\n for i, storage in enumerate(info):\n self._check_entry_keys(storage, i, \"storage\", required, None, optional)\n if storage[\"bus_id\"] not in anticipated_bus.index:\n raise ValueError(\n f\"No bus id {storage['bus_id']} available for {ordinal(i)} storage\"\n )\n for o in optional:\n if o not in storage:\n storage[o] = self.grid.storage[o]\n for k, v in storage.items():\n if not isinstance(v, (int, float)):\n err_msg = f\"values must be numeric, bad type for {ordinal(i)} {k}\"\n raise ValueError(err_msg)\n if v < 0:\n raise ValueError(\n f\"values must be non-negative, bad value for {ordinal(i)} {k}\"\n )\n for k in {\"min_stor\", \"max_stor\", \"InEff\", \"OutEff\", \"LossFactor\"}:\n if storage[k] > 1:\n raise ValueError(\n f\"value for {k} must be <=1, bad value for {ordinal(i)} storage\"\n )\n new_storages.append(storage)\n if \"storage\" not in self.ct:\n self.ct[\"storage\"] = []\n self.ct[\"storage\"] += new_storages", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is None:\n name = 'S0'\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is None:\n tier = 'Standard'\n 
if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def initialize_supply(self):\n unit_count = 0\n for i in range(self.start_allocation[0 ] -1, self.start_allocation[1]):\n for j in range(len(self.capacity_list[i][1])):\n self.capacity_list[i][1][j] = 1\n unit_count += 1\n self.total_supply -= unit_count", "def __init__(__self__, *,\n active_capacity: int,\n capacity: Optional[int] = None,\n scale_type: Optional[str] = None):\n pulumi.set(__self__, \"active_capacity\", active_capacity)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if scale_type is not None:\n pulumi.set(__self__, \"scale_type\", scale_type)", "def __init__(__self__, *,\n name: pulumi.Input[str],\n capacity: Optional[pulumi.Input[int]] = None,\n tier: Optional[pulumi.Input[Union[str, 'CapacitySkuTier']]] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)" ]
[ "0.669308", "0.6396389", "0.6279793", "0.6190911", "0.6123263", "0.610596", "0.60797817", "0.604714", "0.6036148", "0.5991488", "0.5956968", "0.5945528", "0.5833985", "0.58118176", "0.5805878", "0.58014524", "0.57177514", "0.5693951", "0.567192", "0.563066", "0.5628445", "0.56257415", "0.56210625", "0.56124985", "0.56073165", "0.55981344", "0.5579868", "0.5569737", "0.55591327", "0.5540074" ]
0.8122652
0
Plot parameter combinations at time t, with an allowed deviation from t of epsilon.
def plot_params_at_time(folder, t, epsilon=0.1, show_params=False):
    # Create and label the plot
    if "2s" not in folder:
        logging.log(40, "Plot not available for the 3-state model")
        return
    ax = plt.axes()
    ax.get_xaxis().get_major_formatter().set_useOffset(False)
    plt.xlabel("ps")
    plt.ylabel("pm")
    filenames = [name for name in os.listdir(folder) if name.startswith("Sim_")]
    filenames.sort()
    # Go through all simulations and plot those that meet the condition
    dots = iter(["r^", "co", "ks", "g^", "yo", "ms", "b^", "ko", "gs", "m^",
                 "ro", "ys", "c^", "bo", "ws"])
    for filename in filenames:
        with open(folder + filename, "r+b") as data:
            sim = pickle.load(data)
            if sim.valid:
                if abs(sim.pd[0] - t) > epsilon:
                    logging.log(24, "Deviation too large, %s, at sim %s", sim.pd[0], sim)
                else:
                    # Marker size reflects the width (IQR) of the peak
                    logging.log(20, "Found: %s", sim.pd)
                    ax.plot(sim.params[0], sim.params[1], dots.__next__(),
                            markersize=2 + (sim.pd[2]),
                            label=(str(round(sim.pd[2], 2)) + " " + str(round(sim.pd[0], 2))))
                    if show_params:
                        ax.text(sim.params[0], sim.params[1],
                                str(round(sim.params[0], 8)) + "_" + str(round(sim.params[1], 5)))
                    logging.log(21, sim.pd[3])
    plt.suptitle("Parameters for time " + str(t) + " with deviation " + str(epsilon))
    plt.legend(title="Width, retention time", numpoints=1, loc=2)
    plt.show()
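A hedged call sketch for the helper above; the folder name, time, and tolerance are invented, and the folder is assumed to hold pickled Sim_* result files (its name must contain "2s", i.e. the two-state model).

plot_params_at_time("results_2s/", t=12.0, epsilon=0.1, show_params=True)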
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def e_from_p_function(self):\r\n\r\n (fig, ax) = plt.subplots()\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.eng_t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurements',\r\n )\r\n ax.plot(self.dt.momentum_t, self.dt.eng_t_acc, linestyle='-',\r\n color='red', label='continuum')\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.en,\r\n linestyle='--',\r\n marker='x',\r\n color='blue',\r\n label=\"Euler's method\",\r\n )\r\n\r\n # error\r\n\r\n ax.errorbar(self.dt.momentum_t, self.dt.eng_t, fmt='k ',\r\n yerr=self.dt.eng_t_err_sum)\r\n\r\n xm = -1.0\r\n for i in range(len(self.dt.momentum_t)):\r\n if self.dt.momentum_t[i] > xm:\r\n xm = self.dt.momentum_t[i]\r\n stepx = round(xm / float(len(self.dt.momentum_t)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0, xm]) # 0.85\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('p')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx) # stepx$ step on x is base=0.1\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n ym = -1.0\r\n y0 = self.dt.mass\r\n for i in range(len(self.dt.eng_t)):\r\n if self.dt.eng_t[i] > ym:\r\n ym = self.dt.eng_t[i]\r\n stepy = round((ym - y0) / float(len(self.dt.eng_t)), 2)\r\n\r\n # print ym, stepy\r\n\r\n ym = round(ym + stepy, 2)\r\n ax.set_ylim([y0 - stepy, ym]) # 0.9, 1.4\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('E')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy) # stepy $ step on y is base=0.05\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def epsilon_delta(self):", "def __init__(self, epsilon=1e-14):\n self.epsilon = epsilon", "def plot_forces_parachute(t, v, dt, tp, m, a_first, a_last):\n plt.figure()\n drag = zeros(len(v))\n for i in range(len(v)):\n if(i*dt <= tp):\n drag[i] = -m*a_first*abs(v[i])*v[i]\n else:\n drag[i] = -m*a_last*abs(v[i])*v[i]\n grav = [-m*9.81]*len(v)\n Boyancy = [1. 
* 9.81 * 0.1]*len(v) # rho * g * V\n Fsum = drag+grav+Boyancy\n plt.plot(t, drag, t, grav, t, Boyancy, t, Fsum)\n plt.legend([\"Drag force\", \"Gravity force\", \"Boyancy\", \"Sum of forces\"])\n plt.savefig('Parachute_forces.png')", "def plot_parameter(self, parm):\n # If user wants to plot density, make sure it exists\n if parm == 'density' and 'density' not in self.ds.data_vars:\n self.insert_density()\n \n if parm == 'theta' and 'theta' not in self.ds.data_vars:\n self.insert_potential_density()\n \n if parm == 'N' and 'N' not in self.ds.data_vars:\n self.insert_buoyancy_frequency()\n \n # Use xarray to plot this parameter\n self.ds[parm].plot(y=self.ztsp[0])\n if plt.ylim()[0] <= 0:\n plt.gca().invert_yaxis()\n plt.tight_layout()\n \n plt.show()", "def _gammaParameter(self, t : float, dtau : float) -> float:\n pass", "def v_from_p_function(self):\r\n\r\n track_c = [] # p classical function,\r\n for i in range(len(self.dt.momentum_t)):\r\n track_c.append(self.dt.momentum_t[i] / self.dt.mass)\r\n\r\n (fig, ax) = plt.subplots()\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n track_c,\r\n linestyle=':',\r\n linewidth=1,\r\n color='b',\r\n label='classic',\r\n )\r\n\r\n # marker=\"+\", markersize = 13,\r\n # ax.plot(self.dt.momentum_t, self.observer.velT, linestyle=\" \",\r\n # color=\"k\",marker=\"+\", markersize = 13, label=\"measurement\")\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vel_t,\r\n linestyle=' ',\r\n color='k',\r\n marker='o',\r\n label='result of measurements',\r\n )\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vel_anl,\r\n linestyle='-',\r\n color='red',\r\n linewidth=1,\r\n label='continuum',\r\n )\r\n\r\n # Euler's method == analitical function. We not plot it.\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vn,\r\n linestyle='--',\r\n color='blue',\r\n marker='x',\r\n linewidth=1,\r\n label=\"Euler's method\",\r\n )\r\n\r\n # error\r\n\r\n ax.errorbar(self.dt.momentum_t, self.dt.vel_t, fmt='k ',\r\n yerr=self.dt.vel_t_err)\r\n\r\n xm = -1.0\r\n for i in range(len(self.dt.momentum_t)):\r\n if self.dt.momentum_t[i] > xm:\r\n xm = self.dt.momentum_t[i]\r\n stepx = round(xm / float(len(self.dt.momentum_t)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0, xm]) # xm = 0.85\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('p')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx) # step on x is base=0.1\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # line draw\r\n\r\n line = matplotlib.lines.Line2D([0.0, 9.0], [1.0, 1.0], color='b'\r\n )\r\n ax.add_line(line)\r\n plt.text(0.7, 1.01, u'light speed', horizontalalignment='center'\r\n )\r\n ax.set_ylim([0, 1.1])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('v')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=0.1) # step on y is base=0.1\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n # pylab.show()\r\n\r\n plt.show()", "def p(e, t):\n return b * e ** 2", "def line_param(point_a, point_b, t):\n new_point = point_a - point_b\n return point_b + t*new_point", "def plotComparison(x, nt, nx, c, phi, phiExact, methodName):\n \n 
plt.figure()\n plt.plot(x, phiExact)\n\n plt.plot(x, phi)\n plt.ylim([-0.2, 1.4])\n plt.title(str(methodName)+\" scheme\\nExact vs Numerical solution \"\\\n \"nt=\"+str(nt)+\", nx=\"+str(nx)+\"\\n\"\n \"Courant number: \"+str(c))\n plt.show()", "def graphe_regret_temps(opt, rgt, res):\n T = np.arange(opt.size)\n fig, ax = plt.subplots()\n ax.grid(True)\n plt.xlabel(\"T\")\n \n ax.plot(T, rgt, label = 'regret') \n ax.plot(T, opt, label = 'optimal')\n ax.plot(T, res, label = 'resultat obtenu')\n plt.legend(loc = \"upper left\")", "def variation_of_parameters(y: List[Symbol], gt: Symbol, t: Symbol = t, do_integral=True) -> Tuple[Symbol, Procedure]:\n W, w = Wronskian(y, t)\n goW = simplify(gt / W)\n\n yp = 0\n\n Wdets = []\n integrals = []\n\n col = [0] * len(y)\n col[-1] = 1\n for i in range(len(y)):\n Wi = w.copy()\n Wi[:, i] = col.copy()\n\n # reduce cos^2 t + sin^2 t to 1\n Wi_det = trigsimp(simplify(Wi.det()), deep=True, recursive=True)\n\n integrand = (Wi_det * goW).expand()\n integral = integrate(\n integrand, t) if do_integral else Integral(integrand, t)\n yp += y[i] * integral\n\n if do_integral:\n integrals.append(\n Eq(Dummy('mu_{}'.format(i + 1)),\n Eq(Integral(integrand, t), integral, evaluate=False), evaluate=False)\n )\n else:\n integrals.append(Eq(Dummy('mu_{}'.format(i)),\n Integral(integrand, t), evaluate=False))\n\n Wdets.append(\n Eq(Symbol('W{}'.format(i+1)), Eq(Determinant(Wi), Wi_det, evaluate=False), evaluate=False))\n\n yps = logcombine(simplify(yp))\n\n procedure = Procedure()\n procedure\\\n .text('Compute the Wronskian determinant', nl=True)\\\n .eq(Eq(Dummy('W'), Eq(Determinant(w), W, evaluate=False), evaluate=False))\\\n .text('Compute ').latex('W_i', nl=True)\\\n .equlist(Wdets)\\\n .text('Calculate and simplify ').latex('\\\\frac{g(t)}{W(t)}', nl=True)\\\n .eq(Eq(sympy.Mul(gt, sympy.Pow(W, -1, evaluate=False), evaluate=False), goW, evaluate=False))\\\n .text('Compute ').latex('\\\\mu_i = \\\\int \\\\frac{g(t)W_i(t)}{W(t)} dt', nl=True)\\\n .equlist(integrals)\\\n .text('Compute the sum ').latex('\\\\sum_{i=1}^{k} y_i \\\\int \\\\frac{g(t)W_i(t)}{W(t)} dt', nl=True)\\\n .equlist([\n Eq(Dummy('y_p'), yp, evaluate=False),\n Eq(Dummy('y_p'), yps, evaluate=False)\n ])\\\n .text('Complementray + particular = general', nl=True)\\\n .eq(Eq(Dummy('y'), to_general(y, yps)[0], evaluate=False))\n\n return yps, procedure", "def __init__(self, eps: float=1e-5):\n self.eps = eps", "def __init__(self, epsilon=1e-7):\n super().__init__()\n self.epsilon = epsilon", "def __init__(self, p=1.5, eps=1e-8):\n assert 1 < p < 2, \"make sure 1 < p < 2\" \n self.p, self.eps = p, eps", "def _define_epsilon(n,T,a=1):\n\n return np.sqrt(np.log(n)/T)*a", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def plotComparison2(x, nt, nx, c, phi, phiExact, methodName): \n plt.plot(x, phiExact)\n\n plt.plot(x, phi, label=methodName)\n plt.ylim([-0.2, 1.4])", "def SVP_deviation_step(i0, Tf, offset, SVP, plot):\n SVP_D=SVP[0]\n SVP_V=SVP[1]\n\n theoric_angle = 
i0\n tf=Tf/2\n d0=0\n X=0\n Y=0\n T=0\n\n X_list=[X]\n Y_list=[Y]\n T_list=[T]\n i=0\n\n end_flag=False\n\n while not(end_flag):\n #Vecteur unitaire de la couche\n x=cos(i0+offset)\n y=sin(i0+offset)\n\n #Calcul du tps passe dans la couche ainsi que la distance parcourue dans cette couche\n if i==(len(SVP_V)-1):#Deniere couche, on sait le tps qu'il manque et la vitesse\n t=tf-T\n d=SVP_V[i]*t\n end_flag=True\n else:#Sinon on calcul la distance à la fin de la couche puis le temps passé dans la couche\n d=(SVP_D[i]-d0)/cos(i0)\n t=d/SVP_V[i]\n T+=t\n if T>=tf:#Si on a depasse le temps tf alors on sait qu'on va finir dans cette couche et on mesure la distance parcourue\n t=(tf-(T-t))\n d=SVP_V[i]*t\n end_flag=True\n\n #Calcul du vecteur cumule\n X+=x*d\n Y+=y*d\n\n #Enregistrement du vecteur cumule et du tps passe dans la couche\n X_list.append(X)\n Y_list.append(Y)\n T_list.append(t)\n\n\n #Initialisation de la prochaine couche\n if i<(len(SVP_V)-1):\n i0=np.arcsin(sin(i0)*SVP_V[i+1]/SVP_V[i])\n d0=SVP_D[i]\n i+=1\n\n\n if plot:\n plt.figure()\n plt.plot(X_list,Y_list,'or')\n plt.plot(cos(theoric_angle+offset)*SVP_V[0]*tf,sin(theoric_angle+offset)*SVP_V[0]*tf,'ob')\n for y in Y_list[0:-1]:\n plt.plot([-X_list[-1]-5,X_list[-1]+5],[y,y],'black')\n plt.legend([\"Vector with SVP\",\"Vector without SVP\"])\n plt.xlabel(\"Distance [m]\")\n plt.ylabel(\"Depth [m]\")\n plt.title(\"SVP correction\")\n\n\n return (X_list,Y_list, T_list)", "def plot_v(t, v):\n p1 = plt.plot(t,v)\n plt.xlabel('Time [s]')\n plt.ylabel('Velocity [m/s]')\n plt.title('Velocity for the skydiver as a function of time')\n plt.show()\n plt.savefig('Parachute_velocity.png')", "def comp_time_plot(p1=database['K+'], p2=database['pi+'], pmax=80, plot=True):\r\n dt = []\r\n p_range = np.linspace(10, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n for p in p_range:\r\n t1_per_m = 76.273/(beta(p, m1)*gamma(p, m1)*c)\r\n t2_per_m = 76.273/(beta(p, m2)*gamma(p, m2)*c)\r\n dt.append(abs(t1_per_m - t2_per_m)*1e12)\r\n dt_12_5 = dt[np.argmin(abs(p_range-12.5))]\r\n dt_75 = dt[np.argmin(abs(p_range-75))]\r\n ratio = dt_12_5/dt_75\r\n if plot==True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(p_range, dt, 'b', label=r'$\\Delta t$')\r\n ax.axvline(12.5, color='r', label='p=12.5 GeV')\r\n ax.axvline(75, color='g', label='p=75 GeV')\r\n ax.set_xlim(10, pmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n# ax.set_yscale('log')\r\n ax.set_ylabel(r'$\\Delta t$ / ps', fontsize=20)\r\n title = f'{p1.name} to {p2.name} '\r\n title += r'$\\Delta t$ dependancy on particle momenta'\r\n ax.set_title(title, fontsize=20)\r\n ax.legend(fontsize=20)\r\n text = 'dt(12.5) = {0:.2f} ps, '.format(dt_12_5)\r\n text += 'dt(75) = {0:.2f} ps, '.format(dt_75)\r\n text += 'ratio = {0:.3f}'.format(ratio)\r\n plt.show()\r\n print(text)\r\n return [dt_12_5, dt_75, ratio]", "def plot_T(df, parameters_list, title, point_type):\n slope, intercept, r, p, std_er = stats.linregress(df['T_exp'], df['T_prd'])\n plot = sns.regplot(x='T_exp', y='T_prd', data=df, line_kws={'label':\n \"y={0:.3f}x+{1:.3f}\".format(slope, intercept)})\n plot.legend()\n plot.set(xlabel=\"Experimental Temperature (K)\",\n ylabel=\"Predicted Temperature (K)\")\n plot.set_title(\"Parameters: \" + \", \".join(parameters_list[:-1]))\n plot.figure.suptitle(point_type + \": Experimental Versus Predicted\")\n plot.figure.savefig(title)\n plt.figure()", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n 
plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def plot_params(self,sel='all',niter=None) :\n\t\talpha = np.array([param['alpha'] for param in self.params])\n\t\tbeta = np.array([param['beta'] for param in self.params])\n\t\tg = np.array([param['g'] for param in self.params])\n\t\th = np.array([param['h'] for param in self.params])\n\t\tpdict = {\n\t\t\t'alpha' : alpha,\n\t\t\t'beta' : beta,\n\t\t\t'g' : g,\n\t\t\t'h' : h\n\t\t}\t\n\n\t\t# Plot true params\n\t\tif sel == 'all' : \n\t\t\tfor key in pdict.keys() : \n\t\t\t\tkey_t = self.ar.ss._ss_params[key][self.eqn-1]\n\t\t\t\tif type(key_t) is list : \n\t\t\t\t\tfor t in key_t : \n\t\t\t\t\t\tpl.axhline(y=t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\t\t\t\telse : \n\t\t\t\t\tpl.axhline(y=key_t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\t\telse : \n\t\t\tkey_t = self.ar.ss._ss_params[sel][self.eqn-1]\n\t\t\tif type(key_t) is list : \n\t\t\t\tfor t in key_t : \n\t\t\t\t\tpl.axhline(y=t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\t\t\telse : \n\t\t\t\tpl.axhline(y=key_t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\n\n\t\t# Plot estimated params\n\t\tif sel == 'all' : \n\t\t\tfor key in pdict.keys() : \n\t\t\t\tpl.plot(pdict[key],label=key)\n\t\t\tpl.legend()\n\t\t\tpl.xlabel(\"Iteration\")\n\t\t\tpl.ylabel(\"Params\")\n\t\telse : \n\t\t\tpl.plot(pdict[sel],label=sel)\n\t\t\tpl.legend()\n\t\t\tpl.xlabel(\"Iteration\")\n\t\t\tpl.ylabel(sel)\n\t\tpl.show()", "def draw_parameters_trend(X, 
Y):\n ridge_weights, _ = generate_parameters(X, Y)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(range(-30,30), ridge_weights) # plot of first dimension 和 alpha 定义域一致\n plt.show()", "def set_epsilon(self,epsilon):\r\n\t\tself.epsilon = epsilon", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def test_set_pT(self):\n s = State(substance=\"water\")\n s.pT = Q_(101325.0, \"Pa\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.pT[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None\n assert s.phase == \"gas\"" ]
[ "0.61496925", "0.5935934", "0.59310377", "0.5835695", "0.57948995", "0.5774567", "0.5751974", "0.57288474", "0.5722192", "0.56809306", "0.5624164", "0.5594", "0.55729085", "0.5567407", "0.5561646", "0.55605805", "0.55587107", "0.5550242", "0.5546648", "0.55364907", "0.55207247", "0.5519837", "0.55182076", "0.5497417", "0.54957354", "0.54911524", "0.5454475", "0.54478776", "0.54377586", "0.5428655" ]
0.67288375
0
Yields tuples of (schema_name, function_name)
def functions(self): with self.conn.cursor() as cur: _logger.debug('Functions Query. sql: %r', self.functions_query) cur.execute(self.functions_query % self.dbname) for row in cur: yield row
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generator(fn):\n def wrapped(schema):\n result = fn(schema)\n if result is not None:\n expected_result = _check_for_expected_result(fn.__name__, schema)\n return (fn.__name__, result, expected_result)\n return\n return wrapped", "def yield_column_names(schema: ColumnGroupSchema) -> Iterator[str]:\n if isinstance(schema, str):\n yield schema\n else:\n seq = schema.values() if isinstance(schema, collections.abc.Mapping) else schema\n for value in seq:\n yield from yield_column_names(value)", "def schema_generators():\n return {\n \"trips\": trips_schema,\n \"status_changes\": status_changes_schema,\n \"events\": events_schema,\n \"vehicles\": vehicles_schema,\n \"stops\": stops_schema\n }", "def descriptors(mol, functions):\n for name, function in functions:\n yield (name, function(mol))", "def get_user_functions(table):\n for f in [(f) for f in table.values() if type(f) == types.FunctionType]:\n yield f", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def iter_stats_schema(schema: tfgnn.GraphSchema) -> Iterator[Tuple[str, Any]]:\n\n for set_type, set_name, set_obj in tfgnn.iter_sets(schema):\n if set_type != tfgnn.CONTEXT:\n # Output a feature for the size of the set.\n key = f\"{set_type}/{set_name}/{tfgnn.SIZE_NAME}\"\n yield key, set_obj\n\n # Output the values for each feature.\n for feature_name, feature in set_obj.features.items():\n if tf.dtypes.as_dtype(feature.dtype) == tf.string:\n continue\n key = f\"{set_type}/{set_name}/{feature_name}\"\n yield key, feature", "def schema() -> None:\n pass", "def get_func_tuples():\n func_tuples = [\n ('met_gumeJ1_3sopt_tr20', 'Rel-UME J1', 'C1-.'),\n ('met_gumeJ5_3sopt_tr20', 'Rel-UME J5', 'r-^'),\n ('met_gfssdJ1_3sopt_tr20', 'Rel-FSSD J1', 'C4--'),\n ('met_gfssdJ5_3sopt_tr20', 'Rel-FSSD J5', 'b-x'),\n\n ('met_gmmd_med', 'Rel-MMD', 'k-.'),\n ('met_gmmd_med_bounliphone', 'Rel-MMD medboun', 'k-'),\n\n ('met_gfssdJ1_3sopt_tr50', 'FSSD-opt3 J1', 'b-^'),\n ('met_gfssdJ5_3sopt_tr50', 'FSSD-opt3 J5', 'b-.h'),\n\n ('met_gumeJ1_2V_rand', 'UME-rand J1', 'r--^'),\n ('met_gumeJ1_1V_rand', 'UME-rand J1 1V', 'y-'),\n ('met_gumeJ2_2V_rand', 'UME-rand J2', 'g--^'),\n ('met_gumeJ3_2V_rand', 'UME-rand J3', 'b--^'),\n ('met_gumeJ5_2V_rand', 'UME-rand J5', 'k--^'),\n\n ('met_gumeJ1_2sopt_tr20', 'Rel-UME-opt2 J1', 'C2-.'),\n ('met_gumeJ5_2sopt_tr20', 'Rel-UME-opt2 J5', 'g-'),\n ('met_gumeJ1_2sopt_tr50', 'Rel-UME-opt2 J1', 'r-.h'),\n\n ('met_gumeJ1_3sopt_tr50', 'UME-opt3 J1', 'r-'),\n ('met_gumeJ5_3sopt_tr50', 'UME-opt3 J5', 'k-'),\n\n\n ]\n return func_tuples", "def get_schema(schema): # noqa: E501\n return 'do some magic!'", "def XsamsFunctions(Functions):\n if not isiterable(Functions):\n return\n yield '<Functions>\\n'\n for Function in Functions:\n\n cont, ret = checkXML(Function)\n if cont:\n yield ret\n continue\n\n G = lambda name: GetValue(name, Function=Function)\n yield makePrimaryType(\"Function\", \"Function\", G, extraAttr={\"functionID\":\"F%s-%s\" % (NODEID, G(\"FunctionID\"))})\n\n yield \"<Name>%s</Name>\" % G(\"FunctionName\")\n yield \"<Expression computerLanguage=%s>%s</Expression>\\n\" % (G(\"FunctionComputerLanguage\"), G(\"FunctionExpression\"))\n yield \"<Y name='%s', units='%s'>\" % (G(\"FunctionYName\"), G(\"FunctionYUnits\"))\n desc = G(\"FunctionYDescription\")\n if desc:\n yield \"<Description>%s</Description>\" % desc\n lowlim = G(\"FunctionYLowerLimit\")\n if lowlim:\n yield \"<LowerLimit>%s</LowerLimit>\" % lowlim\n hilim = G(\"FunctionYUpperLimit\")\n if 
hilim:\n yield \"<UpperLimit>%s</UpperLimit>\"\n yield \"</Y>\"\n\n yield \"<Arguments>\\n\"\n for FunctionArgument in Function.Arguments:\n\n cont, ret = checkXML(FunctionArgument)\n if cont:\n yield ret\n continue\n\n GA = lambda name: GetValue(name, FunctionArgument=FunctionArgument)\n yield makeArgumentType(\"Argument\", \"FunctionArgument\", GA)\n yield \"</Arguments>\"\n\n if hasattr(Function, \"Parameters\"):\n yield \"<Parameters>\"\n for Parameter in makeiter(Function.Parameters):\n\n cont, ret = checkXML(Parameter)\n if cont:\n yield ret\n continue\n\n GP = lambda name: GetValue(name, Parameter=Parameter)\n yield \"<Parameter name='%s', units='%s'>\" % (GP(\"FunctionParameterName\"), GP(\"FunctionParameterUnits\"))\n desc = GP(\"FunctionParameterDescription\")\n if desc:\n yield \"<Description>%s</Description>\" % desc\n yield \"</Parameter>\\n\"\n yield \"</Parameters>\"\n\n yield \"\"\"<ReferenceFrame>%s</ReferenceFrame>\n<Description>%s</Description>\n<SourceCodeURL>%s</SourceCodeURL>\n\"\"\" % (G(\"FunctionReferenceFrame\"), G(\"FunctionDescription\"), G(\"FunctionSourceCodeURL\"))\n yield '</Functions>'", "def generate_schema_list():\n src = os.path.join(os.path.dirname(__file__), '../schemas')\n for root, dirs, files in os.walk(src):\n for fname in files:\n if not fname.endswith('.yaml'):\n continue\n if os.path.splitext(fname)[0] in (\n 'draft-01', 'asdf-schema-1.0.0'):\n continue\n yield os.path.join(root, fname)", "def iter_decl(self, node):\n for func in node.functions:\n func.update_names()", "def schema(self):", "def bunch__functions(idfobject): \n funcdct = idfobject.__functions\n funcsresults = [(key, funcdct[key](idfobject)) for key in funcdct.keys()]\n return funcsresults", "def stax_functions(self):\n return self.stax_init, self.stax_apply", "def iter_schemas(self):\n warn(\"you may actually mean .my_schemas\", LikelyMistake)\n return super(CamTypeMixin, self).iter_schemas()", "def make_function_stubs(self):\n res = \"\"\n for node in self.description.declarations() + self.description.definitions():\n if isinstance(node.type,pdl.TypeFunction):\n res += \"def {}({}):\\n pass\".format(node.name, \", \".join(map(\n lambda t: \"{}\".format(t.name), node.type.args)) )\n\n return res", "def get_table_names() -> Iterable[str]:\n for pipeline_name in get_pipeline_names():\n yield pipeline_name.replace(\"_\", \"-\")", "def generate_structure(\n schema: s_schema.Schema,\n *,\n make_funcs: bool=True,\n) -> SchemaReflectionParts:\n\n delta = sd.DeltaRoot()\n classlayout: Dict[\n Type[s_obj.Object],\n SchemaTypeLayout,\n ] = {}\n\n ordered_link = schema.get('schema::ordered', type=s_links.Link)\n\n if make_funcs:\n schema = _run_ddl(\n '''\n CREATE FUNCTION sys::_get_pg_type_for_edgedb_type(\n typeid: std::uuid,\n kind: std::str,\n elemid: OPTIONAL std::uuid,\n sql_type: OPTIONAL std::str,\n ) -> std::int64 {\n USING SQL FUNCTION 'edgedb.get_pg_type_for_edgedb_type';\n SET volatility := 'STABLE';\n SET impl_is_strict := false;\n };\n\n CREATE FUNCTION sys::_expr_from_json(\n data: json\n ) -> OPTIONAL tuple<text: str, refs: array<uuid>> {\n USING SQL $$\n SELECT\n \"data\"->>'text' AS text,\n coalesce(r.refs, ARRAY[]::uuid[]) AS refs\n FROM\n (SELECT\n array_agg(v::uuid) AS refs\n FROM\n jsonb_array_elements_text(\"data\"->'refs') AS v\n ) AS r\n WHERE\n jsonb_typeof(\"data\") != 'null'\n $$;\n SET volatility := 'IMMUTABLE';\n };\n ''',\n schema=schema,\n delta=delta,\n )\n\n py_classes = []\n for py_cls in s_obj.ObjectMeta.get_schema_metaclasses():\n if isinstance(py_cls, 
adapter.Adapter):\n continue\n\n if py_cls is s_obj.GlobalObject:\n continue\n\n py_classes.append(py_cls)\n\n read_sets: Dict[Type[s_obj.Object], List[str]] = {}\n\n for py_cls in py_classes:\n rschema_name = get_schema_name_for_pycls(py_cls)\n schema_objtype = schema.get(\n rschema_name,\n type=s_objtypes.ObjectType,\n default=None,\n )\n\n bases = []\n for base in py_cls.__bases__:\n if base in py_classes:\n bases.append(get_schema_name_for_pycls(base))\n\n default_base = get_default_base_for_pycls(py_cls)\n if not bases and rschema_name != default_base:\n bases.append(default_base)\n\n reflection = py_cls.get_reflection_method()\n is_simple_wrapper = issubclass(py_cls, s_types.CollectionExprAlias)\n\n if schema_objtype is None:\n as_abstract = (\n reflection is s_obj.ReflectionMethod.REGULAR\n and not is_simple_wrapper\n and (\n py_cls is s_obj.InternalObject\n or not issubclass(py_cls, s_obj.InternalObject)\n )\n )\n\n schema = _run_ddl(\n f'''\n CREATE {'ABSTRACT' if as_abstract else ''}\n TYPE {rschema_name}\n EXTENDING {', '.join(str(b) for b in bases)};\n ''',\n schema=schema,\n delta=delta,\n )\n\n schema_objtype = schema.get(\n rschema_name, type=s_objtypes.ObjectType)\n else:\n ex_bases = schema_objtype.get_bases(schema).names(schema)\n _, added_bases = s_inh.delta_bases(\n ex_bases,\n bases,\n t=type(schema_objtype),\n )\n\n if added_bases:\n for subset, position in added_bases:\n # XXX: Don't generate changes for just moving around the\n # order of types when the mismatch between python and\n # the schema, since it doesn't work anyway and causes mass\n # grief when trying to patch the schema.\n subset = [x for x in subset if x.name not in ex_bases]\n if not subset:\n continue\n\n if isinstance(position, tuple):\n position_clause = (\n f'{position[0]} {position[1].name}'\n )\n else:\n position_clause = position\n\n bases_expr = ', '.join(str(t.name) for t in subset)\n\n stmt = f'''\n ALTER TYPE {rschema_name} {{\n EXTENDING {bases_expr} {position_clause}\n }}\n '''\n\n schema = _run_ddl(\n stmt,\n schema=schema,\n delta=delta,\n )\n\n if reflection is s_obj.ReflectionMethod.NONE:\n continue\n\n referrers = py_cls.get_referring_classes()\n\n if reflection is s_obj.ReflectionMethod.AS_LINK:\n if not referrers:\n raise RuntimeError(\n f'schema class {py_cls.__name__} is declared with AS_LINK '\n f'reflection method but is not referenced in any RefDict'\n )\n\n is_concrete = not schema_objtype.get_abstract(schema)\n\n if (\n is_concrete\n and not is_simple_wrapper\n and any(\n not b.get_abstract(schema)\n for b in schema_objtype.get_ancestors(schema).objects(schema)\n )\n ):\n raise RuntimeError(\n f'non-abstract {schema_objtype.get_verbosename(schema)} has '\n f'non-abstract ancestors'\n )\n\n read_shape = read_sets[py_cls] = []\n\n if is_concrete:\n read_shape.append(\n '_tname := .__type__[IS schema::ObjectType].name'\n )\n\n classlayout[py_cls] = {}\n ownfields = py_cls.get_ownfields()\n\n for fn, field in py_cls.get_fields().items():\n sfn = field.sname\n\n if (\n field.ephemeral\n or (\n field.reflection_method\n is not s_obj.ReflectionMethod.REGULAR\n )\n ):\n continue\n\n storage = _classify_object_field(field)\n\n ptr = schema_objtype.maybe_get_ptr(schema, sn.UnqualName(sfn))\n\n if fn in ownfields:\n qual = \"REQUIRED\" if field.required else \"OPTIONAL\"\n otd = \" { ON TARGET DELETE ALLOW }\" if field.weak_ref else \"\"\n if ptr is None:\n schema = _run_ddl(\n f'''\n ALTER TYPE {rschema_name} {{\n CREATE {qual}\n {storage.ptrkind} {sfn} -> {storage.ptrtype}\n {otd};\n 
}}\n ''',\n schema=schema,\n delta=delta,\n )\n ptr = schema_objtype.getptr(schema, sn.UnqualName(fn))\n\n if storage.shadow_ptrkind is not None:\n pn = f'{sfn}__internal'\n internal_ptr = schema_objtype.maybe_get_ptr(\n schema, sn.UnqualName(pn))\n if internal_ptr is None:\n ptrkind = storage.shadow_ptrkind\n ptrtype = storage.shadow_ptrtype\n schema = _run_ddl(\n f'''\n ALTER TYPE {rschema_name} {{\n CREATE {qual}\n {ptrkind} {pn} -> {ptrtype};\n }}\n ''',\n schema=schema,\n delta=delta,\n )\n\n else:\n assert ptr is not None\n\n if is_concrete:\n read_ptr = sfn\n\n if field.type_is_generic_self:\n read_ptr = f'{read_ptr}[IS {rschema_name}]'\n\n if field.reflection_proxy:\n proxy_type, proxy_link = field.reflection_proxy\n read_ptr = (\n f'{read_ptr}: {{name, value := .{proxy_link}.id}}'\n )\n\n if ptr.issubclass(schema, ordered_link):\n read_ptr = f'{read_ptr} ORDER BY @index'\n\n read_shape.append(read_ptr)\n\n if storage.shadow_ptrkind is not None:\n read_shape.append(f'{sfn}__internal')\n\n if field.reflection_proxy:\n proxy_type_name, proxy_link_name = field.reflection_proxy\n proxy_obj = schema.get(\n proxy_type_name, type=s_objtypes.ObjectType)\n proxy_link_obj = proxy_obj.getptr(\n schema, sn.UnqualName(proxy_link_name))\n tgt = proxy_link_obj.get_target(schema)\n else:\n tgt = ptr.get_target(schema)\n assert tgt is not None\n cardinality = ptr.get_cardinality(schema)\n assert cardinality is not None\n classlayout[py_cls][sfn] = SchemaFieldDesc(\n fieldname=fn,\n schema_fieldname=sfn,\n type=tgt,\n cardinality=cardinality,\n properties={},\n storage=storage,\n is_ordered=ptr.issubclass(schema, ordered_link),\n reflection_proxy=field.reflection_proxy,\n )\n\n # Second pass: deal with RefDicts, which are reflected as links.\n for py_cls in py_classes:\n rschema_name = get_schema_name_for_pycls(py_cls)\n schema_cls = schema.get(rschema_name, type=s_objtypes.ObjectType)\n\n for refdict in py_cls.get_own_refdicts().values():\n ref_ptr = schema_cls.maybe_get_ptr(\n schema, sn.UnqualName(refdict.attr))\n ref_cls = refdict.ref_cls\n assert issubclass(ref_cls, s_obj.Object)\n shadow_ref_ptr = None\n reflect_as_link = (\n ref_cls.get_reflection_method()\n is s_obj.ReflectionMethod.AS_LINK\n )\n\n if reflect_as_link:\n reflection_link = ref_cls.get_reflection_link()\n assert reflection_link is not None\n target_field = ref_cls.get_field(reflection_link)\n target_cls = target_field.type\n shadow_pn = f'{refdict.attr}__internal'\n shadow_ref_ptr = schema_cls.maybe_get_ptr(\n schema, sn.UnqualName(shadow_pn))\n\n if reflect_as_link and not shadow_ref_ptr:\n schema = _run_ddl(\n f'''\n ALTER TYPE {rschema_name} {{\n CREATE OPTIONAL MULTI LINK {shadow_pn}\n EXTENDING schema::reference\n -> {get_schema_name_for_pycls(ref_cls)} {{\n ON TARGET DELETE ALLOW;\n }};\n }}\n ''',\n schema=schema,\n delta=delta,\n )\n shadow_ref_ptr = schema_cls.getptr(\n schema, sn.UnqualName(shadow_pn))\n else:\n target_cls = ref_cls\n\n if ref_ptr is None:\n ptr_type = get_schema_name_for_pycls(target_cls)\n schema = _run_ddl(\n f'''\n ALTER TYPE {rschema_name} {{\n CREATE OPTIONAL MULTI LINK {refdict.attr}\n EXTENDING schema::reference\n -> {ptr_type} {{\n ON TARGET DELETE ALLOW;\n }};\n }}\n ''',\n schema=schema,\n delta=delta,\n )\n\n ref_ptr = schema_cls.getptr(\n schema, sn.UnqualName(refdict.attr))\n\n assert isinstance(ref_ptr, s_links.Link)\n\n if py_cls not in classlayout:\n classlayout[py_cls] = {}\n\n # First, fields declared to be reflected as link properties.\n props = _get_reflected_link_props(\n 
ref_ptr=ref_ptr,\n target_cls=ref_cls,\n schema=schema,\n )\n\n if reflect_as_link:\n # Then, because it's a passthrough reflection, all scalar\n # fields of the proxy object.\n fields_as_props = [\n f\n for f in ref_cls.get_ownfields().values()\n if (\n not f.ephemeral\n and (\n f.reflection_method\n is not s_obj.ReflectionMethod.AS_LINK\n )\n and f.name != refdict.backref_attr\n and f.name != ref_cls.get_reflection_link()\n )\n ]\n\n extra_props = _classify_scalar_object_fields(fields_as_props)\n\n for field, storage in {**props, **extra_props}.items():\n sfn = field.sname\n prop_ptr = ref_ptr.maybe_get_ptr(schema, sn.UnqualName(sfn))\n if prop_ptr is None:\n pty = storage.ptrtype\n schema = _run_ddl(\n f'''\n ALTER TYPE {rschema_name} {{\n ALTER LINK {refdict.attr} {{\n CREATE OPTIONAL PROPERTY {sfn} -> {pty};\n }}\n }}\n ''',\n schema=schema,\n delta=delta,\n )\n\n if shadow_ref_ptr is not None:\n assert isinstance(shadow_ref_ptr, s_links.Link)\n shadow_pn = shadow_ref_ptr.get_shortname(schema).name\n for field, storage in props.items():\n sfn = field.sname\n prop_ptr = shadow_ref_ptr.maybe_get_ptr(\n schema, sn.UnqualName(sfn))\n if prop_ptr is None:\n pty = storage.ptrtype\n schema = _run_ddl(\n f'''\n ALTER TYPE {rschema_name} {{\n ALTER LINK {shadow_pn} {{\n CREATE OPTIONAL PROPERTY {sfn}\n -> {pty};\n }}\n }}\n ''',\n schema=schema,\n delta=delta,\n )\n\n for py_cls in py_classes:\n rschema_name = get_schema_name_for_pycls(py_cls)\n schema_cls = schema.get(rschema_name, type=s_objtypes.ObjectType)\n\n is_concrete = not schema_cls.get_abstract(schema)\n read_shape = read_sets[py_cls]\n\n for refdict in py_cls.get_refdicts():\n if py_cls not in classlayout:\n classlayout[py_cls] = {}\n\n ref_ptr = schema_cls.getptr(\n schema, sn.UnqualName(refdict.attr), type=s_links.Link)\n tgt = ref_ptr.get_target(schema)\n assert tgt is not None\n cardinality = ref_ptr.get_cardinality(schema)\n assert cardinality is not None\n classlayout[py_cls][refdict.attr] = SchemaFieldDesc(\n fieldname=refdict.attr,\n schema_fieldname=refdict.attr,\n type=tgt,\n cardinality=cardinality,\n properties={},\n is_ordered=ref_ptr.issubclass(schema, ordered_link),\n reflection_proxy=None,\n is_refdict=True,\n )\n\n target_cls = refdict.ref_cls\n\n props = _get_reflected_link_props(\n ref_ptr=ref_ptr,\n target_cls=target_cls,\n schema=schema,\n )\n\n reflect_as_link = (\n target_cls.get_reflection_method()\n is s_obj.ReflectionMethod.AS_LINK\n )\n\n prop_layout = {}\n extra_prop_layout = {}\n\n for field, storage in props.items():\n prop_ptr = ref_ptr.getptr(schema, sn.UnqualName(field.sname))\n prop_tgt = prop_ptr.get_target(schema)\n assert prop_tgt is not None\n prop_layout[field.name] = (prop_tgt, storage.fieldtype)\n\n if reflect_as_link:\n # Then, because it's a passthrough reflection, all scalar\n # fields of the proxy object.\n fields_as_props = [\n f\n for f in target_cls.get_ownfields().values()\n if (\n not f.ephemeral\n and (\n f.reflection_method\n is not s_obj.ReflectionMethod.AS_LINK\n )\n and f.name != refdict.backref_attr\n and f.name != target_cls.get_reflection_link()\n )\n ]\n\n extra_props = _classify_scalar_object_fields(fields_as_props)\n\n for field, storage in extra_props.items():\n prop_ptr = ref_ptr.getptr(\n schema, sn.UnqualName(field.sname))\n prop_tgt = prop_ptr.get_target(schema)\n assert prop_tgt is not None\n extra_prop_layout[field.name] = (\n prop_tgt, storage.fieldtype)\n else:\n extra_prop_layout = {}\n\n classlayout[py_cls][refdict.attr].properties.update({\n **prop_layout, 
**extra_prop_layout,\n })\n\n if reflect_as_link:\n shadow_tgt = schema.get(\n get_schema_name_for_pycls(ref_cls),\n type=s_objtypes.ObjectType,\n )\n\n iname = f'{refdict.attr}__internal'\n classlayout[py_cls][iname] = (\n SchemaFieldDesc(\n fieldname=refdict.attr,\n schema_fieldname=iname,\n type=shadow_tgt,\n cardinality=qltypes.SchemaCardinality.Many,\n properties=prop_layout,\n is_refdict=True,\n )\n )\n\n if is_concrete:\n read_ptr = refdict.attr\n prop_shape_els = []\n\n if reflect_as_link:\n read_ptr = f'{read_ptr}__internal'\n ref_ptr = schema_cls.getptr(\n schema,\n sn.UnqualName(f'{refdict.attr}__internal'),\n )\n\n for field in props:\n sfn = field.sname\n prop_shape_els.append(f'@{sfn}')\n\n if prop_shape_els:\n prop_shape = ',\\n'.join(prop_shape_els)\n read_ptr = f'{read_ptr}: {{id, {prop_shape}}}'\n\n if ref_ptr.issubclass(schema, ordered_link):\n read_ptr = f'{read_ptr} ORDER BY @index'\n\n read_shape.append(read_ptr)\n\n local_parts = []\n global_parts = []\n for py_cls, shape_els in read_sets.items():\n if (\n not shape_els\n # The CollectionExprAlias family needs to be excluded\n # because TupleExprAlias and ArrayExprAlias inherit from\n # concrete classes and so are picked up from those.\n or issubclass(py_cls, s_types.CollectionExprAlias)\n ):\n continue\n\n rschema_name = get_schema_name_for_pycls(py_cls)\n shape = ',\\n'.join(shape_els)\n qry = f'''\n SELECT {rschema_name} {{\n {shape}\n }}\n '''\n if not issubclass(py_cls, (s_types.Collection, s_obj.GlobalObject)):\n qry += ' FILTER NOT .builtin'\n\n if issubclass(py_cls, s_obj.GlobalObject):\n global_parts.append(qry)\n else:\n local_parts.append(qry)\n\n delta.canonical = True\n return SchemaReflectionParts(\n intro_schema_delta=delta,\n class_layout=classlayout,\n local_intro_parts=local_parts,\n global_intro_parts=global_parts,\n )", "def pairs(self) -> Iterator[tuple[str, list[CommandParser]]]:\n for module, cmds in self._registry[\"by_module\"].items():\n yield (module, cmds)", "def test_func_generator_name():\n for i in range(0, 4):\n yield 'try_odd', i", "def test_graph_query_create_funcs(self):\n provider = QueryProvider(data_environment=\"SecurityGraph\", driver=self.provider)\n\n all_queries = [q for q in dir(provider.all_queries) if not q.startswith(\"__\")]\n alert_queries = [\n q for q in dir(provider.SecurityGraphAlert) if not q.startswith(\"__\")\n ]\n self.assertGreaterEqual(len(all_queries), 7)\n self.assertGreaterEqual(len(alert_queries), 7)\n\n # Test that function attributes have been created properly\n for _, func in provider.all_queries:\n self.assertIsInstance(func, partial)\n self.assertTrue(len(func.__doc__))\n self.assertIn(\"Parameters\", func.__doc__)", "def create_schemas():\n\n for name in get_schemas():\n create_schema(name)\n\n print(green('ALL SCHEMAS CREATED'))", "def _find_functions(whole):\n for res in function_start_regex.findall(whole):\n function_start, function_name, params = res\n params_split = [x.strip() for x in params.split(',')]\n stack, code, core_code = 1, function_start, ''\n start = whole.find(function_start) + len(code)\n while stack > 0:\n try:\n next_char = whole[start]\n except IndexError: # dont worry we will obfuscte the one we found\n return # sometimes fails to find all functions on big files\n core_code += next_char\n if next_char == '{':\n stack += 1\n elif next_char == '}':\n stack -= 1\n start += 1\n yield (params, params_split, core_code[:-1], function_start)", "def find_functions(module):\n for attrname in dir(module):\n attr = getattr(module, 
attrname)\n # iteratively get __module__ or __class__ (where __module__ fails for clas\n if callable(attr) and getattr(attr, '__module__', getattr(attr, '__class__', '')) == module.__name__:\n yield attr", "def split_funcs(disasm):\n\tstart_positions = [0]\n\tend_positions = []\n\tnames = []\n\tif not disasm.startswith(\"Disassembly\"):\n\t\tnames.append(\"main\")\n\tfor match in re.finditer(r\"Disassembly of (.+):\", disasm):\n\t\tend_positions.append(match.start())\n\t\tstart_positions.append(match.end())\n\t\tname = match.group(1)\n\t\tif name.startswith(\"<\"):\n\t\t\tnames.append(get_code_obj_name(name))\n\t\telse:\n\t\t\tnames.append(name)\n\tend_positions.append(len(disasm))\n\tif disasm.startswith(\"Disassembly\"):\n\t\tstart_positions.pop(0)\n\t\tend_positions.pop(0)\n\tfor start, end, name in zip(start_positions, end_positions, names):\n\t\tyield (name, disasm[start:end])", "def schemaIterator(self):\n if self.schema:\n for field in self.schema:\n field_id = field.getFieldName()\n label = field.getLabel()\n value = getattr(self.instance, field_id, NOT_ANSWERED_VALUE)\n if isinstance(value, list):\n value = ', '.join(value)\n\n yield label, value", "def get_schema(self) -> dict:", "def test_all():\n for descr, args in INPUT_PARAMS.iteritems():\n func = run_summarize_h5lmt\n func.description = 'bin/summarize_h5lmt.py ' + descr\n yield func, args" ]
[ "0.6727939", "0.62836844", "0.6221021", "0.59776676", "0.59344625", "0.588578", "0.5832448", "0.57567436", "0.5631567", "0.5585038", "0.5553903", "0.55262053", "0.5488672", "0.54839253", "0.5379389", "0.53702706", "0.5357351", "0.5348017", "0.5325605", "0.5268671", "0.526406", "0.5236537", "0.5218407", "0.52080095", "0.52044755", "0.51922005", "0.51610696", "0.5147119", "0.5136781", "0.5135779" ]
0.643095
1
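The row above pairs the docstring "Yields tuples of (schema_name, function_name)" with a generator that streams rows from a live database cursor. A minimal, self-contained sketch of the same yield-per-row pattern is given below; the in-memory catalog and its entries are invented for illustration and stand in for the cursor's result set, so none of the names here come from the original snippet.

```python
from typing import Iterator, Tuple

# Hypothetical stand-in for the rows a database cursor would return.
_FUNCTION_CATALOG = [
    ("public", "array_agg"),
    ("public", "row_to_json"),
    ("analytics", "rolling_mean"),
]

def functions() -> Iterator[Tuple[str, str]]:
    """Yield (schema_name, function_name) tuples, one per catalog row."""
    for schema_name, function_name in _FUNCTION_CATALOG:
        yield schema_name, function_name

if __name__ == "__main__":
    for schema_name, function_name in functions():
        print(f"{schema_name}.{function_name}")
```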
Converts an item without children, using the offset of the match from the start of the HTML as the task ID
def _convert_item(self, match: re.Match) -> str: state, caption = match.groups() return render_item(caption, state != " ", match.start())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mongo_item_to_task(item):\n return Task(\n id=str(item[\"_id\"]),\n task=item[\"task\"],\n args=item[\"args\"],\n kwargs=item[\"kwargs\"],\n wait=item[\"wait\"],\n recurring=item[\"recurring\"],\n when=item[\"when\"],\n )", "def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore\n field = get_as_subtext_field\n task_id = tw_side.get_task_id(task)\n\n actions = [\n FuncAction(\n \"Complete task\",\n lambda args_list=[\"done\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Delete task\",\n lambda args_list=[\"delete\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Start task\",\n lambda args_list=[\"start\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Stop task\",\n lambda args_list=[\"stop\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Edit task interactively\",\n lambda args_list=[\"edit\", task_id]: run_tw_action(args_list, need_pty=True),\n ),\n FuncAction(\n \"Fail task\",\n lambda task_id=task_id: fail_task(task_id=task_id),\n ),\n ClipAction(\"Copy task UUID\", f\"{task_id}\"),\n ]\n\n found_urls = url_re.findall(task[\"description\"])\n if \"annotations\" in task.keys():\n found_urls.extend(url_re.findall(\" \".join(task[\"annotations\"])))\n\n for url in found_urls[-1::-1]:\n actions.insert(0, UrlAction(f\"Open {url}\", url))\n\n if reminders_tag_path.is_file():\n global reminders_tag\n reminders_tag = load_data(reminders_tag_path)\n else:\n save_data(\"remindme\", str(reminders_tag_path))\n\n actions.append(\n FuncAction(\n f\"Add to Reminders (+{reminders_tag})\",\n lambda args_list=[\n \"modify\",\n task_id,\n f\"+{reminders_tag}\",\n ]: run_tw_action(args_list),\n )\n )\n\n actions.append(\n FuncAction(\n \"Work on next (+next)\",\n lambda args_list=[\n \"modify\",\n task_id,\n \"+next\",\n ]: run_tw_action(args_list),\n )\n )\n\n urgency_str, icon = urgency_to_visuals(task.get(\"urgency\"))\n text = task[\"description\"]\n due = None\n if \"due\" in task:\n due = task[\"due\"].astimezone(dateutil.tz.tzlocal()).strftime(\"%Y-%m-%d %H:%M:%S\") # type: ignore\n\n return get_as_item(\n text=text,\n subtext=\"{}{}{}{}{}\".format(\n field(urgency_str),\n \"ID: {}... 
| \".format(tw_side.get_task_id(task)[:8]),\n field(task[\"status\"]),\n field(task.get(\"tags\"), \"tags\"),\n field(due, \"due\"),\n )[:-2],\n icon=[str(icon)],\n completion=f'{curr_trigger}{task[\"description\"]}',\n actions=actions,\n urgency=task.get(\"urgency\"),\n )", "def _clean_seq_titles(self, element):\r\n return self.REMOVE_SPAN_TAG_RE.sub('', element.get_attribute('innerHTML')).strip().split('\\n')[0]", "def _parse_start(self, item):\n parts = item.css(\"::text\").get().split()\n if len(parts) == 0:\n return\n try:\n return parse(parts[-1])\n except ParserError:\n return", "def _e_to_id(self, e):\n return (e.attrib['href']\n [(e.attrib['href']\n .rfind('/id')+3):]\n .replace('?mt=2', ''))", "def _convert_item_with_children(self, match: re.Match) -> str:\n state, caption = match.groups()\n return render_item(caption, state != \" \", match.start()) + \"<ul><li>\"", "def test_custom_ids(self):\n it = [\n \"[[Chapter]]{'id': '/base/chapter/1'} Chapter I\",\n \"This is chapter I text\",\n \"[[Article]]{'id': '/base/article/1'} Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Section', 'Sub-section', 'Article'],\n 'patterns': ['Chapter', 'Section', 'Sub-section', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n result = [n for n in doc.graph.nodes(data=True)]\n\n expected = [\n ('ROOT [0]', {'meta': 'root', 'level': 0, 'text': [], 'pad': False, 'id': '/root'}),\n ('Chapter [1]', {'meta': 'Chapter', 'level': 1, 'pad': False, 'text': [\"Chapter I\", 'This is chapter I text'], 'id': '/base/chapter/1'}),\n ('Article [2]', {'meta': 'Article', 'level': 4, 'pad': False, 'text': [\"Article I\", 'This is article I text'], 'id': '/base/article/1'})\n ]\n\n self.assertListEqual(result, expected)", "def _mock_single_task_response(self):\n response = self.adapter._es.tasks.list()\n parsed = self.adapter._parse_task_result(response, _return_one=False)\n task_id = list(parsed)[0] # get the first task_id\n # prune the response of all tasks but one\n for node_name, info in response[\"nodes\"].items():\n for t_id in list(info[\"tasks\"]):\n if t_id != task_id:\n info[\"tasks\"].pop(t_id)\n with patch.object(self.adapter._es.tasks, \"list\", return_value=response) as patched:\n yield task_id, patched", "def _extract_id(self, dirty_id):\n if dirty_id[:1] == \"/\":\n return dirty_id.split(\"/\")[-1]\n else:\n return dirty_id", "def _parse_next_start(self, item):\n return parse(\" \".join(item.split(\"–\")[:-1]))", "def parseTodoEntry(entry, wikiDocument=None):\r\n return None", "def extract_item_id(url):\n m = re.search('/([0-9]+)\\.htm', url)\n if m is not None:\n return m.group(1)\n else:\n return None", "def transform(self, item):\n return self.transformer(item)", "def pre_process_string_data(item: dict):\r\n try:\r\n result_item = {key: item[key] for key in KEYS + ['_id']}\r\n for prop in result_item:\r\n if type(result_item[prop]) is str and prop != '_id':\r\n result_item[prop] = re.sub(' +', ' ', item[prop])\r\n result_item[prop] = re.sub('\\n', ' ', item[prop])\r\n result_item[prop] = item[prop].strip().strip('\"').strip(\"'\").lower().strip()\r\n return result_item\r\n except KeyError:\r\n logging.warning(\"Wrong formed entity with id %s\", item['_id'])\r\n return None", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n 
nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids", "def _make_task_id(self, task):\n index = self._tasks.add(task)\n task_id = '{name}-{idx}'.format(name=task.name, idx=index)\n\n return task_id", "def put_pid(html):\n pid = 1\n while \"<p>\" in html:\n pttn = \"<p id=\\\"p\"+str(pid)+\"\\\">\"\n html = html.replace(\"<p>\", pttn, 1)\n pid += 1\n return html", "def item_to_label(cls, item: T) -> LabelItem:\n return LabelItem(id=str(item.id), label=str(item))", "def subtask_prune(tree):\n\n for st in tree.subtrees():\n if \"start\" in st.label():\n new_label = st.label().split(\"start\")[0] + \"start\"\n st.set_label(new_label)\n if \"end\" in st.label():\n new_label = st.label().split(\"end\")[0] + \"end\"\n st.set_label(new_label)\n\n return tree", "def _scrape_agenda_item(self, agenda_item_location):\n pass", "def _get_id(self, item, prefix, item_list):\r\n try:\r\n index = item_list.index(item)\r\n except ValueError:\r\n index = len(item_list)\r\n item_list.append(item)\r\n\r\n return self._id_for_index(prefix, index)", "def parse_task(k):\r\n return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))]", "def get_short_task_id(task_id: str) -> str:\n return task_id.split(MESOS_TASK_SPACER)[-1]", "def _get_subitem_id(item_id, infobox_data):\n\n # Checking if item has no subitems\n if infobox_data.get('id', -1) == item_id:\n return -1\n\n # Trying to get the id for the right sub item\n for i in range(1000):\n if int(infobox_data.get('id%s' % i, -1)) == item_id:\n return i\n\n return None", "def templatize(self):\n self.sanitize_ids()\n del self.steps[1:]\n self.current_step = None", "def create_dummy_content(user_id):\n task = TodoItem(\n user=user_id,\n task=u'Find a shrubbery',\n tags=[u'quest', u'ni', u'knight'],\n due_date=datetime.utcnow() + timedelta(days=60),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Search for the holy grail',\n tags=[u'quest'],\n due_date=datetime.utcnow() - timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Recruit Knights of the Round Table',\n tags=[u'quest', u'knight', u'discuss'],\n due_date=datetime.utcnow() + timedelta(minutes=45),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Build a Trojan Rabbit',\n tags=[u'quest', u'rabbit'],\n due_date=datetime.utcnow() + timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Talk to Tim the Enchanter',\n tags=[u'quest', u'discuss'],\n due_date=datetime.utcnow() + timedelta(days=90),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Defeat the Rabbit of Caerbannog',\n tags=[u'quest', u'rabbit'],\n due_date=None,\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Cross the Bridge of Death',\n tags=[u'quest'],\n due_date=None,\n )\n DBSession.add(task)", "def dt_convert(item):\n\t\n\tsplitup = str(item).split('-')\n\t\n\tif len(splitup)==2: return splitup[-1]\n\t\n\telse: return str(item).split(' ')[0]", "def _getText(self, item): # TODO: move this method to more suitable place when possible (scripting base class)\r\n if not isinstance(item, basestring):\r\n return item\r\n\r\n translation = self.phone.getTranslation(item)\r\n if translation != None:\r\n return translation\r\n\r\n return item", "def process_item(self, item, spider):\n if item['name'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: %s\" % item)\n else:\n self.ids_seen.add(item['name'])\n 
return item #return the item" ]
[ "0.50826526", "0.50334203", "0.49368155", "0.49145508", "0.48586226", "0.48134792", "0.48118064", "0.48103452", "0.47756848", "0.47478804", "0.47209668", "0.46880686", "0.46830603", "0.46760982", "0.46448636", "0.46284938", "0.45968413", "0.45758584", "0.45556232", "0.45417786", "0.45355994", "0.45334983", "0.45321676", "0.4532065", "0.45156673", "0.44961348", "0.4483385", "0.4477883", "0.44753432", "0.44578585" ]
0.54481596
0
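The row above converts a single checklist match into an HTML list item, using the match's character offset in the source HTML as a stable task ID. A standalone sketch of that idea follows; `render_item` is a hypothetical helper written only for this example, since the original helper is not part of the row.

```python
import re

def render_item(caption: str, checked: bool, task_id: int) -> str:
    # Hypothetical renderer: wraps the caption in an <li> whose id is the
    # match offset and whose checkbox mirrors the "[x]" / "[ ]" marker.
    box = " checked" if checked else ""
    return f'<li id="task-{task_id}"><input type="checkbox"{box}>{caption}</li>'

_ITEM_RE = re.compile(r"\[( |x)\] (.+)")

def convert_item(match: re.Match) -> str:
    state, caption = match.groups()
    return render_item(caption, state != " ", match.start())

# Usage: each line's offset in the input string becomes its task ID.
print(_ITEM_RE.sub(convert_item, "[x] write tests\n[ ] ship release"))
```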
Converts an item with children, using the offset of the match from the start of the HTML as the task ID
def _convert_item_with_children(self, match: re.Match) -> str: state, caption = match.groups() return render_item(caption, state != " ", match.start()) + "<ul><li>"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids", "def test_custom_ids(self):\n it = [\n \"[[Chapter]]{'id': '/base/chapter/1'} Chapter I\",\n \"This is chapter I text\",\n \"[[Article]]{'id': '/base/article/1'} Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Section', 'Sub-section', 'Article'],\n 'patterns': ['Chapter', 'Section', 'Sub-section', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n result = [n for n in doc.graph.nodes(data=True)]\n\n expected = [\n ('ROOT [0]', {'meta': 'root', 'level': 0, 'text': [], 'pad': False, 'id': '/root'}),\n ('Chapter [1]', {'meta': 'Chapter', 'level': 1, 'pad': False, 'text': [\"Chapter I\", 'This is chapter I text'], 'id': '/base/chapter/1'}),\n ('Article [2]', {'meta': 'Article', 'level': 4, 'pad': False, 'text': [\"Article I\", 'This is article I text'], 'id': '/base/article/1'})\n ]\n\n self.assertListEqual(result, expected)", "def parse_task(k):\r\n return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))]", "def locateChild(ctx, segments):", "def map_to_parent_eid(self, eid):\n ...", "def _convert_item(self, match: re.Match) -> str:\n state, caption = match.groups()\n return render_item(caption, state != \" \", match.start())", "def fix_ids(self, tree: nodes.document) -> None:\n def update_node_id(node: Element) -> None:\n \"\"\"Update IDs of given *node*.\"\"\"\n new_ids: list[str] = []\n for node_id in node['ids']:\n new_id = self.fix_fragment('', node_id)\n if new_id not in new_ids:\n new_ids.append(new_id)\n node['ids'] = new_ids\n\n for reference in tree.findall(nodes.reference):\n if 'refuri' in reference:\n m = self.refuri_re.match(reference['refuri'])\n if m:\n reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))\n if 'refid' in reference:\n reference['refid'] = self.fix_fragment('', reference['refid'])\n\n for target in tree.findall(nodes.target):\n update_node_id(target)\n\n next_node: Node = target.next_node(ascend=True)\n if isinstance(next_node, nodes.Element):\n update_node_id(next_node)\n\n for desc_signature in tree.findall(addnodes.desc_signature):\n update_node_id(desc_signature)", "def transform(self, item):\n return self.transformer(item)", "def __getitem__(self, item):\n return self.children[item]", "def processTask(self, task, parent=False):\n\n tasklist = []\n\n #turn the task into a tuple\n processedTask = [task.__class__.__name__, parent, task.msg]\n\n #add that task to the list\n tasklist.append(processedTask)\n\n #add all children if the task is a container\n if isinstance(task,TaskContainer):\n for subtask in task.subtasks:\n tasklist += self.processTask(subtask.task, task.id)\n\n return tasklist", "def _traverse_1_0_0(item):\n if 'child_nodes' in item.keys():\n for child_node in item['child_nodes']:\n _traverse_1_0_0(child_node)\n item['content'] = item['child_nodes']\n del item['child_nodes']", "def __getitem__(self, item):\n if self.child_keys is None:\n self.child_keys = sorted(self.children.keys(), key=str.lower)\n return self.children[self.child_keys[item]]", "def _mongo_item_to_task(item):\n return Task(\n id=str(item[\"_id\"]),\n task=item[\"task\"],\n args=item[\"args\"],\n kwargs=item[\"kwargs\"],\n wait=item[\"wait\"],\n recurring=item[\"recurring\"],\n when=item[\"when\"],\n )", "def run(self, tree):\r\n self.stashed_nodes = 
{}\r\n\r\n stack = [tree]\r\n\r\n while stack:\r\n currElement = stack.pop()\r\n insertQueue = []\r\n for child in currElement.getchildren():\r\n if child.text and not isinstance(child.text, util.AtomicString):\r\n text = child.text\r\n child.text = None\r\n lst = self.__processPlaceholders(self.__handleInline(\r\n text), child)\r\n stack += lst\r\n insertQueue.append((child, lst))\r\n if child.tail:\r\n tail = self.__handleInline(child.tail)\r\n dumby = util.etree.Element('d')\r\n tailResult = self.__processPlaceholders(tail, dumby)\r\n if dumby.text:\r\n child.tail = dumby.text\r\n else:\r\n child.tail = None\r\n pos = currElement.getchildren().index(child) + 1\r\n tailResult.reverse()\r\n for newChild in tailResult:\r\n currElement.insert(pos, newChild)\r\n if child.getchildren():\r\n stack.append(child)\r\n\r\n if self.markdown.enable_attributes:\r\n for element, lst in insertQueue:\r\n if element.text:\r\n element.text = \\\r\n inlinepatterns.handleAttributes(element.text, \r\n element)\r\n i = 0\r\n for newChild in lst:\r\n # Processing attributes\r\n if newChild.tail:\r\n newChild.tail = \\\r\n inlinepatterns.handleAttributes(newChild.tail,\r\n element)\r\n if newChild.text:\r\n newChild.text = \\\r\n inlinepatterns.handleAttributes(newChild.text,\r\n newChild)\r\n element.insert(i, newChild)\r\n i += 1\r\n return tree", "def visit_children(node):\n output_fragments = []\n for child in node.children_not_empty:\n fragment = child.value\n text = fragment.text_fragment\n output_fragments.append({\n \"id\": text.identifier,\n \"language\": text.language,\n \"lines\": text.lines,\n \"begin\": gf.time_to_ssmmm(fragment.begin),\n \"end\": gf.time_to_ssmmm(fragment.end),\n \"children\": visit_children(child)\n })\n return output_fragments", "def parse_items(self):", "def get_child_ids(cur, node):\n sql = \"\"\"\n SELECT\n id\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['id'])", "def visit_children(node, parent_elem):\n for child in node.children_not_empty:\n fragment = child.value\n fragment_elem = etree.SubElement(parent_elem, \"fragment\")\n fragment_elem.attrib[\"id\"] = fragment.text_fragment.identifier\n fragment_elem.attrib[\"begin\"] = gf.time_to_ssmmm(fragment.begin)\n fragment_elem.attrib[\"end\"] = gf.time_to_ssmmm(fragment.end)\n for line in fragment.text_fragment.lines:\n line_elem = etree.SubElement(fragment_elem, \"line\")\n line_elem.text = line\n children_elem = etree.SubElement(fragment_elem, \"children\")\n visit_children(child, children_elem)", "def _make_item(self, parent_item: FolderTreeItem, folder: dict, level=0) -> FolderTreeItem:\n for sub_folder in folder['folders']:\n sub_folder_item = FolderTreeItem(Folder(sub_folder), parent_item)\n item = self._make_item(sub_folder_item, sub_folder, level + 1)\n parent_item.append_child(item)\n for query in folder['queries']:\n item = QueryTreeItem(Query(query), parent_item)\n parent_item.append_child(item)\n return parent_item", "def parse(self, response):\n item = NewsScraperItem()\n containers = response.xpath(\"//div[contains(@class,'largeTitle')]/article[contains(@class,\"\n \"'articleItem')]/div[contains(@class,'textDiv')]\")\n for info in containers:\n\n try:\n date = info.xpath(\".//div[contains(@class,'articleDetails')]/span[contains(@class,'date')]/text()\").extract_first()\n date = re.sub(r'\\xa0-\\xa0', '', date)\n # Convert 'minutes ago' to datetime\n date = datetime.now() - timedelta(minutes=int(re.sub(r'[^0-9]', '', 
date))) # Regex = Where not numeric\n item['date'] = date.strftime(\"%Y/%m/%d %H:%M:%S\")\n earn_id = re.search(r'[0-9]{4,}', info.xpath(\".//a/@onclick\").extract_first())\n item['id'] = earn_id.group()\n item['title'] = info.xpath(\".//a/text()\").extract_first()\n item['author'] = info.xpath(\".//div[contains(@class,'articleDetails')]/span/text()\").extract_first()\n item['text'] = info.xpath(\".//p/text()\").extract_first()\n item['link'] = info.xpath(\".//a/@href\").extract_first()\n yield item\n\n except:\n print(\"Unusual format detected\")\n logging.warning(\"Item skipped due to unusual format\")", "def _apply_template(self, original_item):\n\t\t# TODO: Performance optimization. Don't recursively call _apply_template on hosts we have already\n\t\t# applied templates to. This needs more work.\n\t\tif not original_item.has_key('use'):\n\t\t\treturn original_item\n\t\tobject_type = original_item['meta']['object_type']\n\t\t# Performance tweak, if item has been parsed. Lets not do it again\n\t\tif original_item.has_key('name') and self.item_apply_cache[object_type].has_key( original_item['name'] ):\n\t\t\treturn self.item_apply_cache[object_type][ original_item['name'] ]\n\t\t# End of performance tweak\n\t\tparent_names = original_item['use'].split(',')\n\t\tparent_items = []\n\t\tfor parent_name in parent_names:\n\t\t\tparent_item = self._get_item( parent_name, object_type )\n\t\t\tif parent_item == None: \n\t\t\t\terror_string = \"error in %s\\n\" % (original_item['meta']['filename'])\n\t\t\t\terror_string = error_string + \"Can not find any %s named %s\\n\" % (object_type,parent_name)\n\t\t\t\terror_string = error_string + self.print_conf(original_item)\n\t\t\t\tself.errors.append( ParserError(error_string,item=original_item) )\n\t\t\t\tcontinue\n\t\t\t# Parent item probably has use flags on its own. 
So lets apply to parent first\n\t\t\tparent_item = self._apply_template( parent_item )\n\t\t\tparent_items.append( parent_item )\n\t\tfor parent_item in parent_items:\n\t\t\tfor k,v in parent_item.iteritems():\n\t\t\t\tif k == 'use':\n\t\t\t\t\tcontinue\n\t\t\t\tif k == 'register':\n\t\t\t\t\tcontinue\n\t\t\t\tif k == 'meta':\n\t\t\t\t\tcontinue\n\t\t\t\tif k == 'name':\n\t\t\t\t\tcontinue\n\t\t\t\tif not original_item['meta']['inherited_attributes'].has_key(k):\n\t\t\t\t\toriginal_item['meta']['inherited_attributes'][k] = v\n\t\t\t\tif not original_item.has_key(k):\n\t\t\t\t\toriginal_item[k] = v\n\t\t\t\t\toriginal_item['meta']['template_fields'].append(k)\n\t\tif original_item.has_key('name'):\n\t\t\tself.item_apply_cache[object_type][ original_item['name'] ] = original_item\n\t\treturn original_item", "def visit_tstem(self, node, children):\n items = {k: v for d in children for k, v in d.items()}\n items['anchor'] = items.get('anchor', 0)\n tstem = {node.rule_name: items}\n return tstem", "def subtask_prune(tree):\n\n for st in tree.subtrees():\n if \"start\" in st.label():\n new_label = st.label().split(\"start\")[0] + \"start\"\n st.set_label(new_label)\n if \"end\" in st.label():\n new_label = st.label().split(\"end\")[0] + \"end\"\n st.set_label(new_label)\n\n return tree", "def pytest_itemcollected(item):\n item.name = item.name.split('[', 1)[1][:-1]\n # pylint: disable=protected-access\n item._nodeid = item.name", "def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore\n field = get_as_subtext_field\n task_id = tw_side.get_task_id(task)\n\n actions = [\n FuncAction(\n \"Complete task\",\n lambda args_list=[\"done\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Delete task\",\n lambda args_list=[\"delete\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Start task\",\n lambda args_list=[\"start\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Stop task\",\n lambda args_list=[\"stop\", task_id]: run_tw_action(args_list),\n ),\n FuncAction(\n \"Edit task interactively\",\n lambda args_list=[\"edit\", task_id]: run_tw_action(args_list, need_pty=True),\n ),\n FuncAction(\n \"Fail task\",\n lambda task_id=task_id: fail_task(task_id=task_id),\n ),\n ClipAction(\"Copy task UUID\", f\"{task_id}\"),\n ]\n\n found_urls = url_re.findall(task[\"description\"])\n if \"annotations\" in task.keys():\n found_urls.extend(url_re.findall(\" \".join(task[\"annotations\"])))\n\n for url in found_urls[-1::-1]:\n actions.insert(0, UrlAction(f\"Open {url}\", url))\n\n if reminders_tag_path.is_file():\n global reminders_tag\n reminders_tag = load_data(reminders_tag_path)\n else:\n save_data(\"remindme\", str(reminders_tag_path))\n\n actions.append(\n FuncAction(\n f\"Add to Reminders (+{reminders_tag})\",\n lambda args_list=[\n \"modify\",\n task_id,\n f\"+{reminders_tag}\",\n ]: run_tw_action(args_list),\n )\n )\n\n actions.append(\n FuncAction(\n \"Work on next (+next)\",\n lambda args_list=[\n \"modify\",\n task_id,\n \"+next\",\n ]: run_tw_action(args_list),\n )\n )\n\n urgency_str, icon = urgency_to_visuals(task.get(\"urgency\"))\n text = task[\"description\"]\n due = None\n if \"due\" in task:\n due = task[\"due\"].astimezone(dateutil.tz.tzlocal()).strftime(\"%Y-%m-%d %H:%M:%S\") # type: ignore\n\n return get_as_item(\n text=text,\n subtext=\"{}{}{}{}{}\".format(\n field(urgency_str),\n \"ID: {}... 
| \".format(tw_side.get_task_id(task)[:8]),\n field(task[\"status\"]),\n field(task.get(\"tags\"), \"tags\"),\n field(due, \"due\"),\n )[:-2],\n icon=[str(icon)],\n completion=f'{curr_trigger}{task[\"description\"]}',\n actions=actions,\n urgency=task.get(\"urgency\"),\n )", "def test_team_template_folders_id_children_get(self):\n pass", "def get_children(self, item, level):\n return item.children", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def map_tree(a_task: Task, tree: Any) -> Any:\n\n def map_node(node):\n if isinstance(node, list):\n return [map_node(child) for child in node]\n else:\n return a_task(node)\n\n return map_node(tree)", "def put_pid(html):\n pid = 1\n while \"<p>\" in html:\n pttn = \"<p id=\\\"p\"+str(pid)+\"\\\">\"\n html = html.replace(\"<p>\", pttn, 1)\n pid += 1\n return html" ]
[ "0.532541", "0.5295402", "0.5228904", "0.5006564", "0.4999942", "0.4974197", "0.48438826", "0.48322746", "0.4828575", "0.47614872", "0.47370318", "0.4712531", "0.47079116", "0.4680026", "0.46800196", "0.46510932", "0.46390906", "0.4634886", "0.46297765", "0.4618333", "0.46118987", "0.45845786", "0.45754266", "0.45712942", "0.45658213", "0.45496053", "0.45387357", "0.45303717", "0.45210373", "0.45065817" ]
0.58649266
0
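The row above differs from the previous one only in that it appends an opening `<ul><li>` after the rendered item, so the next match is emitted as a nested child. A compact sketch of that variant (again with a hypothetical `render_item`, left unclosed on purpose so children can nest inside it):

```python
import re

def render_item(caption: str, checked: bool, task_id: int) -> str:
    # Hypothetical renderer, deliberately not closing the <li> tag.
    box = " checked" if checked else ""
    return f'<li id="task-{task_id}"><input type="checkbox"{box}>{caption}'

def convert_item_with_children(match: re.Match) -> str:
    state, caption = match.groups()
    # Identical to the childless case, plus an opening <ul><li> for the child.
    return render_item(caption, state != " ", match.start()) + "<ul><li>"

m = re.search(r"\[( |x)\] (.+)", "[ ] parent task")
print(convert_item_with_children(m))
```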
ReLU function; takes input h and returns the result of ReLU(h)
def relu(h): return np.maximum(h, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relu_prime(h):\n return_value = h\n return_value[return_value <= 0] = 0\n return_value[return_value > 0 ] = 1\n return return_value", "def ReLU(self, x):\n self.x = x\n output = np.maximum(0, x)\n return output", "def _rnn_relu_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):\n if b_ih is None:\n igates = P.MatMul(False, True)(inputs, w_ih)\n hgates = P.MatMul(False, True)(hidden, w_hh)\n else:\n igates = P.MatMul(False, True)(inputs, w_ih) + b_ih\n hgates = P.MatMul(False, True)(hidden, w_hh) + b_hh\n return P.ReLU()(igates + hgates)", "def gradient_hidden(self, h):\n if self.relu:\n return 1.0*(h > 0)\n else:\n return 1 - h * h", "def __call__(self, x):\n h = F.relu(self.l0(x))\n h = F.relu(self.l1(h))\n return self.l2(h)", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = np.maximum(0, x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = np.maximum(x,0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = x.copy()\n out[x<=0] = 0\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def grad_ReLU(self):\n y = self.x\n y[y<=0] = 0\n y[y>0] = 1\n return y\n raise NotImplementedError(\"ReLU gradient not implemented\")", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = x * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def ReLU(z):\n return np.max([z, np.zeros(z.shape)], axis=0)", "def relu_backward(self, dUpper, cache):\n x = cache\n #############################################################################\n # TODO: Implement the ReLU backward pass. #\n #############################################################################\n x = np.array(x , copy=True)\n x[x <= 0] = 0\n x[x > 0] = 1\n drelu = dUpper * x\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return drelu", "def relu_forward(x):\n ############################################################################\n # TODO: Implement the ReLU forward pass. 
#\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n out = x\n out[out<0] = 0\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out", "def activation_ReLU(self):\n self.value = max(0, self.value)", "def ReLU(self, x):\r\n self.x = x\r\n return np.maximum(x, 0)", "def activation(h):\n\n if(h > 0):\n return 1\n\n else:\n return 0", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n #out = np.zeros(x.shape)\n #np.clip(x, 0, None, out)\n out = np.empty_like(x) #faster than zeros\n np.clip(x, 0, None, out)\n #out = x\n #out [out < 0] = 0\n #print(x)\n #print(out)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def relu(t: Tensor) -> Tensor:\n tensor = t if torch.is_tensor(t) else torch.tensor(t)\n return torch.max(tensor, torch.zeros_like(tensor))", "def relu(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=0.)", "def get_output(self, X):\n return ReLU(X)", "def compute_hidden(self, a_h):\n if self.relu:\n return a_h*(a_h > 0)\n else:\n return np.tanh(a_h)", "def forward(self, state):\n\n x = state # Careful: deepcopy bug?\n # Intermediate Layers\n for layer in self.layers[:-1]:\n\n x = nn.ReLU()(layer(x))\n\n x = nn.Tanh()(self.layers[-1](x))\n return x", "def relu_forward(self, x):\n #out = None\n #############################################################################\n # TODO: Implement the ReLU forward pass. #\n #############################################################################\n out = np.array(x, copy=True)\n out[out <= 0] = 0\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = x\n return out, cache", "def lrelu(self):\n return self.add_layer(lrelu)", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n out = np.where(x<=0, 0, x)\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(inputs):\n h1 = ReLU(np.dot(w1, inputs) + b1)\n h2 = np.dot(w2, h1) + b2\n return (h1, h2), softmax(h2)", "def lrelu(x):\r\n return max(0.1*x, x)", "def relu_forward(x):\r\n cache = x\r\n out = np.maximum(0, x)\r\n return out, cache", "def relu_forward(x):\n out = None\n out = np.maximum(0.0, x)\n cache = x\n return out, cache", "def relu(self):\n return self * self.ge(0)" ]
[ "0.6746711", "0.664049", "0.6611213", "0.6571714", "0.65068805", "0.64704126", "0.6451632", "0.6394144", "0.634299", "0.6341994", "0.63404304", "0.6331816", "0.63282824", "0.6320288", "0.63083297", "0.6306409", "0.6283333", "0.6253684", "0.62309307", "0.620276", "0.6181885", "0.61782056", "0.6117417", "0.6117257", "0.6109755", "0.6047431", "0.60161746", "0.60032123", "0.6001906", "0.59560496" ]
0.77946955
0
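For reference, the row ending here carries a relu_forward-style document (the "out = x; out[out<0] = 0" cell above) together with a list of ReLU-related negatives. The standalone sketch below is not part of the dataset; it is a minimal runnable illustration of the same forward/backward ReLU pattern, with function names and the test array chosen as illustrative assumptions. It uses np.maximum, which avoids mutating the input array in place the way "out = x; out[out < 0] = 0" does.

import numpy as np

def relu_forward(x):
    # Clamp negatives to zero without modifying the input array in place.
    out = np.maximum(x, 0)
    cache = x  # keep the original input for the backward pass
    return out, cache

def relu_backward(dout, cache):
    # Gradient passes through only where the original input was strictly positive.
    x = cache
    return dout * (x > 0)

# Usage example with a small hand-made batch.
x = np.array([[-1.5, 0.0, 2.0], [3.0, -0.5, 1.0]])
out, cache = relu_forward(x)
dx = relu_backward(np.ones_like(x), cache)
print(out)  # zeros where x was negative or zero, x unchanged elsewhere
print(dx)   # ones where x was strictly positive, zeros elsewhere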
Initialises weights depending on layer sizes and whether Xavier Initialisation is needed
def init_weights(n_input_layer, n_hidden_layer, n_hidden_layer_2, n_output_layer, xavier_init): W1, W2, W3 = None, None, None if xavier_init: # Checks if Xavier initialisation is wanted # Initialises weights depending on number of layers present using: # Normally distributed random number * square_root(1 / number of input neurons to that layer) if n_hidden_layer > 0: W1 = np.random.randn(n_hidden_layer, n_input_layer) * np.sqrt(1 / (n_input_layer)) if n_hidden_layer_2 > 0: W2 = np.random.randn(n_hidden_layer_2, n_hidden_layer) * np.sqrt(1 / (n_hidden_layer)) W3 = np.random.randn(n_output_layer, n_hidden_layer_2) * np.sqrt(1 / (n_hidden_layer_2)) else: W2 = np.random.randn(n_output_layer, n_hidden_layer) * np.sqrt(1 / (n_hidden_layer)) else: W1 = np.random.randn(n_output_layer, n_input_layer) * np.sqrt(1 / (n_input_layer)) else: # Weights are randomly picked from a uniform distribution between 0 and 1 # They are normalized by making sure the weights sum to 1 # Uses different configurations depending on number of layers required if n_hidden_layer > 0: W1 = np.random.uniform(0,1,(n_hidden_layer, n_input_layer)) W1 = np.divide(W1,np.matlib.repmat(np.sum(W1,1)[:,None],1,n_input_layer)) if n_hidden_layer_2 > 0: W2=np.random.uniform(0,1,(n_hidden_layer_2,n_hidden_layer)) W2=np.divide(W2,np.matlib.repmat(np.sum(W2,1)[:,None],1,n_hidden_layer)) W3=np.random.uniform(0,1,(n_output_layer,n_hidden_layer_2)) W3=np.divide(W3,np.matlib.repmat(np.sum(W3,1)[:,None],1,n_hidden_layer_2)) else: W2 = np.random.uniform(0,1,(n_output_layer, n_hidden_layer)) W2 = np.divide(W2,np.matlib.repmat(np.sum(W2,1)[:,None],1,n_hidden_layer)) else: W1 = np.random.randn(n_output_layer, n_input_layer) * np.sqrt(1 / (n_input_layer)) return W1, W2, W3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_weights_xavier(self):\n\t\tself.weights = [np.random.uniform(-1/sqrt(size1), 1/sqrt(size1)) for size1, size2 in zip(self.sizes[:-1], self.sizes[1:])]\n\t\tself.biases = [np.zeros([size, ]) for size in self.sizes[1:]]", "def init_weights_(self):\n raise NotImplementedError", "def _initialize_weights(self):\n pass", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not None:\n zeros_(m.bias)", "def init_weights(model):\n ...", "def init_weights(self, num_features):\n for each_label in self.valid_labels:\n self.weights[each_label] = np.zeros(num_features)", "def init_weights(self, load_weights=None):\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def init_weight(self):\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # Tie weights if needed\n self.tie_weights()", "def _init_weights(layer):\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except AttributeError:\n pass", "def init_weights(self, leveledinit: bool, kernel_size: int, bias: bool) -> None:\n if leveledinit:\n nn.init.normal_(self.conv1d.weight, std=1e-3)\n nn.init.normal_(self.conv1d.bias, std=1e-6)\n with torch.no_grad():\n self.conv1d.weight[:, 0, :] += 1.0 / kernel_size\n else:\n nn.init.xavier_uniform_(self.conv1d.weight)\n\n if self.embed in (\"pre\", \"post\"):\n nn.init.xavier_uniform_(self.embedding.weight)", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def __init__(self, sizes, afunc): \n\t\tself.num_layers = len(sizes)\n\t\tself.sizes = sizes\n\t\tself.afunc = 
afunc;\n\t\tself.initialize_weights_uniform()\n\t\t#self.initialize_weights_gaussian(0.1)\n\t\t#self.initialize_weights_xavier()", "def init_weights(self):\n if self.init_cfg:\n super().init_weights()\n else:\n # Use smaller std for better stability and performance. We\n # use 0.1. See more details in \"ESRGAN: Enhanced Super-Resolution\n # Generative Adversarial Networks\"\n for m in [\n self.conv_first, self.conv_body, self.conv_up1,\n self.conv_up2, self.conv_hr, self.conv_last\n ]:\n default_init_weights(m, 0.1)", "def init_weights(self, sizes, init, X, Y):\n\t\tinit = init.lower()\n\n\t\tif init == 'none':\n\t\t\tpass\n\t\telif init == 'zeros':\n\t\t\tself.wts = arr([np.zeros((sizes[i + 1],sizes[i] + 1)) for i in range(len(sizes) - 1)], dtype=object)\n\t\telif init == 'random':\n\t\t\tself.wts = arr([.0025 * np.random.randn(sizes[i+1],sizes[i]+1) for i in range(len(sizes) - 1)], dtype=object)\n\t\telse:\n\t\t\traise ValueError('NNetClassify.init_weights: ' + str(init) + ' is not a valid option for init')", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. / n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def init_sg_weights(self):\n n = self.weights_shape[0] # size of current layer\n # pylint: disable=no-member\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n # pylint: enable=no-member\n self.sg_weights = [A, B, C]", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)", "def InitWeights(self):\n self.w = -1 + 2 * np.random.rand(self.num_of_inputs,)\n self.w0 = -1 + 2 * np.random.rand()", "def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)", "def init_weights(self, dims):\n self.W = np.random.normal(size=dims) * 0.0001", "def init_weights(self):\n for i in range(5):\n default_init_weights(getattr(self, f'conv{i+1}'), 0.1)" ]
[ "0.76051825", "0.7580625", "0.7418632", "0.7384778", "0.73651063", "0.72985464", "0.72827876", "0.723169", "0.72292835", "0.72177035", "0.72110796", "0.7196356", "0.7196356", "0.7196356", "0.71878", "0.7185429", "0.7176544", "0.71507525", "0.71335864", "0.71014833", "0.70875585", "0.706855", "0.70517623", "0.7044872", "0.69890434", "0.6979859", "0.69683796", "0.69557846", "0.6947948", "0.69380337" ]
0.7777824
0
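The init_weights document in the row above scales normally distributed weights by sqrt(1 / fan_in) when Xavier initialisation is requested, and otherwise draws uniform weights and normalises each row to sum to 1. The short sketch below reproduces both schemes in isolation; the helper names and the 784-128-10 layer sizes are illustrative assumptions, not values taken from the dataset, and broadcasting replaces the np.matlib.repmat call used in the document.

import numpy as np

def xavier_init(n_out, n_in):
    # Normally distributed weights scaled by sqrt(1 / fan_in), as in the
    # "np.random.randn(...) * np.sqrt(1 / n_input_layer)" pattern above.
    return np.random.randn(n_out, n_in) * np.sqrt(1.0 / n_in)

def uniform_row_normalised_init(n_out, n_in):
    # Uniform weights in [0, 1) with each row rescaled to sum to 1,
    # mirroring the non-Xavier branch of the document above.
    w = np.random.uniform(0.0, 1.0, (n_out, n_in))
    return w / w.sum(axis=1, keepdims=True)

# Usage example for a 784-128-10 network.
W1 = xavier_init(128, 784)
W2 = uniform_row_normalised_init(10, 128)
print(W1.shape, W2.shape)  # (128, 784) (10, 128)
print(W2.sum(axis=1))      # each row sums (numerically) to 1.0

As a design note, the 1 / fan_in scaling used in the document is one common variant; the original Glorot formulation scales the variance by 2 / (fan_in + fan_out) instead.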
Trains the model based on the system parameters. Uses the ReLU function and its derivative to train the neuron weights. Weights are trained depending on the number of layers. Batch training is carried out, with weights being updated at the end of each batch. After each epoch, accuracy, error and the average weight update (for a single layer) are calculated. At the end of training, graphs are output for error, accuracy and average weight (single layer) per epoch
def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1): # Initialise empty error and accuracy arrays errors = np.zeros((epoch,)) accuracies = np.zeros((epoch,)) # If it is only a single layer network initialise variables for calcualting average weight if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0): tau = 0.01 average_weight = np.zeros(w1.shape) average_weight_plot = np.zeros((epoch,1)) prev_w1 = np.copy(w1) # Epoch loop for i in range(epoch): # Build an array of shuffled indexes shuffled_indexes = np.random.permutation(samples) # Batch loop for batch in range(0, n_batches): # Initialise empty change in weight and bias depending on number of layers delta_w1 = np.zeros(w1.shape) delta_bias_w1 = np.zeros(bias_w1.shape) if n_hidden_layer > 0: delta_w2 = np.zeros(w2.shape) delta_bias_w2 = np.zeros(bias_w2.shape) if n_hidden_layer_2 > 0: delta_w3 = np.zeros(w3.shape) delta_bias_w3 = np.zeros(bias_w3.shape) # Extract indexes, and corresponding data from the input and expected output indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size] x0 = train_data[indexes].T t = train_output[indexes].T # Apply input weights to summation of inputs and add bias terms h1 = np.matmul(w1, x0) + bias_w1 # Apply the activation function to the summation x1 = relu(h1) # For first hidden layer if n_hidden_layer > 0: # Apply input weights to summation of inputs and add bias terms h2 = np.matmul(w2, x1) + bias_w2 # Apply the activation function to the summation x2 = relu(h2) # For second hidden layer if n_hidden_layer_2 > 0: # Apply input weights to summation of inputs and add bias terms h3 = np.matmul(w3, x2) + bias_w3 # Apply the activation function to the summation x3 = relu(h3) # Error signal error = t - x3 # Local gradient for second hidden layer delta_3 = relu_prime(x3) * error # Change in weight at second hidden layer delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T) # Change in bias at second hidden layer delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1) # Reshape to be a matrix rather than column vector delta_bias_w3 = delta_bias_w3.reshape(-1, 1) # Local gradient for first hidden layer delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3) # Change in weight at first hidden layer delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T) # Change in bias at first hidden layer delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1) # Reshape to be a matrix rather than column vector delta_bias_w2 = delta_bias_w2.reshape(-1, 1) # Local gradient for input layer delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2) # Change in weight at input layer delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T) # Change in bias at input layer delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1) # Reshape to be a matrix rather than column vector delta_bias_w1 = delta_bias_w1.reshape(-1, 1) else: # Error signal error = t - x2 # Change in weight at first hidden layer delta_2 = relu_prime(x2) * error # Change in weight at first hidden layer delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T) # Change in bias at first hidden layer delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1) # Reshape to be a matrix rather than column vector delta_bias_w2 = delta_bias_w2.reshape(-1, 1) # Local gradient for input layer delta_1 = relu_prime(h1) * np.matmul(w2.T, 
delta_2) # Change in weight at input layer delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T) # Change in bias at input layer delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1) # Reshape to be a matrix rather than column vector delta_bias_w1 = delta_bias_w1.reshape(-1, 1) else: # Error signal error = t - x1 # Local gradient for input layer delta_1 = relu_prime(x1) * error # Change in weight at input layer delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T) # Change in bias at input layer delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1) # Reshape to be a matrix rather than column vector delta_bias_w1 = delta_bias_w1.reshape(-1, 1) # Checks if L1 error is used as well if l1: # Takes away the derivative of L1 from the change in weight delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1) # Takes away the derivative of L1 from the change in bias delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1) # Checks if hidden layer present if n_hidden_layer > 0: # Takes away the derivative of L1 from the change in weight delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2) # Takes away the derivative of L1 from the change in bias delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2) # Checks if second hidden layer present if n_hidden_layer_2 > 0: # Takes away the derivative of L1 from the change in weight delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3) # Takes away the derivative of L1 from the change in bias delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3) # Add change in weight w1 += delta_w1 # Add change in bias bias_w1 += delta_bias_w1 # Checks if hidden layer present if n_hidden_layer > 0: # Add change in weight w2 += delta_w2 # Add change in bias bias_w2 += delta_bias_w2 # Checks if second hidden layer present if n_hidden_layer_2 > 0: # Add change in weight w3 += delta_w3 # Add change in bias bias_w3 += delta_bias_w3 # Calculate and print average weight (single layer), accuracy and error at the end of the epoch print("------ Epoch {} ------".format(i+1)) if n_hidden_layer == 0: # If single layer present calculate average weight change average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot, prev_w1, w1, i) prev_w1 = np.copy(w1) # Calculate accuracy and error based on validation data accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, bias_w1, bias_w2, bias_w3, l1, lmbda) print("---------------------") print("\n") # Plot results for error, accruacy and average weight (single layer) #if n_hidden_layer == 0: # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum', # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum') #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error') #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy') return w1, w2, w3, bias_w1, bias_w2, bias_w3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self) -> None:\n for _ in range(self.epochs):\n for x, y in zip(self.x_train, self.y_train):\n\n weights_gradient = [\n None for weight in self.weights\n ] # Initializing weight gradients for each layer which are going to be used to update the weights in the network.\n\n biases_gradient = [\n None for bias in self.biases\n ] # Initializing bias gradients for each layer which are going to be used to update the biases in the network.\n\n activation = np.expand_dims(x, axis=1)\n activations = [\n activation\n ] # A list for storing all the activations when doing forward propagation\n\n values = (\n []\n ) # A list for storing weight * x + bias values without applying the activation function.\n\n for weight, bias in zip(self.weights, self.biases):\n value = np.dot(weight, activation) + bias\n values.append(value)\n\n activation = self.sigmoid(value)\n activations.append(activation)\n\n \"\"\"\n Calculating the error delta from output layer to be propagated backwards in the network. It is calculated\n by taking the derivative of the loss function, which in our case is MSE, and multiply with derivate of\n the sigmoid function applied on the value that entered the last layer of the network.\n \"\"\"\n\n error_delta = (activations[-1] - y) * self.sigmoid_derivative(\n values[-1]\n )\n\n weights_gradient[-1] = np.dot(\n error_delta, activations[-2].T\n ) # Setting error delta multiplied with the second last layer activations as weight gradient for last layer.\n\n biases_gradient[-1] = error_delta # Setting error delta as bias gradient for last layer.\n\n \"\"\"\n This for-loop does the same as the code from line 128 - 136, but for each layer in the network.\n Thus, the error is propagated backwards in the network, and the gradients for each layer are set.\n \"\"\"\n for layer in range(2, self.total_layers):\n error_delta = np.dot(\n self.weights[-layer + 1].T, error_delta\n ) * self.sigmoid_derivative(values[-layer])\n\n weights_gradient[-layer] = np.dot(\n error_delta, activations[-layer - 1].T\n )\n\n biases_gradient[-layer] = error_delta\n\n self.weights = [\n weight - self.lr * weight_gradient\n for weight, weight_gradient in zip(self.weights, weights_gradient)\n ] # Updating the weights of the network by w_i - learning_rate * nabla w_i (w_i is the weight matrix at layer i, and nabla w_i is weight gradient.)\n\n self.biases = [\n bias - self.lr * bias_gradient\n for bias, bias_gradient in zip(self.biases, biases_gradient)\n ] # Updating the biases of the network by b_i - learning_rate * nabla b_i (b_i is the bias vector at layer i, and nabla b_i is weight gradient.)", "def train(self, verbose=True):\n\n\n learned = False\n iteration = 0\n\n from util.loss_functions import DifferentError\n loss = DifferentError()\n\n\n\n\n\n # Train for some epochs if the error is not 0\n while not learned:\n # x ist ein Bild bestehend aus einem Label (erster Eintrag) und 784 Pixeln\n # t ist das Zielergebnis von x (überprüfbar mit dem Label)\n # o ist der tatsächliche Ergebnis von x\n # w ist der Gewichtsvektor\n # Als Aktivierungsfunktion verwenden wir die Sigmoid Funktion\n # Das Training wird dann beendet, sobald das Fehlerkriterium konvergiert\n\n totalError = 0\n\n output = []\n labels = self.trainingSet.label\n inputs = self.trainingSet.input\n\n # iteriere für jede Instanz im Trainingsset x € X\n for input in inputs:\n # Ermittle O_x = sig(w*x)\n output.append(self.fire(input))\n\n # Ermittle Fehler AE = tx - ox\n error = loss.calculateError(np.array(labels), np.array(output))\n\n # grad = 
[0]\n grad = np.zeros(len(self.trainingSet.input[0]))\n grad2 = np.zeros(len(self.trainingSet.input[0]))\n\n for e, input, out in zip(error, inputs, output):\n activationPrime = Activation.getDerivative(activationName)(np.dot(np.array(input), self.weight))\n #grad += np.multiply( np.multiply( input, e), activationPrime)\n grad += np.multiply( input, e)\n\n # Update grad = grad + errorPrime * x * activationPrime\n\n\n\n # print grad - grad2\n #print \"Error: \" + str(error) + \" Grad: \" + str(grad)\n\n # update w: w <- w + n*grad\n self.updateWeights(grad)\n\n\n iteration += 1\n totalError = error.sum()\n\n if verbose:\n logging.info(\"Epoch: %i; Error: %i\", iteration, totalError)\n\n if abs(totalError) < 0.01 or iteration >= self.epochs:\n # stop criteria is reached\n learned = True\n\n pass", "def train(model, x_train, y_train, x_valid, y_valid, config):\n \n epochs = config['epochs']\n threshold = config['early_stop_epoch']\n alpha = config['learning_rate']\n# val_loss = 10000*np.ones((epochs,1))\n beta = config['momentum_gamma']\n batch_size = config['batch_size']\n \n N = x_train.shape[0]\n num_batches = int((N+batch_size -1 )/ batch_size)\n \n best_weight = []\n best_epoch = []\n best_bias = []\n #print(len(model.layers))\n train_loss_list = []\n \n train_acc_list = []\n val_acc_list = []\n val_loss_list = []\n \n counter = 0\n \n lam = 0.0001\n \n \n for i in range(1, epochs+1):\n shuffled_indices = np.random.permutation(range(N))\n \n for batch in range(num_batches):\n minibatch_indices = shuffled_indices[batch_size*batch:min(batch_size*(batch+1), N)]\n #print(len(minibatch_indices))\n xbatch = x_train[minibatch_indices, :]\n ybatch = y_train[minibatch_indices, :]\n #print(ybatch.shape)\n y, loss = model(xbatch, ybatch)\n \n model.backward() \n #weight update and storing\n for k in range(0, len(config['layer_specs']), 2):\n mom_w = -model.layers[k].d_v_w * beta + alpha*(model.layers[k].d_w + lam*model.layers[k].w )\n mom_b = -model.layers[k].d_v_b * beta + alpha*(model.layers[k].d_b + lam*model.layers[k].b )\n model.layers[k].w = model.layers[k].w - (mom_w )\n model.layers[k].b = model.layers[k].b - (mom_b )\n model.layers[k].d_v_w = -mom_w\n model.layers[k].d_v_b = -mom_b \n\n y, loss = model(x_train, y_train) \n train_loss_list.append(loss)\n \n train_pred = np.argmax(y, axis=1) \n acc = np.mean(np.argwhere(y_train==1)[:,1]==train_pred) \n \n train_acc_list.append(acc)\n \n \n #print(\"Training acc for epoch \", i, \" is:\\n\", acc) \n #print(\"Training loss for epoch \", i, \" is:\\n\", loss) \n val_y, val_loss = model(x_valid, y_valid)\n val_loss_list.append(val_loss)\n\n val_pred = np.argmax(val_y, axis=1) \n acc = np.mean(np.argwhere(y_valid==1)[:,1]==val_pred) \n val_acc_list.append(acc)\n \n #print(\"Validation acc for epoch \", i, \" is:\\n\", acc) \n #print(\"Validation loss for epoch \", i, \" is:\\n\", val_loss)\n if(i>1 and val_loss <min(val_loss_list[:-1])):\n #update best weights\n counter = 0\n weight = []\n bias = []\n for k in range(0, len(config['layer_specs']), 2):\n weight.append(model.layers[k].w)\n bias.append(model.layers[k].b)\n best_weight = weight \n best_bias = bias\n best_epoch = i\n else:\n counter +=1\n \n if counter > threshold:\n print(\"best epoch:\", best_epoch)\n break\n\n# if(i>=6 and val_loss[i-1]>=val_loss[i-2] and val_loss[i-2]>=val_loss[i-3]and val_loss[i-3]>=val_loss[i-4]and val_loss[i-4]>=val_loss[i-5]and val_loss[i-5]>=val_loss[i-6]):\n# break\n \n print(len(best_weight))\n print('Epoch: ', i)\n #print(val_loss)\n p = 0\n for k in 
range(0, len(config['layer_specs']), 2):\n model.layers[k].w = best_weight[p]\n model.layers[k].b = best_bias[p]\n p = p + 1\n \n return train_loss_list, val_loss_list, train_acc_list, val_acc_list\n raise NotImplementedError(\"Train method not implemented\")", "def train(epochs, batch_size, lr, verbose):\n # autograd globally off\n torch.set_grad_enabled(False)\n # generate training and testing datasets\n train_data, train_label = generate_data()\n test_data, test_label = generate_data()\n # normalize data be centered at 0\n train_data, test_data = normalize(train_data, test_data)\n\n if verbose:\n print(\"--- Dataset ---\")\n print(\"Train X: \", train_data.size(), \" | Train y: \", train_label.size())\n print(\" Test X: \", test_data.size(), \" | Test y: \", test_label.size())\n\n layers =[]\n # input layer (2 input units)\n linear1 = Linear(2, 25, bias= True, weight_init=xavier_uniform)\n\n # 3 hidden layers (each 25 units)\n linear2 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear3 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear4 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n\n # output layer (2 output units)\n linear5 = Linear(25, 2, bias= True, weight_init=xavier_uniform)\n\n\n layers.append(linear1)\n layers.append(Relu())\n layers.append(linear2)\n layers.append(Relu())\n layers.append(linear3)\n layers.append(Relu())\n layers.append(linear4)\n layers.append(Tanh())\n layers.append(linear5)\n\n model = Sequential(layers)\n if verbose: print(\"Number of model parameters: {}\".format(sum([len(p) for p in model.param()])))\n\n criterion = MSE()\n optimizer = SGD(model, lr=lr)\n\n train_losses, test_losses = [], []\n train_accuracies, test_accuracies = [], []\n train_errors, test_errors = [], []\n\n if verbose: print(\"--- Training ---\")\n for epoch in range(1, epochs+1):\n if verbose:print(\"Epoch: {}\".format(epoch))\n\n # TRAINING\n for batch_idx in range(0, train_data.size(0), batch_size):\n # axis 0, start from batch_idx until batch_idx+batch_size\n output = model.forward(train_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, train_label.narrow(0, batch_idx, batch_size))\n train_losses.append(loss)\n if verbose: print(\"Train Loss: {:.2f}\".format(loss.item()))\n\n # put to zero weights and bias\n optimizer.zero_grad()\n\n ## Backpropagation\n # Calculate grad of loss\n loss_grad = criterion.backward()\n\n # Grad of the model\n model.backward(loss_grad)\n\n # Update parameters\n optimizer.step()\n\n train_prediction = model.forward(train_data)\n acc = accuracy(train_prediction, train_label)\n train_accuracies.append(acc)\n train_errors.append(1-acc)\n if verbose: print(\"Train Accuracy: {:.2f}\".format(acc.item()))\n\n # EVALUATION\n for batch_idx in range(0, test_data.size(0), batch_size):\n # axis 0, start from batch_idx until batch_idx+batch_size\n output = model.forward(test_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, test_label.narrow(0, batch_idx, batch_size))\n test_losses.append(loss)\n if verbose: print(\"Test Loss: {:.2f}\".format(loss.item()))\n\n test_prediction = model.forward(test_data)\n acc = accuracy(test_prediction, test_label)\n test_accuracies.append(acc) \n test_errors.append(1-acc)\n if verbose: print(\"Test Accuracy: {:.2f}\".format(acc.item()))\n\n return train_losses, test_losses, train_accuracies, test_accuracies, train_errors, test_errors", "def 
main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # 
plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def train(self, inputs, targets, eta, niterations):\n ndata = np.shape(inputs)[0] # number of data samples\n # adding the bias\n inputs = np.concatenate((inputs, -np.ones((ndata, 1))), axis=1)\n\n # numpy array to store the update weights\n updatew1 = np.zeros((np.shape(self.weights1)))\n updatew2 = np.zeros((np.shape(self.weights2)))\n updatew3 = np.zeros((np.shape(self.weights3)))\n\n self.Errors = []\n for n in range(niterations):\n\n #############################################################################\n # TODO: implement the training phase of one iteration which consists of two phases:\n # the forward phase and the backward phase. you will implement the forward phase in \n # the self.forwardPass method and return the outputs to self.outputs. Then compute \n # the error (hints: similar to what we did in the lab). Next is to implement the \n # backward phase where you will compute the derivative of the layers and update \n # their weights. \n #############################################################################\n\n # forward phase \n self.outputs = self.forwardPass(inputs)\n\n # Error using the sum-of-squares error function\n error = 0.5 * np.sum((self.outputs - targets) ** 2)\n\n if np.mod(n, 100) == 0:\n self.Errors.append(error)\n print(\"Iteration: \", n, \" Error: \", error)\n\n # backward phase \n # Compute the derivative of the output layer. 
NOTE: you will need to compute the derivative of \n # the softmax function. Hints: equation 4.55 in the book. \n # deltao = (self.outputs - targets) * (self.outputs - self.outputs ** 2)\n deltao = (self.outputs - targets) * self.outputs * (1 - self.outputs)\n\n # compute the derivative of the second hidden layer\n\n deltah2 = self.beta * self.hidden2 * (1.0 - self.hidden2) * (np.dot(deltao, np.transpose(self.weights3)))\n\n\n # compute the derivative of the first hidden layer\n deltah1 = self.beta * self.hidden1 * (1.0 - self.hidden1) * (np.dot(deltah2[:, :-1], np.transpose(self.weights2)))\n\n # update the weights of the three layers: self.weights1, self.weights2 and self.weights3\n # here you can update the weights as we did in the week 4 lab (using gradient descent) \n # but you can also add the momentum\n\n updatew1 = eta * np.dot(np.transpose(inputs), deltah1[:, :-1]) + self.momentum * updatew1\n updatew2 = eta * np.dot(np.transpose(self.hidden1), deltah2[:, :-1]) + self.momentum * updatew2\n updatew3 = eta * np.dot(np.transpose(self.hidden2), deltao) + self.momentum * updatew3\n\n self.weights1 -= updatew1\n self.weights2 -= updatew2\n self.weights3 -= updatew3\n\n #############################################################################\n # END of YOUR CODE \n #############################################################################", "def trainer(model, X_train, y_train, X_valid, y_valid, config):\n # loop for number of epochs\n # shuffle inputs based off seed\n # need to shuffle validation based off same seed\n # forward prop and get xenloss\n # backprop and update weights\n\n stop_count = config['early_stop_epoch']\n b_size = config[\"batch_size\"]\n stop = config['early_stop']\n\n xnloss = []\n val_loss = [float('inf')]\n test_scores = []\n\n train_accu = []\n valid_accu = []\n\n\n #validation loss increase per epoch counter\n c = -1\n \n for i in range(config[\"epochs\"]):\n np.random.seed(i)\n np.random.shuffle(X_train)\n\n np.random.seed(i)\n np.random.shuffle(y_train)\n\n '''You should average the loss across all mini batches'''\n #means sum up loss from all mini-batches and divide by num_batches\n sums = 0\n\n num_batches = int(X_train.shape[0] / b_size)\n k=0\n for j in range(num_batches):\n # choose minibatch\n x = X_train[j * b_size: (j+1) * b_size]\n targets = y_train[j * b_size: (j+1) * b_size]\n loss, y_pred = model.forward_pass(x, targets)\n loss = loss / (config['batch_size'] * 10) # 10 classes\n sums += loss\n #xnloss.append(loss)\n model.backward_pass()\n k +=1\n # if k < 5 or k > 44:\n # print(targets[0, :])\n # print(y_pred[0, :])\n # print(y_pred[0, :].sum())\n # print(k, '=============')\n\n # mini-batch done here, take avg of loss\n avg_loss = sums / num_batches\n xnloss.append(avg_loss)\n \n ''' epochs loop continues here\n 0) perform validation and compute its (val) loss\n\n 1) calculate test accuracy for every epoch where the\n validation loss is better than the previous validation loss.\n \n 2) Save this result (test score OR loss?) and choose the best \n one when you hit the early stopping criteria.\n\n 3) early stopping - stop training (epochs loop) after 5th consecutive \n increase in validation loss. 
(Experiment with diff values).\n '''\n\n '''VALIDATION PERFORMACE'''\n v_loss, v_pred = model.forward_pass(X_valid, y_valid)\n v_loss_norm = v_loss / (len(X_valid) * 10)\n\n\n '''TEST ACCURACY''' \n #if val loss better (less) than prev: calculate test scores\n \n if v_loss_norm > val_loss[-1]:\n print(\"val loss going up from last time at epoch i=\", i)\n c += 1\n else:\n c = 0\n '''insert code for test accu here'''\n # val_loss.append(v_loss_norm)\n # else: #else val loss increased, so increment counter\n \n val_loss.append(v_loss_norm)\n \n '''EARLY STOPPING'''\n if stop and c == stop_count:\n print(\"early stopped at epoch =\", i+1)\n break\n\n print(val_loss[1:3])\n print(val_loss, len(xnloss), len(val_loss[1:]))\n #outside of epochs loop\n plt.plot(xnloss, label='training loss')\n plt.plot(val_loss[1:], label='validation loss')\n plt.title(\"losses across all epochs\")\n plt.xlabel(\"epochs\")\n plt.ylabel(\"avg loss for the epoch\")\n plt.legend()\n plt.savefig('raised_a.png')\n plt.show()\n #firstplot.png is training loss against # of batches, in 1 epoch\n #avgacrossepochs.png is avg training loss of all batches, across 50 epochs\n # both_losses = []\n \n # for i in range(len(xnloss)):\n # both_losses.append((val_loss[i], xnloss[i]))\n # print(\"validation errors: \", [(val_loss[i], xnloss[i]) for i in range(len(xnloss))])", "def trainer(model, X_train, y_train, X_valid, y_valid, config):\r\n\r\n batch_size = config['batch_size']\r\n alpha = config['learning_rate']\r\n penalty = config['L2_penalty']\r\n momentum = config['momentum_gamma']\r\n\r\n num_train = X_train.shape[0]\r\n num_batches = int(np.ceil(num_train / config['batch_size']))\r\n\r\n train_accu = []\r\n valid_accu = []\r\n\r\n valid_losses = []\r\n train_losses = []\r\n\r\n check_early = []\r\n best_test_accu = []\r\n\r\n for n in range(config['epochs']):\r\n print('Current Epoch is ' + str(n + 1))\r\n\r\n if (n + 1) % 10 == 0:\r\n print(max(best_test_accu))\r\n\r\n for i in range(num_batches):\r\n\r\n # Get train and target sets for the batch training\r\n batch_train_X = X_train[i * batch_size: (i + 1) * batch_size]\r\n batch_train_y = y_train[i * batch_size: (i + 1) * batch_size]\r\n\r\n # Forward Pass\r\n train_loss, train_pred = model.forward_pass(batch_train_X, batch_train_y)\r\n\r\n # Backward Pass\r\n model.backward_pass()\r\n\r\n # Update weights and bias\r\n for layer in model.layers:\r\n if isinstance(layer, Layer):\r\n # With momentum\r\n if config['momentum']:\r\n if not hasattr(layer, 'prev_w'):\r\n layer.prev_w = np.zeros_like(layer.w)\r\n\r\n # Weights and bias update\r\n d_w = layer.d_w * alpha + momentum * layer.prev_w\r\n layer.w = layer.w * (1 - alpha * penalty / batch_size) + d_w\r\n layer.prev_w = d_w\r\n layer.b = layer.d_b * alpha\r\n # Without momentum\r\n else:\r\n layer.w = layer.w * (1 - alpha * penalty / batch_size) + layer.d_w * alpha\r\n\r\n # Calculate validation loss and accuracy\r\n valid_loss, valid_pred = model.forward_pass(X_valid, y_valid)\r\n\r\n train_accu.append(test(model, X_train, y_train, config))\r\n valid_accu.append(test(model, X_valid, y_valid, config))\r\n\r\n valid_losses.append(valid_loss)\r\n train_losses.append(train_loss)\r\n best_test_accu.append(test(model, X_test, y_test, config))\r\n\r\n if config[\"early_stop\"]:\r\n if not check_early:\r\n check_early.append(valid_loss)\r\n else:\r\n if len(check_early) == config['early_stop_epoch']:\r\n break\r\n elif valid_loss >= check_early[-1]:\r\n check_early.append(valid_loss)\r\n else:\r\n check_early = []\r\n\r\n 
print('The best test accuracy is ' + str(max(best_test_accu) * 100) + '%.')\r\n print('The best epoch is epoch ' + str(best_test_accu.index(max(best_test_accu))) + '.')\r\n plot_train_vad_accu_and_losses('2c', train_accu, valid_accu, train_losses, valid_losses, config) # 2c)\r\n # plot_train_vad_accu_and_losses('2d', train_accu, valid_accu, train_losses, valid_losses, config) # 2d)\r\n # plot_train_vad_accu_and_losses('2ea', train_accu, valid_accu, train_losses, valid_losses, config) # 2e)a\r\n # plot_train_vad_accu_and_losses('2eb', train_accu, valid_accu, train_losses, valid_losses, config) # 2e)b\r\n # plot_train_vad_accu_and_losses('2fa', train_accu, valid_accu, train_losses, valid_losses, config) # 2f)a\r\n # plot_train_vad_accu_and_losses('2fb', train_accu, valid_accu, train_losses, valid_losses, config) # 2f)b\r\n # plot_train_vad_accu_and_losses('2fc', train_accu, valid_accu, train_losses, valid_losses, config) # 2f)c\r", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def train():\n if os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. 
Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if os.path.isfile(os.path.join(train_dir, 'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, 
test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n weightsOfParams = lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT 
YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def train(self): \n self.current_step = 0\n self.log = log_setup(self.args)\n self.current_gamma = self.args.initial_gamma\n with tf.Session(graph = self.computation_graph) as session:\n self.init.run()\n print(\"Model Initialized.\")\n for repetition in range(0, self.args.epochs):\n\n random.shuffle(self.nodes)\n self.optimization_time = 0 \n self.average_loss = 0\n\n epoch_printer(repetition)\n for i in tqdm(range(int(len(self.edges)/self.args.batch_size))):\n self.current_step = self.current_step + 1\n self.current_gamma = gamma_incrementer(self.current_step, self.args.initial_gamma, self.current_gamma, self.true_step_size)\n feed_dict = self.feed_dict_generator(self.edges[i*self.args.batch_size:(i+1)*self.args.batch_size], self.current_step, self.current_gamma)\n start = time.time()\n _, loss = session.run([self.train_op , self.loss], feed_dict=feed_dict)\n end = time.time()\n self.optimization_time = self.optimization_time + (end-start)\n self.average_loss = self.average_loss + loss\n\n print(\"\")\n 
self.average_loss = self.average_loss/self.vocab_size\n self.final_embeddings = self.factorization_layer.embedding_matrix.eval()\n if \"CODE\" in self.args.model: \n self.c_means = self.cluster_layer.cluster_means.eval()\n self.modularity_score, assignments = neural_modularity_calculator(self.graph, self.final_embeddings, self.c_means)\n else:\n self.modularity_score, assignments = classical_modularity_calculator(self.graph, self.final_embeddings, self.args)\n self.log = log_updater(self.log, repetition, self.average_loss, self.optimization_time, self.modularity_score)\n tab_printer(self.log)\n if \"CODE\" in self.args.model: \n initiate_dump_grafcode(self.log, assignments, self.args, self.final_embeddings, self.c_means)\n else:\n initiate_dump_graf(self.log, assignments, self.args, self.final_embeddings)", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.setWeights(trainingData.shape[1])\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n \n # Hyper-parameters. Your can reset them. Default batchSize = 100, weight_decay = 1e-3, learningRate = 1e-2\n \"*** YOU CODE HERE ***\"\n self.batchSize = 100\n self.weight_decay = 1e-3\n self.learningRate = 0.1\n\n def Softmax(x):\n x_max = np.max(x, axis=0)\n x_exp = np.exp(x - x_max)\n x_exp_sum = np.sum(x_exp, axis=0)\n return x_exp / x_exp_sum\n\n for iteration in range(self.max_iterations):\n if iteration % 10 == 0:\n print(\"Starting iteration \", iteration, \"...\")\n self.learningRate *= 0.9\n dataBatches = self.prepareDataBatches(trainingData, trainingLabels)\n for batchData, batchLabel in dataBatches:\n \"*** YOUR CODE HERE ***\"\n Y = np.zeros((len(self.legalLabels), self.batchSize))\n for i in range(self.batchSize):\n Y[batchLabel[i]][i] = 1\n Y_pred = Softmax((batchData @ self.weights + self.bias).T)\n d_weight = ((Y_pred - Y) @ batchData / batchData.shape[0]).T + self.weight_decay * sum(self.weights)\n d_bias = np.mean(Y_pred - Y, axis=1) + self.weight_decay * sum(self.bias)\n self.weights -= d_weight * self.learningRate\n self.bias -= d_bias * self.learningRate", "def train(train_loader, model, criterion, optimizer, lr_schedule, epoch,Lambda,layerID):\r\n global total_steps, exp_flops, exp_l0, args, writer\r\n losses = AverageMeter()\r\n top1 = AverageMeter()\r\n model.eval()\r\n lr_schedule.step(epoch=epoch)\r\n for i, (input_, target) in enumerate(train_loader):\r\n total_steps += 1\r\n if torch.cuda.is_available():\r\n target = target.cuda()\r\n input_ = input_.cuda()\r\n input_var = torch.autograd.Variable(input_)\r\n target_var = torch.autograd.Variable(target)\r\n # compute output\r\n output = model(input_var)\r\n totalloss,loss, reg = criterion(model,output, target_var,layerID,Lambda)\r\n prec1 = accuracy(output.data, target, topk=(1,))[0]\r\n losses.update(totalloss.data, input_.size(0))\r\n top1.update(100 - prec1, input_.size(0))\r\n ## Adjust LR\r\n oldloss = totalloss\r\n if oldloss-totalloss > 1.0:\r\n optimizer.defaults['lr'] = optimizer.defaults['lr']*1\r\n # compute gradient and do SGD step\r\n optimizer.zero_grad()\r\n totalloss.backward()\r\n optimizer.step()\r\n # clamp the parameters\r\n layers = model.layers if not args.multi_gpu else model.module.layers\r\n for k, layer in enumerate(layers):\r\n if not isinstance(layer,nn.Linear):\r\n layer.constrain_parameters()\r\n TotalDataScale = len(train_loader.dataset)\r\n # input()\r\n IMPORTANCE = model.layers[layerID].qz_loga/(1+model.layers[layerID].qz_loga)\r\n MAX = 
torch.max(IMPORTANCE)\r\n MIN = torch.min(IMPORTANCE)\r\n if i == 0:\r\n Log = ('\\nEpoch:[{0}][{1}/{2}], '\r\n 'Loss:{loss:.4f}, '\r\n 'Reg:{reg:.4f}, '\r\n 'Max Importance:{max:.4f}, ''Min Importance:{min:.4f}, '\r\n 'Lr:{lr:.4f}'.format(\r\n epoch, i, TotalDataScale,reg=reg, loss=loss, top1=top1,max=MAX,min=MIN,lr=optimizer.defaults['lr']))\r\n else:\r\n Log = ('\\rEpoch:[{0}][{1}/{2}], '\r\n 'Loss:{loss:.4f}, '\r\n 'Reg:{reg:.4f}, '\r\n 'Max Importance:{max:.4f}, ''Min Importance:{min:.4f}, '\r\n 'Lr:{lr:.4f}'.format(\r\n epoch, i, len(train_loader), reg=reg, loss=loss, top1=top1, max=MAX, min=MIN,\r\n lr=optimizer.defaults['lr']))\r\n sys.stdout.write(Log)\r\n\r\n return top1.avg", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def train_model():\n\n if python_version == 2 :\n if num_hidden is None:\n num_hidden = int(raw_input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(raw_input('Enter number of neurons in each hidden layer: '))\n else:\n if num_hidden is None:\n num_hidden = int(input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(input('Enter number of neurons in each hidden layer: '))\n\n print('Activations are LeakyReLU. Optimizer is ADAM. Batch sizei is 32.' 
+ \\\n 'Fully connected network without dropout.')\n\n # Construct model\n model = Sequential()\n\n # Add input layer.\n # MNIST dataset: each image is a 28x28 pixel square (784 pixels total).\n model.add(Flatten(input_shape=(1, 28, 28)))\n\n # Add hidden layers.\n for _ in range(num_hidden):\n model.add(Dense(num_neuron, use_bias=False))\n model.add(LeakyReLU(alpha=.01))\n\n # Add output layer\n model.add(Dense(10, activation='softmax', use_bias=False))\n\n # Compile the model\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n # Print information about the model\n print(model.summary())\n\n X_train, Y_train, X_test, Y_test = load_data()\n X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train,\n test_size=1/6.0,\n random_state=seed)\n\n # Fit the model\n model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)\n\n print(\"Save the model\")\n model_name = __save_trained_model(model, num_hidden, num_neuron)\n\n print(\"Training done\")\n\n return model_name, model", "def train(self):\n\n # Set the pretrain log\n trlog = {}\n trlog['args'] = vars(self.args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['train_iou']=[]\n trlog['val_iou']=[]\n trlog['max_iou'] = 0.0\n trlog['max_iou_epoch'] = 0\n\n # Set the timer\n timer = Timer()\n # Set global count to zero\n global_count = 0\n # Set tensorboardX\n writer = SummaryWriter(comment=self.args.save_path)\n\n # Start pretrain\n for epoch in range(1, self.args.pre_max_epoch + 1):\n # Update learning rate\n self.lr_scheduler.step()\n # Set the model to train mode\n self.model.train()\n self.model.mode = 'train'\n # Set averager classes to record training losses and accuracies\n train_loss_averager = Averager()\n train_acc_averager = Averager()\n train_iou_averager = Averager()\n\n # Using tqdm to read samples from train loader\n tqdm_gen = tqdm.tqdm(self.train_loader)\n\n for i, batch in enumerate(tqdm_gen, 1):\n # Update global count number \n global_count = global_count + 1\n if torch.cuda.is_available():\n data, label = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label = batch[1]\n\n # Output logits for model\n logits = self.model(data)\n # Calculate train loss\n # CD loss is modified in the whole project to incorporate ony Cross Entropy loss. 
Modify as per requirement.\n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate train accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.num_classes)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.num_classes).values()\n\n # Add loss and accuracy for the averagers\n train_loss_averager.add(loss.item())\n train_acc_averager.add(pixAcc)\n train_iou_averager.add(mIoU)\n\n # Print loss and accuracy till this step\n tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f} IOU={:.4f}'.format(epoch, train_loss_averager.item(),train_acc_averager.item()*100.0,train_iou_averager.item()))\n \n # Loss backwards and optimizer updates\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the averagers\n train_loss_averager = train_loss_averager.item()\n train_acc_averager = train_acc_averager.item()\n train_iou_averager = train_iou_averager.item()\n\n writer.add_scalar('data/train_loss(Pre)', float(train_loss_averager), epoch)\n writer.add_scalar('data/train_acc(Pre)', float(train_acc_averager)*100.0, epoch) \n writer.add_scalar('data/train_iou (Pre)', float(train_iou_averager), epoch)\n \n print('Epoch {}, Train: Loss={:.4f}, Acc={:.4f}, IoU={:.4f}'.format(epoch, train_loss_averager, train_acc_averager*100.0,train_iou_averager)) \n \n # Start validation for this epoch, set model to eval mode\n self.model.eval()\n self.model.mode = 'val'\n\n # Set averager classes to record validation losses and accuracies\n val_loss_averager = Averager()\n val_acc_averager = Averager()\n val_iou_averager = Averager()\n\n # Print previous information \n if epoch % 1 == 0:\n print('Best Val Epoch {}, Best Val IoU={:.4f}'.format(trlog['max_iou_epoch'], trlog['max_iou']))\n\n # Run validation\n for i, batch in enumerate(self.val_loader, 1):\n if torch.cuda.is_available():\n data, labels,_ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label=labels[0]\n p = self.args.way*self.args.shot\n data_shot, data_query = data[:p], data[p:]\n label_shot,label=labels[:p],labels[p:]\n \n par=data_shot, label_shot, data_query\n logits = self.model(par)\n # Calculate preval loss\n \n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate val accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.way)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.way).values()\n\n val_loss_averager.add(loss.item())\n val_acc_averager.add(pixAcc)\n val_iou_averager.add(mIoU) \n\n # Update validation averagers\n val_loss_averager = val_loss_averager.item()\n val_acc_averager = val_acc_averager.item()\n val_iou_averager = val_iou_averager.item()\n \n writer.add_scalar('data/val_loss(Pre)', float(val_loss_averager), epoch)\n writer.add_scalar('data/val_acc(Pre)', float(val_acc_averager)*100.0, epoch) \n writer.add_scalar('data/val_iou (Pre)', float(val_iou_averager), epoch) \n \n # Print loss and accuracy for this epoch\n print('Epoch {}, Val: Loss={:.4f} Acc={:.4f} IoU={:.4f}'.format(epoch, val_loss_averager, val_acc_averager*100.0,val_iou_averager))\n\n # Update best saved model\n if val_iou_averager > trlog['max_iou']:\n trlog['max_iou'] = val_iou_averager\n trlog['max_iou_epoch'] = epoch\n print(\"model saved in max_iou\")\n self.save_model('max_iou')\n\n # Save model every 10 epochs\n if epoch % 10 == 0:\n 
self.save_model('epoch'+str(epoch))\n\n # Update the logs\n trlog['train_loss'].append(train_loss_averager)\n trlog['train_acc'].append(train_acc_averager)\n trlog['val_loss'].append(val_loss_averager)\n trlog['val_acc'].append(val_acc_averager)\n trlog['train_iou'].append(train_iou_averager)\n trlog['val_iou'].append(val_iou_averager)\n\n # Save log\n torch.save(trlog, osp.join(self.args.save_path, 'trlog'))\n\n if epoch % 1 == 0:\n print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.max_epoch)))\n writer.close()", "def train():\n model.train()\n for batch_index, (xb, yb) in enumerate(train_dl):\n loss = loss_func(model(xb), yb)\n\n loss.backward()\n opt.step()\n opt.zero_grad()", "def train_model(self, \n num_epochs, \n learning_rate, \n momentum, \n num_iters,\n report_pseudo_cost,\n save_every_epoch,\n report_step,\n report_p_tilda,\n report_w_norm,\n exp_path,\n test_gradients = False):\n \n if report_p_tilda:\n \n p_t_i = 0\n \n p_tilda_all = np.zeros(\n [num_epochs*num_iters//report_step,self.batch_size]\n )\n \n else:\n \n p_tilda_all = []\n \n pseudo_losses = []\n \n if report_w_norm:\n \n w_norms = np.zeros(num_epochs*num_iters//report_step)\n \n w_i = 0\n \n else:\n \n w_norms = []\n \n start_train_time = timeit.default_timer()\n \n for epoch_index in range(num_epochs):\n \n epoch_time0 = timeit.default_timer()\n \n perm_inds = self.np_rand_gen.permutation(self.N_train)\n \n #put different learning_rate rules (per epoch) for now here:\n \n #lrate_epoch = learning_rate\n \n if self.mixture:\n \n lrate_epoch =\\\n (1.0/(1+epoch_index/10000.0))*learning_rate/self.batch_size\n \n else:\n lrate_epoch =\\\n (1.0/(1+epoch_index/100.0))*learning_rate/self.batch_size\n \n # lrate_epoch = (0.9**epoch_index)*learning_rate # 0.99\n \n # lrate_epoch = learning_rate\n \n if self.use_momentum:\n \n momentum_epoch = momentum\n \n print(\"Learning rate for epoch %d --- %f\"%\n (epoch_index,lrate_epoch))\n \n if report_pseudo_cost:\n \n avg_pseudo_cost_val = []\n \n for i in range(num_iters):\n \n iter_start_time = timeit.default_timer()\n \n minibatch_inds =\\\n perm_inds[self.batch_size*i:self.batch_size*(i+1)]\n \n if \"CSS\" in self.algorithm:\n \n assert self.num_samples > 0 \n \n is_samples, is_probs =\\\n self.is_sampler(minibatch_set =list(minibatch_inds),\n t = epoch_index)\n \n is_samples = np.asarray(is_samples, \n dtype = theano.config.floatX)\n if test_gradients:\n \n t0 = timeit.default_timer()\n \n approx_cost, p_tilda =\\\n self.optimize(is_samples, \n list(minibatch_inds),\n lrate_epoch)\n \n t1 = timeit.default_timer() \n print(\"Gradient computation with implementation 1 took\"+\\\n \" --- %f minutes\"%((t1 - t0)/60.0))\n W_implicit = np.asarray(self.W.get_value())\n b_implicit = np.asarray(self.b.get_value())\n t0 = timeit.default_timer()\n self.test_grad_computations(is_samples, list(minibatch_inds))\n t1 = timeit.default_timer()\n print(\"Gradient computation with implementation 2 took \"+\\\n \"--- %f minutes\"%((t1 - t0)/60.0))\n W_explicit = np.asarray(self.W.get_value())\n b_explicit = np.asarray(self.b.get_value())\n print(\"Equivalence of W updates in two implementations:\")\n print((np.round(W_implicit,12) == np.round(W_explicit,12)).all())\n print(\"Equivalence of b updates in two implementations:\")\n print((np.round(b_implicit,12) == np.round(b_explicit,12)).all())\n sys.exit()\n \n if ((self.mf_steps > 0) and (self.alpha >0)): \n \n if self.use_momentum:\n \n approx_cost, p_tilda =\\\n self.optimize(is_samples, \n 
list(minibatch_inds),\n is_probs,\n lrate_epoch,\n momentum_epoch)\n \n elif (not self.use_momentum):\n \n approx_cost, p_tilda =\\\n self.optimize(is_samples, \n list(minibatch_inds),\n is_probs,\n lrate_epoch)\n \n elif ((self.alpha ==0) or (self.mf_steps ==0) or\\\n self.mixture) and self.gibbs_steps ==0:\n \n if self.use_momentum:\n \n approx_cost, p_tilda =\\\n self.optimize(is_samples, \n list(minibatch_inds),\n lrate_epoch,\n momentum_epoch)\n \n elif (not self.use_momentum):\n \n approx_cost, p_tilda =\\\n self.optimize(is_samples, \n list(minibatch_inds),\n lrate_epoch)\n \n elif self.gibbs_steps > 0:\n \n if self.use_momentum:\n \n approx_cost, p_tilda =\\\n self.optimize(is_samples, \n list(minibatch_inds),\n is_probs,\n lrate_epoch,\n momentum_epoch)\n \n elif (not self.use_momentum):\n \n approx_cost, p_tilda =\\\n self.optimize(is_samples, \n list(minibatch_inds),\n is_probs,\n lrate_epoch)\n \n elif \"CD\" in self.algorithm:\n \n if self.num_hidden ==0:\n \n if \"PCD\" in self.algorithm:\n \n mf_sample, cd_sample = self.cd_sampling()\n \n self.x_gibbs.set_value(np.transpose(cd_sample))\n \n else:\n ### \"CD\" \n mf_sample, cd_sample =\\\n self.cd_sampling(list(minibatch_inds))\n \n self.x_gibbs.set_value(np.transpose(cd_sample))\n \n if self.use_momentum:\n \n approx_cost, p_tilda = self.optimize(list(minibatch_inds),\n lrate_epoch,\n momentum_epoch)\n \n else:\n \n approx_cost, p_tilda = self.optimize(list(minibatch_inds),\n lrate_epoch)\n \n avg_pseudo_cost_val.append(approx_cost)\n \n if report_pseudo_cost:\n \n if i % report_step ==0:\n \n print('Training epoch %d ---- Iter %d ----'%\n (epoch_index, i)+' pseudo cost value: %f'%approx_cost)\n \n if report_p_tilda:\n \n if i % report_step == 0:\n print(\"p_tilda values for training examples:\")\n if self.batch_size >=20:\n print_inds = range(0,20)\n print(p_tilda[print_inds])\n else:\n print(p_tilda[0:self.batch_size])\n print(\"sum of these values:\")\n if self.batch_size >= 20:\n print_inds = range(0,20)\n print(p_tilda[print_inds])\n else:\n print(np.sum(p_tilda[0:self.batch_size])) \n \n p_tilda_all[p_t_i,:] = p_tilda[0:self.batch_size]\n \n p_t_i +=1\n \n if report_w_norm: \n \n curr_w = self.W.get_value() \n \n w_norms[w_i] = np.sum(np.multiply(curr_w, curr_w))\n \n w_i +=1\n \n iter_end_time = timeit.default_timer()\n \n print('Training iteration took %f minutes'%\n ((iter_end_time - iter_start_time) / 60.))\n \n if report_pseudo_cost:\n \n avg_pseudo_cost_val = np.mean(avg_pseudo_cost_val)\n \n pseudo_losses.append(avg_pseudo_cost_val)\n \n print('Training epoch %d ---- average pseudo cost value: %f'\n %(epoch_index, avg_pseudo_cost_val))\n \n epoch_time1 = timeit.default_timer()\n \n print ('Training epoch took %f minutes'%\n ((epoch_time1 - epoch_time0)/60.))\n \n if save_every_epoch:\n \n params_to = os.path.join(exp_path,\"TRAINED_PARAMS_END.model\")\n \n self.save_model_params(params_to)\n \n end_train_time = timeit.default_timer()\n \n training_time = (end_train_time - start_train_time)/60.0\n\n print('Training process took %f minutes'%training_time)\n \n params_to = os.path.join(exp_path,\"TRAINED_PARAMS_END.model\")\n \n self.save_model_params(params_to)\n \n return p_tilda_all, pseudo_losses, training_time, w_norms", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n 
start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def train(self):\n train_dataloader = self.get_train_dataloader()\n\n if self.args.max_steps > 0:\n t_total = self.args.max_steps\n num_train_epochs = (\n self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1\n )\n else:\n t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)\n num_train_epochs = self.args.num_train_epochs\n\n lr_scheduler = orttrainer.optim.LinearWarmupLRScheduler(t_total, self.args.warmup_steps / float(t_total))\n\n loss_scaler = amp.DynamicLossScaler() if self.args.fp16 else None\n device = self.args.device.type\n\n device = 
f\"{device}:{self.args.device.index}\" if self.args.device.index else f\"{device}:0\"\n options = orttrainer.ORTTrainerOptions(\n {\n \"batch\": {\"gradient_accumulation_steps\": self.args.gradient_accumulation_steps},\n \"device\": {\"id\": device},\n \"mixed_precision\": {\"enabled\": self.args.fp16, \"loss_scaler\": loss_scaler},\n \"debug\": {\n \"deterministic_compute\": True,\n },\n \"utils\": {\"grad_norm_clip\": False},\n \"distributed\": {\n # we are running single node multi gpu test. thus world_rank = local_rank\n # and world_size = self.args.n_gpu\n \"world_rank\": max(0, self.args.local_rank),\n \"world_size\": int(self.world_size),\n \"local_rank\": max(0, self.args.local_rank),\n \"allreduce_post_accumulation\": True,\n },\n \"lr_scheduler\": lr_scheduler,\n }\n )\n\n param_optimizer = list(self.model.named_parameters())\n params = [\n {\n \"params\": [n for n, p in param_optimizer if \"bias\" in n or \"LayerNorm.weight\" in n],\n \"weight_decay_mode\": 1,\n },\n {\n \"params\": [n for n, p in param_optimizer if not (\"bias\" in n or \"LayerNorm.weight\" in n)],\n \"weight_decay_mode\": 1,\n },\n ]\n\n optim_config = optim.AdamConfig(params=params, lr=2e-5, do_bias_correction=True)\n self.model = orttrainer.ORTTrainer(self.model, self.model_desc, optim_config, options=options)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataloader.dataset))\n logger.info(\" Num Epochs = %d\", num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", self.args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n self.args.train_batch_size\n * self.args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", self.args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n tr_loss = 0.0\n logging_loss = 0.0\n train_iterator = trange(\n epochs_trained,\n int(num_train_epochs),\n desc=\"Epoch\",\n disable=self.args.local_rank not in [-1, 0],\n )\n\n for _epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=self.args.local_rank not in [-1, 0])\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n tr_loss += self._training_step(self.model, inputs)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n len(epoch_iterator) <= self.args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator)\n ):\n global_step += 1\n\n if self.args.local_rank in [-1, 0]:\n if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or (\n global_step == 1 and self.args.logging_first_step\n ):\n logs = {}\n if self.args.evaluate_during_training:\n results = self.evaluate()\n for key, value in results.items():\n eval_key = f\"eval_{key}\"\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / self.args.logging_steps\n\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n epoch_iterator.write(json.dumps({**logs, **{\"step\": global_step}}))\n\n if self.args.max_steps > 0 and global_step > self.args.max_steps:\n epoch_iterator.close()\n break\n if self.args.max_steps > 0 and global_step > 
self.args.max_steps:\n train_iterator.close()\n break\n\n logger.info(\"\\n\\nTraining completed. \\n\\n\")\n return TrainOutput(global_step, tr_loss / global_step)", "def train_step(self, batch_sample, epoch_it):\n batch_x = batch_sample['waveform']\n data_type = batch_sample['data_type']\n batch_target = {\n 'ov': batch_sample['ov'],\n 'sed': batch_sample['sed_label'],\n 'doa': batch_sample['doa_label'],\n }\n if self.cuda:\n batch_x = batch_x.cuda(non_blocking=True)\n batch_target['sed'] = batch_target['sed'].cuda(non_blocking=True)\n batch_target['doa'] = batch_target['doa'].cuda(non_blocking=True)\n\n\n self.optimizer.zero_grad()\n self.af_extractor.train()\n self.model.train()\n\n (batch_x, batch_target) = self.af_extractor((batch_x, batch_target,'train', data_type))\n batch_x = (batch_x - self.mean) / self.std\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n pred, pred_constraint = self.model(batch_x)\n if self.cfg['training']['model'] == 'EINV2':\n pred = self.model(batch_x)\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n loss_dict = self.losses.calculate_attention(pred, pred_constraint,batch_target, epoch_it,self.model)\n if self.cfg['training']['model'] == 'EINV2':\n loss_dict = self.losses.calculate(pred, batch_target, epoch_it, self.model)\n\n loss_dict[self.cfg['training']['loss_type']].backward(retain_graph=False)\n self.optimizer.step()\n\n self.train_losses['train_loss_all'] += loss_dict['all'].item()\n self.train_losses['train_loss_sed'] += loss_dict['sed'].item()\n self.train_losses['train_loss_doa'] += loss_dict['doa'].item()\n\n if self.cfg['training']['weight_constraints']:\n self.train_losses['train_loss_weight_orthogonal'] += loss_dict['loss_weight_orthogonal'].item()\n\n if self.cfg['training']['weight_constraints_1']:\n self.train_losses['train_loss_weight_orthogonal_1'] += loss_dict['loss_weight_orthogonal_1'].item()\n\n if self.cfg['training']['layer_constraints']:\n self.train_losses['train_loss_layer_orthogonal'] += loss_dict['loss_layer_orthogonal'].item()\n\n if self.cfg['training']['layer_constraints_1']:\n self.train_losses['train_loss_layer_orthogonal_1'] += loss_dict['loss_layer_orthogonal_1'].item()\n\n if self.cfg['training']['smoothness_loss']:\n self.train_losses['train_loss_doa_smoothness'] += loss_dict['loss_doa_smoothness'].item()", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n 
scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def train_step(model, batch, epoch, num_epochs, display_step, optimizer):\n epoch_loss = 0\n total_step = len(batch)\n model.train()\n FirstTime = False\n # Forward pass\n with torch.autograd.set_detect_anomaly(True): # Error catcher\n for step, sample in enumerate(batch):\n X = torch.tensor(sample[0], dtype=torch.float).to(device)\n y = torch.tensor(sample[1], dtype=torch.float).to(device)\n if args['notes_aggeregate'] == 'Mean' or args['notes_aggeregate'] == 'TimeAttn' or args['notes_aggeregate'] == 'Attn':\n text = [torch.tensor(x, dtype=torch.long).to(device) for x in sample[2]]\n attn = [torch.tensor(x, dtype=torch.long).to(device) for x in sample[3]]\n times = [torch.tensor(x, dtype=torch.float).to(device) for x in sample[4]]\n else:\n text = torch.tensor(sample[2], dtype=torch.long).to(device)\n attn = torch.tensor(sample[3], dtype=torch.long).to(device)\n times = [torch.tensor(x, dtype=torch.float).to(device) for x in sample[4]]\n \n # with open(os.path.join('Logs', args['checkpoint_path'] + '_ManLog.txt'), 'a+') as f:\n # print('len(text), text, X shape', len(text), text[0].shape, X.shape, file=f)\n # print('len(text), text, X shape', len(text), text[0].shape, X.shape)\n Logits, Probs = model(X, text, attn, times)\n \n Lambd = torch.tensor(0.01).to(device)\n l2_reg = model.get_l2()\n # print('Logits, y shape', Logits.shape, y.shape)\n # with open(os.path.join('Logs', args['checkpoint_path'] + '_ManLog.txt'), 'a+') as f:\n # print('Logits, y shape', Logits.shape, y.shape, file=f)\n loss = model.criterion(Logits, y)\n loss += Lambd * l2_reg\n with torch.no_grad():\n predicted = Probs.data > 0.5\n if not FirstTime:\n PredScores = Probs\n TrueLabels = y\n PredLabels = predicted\n FirstTime = True\n else:\n PredScores = torch.cat([PredScores, Probs])\n TrueLabels = torch.cat([TrueLabels, y])\n PredLabels = torch.cat([PredLabels, predicted])\n epoch_loss += loss.item() * y.size(0)\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (step + 1) % display_step == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, step + 1, total_step, loss.item()))\n # if (step+1) % 2 == 0:\n # break\n with torch.no_grad():\n prf_train = Evaluate(TrueLabels.detach().cpu(), PredLabels.detach().cpu(), PredScores.detach().cpu())\n prf_train['epoch_loss'] = epoch_loss / TrueLabels.shape[0]\n return prf_train", "def _train_epoch(self, epoch):\n self.model.train()\n\n\n total_loss = 0\n total_metrics = np.zeros(len(self.metrics))\n length = len(self.data_loader)\n total_pm25_loss = 0\n total_pm10_loss = 0\n pm25_loss = 0\n pm10_loss = 0\n\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n # print(output.shape, target.shape)\n target = target.squeeze()\n model_loss = self.loss(output, target)\n\n # print(output.shape, target.shape)\n # pm25_predict, pm10_predict = torch.chunk(output, 2, dim=1)\n # pm25_target, pm10_target = torch.chunk(target, 2, dim=1)\n # pm25_loss = self.loss(pm25_predict, pm25_target)\n # pm10_loss = self.loss(pm10_predict, 
pm10_target)\n # total_pm25_loss += pm25_loss.item()\n # total_pm10_loss += pm10_loss.item()\n\n l2_reg = torch.tensor(0.0).to(self.device)\n if self.config['trainer']['l2_regularization']:\n for param in self.model.parameters():\n l2_reg += torch.norm(param, p=2)\n loss = model_loss + self.config['trainer']['l2_lambda'] * l2_reg\n else:\n loss = model_loss\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), 1)\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.writer.add_scalar('loss', loss.item())\n total_loss += loss.item()\n # total_metrics += self._eval_metrics(output, target)\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f} model_loss: {:.6f} pm25_loss {:.6f} pm10_loss {:.6f} l2_loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item(),\n model_loss.item(),\n pm25_loss,\n pm10_loss,\n l2_reg.item()))\n\n if batch_idx == self.len_epoch:\n break\n\n log = {\n 'loss': total_loss / length,\n 'rmse_pm25_loss': np.sqrt(total_pm25_loss / length),\n 'rmse_pm10_loss': np.sqrt(total_pm10_loss / length)\n }\n\n\n val_log = self._valid_epoch(epoch)\n log.update(val_log)\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step(val_log['val_loss'])\n\n return log", "def train(x_train, y_train, x_valid, y_valid, config):\n train_acc = []\n valid_acc = []\n train_loss = []\n valid_loss = []\n best_model = None\n NUM_EPOCH = config['epochs']\n EARLY_STOP = config['early_stop']\n EARLY_STOP_EPOCH = config['early_stop_epoch']\n BATCH_SIZE = config['batch_size']\n model = NeuralNetwork(config=config)\n loss = float('inf')\n best_loss = float('inf')\n best_accuracy = 0\n patience = 0\n\n\n\n for i in range (NUM_EPOCH):\n\n x_train, y_train = shuffle(x_train, y_train)\n x_train = np.asarray(x_train)\n y_train = np.asarray(y_train)\n\n for j in range (0, len(x_train), BATCH_SIZE):\n start = j\n end = j + BATCH_SIZE\n if (end > len(x_train)):\n end = len(x_train)\n\n x = x_train[start:end]\n y = y_train[start:end]\n\n model.forward(x, y) \n model.backward()\n\n train_epoch_loss = model.forward(x_train, y_train)\n \n train_predict = np.zeros_like(model.y)\n train_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n train_accuracy = sum([1 if all(train_predict[i] == y_train[i]) else 0 for i in range(len(y_train))])/len(y_train)\n\n train_loss.append(train_epoch_loss)\n train_acc.append(train_accuracy)\n \n valid_epoch_loss = model.forward(x_valid, y_valid)\n valid_predict = np.zeros_like(model.y)\n valid_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n valid_accuracy = sum([1 if all(valid_predict[i] == y_valid[i]) else 0 for i in range(len(y_valid))])/len(y_valid)\n\n valid_loss.append(valid_epoch_loss)\n valid_acc.append(valid_accuracy)\n\n\n print(\"Epoch:\", i, \"Train Accuracy|Loss:\", train_accuracy,\"| \", train_epoch_loss, \"~|~ Valid: \", valid_accuracy, \" | \", valid_epoch_loss)\n if EARLY_STOP:\n if valid_epoch_loss > best_loss and patience >= EARLY_STOP_EPOCH:\n return train_acc, valid_acc, train_loss, valid_loss, best_model\n elif valid_epoch_loss > best_loss and patience < EARLY_STOP_EPOCH:\n patience += 1\n else:\n patience = 0\n if valid_epoch_loss < best_loss:\n best_loss = valid_epoch_loss\n best_accuracy = valid_accuracy\n best_model = copy.deepcopy(model)\n\n loss = valid_epoch_loss\n\n \n best_model = model \n return train_acc, valid_acc, train_loss, valid_loss, best_model", "def _train(self):\n self.network.train() # note that here 
we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def train(args,train_loader, model, criterion, optimizer, epoch, pruner, writer):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n \n\n model.train()\n total =0 \n correct = 0\n reg_loss = 0.0\n train_loss = 0.0\n end = time.time()\n\n for i, (inputs, target) in enumerate(train_loader):\n\n target = target.cuda()\n inputs = inputs.cuda()\n \n inputs, targets_a, targets_b, lam = mixup_data(inputs, target, args.alpha, True)\n inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))\n\n 
##input_var = torch.autograd.Variable(input)\n ##target_var = torch.autograd.Variable(target)\n\n\n outputs = model(inputs)\n ##outputs, Qs, Ys = model(inputs)\n ##loss = criterion(output, target_var)\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n## print(\"loss:\")\n## print(loss)\n## print(loss.item())\n## train_loss += loss.data[0]\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += target.size(0)\n correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n\n## prec1 = accuracy(output.data, target, topk=(1,))[0]\n## losses.update(loss.data.item(), input.size(0))\n## top1.update(prec1.item(), input.size(0))\n\n optimizer.zero_grad()\n\n\n\n## for y in Ys:\n## y.retain_grad()\n\n\n\n loss.backward()\n\n\n optimizer.step()\n\n\n\n\n if pruner is not None:\n pruner.prune(update_state=False)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n\n if 0:\n kwalt = epoch*len(train_loader)+i\n if writer is not None:\n for j,q in enumerate(Qs):\n writer.add_scalar(\"variances %d\" % j, q.cpu().numpy(), kwalt)\n\n for l,y in enumerate(Ys):\n if y.grad is not None:\n writer.add_scalar(\"grad %d\" % (l-j), getQ(y.grad).cpu().numpy(), kwalt)\n\n## writer.add_scalars(\"variancess\", { \"%d\"% j : q.cpu().numpy() for j,q in enumerate(Qs)}, i)\n\n\n\n if 0:\n if i % args.print_freq == 0:\n print(\n f\"Epoch: [{epoch}][{i}/{len(train_loader)}]\\t\"\n f\"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t\"\n f\"Loss {losses.val:.4f} ({losses.avg:.4f})\\t\"\n f\"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\"\n )\n niter = epoch*len(train_loader)+i\n\n batch_idx = i\n if writer is not None:\n writer.add_scalar('Train/Loss', train_loss/batch_idx, epoch)\n writer.add_scalar('Train/Prec@1', 100.*correct/total, epoch)", "def train(self,block=0): \n folder_save = os.path.join(self.path_save,'epoch')\n if self.mode=='first_layer':\n print('====================A new training starts!=============')\n # trains the first layer\n print('=================== Block number {} ==================='.format(0))\n # to store results\n loss_epochs_train = np.zeros(self.nb_epochs[0])\n loss_epochs_val = np.zeros(self.nb_epochs[0])\n loss_min_val = float('Inf')\n self.CreateFolders(0)\n folder = os.path.join(self.path_save,'block_'+str(0))\n self.CreateLoader(block=0)\n # defines the optimizer\n lr = self.lr_first_layer #learnig rate\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.parameters()),lr=lr) \n #==========================================================================================================\n # for the first layer\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[0]): \n print('This is epoch {} '.format(epoch))\n # sets training mode\n self.model.Layers[0].train()\n gc.collect()\n # goes through all minibatches\n print('This is traning stage')\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true_RGB, x_true, x_blurred_RGB , x_blurred, h] = minibatch # get the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n 
self.T_vec,self.t,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n hhat_vec=self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))#the restored kernel of \n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is {}'.format(loss_))\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(loss)))\n \n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() #performs a parameter update\n\n # tests on validation set\n print('This is validation stage')\n self.model.eval() # evaluation mode\n self.last_layer.eval() # evaluation mode\n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true_RGB, x_true, x_blurred_RGB , x_blurred, h] = minibatch # gets the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n __,__,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n hhat_vec=self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is {}'.format(loss_))\n # computes loss on validation set\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_current_val += torch.Tensor.item(loss)\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n \n if loss_min_val>loss_current_val:\n torch.save(self.model.state_dict(),os.path.join(folder,'trained_model_MinLossOnVal1.pt'))\n loss_min_val = loss_current_val\n\n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'block'+str(block),'train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train)\n with open(folder_results_train+'/loss_epoch_train.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n 
file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'block'+str(block),'val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'/loss_epoch_val.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n #==========================================================================================================\n # training is finished\n print('-----------------------------------------------------------------')\n print('Training of Block 0 is done.')\n self.save_OneBlock(block=0)\n print('-----------------------------------------------------------------')\n \n \n # calls the same function to start training of the next layer\n self.mode = 'greedy'\n self.train(block=1)\n \n#===========================================================================================================\n \n \n elif self.mode=='greedy':\n print('This is greedy processing')\n # trains the next layer\n print('=================== Block number {} ==================='.format(block))\n # to store results\n loss_epochs_train = np.zeros(self.nb_epochs[1])\n loss_epochs_val = np.zeros(self.nb_epochs[1])\n loss_min_val = float('Inf')\n self.CreateFolders(block)\n folder = os.path.join(self.path_save,'block_'+str(block))\n self.CreateLoader(block=block)\n # puts first blocks in evaluation mode: gradient is not computed\n self.model.GradFalse(block,self.mode)\n # defines the optimizer\n lr = self.lr_greedy\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.parameters()),lr=lr)\n #==========================================================================================================\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[1]):\n print('This is epoch {}'.format(epoch))\n self.model.Layers[block].train() # training mode\n gc.collect()\n # goes through all minibatches\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n #print('The batch size is ',batch_size)\n SNR_init = 0\n SNR_temp = 0\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,estimatedimage_vec,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode,block=block) \n hhat_vec=self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is 
{}'.format(loss_))\n \n # Computes and prints loss\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(loss)))\n \n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # tests on validation set\n print('This is validation stage')\n self.model.eval() # evaluation mode\n #self.last_layer.eval() # evaluation mode\n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is ',names) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n SNR_init = 0\n SNR_temp = 0\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,estimatedimage_vec,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode,block=block) \n hhat_vec =self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is {}'.format(loss_))\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n # computes loss on validation set\n loss_current_val += torch.Tensor.item(self.loss_fun_mh(hhat_vec, h))\n\n if loss_min_val>loss_current_val:\n torch.save(self.model.state_dict(),os.path.join(folder,'trained_model_MinLossOnVal1.pt'))\n loss_min_val = loss_current_val\n\n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'block'+str(block),'train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train)\n with open(folder_results_train+'/loss_epoch_train.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'block'+str(block),'val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'/loss_epoch_val.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n #==========================================================================================================\n # training is 
finished\n print('-----------------------------------------------------------------')\n print('Training of Block {} is done.'.format(block))\n self.save_OneBlock(block=block)\n print('-----------------------------------------------------------------')\n \n #calls the same function to start training of next block \n if block==self.nb_blocks-1:\n self.mode = 'lpp'\n self.train()\n else: \n self.train(block=block+1)\n\n#===========================================================================================================\n \n elif self.mode=='lpp':\n # trains the post-processing layer for RGB images\n print('start the post-processing layer for RGB images')\n # to store results\n loss_epochs_train = np.zeros(self.nb_epochs[2])\n loss_epochs_val = np.zeros(self.nb_epochs[2])\n loss_min_val = float('Inf')\n self.CreateFolders(self.nb_blocks)\n folder = os.path.join(self.path_save,'lpp')\n self.CreateLoader(block=self.nb_blocks)\n # puts first blocks in evaluation mode: gradient is not computed\n self.model.GradFalse(self.nb_blocks,self.mode) \n # defines the optimizer\n lr = self.lr_lpp\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.last_layer.parameters()),lr=lr)\n \n #==============================================================================================\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[2]):\n self.model.eval() \n self.last_layer.train() #training mode\n gc.collect()\n # goes through all minibatches\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n print(x_true.shape)\n estimatedimage_vec_ = estimatedimage_vec.reshape(batch,1,sizex,sizex)\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true[:,:,:,channel])\n x_blurred_ = x_blurred.reshape(batch,3,sizex,sizex)\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n # add the post-processing on RGB images (3 channels)\n U_new,V_new = RGBtoYUV(x_blurred,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec = self.sigmoid(RGB_new + self.last_layer(RGB_new)) \n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true[j,:,:,c],mk_vec[j,c,:,:])\n SNR_init[c] = ComputeSNR(x_true[j,:,:,c],x_blurred[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(-loss)))\n\n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # tests on validation set\n self.model.eval() # evaluation mode\n self.last_layer.eval() # 
evaluation mode\n \n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n estimatedimage_vec_ = estimatedimage_vec.reshape(batch,1,sizex,sizex)\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true[:,:,:,channel])\n x_blurred_ = x_blurred.reshape(batch,3,sizex,sizex)\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n # add the post-processing on RGB images (3 channels)\n U_new,V_new = RGBtoYUV(x_blurred,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec = self.sigmoid(RGB_new + self.last_layer(RGB_new))\n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true[j,:,:,c],mk_vec[j,c,:,:])\n SNR_init[c] = ComputeSNR(x_true[j,:,:,c],x_blurred[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n loss_current_val += torch.Tensor.item(loss)\n \n\n if loss_min_val>loss_current_val:\n torch.save(self.last_layer.state_dict(),os.path.join(folder,'trained_post-processing_MinLossOnVal.pt'))\n loss_min_val = loss_current_val\n \n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'lpp','train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train) \n with open(folder_results_train+'/SSIM_epoch_train_new.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,-loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'lpp','val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'SSIM_epoch_val_new.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,-loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n \n \n \n # training of greedy approach is finished\n print('-----------------------------------------------------------------')\n print('Training of lpp is done.')\n print('-----------------------------------------------------------------')\n return \n \n############################################################################################################## \n\n elif self.mode=='all_layers':\n # start the N-N training\n # trains several blocks as 
one\n print('=================== Block number {} to Block number {} ==================='.format(0,self.nb_blocks-1))\n # to store results\n loss_epochs_train = np.zeros(self.nb_epochs[1])\n loss_epochs_val = np.zeros(self.nb_epochs[1])\n loss_min_val = float('Inf')\n self.CreateFolders(self.nb_blocks-1)\n folder = os.path.join(self.path_save,'block_'+str(0)+'_'+str(self.nb_blocks-1))\n self.CreateLoader(0)\n # puts first blocks in evaluation mode: gradient is not computed\n self.model.GradFalse(self.nb_blocks,self.mode)\n # defines the optimizer\n lr = self.lr_N_N #learnig rate\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.parameters()),lr=lr,weight_decay=1e-4)\n\n# ==========================================================================================================\n #for the first layer\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[1]): \n print('This is epoch {} '.format(epoch))\n # sets training mode\n for k in range(0,self.nb_blocks):\n self.model.Layers[k].train() #training mode\n gc.collect()\n # goes through all minibatches\n print('This is traning stage')\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true_RGB, x_true, x_blurred_RGB, x_blurred, h] = minibatch # get the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true_RGB = Variable(x_true_RGB.type(self.dtype),requires_grad=False)\n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred_RGB = Variable(x_blurred_RGB.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true_RGB[:,:,:,channel])\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n T_vec,t,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n estimatedimage_vec_ = mk_vec.reshape(batch,1,sizex,sizex)\n hhat_vec=T_vec@newmh_vec+t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))#the restored kernel\n # lpp \n U_new,V_new = RGBtoYUV(x_blurred_RGB,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec_RGB = self.sigmoid(RGB_new + self.last_layer(RGB_new)) \n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true_RGB[j,:,:,c],mk_vec_RGB[j,c,:,:])\n SNR_init[c] = ComputeSNR(x_true_RGB[j,:,:,c],x_blurred_RGB[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec_RGB, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(-loss)))\n \n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() #performs a parameter update\n \n # tests on validation set\n print('This is validation 
stage')\n self.model.eval() # evaluation mode\n self.last_layer.eval() # evaluation mode\n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true_RGB, x_true, x_blurred_RGB, x_blurred, h] = minibatch # gets the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true_RGB = Variable(x_true_RGB.type(self.dtype),requires_grad=False)\n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred_RGB = Variable(x_blurred_RGB.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true_RGB[:,:,:,channel])\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n T_vec,t,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n estimatedimage_vec_ = mk_vec.reshape(batch,1,sizex,sizex)\n hhat_vec=T_vec@newmh_vec+t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n # lpp \n U_new,V_new = RGBtoYUV(x_blurred_RGB,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec_RGB = self.sigmoid(RGB_new + self.last_layer(RGB_new)) \n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true_RGB[j,:,:,c],mk_vec_RGB[j,c,:,:])\n SNR_init[c] = ComputeSNR(x_true_RGB[j,:,:,c],x_blurred_RGB[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec_RGB, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_current_val += torch.Tensor.item(loss)\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n\n if loss_min_val>loss_current_val:\n torch.save(self.last_layer.state_dict(),os.path.join(folder,'trained_post-processing_MinLossOnVal.pt'))\n torch.save(self.model.state_dict(),os.path.join(folder,'trained_model_MinLossOnVal.pt'))\n loss_min_val = loss_current_val\n\n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'block0_'+str(self.nb_blocks-1),'train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train)\n with open(folder_results_train+'/loss_epoch_train.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'block0_'+str(self.nb_blocks-1),'val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'/loss_epoch_val.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n 
file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n \n #==========================================================================================================\n # training is finished\n print('-----------------------------------------------------------------')\n print('Training of Block {} to Block {} + lpp is done.'.format(0,self.nb_blocks-1))\n print('-----------------------------------------------------------------')" ]
[ "0.7444253", "0.72861606", "0.7262928", "0.7260432", "0.725449", "0.7204505", "0.7139221", "0.7114966", "0.7096684", "0.7072422", "0.7066302", "0.70376366", "0.7002768", "0.699714", "0.69897795", "0.6973925", "0.6948941", "0.6938138", "0.693342", "0.6905513", "0.6872959", "0.6872879", "0.6863361", "0.68596274", "0.684404", "0.6839605", "0.6837036", "0.68302083", "0.6827399", "0.6826844" ]
0.7422293
1
Get the details for an individual vocab
def retrieve(self, request, pk=None): queryset = Vocab.objects.all() vocab = get_object_or_404(queryset, pk=pk) serializer = VocabDetailSerializer(vocab, context={"request": request}) return Response(serializer.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vocab(which_vocab):\n path = os.path.join(mg.WORKING_PATH, 'vocab', ''.join([which_vocab, '.json'\n ]))\n if os.path.exists(path):\n with open(path, 'r') as js:\n return(json.load(js))\n else:\n return(dict())", "def load_vocab():\n # vocab loaded internally at google\n unused = r.sp_model\n del unused\n return r", "def vocabularies(self, lang):\n\n payload = {'lang': lang}\n req = requests.get(self.api_base + 'vocabularies', params=payload)\n req.raise_for_status()\n return req.json()['vocabularies']", "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def vocab(self) -> Vocabulary:\n return self._model.vocab", "def vocabfn(self):\n return Vocab(self.COMMON_ATOMS)", "def retrieve(self, request, vocab, pk=None, format=None):\n pk = \"{}/{}\".format(vocab, pk)\n queryset = Collection.objects.all()\n collection = get_object_or_404(queryset, pk=pk)\n serializer = CollectionsSerializer(collection, context={\"request\": request})\n return Response(serializer.data)", "def vocab(self):\n num_words = -1\n if not self._vocab:\n c = self._conn.cursor()\n c.execute('select feature, censored, word_id from vocab')\n\n d = {}\n for ww, cc, ii in c:\n d[ii] = ww\n d[ww] = ii\n if cc == 1:\n self._censored.add(ww)\n num_words = max(ii, num_words)\n\n logger.info(\"Loaded vocab with %i words; %i censored\" % \\\n (len(d) / 2, len(self._censored)))\n\n # Add the start symbol\n if not START_SYMBOL in d:\n d[START_SYMBOL] = num_words + 1\n d[num_words + 1] = START_SYMBOL\n\n logger.info(\"Retrieved %i words\" % num_words)\n self._vocab = d\n\n return self._vocab", "def retrieve(self, request, vocab, collection, pk=None, format=None):\n pk = \"{}/{}/{}\".format(vocab, collection, pk)\n queryset = Concept.objects.all()\n concept = get_object_or_404(queryset, pk=pk)\n serializer = ConceptSerializer(concept, context={\"request\": request})\n return Response(serializer.data)", "def vocab(self: TokenMatcher) -> Vocab:\n return self._searcher.vocab", "def get_vocab():\n if data_dir is not None and vocab_filename is not None:\n vocab_filepath = os.path.join(data_dir, vocab_filename)\n if tf.gfile.Exists(vocab_filepath):\n tf.logging.info(\"Found vocab file: %s\", vocab_filepath)\n vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)\n return vocab_symbolizer\n else:\n raise ValueError(\"Vocab file does not exist: %s\" % vocab_filepath)\n return None", "def vocab():\n symbols = DEFAULT_SPECIAL_SYMBOLS + [\"mouse\", \"dog\", \"tree\"]\n return Vocabulary(symbols)", "def get_vocab(self, filename):\n return read_file(filename) #TODO(tilo): the-FAQ!", "def get_vocabs(datasets):\n print(\"Building vocab...\")\n vocab_intents = set()\n vocab_words = set()\n vocab_tags = set()\n for dataset in datasets:\n for intent, words, tags in dataset:\n vocab_intents.add(intent)\n vocab_words.update(words)\n vocab_tags.update(tags)\n print(\"- done. 
{} tokens\".format(len(vocab_words)))\n return vocab_intents, vocab_words, vocab_tags", "def load_vocab(vocab):\r\n\tvocab = [line.split()[0] for line in open(\r\n\t\t'{}{}'.format(pm.vocab_path, vocab), 'r', encoding='utf-8').read().splitlines()\r\n\t\t\t if int(line.split()[1]) >= pm.word_limit_size]\r\n\tword2idx_dic = {word: idx for idx, word in enumerate(vocab)}\r\n\tidx2word_dic = {idx: word for idx, word in enumerate(vocab)}\r\n\treturn word2idx_dic, idx2word_dic", "def getVocabulary(vocabulary_id):\n relex_web = getSite().restrictedTraverse('relex_web')\n key = KEY_STORAGE + \".\" + vocabulary_id\n vocabulary = json.loads(getattr(relex_web, key, \"[]\"))\n return vocabulary", "def list(self, request, vocab, format=None):\n # What we really want is the vocab, which contains a list of\n # collections\n return redirect(\"/vocabs/\" + vocab)", "def load_vocab(fn):\n return corpora.Dictionary.load(fn)", "def get_vocabulary(self, vocid, lang=None):\n\n payload = {}\n if lang is not None:\n payload['lang'] = lang\n req = requests.get(self.api_base + vocid + '/', params=payload)\n if req.status_code == 404:\n raise ValueError(req.text)\n req.raise_for_status()\n return req.json()", "def GetVocab(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def load_vocab(self):\n keys = []\n values = []\n with open(self.embed_file, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n key = line.split(\" \")[0]\n value = line.split(\" \")[1:]\n keys.append(key)\n values.append(value)\n # form <dict>\n # vocab = dict(zip(keys, values))\n return keys, values", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass", "def load_vocab(path: str) -> Vocab:\n return torch.load(path, map_location=lambda storage, loc: storage)['args'].vocab", "def _get_vocabulary(connection):\n print('---Getting vocabulary---')\n vocabulary = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM words;\")\n res = cursor.fetchall()\n num_words = 0\n for word in res:\n vocabulary[word[0]] = num_words\n num_words += 1\n return vocabulary", "def get_vocab(self):\n if os.path.exists(self.vocab_file) & self.vocab_from_file:\n f = open(self.vocab_file, \"rb\")\n vocab = pickle.load(f)\n self.word2idx = vocab.word2idx\n self.idx2word = vocab.idx2word\n f.close()\n else:\n self.build_vocab()\n with open(self.vocab_file, 'wb') as f:\n pickle.dump(self, f)", "def getVocabList():\n vocabList = pd.read_csv(os.path.join(folder, 'vocab.txt'),\n delimiter='\\t',\n names=['index', 'vocab'],\n index_col='index')\n return vocabList", "def vocabulary(self):\n return [recid for recid in self._model.vocab]", "def load_vocab(vocab_file):\n vocab = collections.OrderedDict()\n index = 0\n path = keras.utils.get_file(\"bert_vocab.txt\", vocab_file)\n with tf.io.gfile.GFile(path, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab", "def vocabulary(self):\n return self._vocabulary", "def trainingsVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n dictSearch = {'portal_type':'apyb.papers.training',\n 'sort_on':'sortable_title',\n 'review_state':'confirmed'}\n trainings = ct.searchResults(**dictSearch)\n trainings = [SimpleTerm(b.UID,b.UID,b.Title) for b in trainings]\n return SimpleVocabulary(trainings)" ]
[ "0.70155954", "0.6758839", "0.66370237", "0.658239", "0.641147", "0.6386239", "0.63847965", "0.6307677", "0.6295874", "0.6261076", "0.6252203", "0.6244632", "0.6216861", "0.6213393", "0.61783177", "0.6141274", "0.61408085", "0.61311096", "0.6130657", "0.6122938", "0.6116046", "0.60985565", "0.60604995", "0.60405827", "0.60272306", "0.6019794", "0.59652376", "0.5954675", "0.5949476", "0.59374124" ]
0.69035906
1
Return current position of cover. 0 is closed, 100 is open.
def current_cover_position(self): state = self.channel_data.get("state") if state: return 100 - state["shut"] return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_cover_position(self):\n return self._pos", "def current_cover_position(self):\n return 50", "def current_cover_position(self) -> int:\n return int(self._value)", "def current_cover_position(self) -> int | None:\n position = None\n if self.data.levelpercentage is not None:\n position = 100 - self.data.levelpercentage\n return position", "def current_cover_position(self) -> int | None:\n return self._position", "def current_cover_position(self) -> int | None:\n return self._current_position", "def current_cover_position(self) -> int:\n return self._device[\"current_state\"]", "def current_cover_position(self):\n return self._device.level * 100.0", "def current_cover_position(self):\n return hd_position_to_hass(self._current_cover_position)", "def _async_update_current_cover_position(self):\n _LOGGER.debug(\"Raw data update: %s\", self._shade.raw_data)\n position_data = self._shade.raw_data.get(ATTR_POSITION_DATA, {})\n if ATTR_POSITION1 in position_data:\n self._current_cover_position = int(position_data[ATTR_POSITION1])\n self._is_opening = False\n self._is_closing = False", "def get_pos(self):\r\n return self.pos", "def _get_pos(self):\n return self._pos", "def getPos(self):\n return self.__current_pos", "def get_pos(self):\n return self.pos", "def tell(self):\n _complain_ifclosed(self._closed)\n return self._position", "def pos(self):\n return self._pos", "def pos(self):\n return self._pos", "def current_cover_tilt_position(self):\n return 50", "def pos(self):\n return (self.raw - self.raw_zero) / self.ratio", "def get_pos(self):\n\n return self._pos", "def pos(self):\n return self._position", "def test_cover_set_position(self):\n with patch.dict(TYPES, {'WindowCovering': self.mock_type}):\n state = State('cover.set_position', 'open',\n {ATTR_SUPPORTED_FEATURES: 4})\n get_accessory(None, state, 2, {})", "def position(self):\n return self._position", "def get_pos(self):\n return self.rect.midtop", "def test_set_position_optimistic(self):\n with assert_setup_component(1, 'cover'):\n assert setup.setup_component(self.hass, 'cover', {\n 'cover': {\n 'platform': 'template',\n 'covers': {\n 'test_template_cover': {\n 'set_cover_position': {\n 'service': 'test.automation',\n },\n }\n }\n }\n })\n self.hass.start()\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test_template_cover')\n assert state.attributes.get('current_position') is None\n\n self.hass.services.call(\n DOMAIN, SERVICE_SET_COVER_POSITION,\n {ATTR_ENTITY_ID: ENTITY_COVER, ATTR_POSITION: 42}, blocking=True)\n self.hass.block_till_done()\n state = self.hass.states.get('cover.test_template_cover')\n assert state.attributes.get('current_position') == 42.0\n\n self.hass.services.call(\n DOMAIN, SERVICE_CLOSE_COVER,\n {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True)\n self.hass.block_till_done()\n state = self.hass.states.get('cover.test_template_cover')\n assert state.state == STATE_CLOSED\n\n self.hass.services.call(\n DOMAIN, SERVICE_OPEN_COVER,\n {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True)\n self.hass.block_till_done()\n state = self.hass.states.get('cover.test_template_cover')\n assert state.state == STATE_OPEN", "def position(self):\n if self.p:\n if self._finished:\n return None\n return self.p.get_position()*10", "def get_position(self):\n return self._pos", "def get_position(self):\n return self._pos", "def _cur_close(self):\n open = self._data['open'][self._offset]\n rel_close = self._data['close'][self._offset]\n return open * (1.0 + rel_close)", "def position(self):\n return 
self.__position" ]
[ "0.84004843", "0.8276662", "0.82298696", "0.8225084", "0.821339", "0.8193297", "0.8009616", "0.7996925", "0.7842341", "0.68242145", "0.6570729", "0.6545508", "0.6545042", "0.65387315", "0.635869", "0.634725", "0.634725", "0.63252664", "0.6286137", "0.6246093", "0.6218258", "0.62155235", "0.6211619", "0.6211564", "0.6175016", "0.61406374", "0.61207986", "0.61207986", "0.6120536", "0.61153805" ]
0.8492424
0
Return if the cover is closed.
def is_closed(self): if self.current_cover_position is None: return None return self.current_cover_position == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_closed(self):\n return self.current_cover_position <= 0", "def is_closed(self) -> bool | None:\n if self.current_cover_position is None:\n return None\n return self.current_cover_position == 0", "def is_closed(self):\n # if self.current_cover_position is not None:\n # return self.current_cover_position == 0\n return None", "def is_closed(self):\n if self.current_cover_position == None:\n return None\n elif self.current_cover_position == 0.0:\n return True\n return False", "def is_closed(self):\n return self._current_cover_position == MIN_POSITION", "def _is_closed(self) -> bool:\n return self._status == Status.CLOSED", "def isClosed(self) -> bool:\r\n\r\n return self.__is_closed", "def is_closed(self) -> bool:", "def isClosed(self):\n pass", "def is_close(self) -> bool:\n return not self.open", "def is_closed(self) -> bool:\n return self._closed or super().is_closed", "def is_closed(self):\n return self.state == CLOSED or self.state == CLOSING", "def is_closed(self) -> bool:\n return self._closed", "def is_closed(self) -> bool:\n return self._closed", "def is_closed(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def is_closed(self):\r\n return self.position == SinglePhaseBreaker.CLOSED", "def closed(self):\n return self.state == \"CLOSED\"", "def is_closed(self) -> bool | None:\n if self._is_open is None:\n return None\n return not self._is_open", "def closed(self):\n return self._close_state.is_set()", "def closed(self):\n\n return not self.open", "def _isclose(self):\n return self.dp.state()==PyTango.DevState.CLOSE", "def is_closed(self): # -> bool | Any:\n ...", "def is_closed(self) -> bool:\n lopen = self.left_boundary['open']\n ropen = self.right_boundary['open']\n return not lopen and not ropen", "def is_closed(self) -> bool:\n return self._device[\"current_state\"] < 1", "def isopen(self):\n return _image.image_isopen(self)", "def is_closed(self):\n raise NotImplementedError", "def is_closed(self) -> bool | None:\n if self.data.levelpercentage is None:\n return None\n return self.data.levelpercentage == 100 # type: ignore [no-any-return]", "def closed(self) -> bool:\n return self._closed", "def closed(self) -> bool:\n return self._closed", "def closed(self):\n return self.__closeEvent.is_set()" ]
[ "0.8593738", "0.85137665", "0.84728414", "0.8464409", "0.82930636", "0.7766478", "0.7753021", "0.77493626", "0.77096367", "0.7578489", "0.75766754", "0.7570424", "0.75386494", "0.75386494", "0.7503816", "0.7495912", "0.7482948", "0.74642336", "0.74502677", "0.74016404", "0.72888845", "0.7275678", "0.72320825", "0.72258234", "0.720577", "0.7191263", "0.7186632", "0.71784323", "0.71784323", "0.7161736" ]
0.85620844
1
Model with bottleneck, split, followed by second bottleneck and exp recovery in Eastern pop nu, or ancestral population size defaults to 1. B1= Time of the ancestral population bottleneck. P1= The ancestral population size after bottleneck.
def bottle_split_bottleExpansion((B1,P1,nuW,nuEF,nuEB,TE), (n1,n2), pts): #Define grid to use xx = yy = dadi.Numerics.default_grid(pts) #phi for equilibrium ancestral population phi = dadi.PhiManip.phi_1D(xx) # Now do the population bottleneck event. phi = dadi.Integration.one_pop(phi, xx, B1, P1) # grow the ancient population #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change. phi = dadi.PhiManip.phi_1D_to_2D(xx, phi) #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE) # function for growth in west nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE) # integrate the two populations phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func) #Return frequency spectrum fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy)) return fs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def config1() :\n data_name = \"titanic\" ### in data/input/\n model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py\n n_sample = 1000\n\n def post_process_fun(y): ### After prediction is done\n return int(y)\n\n def pre_process_fun(y): ### Before the prediction is done\n return int(y)\n\n\n model_dict = {'model_pars': {\n ### LightGBM API model #######################################\n 'model_class': model_class\n ,'model_pars' : {\n 'total_time_limit' : 20,\n 'algorithms' : 'auto',\n 'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',\n 'eval_metric' : 'auto'\n\n # mode='Explain',\n # ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,\n # stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',\n # golden_features='auto', features_selection='auto', start_random_models='auto',\n # hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)\n }\n\n , 'post_process_fun' : post_process_fun ### After prediction ##########################################\n , 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################\n\n\n ### Pipeline for data processing ##############################\n 'pipe_list': [\n #### coly target prorcessing\n {'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },\n\n\n {'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },\n {'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },\n\n #### catcol INTO integer, colcat into OneHot\n {'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },\n # {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },\n\n\n ### Cross_feat = feat1 X feat2\n # {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},\n\n\n #### Example of Custom processor\n #{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' }, \n\n\n ],\n }\n },\n\n 'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']\n\n ,'mlflow_pars' : None # {} ### Not empty --> use mlflow\n },\n\n 'data_pars': { 'n_sample' : n_sample,\n\n 'download_pars' : None,\n\n\n 'cols_input_type' : cols_input_type_1,\n ### family of columns for MODEL #########################################################\n # \"colnum\", \"colnum_bin\", \"colnum_onehot\", \"colnum_binmap\", #### Colnum columns\n # \"colcat\", \"colcat_bin\", \"colcat_onehot\", \"colcat_bin_map\", #### colcat columns\n # 'colcross_single_onehot_select', \"colcross_pair_onehot\", 'colcross_pair', #### colcross columns 'coldate', 'coltext',\n 'cols_model_group': [ 'colnum_bin',\n 'colcat_bin',\n # 'coltext',\n # 'coldate',\n #'colcross_pair',\n \n ### example of custom\n # 'col_myfun'\n ]\n\n ### Filter data rows ##################################################################\n ,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }\n\n }\n }\n\n ##### Filling Global parameters ############################################################\n model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )\n return model_dict", "def 
Bayes5DStats(numIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n problemBounds = {\"Bfield\": choco.uniform(10, 1300), \"T\": choco.uniform(50, 230), \"Btheta\": choco.uniform(0, 90), \"Etheta\": choco.uniform(0, 90), \"Bphi\": choco.uniform(0, 90)}\n\n # Set up the database for the chocolate optimiser.\n connection = choco.SQLiteConnection(\"sqlite:///bayes_5D_\" + str(rank) + \"_db.db\")\n\n if rank == 0:\n timeList = []\n bestFoMList = []\n\n # Define which solver will be used.\n solver = choco.Bayes(connection, problemBounds, utility_function = \"ei\", n_bootstrap = int(np.ceil(numIters/10)), clear_db = True)\n\n # Clear the database. TODO: To do this?\n connection.clear()\n\n # Start timing.\n startTime = time.time()\n bestFoM = 0\n\n # Start optimisation.\n for iteration in range(numIters):\n\n # Make one suggestion.\n try:\n token, nextParams = solver.next()\n except:\n print(\"Error suggesting a new point. Here are the last set of parameters sampled, and it's returned value:\")\n print(str(nextParams))\n print(\"Iteration number: \" + str(iteration))\n continue\n\n # Check what FoM this gives. Go negative as this is a minimisation routine.\n fEval = abs(Fitness5D(**nextParams))\n\n # Update best FoM.\n if fEval > bestFoM:\n bestFoM = fEval\n \n # Tell the optimiser about the result.\n solver.update(token, fEval)\n\n # One run complete.\n timeElapsed = time.time() - startTime\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(bestFoM, dest = 0, tag = 2)\n \n # Wait for all the processes to end.\n comm.Barrier()\n \n if rank == 0:\n # Add own data first.\n bestFoMList.append(bestFoM)\n timeList.append(timeElapsed)\n\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualFoM = None\n individualFoM = comm.recv(individualFoM, source = process + 1, tag = 2)\n\n bestFoMList.append(individualFoM)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgFoM = np.average(bestFoMList)\n avgFoMPerTime = np.average(np.divide(bestFoMList, timeList))\n avgFoMPerIter = np.average(np.divide(bestFoMList, numIters))\n absBestFoM = np.max(bestFoMList)\n\n print(\"Bayesian optimisation 5D testing complete! 
Here are the stats:\")\n print(\"Average runtime per run (s): \" + str(avgRuntime))\n print(\"Average FoM: \" + str(avgFoM))\n print(\"Average FoM per unit time: \" + str(avgFoMPerTime))\n print(\"Average FoM per unit iteration: \" + str(avgFoMPerIter))\n print(\"Absolute best FoM determined: \" + str(absBestFoM))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def test_bottleneck(self):\n # import the experiment variable from the example\n exp = bottleneck_example(20, 5, render=False)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)", "def objective(params, n_folds=N_FOLDS):\n\n # Keep track of evals\n global ITERATION\n\n ITERATION += 1\n\n # Retrieve the subsample if present otherwise set to 1.0\n subsample = params['boosting_type'].get('subsample', 1.0)\n\n # Extract the boosting type\n params['boosting_type'] = params['boosting_type']['boosting_type']\n params['subsample'] = subsample\n\n # Make sure parameters that need to be integers are integers\n for parameter_name in ['max_depth', 'subsample_for_bin', 'min_child_samples','min_child_weight','num_parallel_tree']:\n params[parameter_name] = int(params[parameter_name])\n\n start = timer()\n\n print('params',params)\n # Perform n_folds cross validation\n cv_results = xgb.cv(params, train_set,\n num_boost_round=3000,\n nfold=n_folds,\n stratified=True,\n early_stopping_rounds=100,\n feval=tpr_weight_funtion_xgb_cv,\n seed=50,\n verbose_eval=True,\n\n )\n\n print('cv_results\\n',type(cv_results),'\\n',cv_results)\n\n run_time = timer() - start\n\n # Extract the best score\n best_score = np.min(cv_results['test-TPR-mean'])\n\n # Loss must be minimized\n loss = best_score\n\n TPR_std = cv_results[cv_results['test-TPR-mean']==best_score]['test-TPR-std'].values[0]\n print('TPR_stdv', TPR_std)\n\n\n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmin(cv_results['test-TPR-mean']) + 1)\n\n # Write to the csv file ('a' means append)\n of_connection = open(out_file, 'a')\n writer = csv.writer(of_connection)\n writer.writerow([loss,TPR_std, params, ITERATION, n_estimators, run_time])\n\n # Dictionary with information for evaluation\n return {'loss': loss,'TPR_std':TPR_std, 'params': params, 'iteration': ITERATION,\n 'estimators': n_estimators,\n 'train_time': run_time, 'status': STATUS_OK}", "def one_step(self, T):\n print(\"\\n--------------------\")\n print(\"------ STEP\", T ,\"------\")\n print(\"--------------------\\n\")\n\n print('Splitting data')\n X1_train, Y1_train, X2_train, Y2_train = self.cut_Z(self.Z_train, T)\n print(\"Train: X1 shape:\", X1_train.shape, \"X2 shape:\", X2_train.shape, '\\n')\n \n nSegments = X1_train.shape[1]\n \n nb_proc=12 #number of simultaneous processus to use during trainning\n \n print(\"Training with\", nb_proc,\"processus...\")\n\n start = time.time()\n pool = mp.Pool(nb_proc, init, [X1_train, Y1_train, X2_train, Y2_train])\n \n results = pool.map(fit_double_lasso, range(nSegments))\n end=time.time()\n\n print(\"Training done in \", end - start, \"seconds\")\n pool.close()\n \n #Computing the MSE for this step\n mse = self.compute_mse(results, T, nSegments)\n print('MSE:', mse)\n \n \n alphas = [] \n \n for i in range(nSegments):\n alphas.append([results[i][0].alpha_,results[i][1].alpha_]) #alpha[i][0] = alpha pour la premiere periode pour la section i\n alphas = np.array(alphas)\n return mse, alphas", "def main():\n target = 'Coding:Level1'\n 
output_root = f'problem_5_output/{target.replace(\":\", \"_\")}'\n if not os.path.exists(output_root):\n os.makedirs(output_root, exist_ok=True)\n\n # dictionary of parameter grids, one for each process\n param_grids = {\n 'early_stopping': ParameterGrid([\n {\n 'patience': [15], # , 20, 40]\n },\n ]),\n 'fit': ParameterGrid([\n {\n 'batch_size': [128], # , 64, 128, 256],\n 'epochs': [16], # 20, 50],\n },\n ]),\n 'model_preprocessor': ParameterGrid([\n {\n 'num_unique_words': [5000], # 4000, 1000, 6000, 10000],\n 'max_sequence_length': [150], # 50, 75, 100, 125, 150, 200],\n },\n ]),\n 'model': ParameterGrid([\n # {\n # Dense single hidden layer model hyperparameters:\n # 'name': ['dense_h1'],\n # 'embedded_dims': [8], # , 16, 32, 64, 128, 256],\n # 'num_units_h1': [8], # , 16, 32, 64, 128, 256],\n # 'drop_h1': [None], # , 0.1, 0.2, 0.25, 0.5, 0.75],\n # 'optimizer': ['nadam', 'adam'],\n # 'learning_rate': [None], # , 0.01, 0.001],\n # 'activation': ['relu', 'tanh'],\n # },\n # {\n # Dense double hidden layer model hyperparameters:\n # 'name': ['dense_h2'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [128],\n # 'num_units_h2': [128],\n # 'drop_h1': [None],\n # 'drop_h2': [0.5],\n # 'optimizer': ['nadam'],\n # 'activation': ['relu'],\n # 'learning_rate': [0.01],\n # },\n # {\n # CNN single hidden layer model hyperparameters\n # 'name': ['conv_h1'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [32], # , 64, 256],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'drop_embed': [0.2], # , 0.5],\n # 'activation': ['relu', 'tanh'],\n # 'optimizer': ['adam', 'nadam']\n # },\n # {\n # CNN double hidden layer model hyperparameters\n # 'name': ['conv_h2'],\n # 'embedded_dims': [128], # , 64, 32, 16, 8],\n # 'num_units_h1': [32], # , 64, 128],\n # 'drop_h2': [0.5], # , 0.75, 0.25, 0.1],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'num_units_h2': [128], # , 64, 32, 16, 8],\n # 'drop_embed': [0.2], # , 0.50],\n # 'activation': ['relu'],\n # 'optimizer': ['adam'], # , 'nadam'],\n # },\n # {\n # CNN double hidden layer model hyperparameters\n # 'name': ['conv_h2.1'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [32], # , 64, 128],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'drop_embed': [0.2], # , 0.5],\n # 'activation': ['relu'],\n # 'optimizer': ['adam'], # , 'nadam']\n # },\n # {\n # RNN single hidden layer model hyperparameters\n # 'name': ['rnn_h1'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'num_units_h1': [128],\n # 'optimizer': ['nadam'],\n # 'learning_rate': [0.01]\n # },\n # {\n # # LSTM double hidden layer (second layer dense FC) model hyperparameters\n # 'name': ['lstm_h1'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'drop_h1': [0.5],\n # 'num_units_h1': [128],\n # 'optimizer': ['nadam'],\n # },\n # {\n # LSTM double hidden layer (second layer dense FC) model hyperparameters\n # 'name': ['lstm_h2'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'num_units_h1': [128],\n # 'drop_h1': [0.5],\n # 'num_units_h2': [128],\n # 'optimizer': ['nadam'],\n # 'activation': ['relu']\n # },\n # {\n # # Bi-directional LSTM single hidden layer model hyperparameters\n # 'name': ['bi_lstm_h1'],\n # 'embedded_dims': [32], # , 64, 128],\n # 'drop_embed': [0.2], # , 0.25, 0.5],\n # 'num_units_h1': [32], # , 64, 128],\n # 'drop_h1': [0.2], # , 0.25, 0.5],\n # 'optimizer': ['nadam', 'adam']\n # },\n # {\n # Bi-directional LSTM double hidden layer (second layer Bi-LSTM) model hyperparameters\n # 'name': ['bi_lstm_h2'],\n # 'embedded_dims': [32], # , 64, 128],\n # 'num_units_h1': [32], # , 64, 
128],\n # 'num_units_h2': [32], # , 64, 128],\n # 'drop_h1': [0.25, 0.5],\n # 'drop_h2': [0.25, 0.5],\n # 'optimizer': ['nadam', 'adam']\n # },\n {\n # Multi Convolutional model hyperparameters\n 'name': ['multi_conv_h3_s2'],\n 'drop_embed': [0.5], # , 0.3],\n 'embedded_dims': [128], # , 64, 128, 256],\n 'num_units_h1': [128], # , 64, 128, 256],\n 'num_units_h2': [128], # , 64, 128, 256],\n 'num_units_h3': [128], # , 64, 128, 256],\n 'num_units_h4': [128], # , 64, 128, 256],\n 'k_conv_h1': [3],\n 'k_conv_h2': [2],\n 'activation': ['relu'], # , 'tanh'],\n 'drop_h3': [0.2], # , 0.2, 0.25, 0.5, 0.75],\n 'optimizer': ['adam'], # 'nadam']\n },\n # {\n # # Multi Convolutional model hyperparameters\n # 'name': ['multi_conv_h3_s3'],\n # 'drop_embed': [0.5], # , 0.3],\n # 'embedded_dims': [32], # , 64, 128, 256],\n # 'num_units_h1': [32], # , 64, 128, 256],\n # 'num_units_h2': [32], # , 64, 128, 256],\n # 'num_units_h3': [32], # , 64, 128, 256],\n # 'num_units_h4': [32], # , 64, 128, 256],\n # 'k_conv_h1': [3],\n # 'k_conv_h2': [2],\n # 'k_conv_h3': [4],\n # 'k_conv_h4': [4],\n # 'activation': ['relu', 'tanh'],\n # 'drop_h4': [0.1], # , 0.2, 0.25, 0.5, 0.75],\n # 'optimizer': ['adam', 'nadam']\n # },\n ]),\n 'preprocessor': ParameterGrid([\n # {\n # 'do_clean': [False],\n # 'pad_type': ['pre', 'post'],\n # 'trunc_type': ['pre', 'post'],\n # },\n {\n 'do_clean': [True],\n 'pad_type': ['post'], # , 'post'],\n 'trunc_type': ['post'], # 'post'],\n 'omit_stopwords': [False],\n 'ignore_urls': [False],\n 'fix_contractions': [True],\n 'stem': [True],\n 'remove_foreign_characters': [False], # , False],\n 'lower': [True], # , False],\n 'remove_punctuation': [True], # , False],\n 'bigrams': [True], # , False]\n },\n ])\n }\n\n def prod(a):\n if len(a) == 0:\n return 1\n return a[0] * prod(a[1:])\n\n num_models = prod([len(pg) for pg in param_grids.values()])\n\n param_grid_names = sorted(list(param_grids.keys()))\n param_grid_list = [param_grids[k] for k in param_grid_names]\n\n all_params, best_params = assemble_results(output_root)\n\n if CHECK_ONLY:\n for i, params in enumerate(itertools.product(*param_grid_list[3:5])):\n params = {k: v for k, v in zip(param_grid_names[3:5], params)}\n print(i, params)\n Preprocessor(**params['preprocessor'], **params['model_preprocessor'])\n\n for i, params in enumerate(itertools.product(*param_grid_list[2:4])):\n params = {k: v for k, v in zip(param_grid_names[2:4], params)}\n print(i, params)\n build_fn(num_classes=3, **params['model'], **params['model_preprocessor'])\n return\n\n for i, params in enumerate(itertools.product(*param_grid_list)):\n mem = psutil.virtual_memory()\n percent_used = 1 - mem.available / mem.total\n print(f'{percent_used:.2%} memory used')\n if percent_used > 0.80:\n # exit failure\n print('Exiting (-1)')\n exit(-1)\n\n params = {k: v for k, v in zip(param_grid_names, params)}\n print(f'\\n{i + 1}/{num_models}: {params}\\n')\n\n if params in all_params:\n # skip this one because we already ran it.\n continue\n\n if best_params is not None:\n # print best performance so far\n print(f'best params: {best_params}')\n print(f'best val loss: {best_params[\"results\"][\"valid\"][\"loss\"]:.6f}')\n print(f'best val acc: {best_params[\"results\"][\"valid\"][\"accuracy\"]:.4%}')\n\n # create a new output directory with path to model file.\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H.%M.%S.%f\")\n output_dir = os.path.join(output_root, date)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_file = os.path.join(output_dir, 
'model.h5')\n\n # get the preprocessed training and validation data\n preprocess_time = time.time()\n classes, data_sets, set_names = get_xy(Preprocessor(**params['preprocessor'], **params['model_preprocessor']),\n target=target)\n ((x_train, y_train), (x_valid, y_valid)) = data_sets\n preprocess_time -= time.time()\n\n # build and compile model\n model = build_fn(num_classes=len(classes), **params['model'], **params['model_preprocessor'])\n\n # setup callbacks\n early_stopping = EarlyStopping(monitor='val_loss', verbose=1, **params['early_stopping'])\n model_checkpoint = ModelCheckpoint(\n filepath=model_file,\n save_weights_only=False, save_freq='epoch',\n save_best_only=True, monitor='val_loss', verbose=1)\n callbacks = [early_stopping, model_checkpoint]\n\n # Use sample weights to treat classes equally in loss and accuracy.\n sample_weight = get_sample_weight(y_train)\n sample_weight_valid = get_sample_weight(y_valid)\n\n # fit the model\n train_time = time.time()\n model.fit(x=x_train, y=y_train, sample_weight=sample_weight, verbose=1,\n validation_data=(x_valid, y_valid, sample_weight_valid), callbacks=callbacks, **params['fit'])\n train_time -= time.time()\n\n # load the best model (last one saved)\n model = load_model(model_file, compile=True)\n\n # compute results\n results = get_performance(model, data_sets, set_names)\n results['time'] = {'train': train_time, 'preprocess': preprocess_time}\n\n print(pd.DataFrame(data=results).T)\n params['results'] = results\n\n # save params and results\n with open(os.path.join(output_dir, 'params.json'), 'w') as fp:\n json.dump(params, fp)\n\n # save a copy of *this* Python file.\n shutil.copyfile(__file__, os.path.join(output_dir, 'roatan.py'))\n\n # for convenience, show the validation loss and accuracy in a file name in the same directory.\n result_file_name = f'{params[\"results\"][\"valid\"][\"loss\"]:.6f}_{params[\"results\"][\"valid\"][\"accuracy\"]:.4f}.out'\n with open(os.path.join(output_dir, result_file_name), 'w'):\n pass\n\n # check_model(output_dir)\n\n if best_params is None or (params['results']['valid']['loss'] < best_params['results']['valid']['loss']):\n best_params = params\n\n # assemble results from all runs into one CSV file in output root.\n assemble_results(output_root)", "def SP_init(train_data, test_data, n_cols, n_proxim_con, perm_thresh, perm_inc, perm_dec, min_overlap, n_winners, beta_boost, T_boost_speed, verbose, pixel_size):\n\n if(pixel_size == None):\n pixel_size = 28\n\n # Load MNIST data input_size (aka nn) of 256 or 1024\n input_size = len(train_data[1])\n # print(len(train_data[1]))\n\n # Initialize synapses and permanence arrays\n syn_index = np.random.randint(0, input_size, (n_cols, n_proxim_con))\n syn_array = np.zeros((n_cols, input_size), dtype=int)\n # syn_array[syn_index] = 1\n\n # Synapses array\n for i in range(n_cols):\n syn_array[i, syn_index[i]] = 1\n\n # syn_array = np.random.randint(0, 2, (n_cols, input_size))\n perm_array = np.random.uniform(0, 1, (n_cols, input_size))\n perm_array = syn_array * perm_array\n\n # Initialize empty SDR array\n # overlap_scores = np.zeros([1, n_cols])\n sdr_train_array = np.zeros((len(train_data), n_cols), dtype=int)\n sdr_test_array = np.zeros((len(test_data), n_cols), dtype=int)\n\n # Initialize empty boosting arrays; time-averaged activation level\n time_avg_act = np.zeros([1, n_cols])\n prev_time_avg_act = np.zeros([1, n_cols])\n boosting = np.ones([1, n_cols])\n\n # Initialize metric arrays\n sparse_train_array = np.zeros(([len(train_data), 1]))\n 
sparse_test_array = np.zeros(([len(test_data), 1]))\n\n # Main code\n train_en = True\n for epoch in range(0, 2):\n input_set = train_data\n if train_en == False:\n input_set = test_data\n\n for iter in range(0, len(input_set)):\n # Calculate overlap scores\n overlap_scores = np.dot((syn_array * (perm_array >= perm_thresh)), input_set[iter, :].transpose()) \\\n * boosting\n\n # Initialize SDR (activations of cols)\n sdr = np.zeros(([1, n_cols]), dtype=int)\n\n # Select the winners\n for i in range(n_winners):\n win_val = np.max(overlap_scores)\n win_index = np.argmax(overlap_scores)\n if(win_val >= min_overlap):\n sdr[0, win_index] = 1\n overlap_scores[0, win_index] = 0\n\n #num_wins = sum(sdr)\n #print('This is num_wins')\n #print(num_wins)\n\n # Calculating activation level current and then previous, a_bar(t) and a_bar(t-1)\n if iter >= T_boost_speed:\n time_avg_act = ((T_boost_speed - 1) * prev_time_avg_act + sdr) / T_boost_speed\n prev_time_avg_act = time_avg_act\n\n # Calculating mini column neighborhood\n recent_act = (1/abs(n_cols)) * np.sum(time_avg_act)\n\n # Calculate boosting for next time\n boosting = np.exp(-beta_boost * (time_avg_act - recent_act))\n\n if(train_en == True):\n # Update permanence values for learning -> Hebbian learning\n z = sdr.transpose() * syn_array\n polar_input = np.copy(input_set[iter, :])\n polar_input[polar_input == 1] = perm_inc\n polar_input[polar_input == 0] = perm_dec\n delta_perm = polar_input * z\n perm_array = perm_array + delta_perm\n perm_array[perm_array > 1] = 1\n perm_array[perm_array < 0] = 0\n\n # Add SDR to array and calculate metrics\n # Metrics include: sparseness\n if train_en == False:\n sdr_test_array[iter, :] = sdr\n sparse_test_array[iter, 0] = sparseness(n_cols, sdr)\n else:\n sdr_train_array[iter, :] = sdr\n sparse_train_array[iter, 0] = sparseness(n_cols, sdr)\n\n # You are set!!!\n\n if(verbose):\n pixel_sdr = int(n_cols**(0.5))\n # Plot the image\n sdr_image = np.reshape(sdr, (pixel_sdr, pixel_sdr))\n\n # Plot the image\n image = np.reshape(train_data[iter, :], (pixel_size, pixel_size))\n plt.figure(1)\n plt.subplot(211)\n plt.imshow(image, cmap='gray_r')\n plt.title('Train')\n\n # Plot the sdr\n plt.subplot(212)\n plt.imshow(sdr_image, cmap='gray_r')\n plt.title('SDR')\n plt.tight_layout()\n plt.show()\n if(iter % 10 == 0):\n print(iter)\n\n train_en = False\n return sdr_train_array, sdr_test_array, sparse_train_array, sparse_test_array", "def lgb_hyperopt(data, labels, num_evals=1000, n_folds=5, diagnostic=False):\r\n LGBM_MAX_LEAVES = 2**11 #maximum number of leaves per tree for LightGBM\r\n LGBM_MAX_DEPTH = 25 #maximum tree depth for LightGBM \r\n EVAL_METRIC_LGBM_CLASS = 'f1'\r\n\r\n def lgb_f1_score(y_hat, data):\r\n y_true = data.get_label()\r\n y_hat = np.round(y_hat)\r\n return 'f1', f1_score(y_true, y_hat), True\r\n\r\n print('Running {} rounds of LightGBM parameter optimisation:'.format(num_evals))\r\n #clear space\r\n \r\n integer_params = ['max_depth',\r\n 'num_leaves',\r\n 'max_bin',\r\n 'min_data_in_leaf',\r\n 'min_data_in_bin']\r\n \r\n def objective(space_params):\r\n \r\n #cast integer params from float to int\r\n for param in integer_params:\r\n space_params[param] = int(space_params[param])\r\n \r\n #extract nested conditional parameters\r\n if space_params['boosting']['boosting'] == 'goss':\r\n top_rate = space_params['boosting'].get('top_rate')\r\n other_rate = space_params['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 
0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n space_params['top_rate'] = top_rate\r\n space_params['other_rate'] = other_rate\r\n \r\n subsample = space_params['boosting'].get('subsample', 1.0)\r\n space_params['boosting'] = space_params['boosting']['boosting']\r\n space_params['subsample'] = subsample\r\n \r\n cv_results = lgb.cv(space_params, train, nfold = n_folds, stratified=True,\r\n early_stopping_rounds=100, seed=42, feval=lgb_f1_score)\r\n \r\n best_loss = -cv_results['f1-mean'][-1]\r\n\r\n return{'loss':best_loss, 'status': STATUS_OK }\r\n \r\n train = lgb.Dataset(data, labels)\r\n \r\n #integer and string parameters, used with hp.choice()\r\n boosting_list = [{'boosting': 'gbdt',\r\n 'subsample': hp.uniform('subsample', 0.5, 1)},\r\n {'boosting': 'goss',\r\n 'subsample': 1.0,\r\n 'top_rate': hp.uniform('top_rate', 0, 0.5),\r\n 'other_rate': hp.uniform('other_rate', 0, 0.5)}] #if including 'dart', make sure to set 'n_estimators'\r\n\r\n objective_list_reg = ['huber', 'gamma', 'fair', 'tweedie']\r\n objective_list_class = ['binary', 'cross_entropy']\r\n objective_list = objective_list_class\r\n is_unbalance_list = [True]\r\n\r\n space ={'boosting' : hp.choice('boosting', boosting_list),\r\n 'num_leaves' : hp.quniform('num_leaves', 2, LGBM_MAX_LEAVES, 1),\r\n 'max_depth': hp.quniform('max_depth', 2, LGBM_MAX_DEPTH, 1),\r\n 'max_bin': hp.quniform('max_bin', 32, 255, 1),\r\n 'min_data_in_leaf': hp.quniform('min_data_in_leaf', 1, 256, 1),\r\n 'min_data_in_bin': hp.quniform('min_data_in_bin', 1, 256, 1),\r\n 'min_gain_to_split' : hp.quniform('min_gain_to_split', 0.1, 5, 0.01),\r\n 'lambda_l1' : hp.uniform('lambda_l1', 0, 5),\r\n 'lambda_l2' : hp.uniform('lambda_l2', 0, 5),\r\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.2)),\r\n 'metric' : None, \r\n 'objective' : hp.choice('objective', objective_list),\r\n 'feature_fraction' : hp.quniform('feature_fraction', 0.5, 1, 0.01),\r\n 'bagging_fraction' : hp.quniform('bagging_fraction', 0.5, 1, 0.01),\r\n 'is_unbalance' : hp.choice('is_unbalance', is_unbalance_list)\r\n }\r\n\r\n trials = Trials()\r\n best = fmin(fn=objective,\r\n space=space,\r\n algo=tpe.suggest,\r\n max_evals=num_evals, \r\n trials=trials)\r\n \r\n #fmin() will return the index of values chosen from the lists/arrays in 'space'\r\n #to obtain actual values, index values are used to subset the original lists/arrays\r\n #extract nested conditional parameters\r\n try:\r\n if best['boosting']['boosting'] == 'goss':\r\n top_rate = best['boosting'].get('top_rate')\r\n other_rate = best['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n except:\r\n if boosting_list[best['boosting']]['boosting'] == 'goss':\r\n top_rate = best['top_rate']\r\n other_rate = best['other_rate']\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n best['boosting'] = boosting_list[best['boosting']]['boosting']#nested dict, index twice\r\n best['metric'] = metric_list[best['metric']]\r\n best['objective'] = objective_list[best['objective']]\r\n best['is_unbalance'] = is_unbalance_list[best['is_unbalance']]\r\n \r\n #cast 
floats of integer params to int\r\n for param in integer_params:\r\n best[param] = int(best[param])\r\n \r\n print('{' + '\\n'.join('{}: {}'.format(k, v) for k, v in best.items()) + '}')\r\n if diagnostic:\r\n return(best, trials)\r\n else:\r\n return(best)", "def ensemble_models(input_data: str, test_file=None,models=None,\n models_file=None,\n genome_handler_file=None,\n top_n=10,\n trained=True,\n ensemble_method=\"average\",\n batch_size=64, nb_epoch=100, early_stop=None, mod=None,\n max_x_length=50, min_rt=0, max_rt=120, unit=\"s\", out_dir=\"./\", prefix=\"test\"):\n from AutoSeq import GenomeHandler\n\n # print(\"The number of models:\", len(models))\n\n # test data\n X_test = np.empty(1)\n Y_test = np.empty(1)\n\n y_pr = []\n score = []\n\n model_list = dict()\n\n\n if genome_handler_file is not None:\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=max_x_length,\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir)\n model_list['dp_model'] = dict()\n model_list['max_x_length'] = X_train.shape[1]\n model_list['aa'] = out_dir + \"/aa.tsv\"\n print(\"max_x_length: %s\" % (max_x_length))\n # read models from genetic search result configure file\n optimizer_name = dict()\n if models_file is not None:\n models = dict()\n gn = pd.read_csv(models_file)\n select_models = gn.sort_values('Val Accuracy', ascending=True).head(top_n)\n genome_handler = pickle.load(open(genome_handler_file, \"rb\"))\n genome_handler.input_shape = X_train.shape[1:]\n select_models = np.array(select_models.iloc[:, 0:(select_models.shape[1] - 2)])\n for i in range(0, select_models.shape[0]):\n #models[i], optimizer_name = genome_handler.decodeOneHot(select_models[i],return_optimizer=True)\n models[i], optimizer_name[i] = genome_handler.decodeOneHotPlusLSTM(select_models[i], return_optimizer=True)\n\n trained = False\n else:\n print(\"\")\n\n if not trained:\n print(\"Training ...\")\n # For each model, train the model\n for (name, model) in models.items():\n print(\"Train model:\", name)\n # perform sample specific training\n res_map = train_model(input_data=input_data, test_file=test_file, batch_size=batch_size,\n nb_epoch=nb_epoch, early_stop=early_stop, mod=mod,\n max_x_length=max_x_length, min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir, prefix=str(name), model=model,\n optimizer_name=optimizer_name[name])\n\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n model_file_path = out_dir + \"/\" + model_file_name\n res_map[\"model\"].save(model_file_path)\n\n model_list['dp_model'][name] = model_file_path\n\n del res_map\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n else:\n print(\"The models have been trained!\")\n\n\n else:\n\n ## Transfer learning\n with open(models_file, \"r\") as read_file:\n model_list = json.load(read_file)\n\n model_folder = os.path.dirname(models_file)\n aa_file = os.path.basename(model_list['aa'])\n aa_file = model_folder + \"/\" + aa_file\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=model_list['max_x_length'],\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir,aa_file=aa_file)\n\n\n new_model_list = dict()\n new_model_list['dp_model'] = dict()\n for (name, dp_model_file) in model_list['dp_model'].items():\n print(\"\\nDeep learning model:\", name)\n # keras model evaluation: loss and accuracy\n # load model\n model_name = 
os.path.basename(dp_model_file)\n model_full_path = model_folder + \"/\" + model_name\n\n model = load_model(model_full_path)\n #new_model = change_model(model, X_train.shape[1:])\n new_model = model\n\n print(\"Perform transfer learning ...\")\n n_layers = len(new_model.layers)\n print(\"The number of layers: %d\" % (n_layers))\n #for layer in new_model.layers:\n # layer_name = str(layer.name)\n # if layer_name.startswith(\"dense\"):\n # break\n # else:\n # layer.trainable = False\n # print(\"layer (frozen:True): %s\" % (layer_name))\n\n new_model.compile(loss='mean_squared_error',\n ## In this case, we cannot change the learning rate.\n optimizer=model.optimizer,\n #optimizer=Adam(lr=0.0001),\n #optimizer=SGD(lr=1e-3, decay=1e-4, momentum=0.9, nesterov=True),\n metrics=['mse', 'mae'])\n my_callbacks = RegCallback(X_train, X_test, Y_train, Y_test, min_rt=min_rt, max_rt=max_rt)\n # Save model\n model_chk_path = out_dir + \"/best_model.hdf5\"\n mcp = ModelCheckpoint(model_chk_path, monitor=\"val_mean_squared_error\", save_best_only=True,\n save_weights_only=False,\n verbose=1, mode='min')\n\n ## monitor training information\n # tbCallBack = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)\n new_model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, validation_data=(X_test, Y_test),\n callbacks=[my_callbacks, mcp])\n\n ## get the best model\n best_model = load_model(model_chk_path)\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n model_file_path = out_dir + \"/\" + model_file_name\n best_model.save(model_file_path)\n\n new_model_list['dp_model'][name] = model_file_path\n\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n\n new_model_list['max_x_length'] = model_list['max_x_length']\n new_aa_file = out_dir + \"/\" + os.path.basename(model_list['aa'])\n copyfile(aa_file, new_aa_file)\n new_model_list['aa'] = new_aa_file\n\n ## Useful for new data prediction\n new_model_list['min_rt'] = min_rt\n new_model_list['max_rt'] = max_rt\n\n model_list = new_model_list\n\n\n # save model data\n #file_all_models = open(out_dir + \"/all_models.obj\", 'wb')\n #pickle.dump(models, file_all_models)\n #file_all_models.close()\n\n ####################################################################################################################\n print(\"Ensemble learning ...\")\n\n\n para = dict()\n para['min_rt'] = min_rt\n para['max_rt'] = max_rt\n\n ## save result\n model_json = out_dir + \"/model.json\"\n with open(model_json, 'w') as f:\n json.dump(model_list, f)\n\n ## evaluation\n if test_file is not None:\n ensemble_predict(model_json,x=X_test,y=Y_test,para=para, batch_size=batch_size,method=ensemble_method,\n out_dir=out_dir,\n prefix=\"final_eval\")\n\n ####################################################################################################################", "def boosting(train_data, dim, t):\n w = []\n w.append([float(1) / float(len(train_data))] * len(train_data))\n\n # Store models in m, models are stored as a tuple with the w_vector as well\n # as the t_vector\n\n m = []\n\n for i in range(t):\n print(\"Iteration \" + str(i + 1) + str(\":\"))\n t_vec, w_vec, error = binary_classifier(train_data, dim, w[i])\n alpha = 0.5 * math.log(float(1 - error) / float(error))\n print(\"Error = \" + str(error))\n print(\"Alpha = \" + str(alpha))\n if error >= 0.5:\n break\n # Add model only if it has error rate less than 0.5\n m.append((t_vec, w_vec, alpha))\n\n 
is_increase_weights_printed = False\n is_decrease_weights_printed = False\n factor_to_increase = 0\n factor_to_decrease = 0\n # Update weights by figuring out which points that are misclassified\n w.append([0] * len(train_data))\n for j in range(len(train_data)):\n if np.dot(train_data[j][0:dim], w_vec) > t_vec:\n if train_data[j][dim] == -1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n else:\n if train_data[j][dim] == 1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n\n print(\"Factor to increase weights = \" + str(factor_to_increase))\n print(\"Factor to decrease weights = \" + str(factor_to_decrease))\n\n return m", "def one_experiment():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'overfit_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 15, 1: 85}]\n # {0:15, 1:85}]#, {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n features_to_use = ['user', 'countries', 'session', 'format', 'token']\n # set constant parameters\n set_params(use_word_emb=1)\n set_params(epochs=40)\n set_params(features_to_use=features_to_use)\n\n # save constant parameters to a new \"experiment_..\" filgithx+P@2ub\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**3)), \"KB\")\n\n # update the parameter value\n set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name,\n new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()", "def __init__(\n self,\n boosting_type: str = 'gbdt',\n num_leaves: int = 31,\n max_depth: int = -1,\n learning_rate: float = 0.1,\n n_estimators: int = 100,\n subsample_for_bin: int = 200000,\n objective: Optional[Union[Callable, str]] = None,\n class_weight: Optional[Union[dict, str]] = None,\n min_split_gain: float = 0.,\n min_child_weight: float = 1e-3,\n min_child_samples: int = 20,\n subsample: float = 1.,\n 
subsample_freq: int = 0,\n colsample_bytree: float = 1.,\n reg_alpha: float = 0.,\n reg_lambda: float = 0.,\n random_state: Optional[Union[int, np.random.RandomState]] = None,\n n_jobs: int = -1,\n silent: bool = True,\n importance_type: str = 'split',\n client: Optional[Client] = None,\n **kwargs: Any\n ):\n self.client = client\n super().__init__(\n boosting_type=boosting_type,\n num_leaves=num_leaves,\n max_depth=max_depth,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n subsample_for_bin=subsample_for_bin,\n objective=objective,\n class_weight=class_weight,\n min_split_gain=min_split_gain,\n min_child_weight=min_child_weight,\n min_child_samples=min_child_samples,\n subsample=subsample,\n subsample_freq=subsample_freq,\n colsample_bytree=colsample_bytree,\n reg_alpha=reg_alpha,\n reg_lambda=reg_lambda,\n random_state=random_state,\n n_jobs=n_jobs,\n silent=silent,\n importance_type=importance_type,\n **kwargs\n )", "def __init__(\n self,\n boosting_type: str = 'gbdt',\n num_leaves: int = 31,\n max_depth: int = -1,\n learning_rate: float = 0.1,\n n_estimators: int = 100,\n subsample_for_bin: int = 200000,\n objective: Optional[Union[Callable, str]] = None,\n class_weight: Optional[Union[dict, str]] = None,\n min_split_gain: float = 0.,\n min_child_weight: float = 1e-3,\n min_child_samples: int = 20,\n subsample: float = 1.,\n subsample_freq: int = 0,\n colsample_bytree: float = 1.,\n reg_alpha: float = 0.,\n reg_lambda: float = 0.,\n random_state: Optional[Union[int, np.random.RandomState]] = None,\n n_jobs: int = -1,\n silent: bool = True,\n importance_type: str = 'split',\n client: Optional[Client] = None,\n **kwargs: Any\n ):\n self.client = client\n super().__init__(\n boosting_type=boosting_type,\n num_leaves=num_leaves,\n max_depth=max_depth,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n subsample_for_bin=subsample_for_bin,\n objective=objective,\n class_weight=class_weight,\n min_split_gain=min_split_gain,\n min_child_weight=min_child_weight,\n min_child_samples=min_child_samples,\n subsample=subsample,\n subsample_freq=subsample_freq,\n colsample_bytree=colsample_bytree,\n reg_alpha=reg_alpha,\n reg_lambda=reg_lambda,\n random_state=random_state,\n n_jobs=n_jobs,\n silent=silent,\n importance_type=importance_type,\n **kwargs\n )", "def __init__(\n self,\n boosting_type: str = 'gbdt',\n num_leaves: int = 31,\n max_depth: int = -1,\n learning_rate: float = 0.1,\n n_estimators: int = 100,\n subsample_for_bin: int = 200000,\n objective: Optional[Union[Callable, str]] = None,\n class_weight: Optional[Union[dict, str]] = None,\n min_split_gain: float = 0.,\n min_child_weight: float = 1e-3,\n min_child_samples: int = 20,\n subsample: float = 1.,\n subsample_freq: int = 0,\n colsample_bytree: float = 1.,\n reg_alpha: float = 0.,\n reg_lambda: float = 0.,\n random_state: Optional[Union[int, np.random.RandomState]] = None,\n n_jobs: int = -1,\n silent: bool = True,\n importance_type: str = 'split',\n client: Optional[Client] = None,\n **kwargs: Any\n ):\n self.client = client\n super().__init__(\n boosting_type=boosting_type,\n num_leaves=num_leaves,\n max_depth=max_depth,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n subsample_for_bin=subsample_for_bin,\n objective=objective,\n class_weight=class_weight,\n min_split_gain=min_split_gain,\n min_child_weight=min_child_weight,\n min_child_samples=min_child_samples,\n subsample=subsample,\n subsample_freq=subsample_freq,\n colsample_bytree=colsample_bytree,\n reg_alpha=reg_alpha,\n 
reg_lambda=reg_lambda,\n random_state=random_state,\n n_jobs=n_jobs,\n silent=silent,\n importance_type=importance_type,\n **kwargs\n )", "def bottlegrow_split_bottleExpansion((nu,T,nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n # bottlegrowth in ancient population\n nu_func = lambda t: numpy.exp(numpy.log(nu) * t/T)\n\n phi = Integration.one_pop(phi, xx, T, nu_func)\n\n\n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs\n\n\n\n\n\n def bottlegrow_split_bottleExpansion_mig((nu,T,nuW,nuEF,nuEB,TE,m12,m21), (n1,n2), pts):\n \"\"\"\n Model with bottlegrowth, split, followed by second bottleneck and exp recovery in Eastern pop\n \n nu, or ancestral population size defaults to 1.\n \n nu= Ratio of contemporary to ancient population size\n T = Time in the past at which growth began\n nuW: The size of the western population after split\n nuEF: The final size for the eastern population\n nuEB: The size of the eastern population after the bottleneck\n TE: The time of the eastern-western split\n m12: Migration from pop 2 to pop 1 (2*Na*m12)\n m21: Migration from pop 1 to pop 2\n\n n1,n2: Size of fs to generate.\n pts: Number of points to use in grid for evaluation.\n \"\"\" \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n # bottlegrowth in ancient population\n nu_func = lambda t: numpy.exp(numpy.log(nu) * t/T)\n\n phi = Integration.one_pop(phi, xx, T, nu_func)\n\n\n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func,m12=m12, m21=m21)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs", "def fit(self, x, y, time_limit=None, datainfo=None):\n\n if time_limit is None:\n time_limit = 24 * 60 * 60\n if datainfo is None:\n datainfo = {'loaded_feat_types': [0] * 4}\n self.time_limit = time_limit\n self.datainfo = datainfo\n\n if self.objective == 'multiclass' or self.objective == 'binary':\n n_classes = len(set(y))\n if n_classes == 2:\n self.objective = 'binary'\n self.lgbm = LGBMClassifier(silent=False,\n verbose=-1,\n n_jobs=1,\n objective=self.objective)\n else:\n self.objective = 'multiclass'\n self.lgbm = LGBMClassifier(silent=False,\n verbose=-1,\n n_jobs=1,\n 
num_class=n_classes,\n objective=self.objective)\n\n elif self.objective == 'regression':\n self.lgbm = LGBMRegressor(silent=False,\n verbose=-1,\n n_jobs=1,\n objective=self.objective)\n\n self.tabular_preprocessor = TabularPreprocessor()\n x = self.tabular_preprocessor.fit(x, y, self.time_limit, self.datainfo)\n\n if x.shape[1] == 0:\n raise ValueError(\"No feature exist!\")\n\n if x.shape[0] > 6000:\n grid_train_perentage = 0.1\n else:\n grid_train_perentage = 1\n grid_N = int(x.shape[0] * grid_train_perentage)\n idx = random.sample(list(range(x.shape[0])), grid_N)\n\n grid_train_x = x[idx, :]\n grid_train_y = y[idx]\n\n while x.shape[0] < 60:\n x = np.concatenate([x, x], axis=0)\n y = np.concatenate([y, y], axis=0)\n\n response_rate = sum(y) / len(y)\n print('Response Rate', response_rate)\n\n if not self.is_trained:\n # Two-step cross-validation for hyperparameter selection\n print('-----------------Search Regularization Params---------------------')\n if response_rate < 0.005:\n depth_choice = [5]\n else:\n depth_choice = [8, 10]\n\n params = {\n 'min_split_gain': [0.1],\n 'max_depth': depth_choice,\n 'min_child_weight': [5, 10, 30, 50, 60, 80, 100],\n 'colsample_bytree': [0.6, 0.7],\n 'learning_rate': [0.3],\n 'subsample': [0.8],\n 'num_leaves': [80],\n }\n\n cv_start = time.time()\n search_iter = 14\n n_estimators_choice = [50]\n _, best_param = search(self.lgbm,\n params,\n search_iter,\n n_estimators_choice,\n grid_train_x, grid_train_y)\n\n print('-----------------Search Learning Rate---------------------')\n for key, value in best_param.items():\n best_param[key] = [value]\n best_param['learning_rate'] = [0.03, 0.045, 0.06, 0.075, 0.85, 0.95, 0.105, 0.12]\n n_estimators_choice = [100, 150, 200]\n search_iter = 16\n\n self.clf, best_param = search(self.lgbm,\n best_param,\n search_iter,\n n_estimators_choice,\n grid_train_x, grid_train_y)\n\n print('self.clf', self.clf)\n cv_end = time.time()\n self.cv_time = cv_end - cv_start\n self.is_trained = True\n\n # Fit Model\n self.clf.fit(x, y)\n\n pre_model_name = []\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n for file in os.listdir(self.path):\n if file.endswith(\"_lgb.txt\"):\n pre_model_name.append(file)\n self.save_filename = self.path + '/' + str(len(pre_model_name) + 1) + '_lgb.txt'\n self.clf.booster_.save_model(self.save_filename)\n\n print(\"The whole available data is: \")\n print(\"Real-FIT: dim(X)= [{:d}, {:d}]\".format(x.shape[0], x.shape[1]))\n\n print('Feature Importance:')\n print(self.clf.feature_importances_)", "def run(self,step=2,\n sizePop=100,\n infoFields=['migrate_to','fitness'],\n recombination_rate = 0.00375,\n migration_rate = 0.01,\n mutation_rate = [0.00000001],\n subPopNames = ['x','y','z','w'],\n alleleNames = ['A','B'],\n s1 = 0.1,\n burnin=50,\n **kargs):\n\n self.reset()\n pop=sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1,self.dist)), subPopNames=subPopNames, infoFields=infoFields)\n \n simu = sim.Simulator(pop)\n print(\"The simulation has started\")\n t1 = time.time()\n\n\n mutate_snps=range(0,50)+range(51,101)\n\n # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions\n snps=[0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 
0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]\n self.initFreq=[]\n\n \n for i in range(len(snps)):\n alpha=float(4*sizePop*migration_rate*snps[i])\n bhta=float(4*sizePop*migration_rate*(1-snps[i])) \n p=numpy.random.beta(alpha,bhta)\n while (p>=0.9 or p<=0.1):\n p=numpy.random.beta(alpha,bhta)\n \n print \" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}\".format(snp=i, alpha=alpha, bhta=bhta, p=p)\n self.initFreq.append(p)\n\n simu.evolve(\n \n initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],\n \n\n # initialize the sex and select the 50 loci (parents)\n preOps = [sim.InitSex(maleProp=0.5,at=[0]),\n\n # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)\n sim.PyOperator(self.genotypeBegin,at=[0]),\n \n # Wait 50 generations for the system to reach equilibrium\n # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual \n sim.PyOperator(self.genotypeAfter,at=[50]),\n\n # function that carries out the selection proccess\n sim.MaSelector(loci=50,wildtype=0,fitness=[1+s1, 1+s1/2, 1],begin=50, end=-1,subPops=1)],\n\n # recombination\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=recombination_rate)]),\n \n # mutation and migration of offsprings\n postOps = [\n\n \n sim.SNPMutator(u=mutation_rate,loci=mutate_snps),\n \n # call function to calculate Fst and check for equilibrium state\n sim.PyOperator(self.calcFst,step=step),\n\n #migration\n # Here we define an island model, but this can easily be changed.\n # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html\n sim.Migrator(sim.utils.migrIslandRates(migration_rate,self.numPop)),\n \n # call function to save the allele frequencies\n sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),\n \n \n # check if locus 50 is lost due to genetic drift. 
If yes, we terminate the simulation\n sim.Stat(alleleFreq=50,step=step,subPops=1,begin=50,end=-1),\n sim.TerminateIf('alleleFreq[50][0] == 0',step=step,begin=50,end=-1),\n \n # check the progress of the simulation\n sim.PyEval('\"Gen: %d\" % gen',step=step),\n sim.PyOutput('\\n',step=step),\n \n ],\n gen=self.Gen\n \n )\n \n \n t2 = time.time()\n print \"simulation took\", t2-t1, \"seconds.\"", "def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, 
args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))", "def hyperopt_lgb(X, y, mode, N, time_limit, max_train_size=None, max_train_rows=None):\n\n print('hyperopt..')\n\n start_time = time.time()\n\n # train-test split\n train_size = 0.7\n # restrict size of train set to be not greater than max_train_size\n if max_train_size is not None:\n size_factor = max(1, 0.7*X.memory_usage(deep=True).sum()/max_train_size)\n # restrict number of rows in train set to be not greater than max_train_rows\n if max_train_rows is not None:\n rows_factor = max(1, 0.7*X.shape[0]/max_train_rows)\n train_size = train_size/max(size_factor, rows_factor)\n Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=train_size, random_state=42)\n print('train shape {}, size {}'.format(Xtrain.shape, Xtrain.memory_usage(deep=True).sum()/1024/1024))\n\n # search space to pass to hyperopt\n fspace = {\n 'num_leaves': hp.choice('num_leaves', [5,10,20,30,50,70,100]),\n 'subsample': hp.choice('subsample', [0.7,0.8,0.9,1]),\n 'colsample_bytree': hp.choice('colsample_bytree', [0.5,0.6,0.7,0.8,0.9,1]),\n 'min_child_weight': hp.choice('min_child_weight', [5,10,15,20,30,50]),\n 'learning_rate': hp.choice('learning_rate', [0.02,0.03,0.05,0.07,0.1,0.2]),\n }\n\n # objective function to pass to hyperopt\n def objective(params):\n\n iteration_start = time.time()\n\n # print(params)\n params.update({'n_estimators': 500, 'random_state': 42, 'n_jobs': -1})\n\n model = lgb_model(params, mode)\n model.fit(Xtrain, ytrain)\n\n if mode == 'regression':\n pred = model.predict(Xtest)\n loss = np.sqrt(mean_squared_error(ytest, pred))\n elif mode == 'classification':\n pred = model.predict_proba(Xtest)[:, 1]\n loss = -roc_auc_score(ytest, pred)\n\n iteration_time = time.time()-iteration_start\n print('iteration time %.1f, loss %.5f' % (iteration_time, loss))\n\n return {'loss': loss, 'status': STATUS_OK,\n 'runtime': iteration_time,\n 'params': params}\n\n\n # object with history of iterations to pass to hyperopt\n trials = Trials()\n\n # loop over iterations of hyperopt\n for t in range(N):\n # run hyperopt, n_startup_jobs - number of first iterations with random search\n best = fmin(fn=objective, space=fspace, algo=partial(tpe.suggest, n_startup_jobs=10),\n max_evals=t+1, trials=trials)\n # check if time limit exceeded, then interrupt search\n elapsed = time.time()-start_time\n if elapsed >= time_limit:\n print('time limit exceeded')\n break\n\n print('best parameters', trials.best_trial['result']['params'])\n\n return trials.best_trial['result']['params']", "def run_code_for_training_with_CrossEntropy_and_BCE_Losses(self, net):\n filename_for_out1 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"label.txt\"\n filename_for_out2 = 
\"performance_numbers_\" + str(self.dl_studio.epochs) + \"regres.txt\"\n FILE1 = open(filename_for_out1, 'w')\n FILE2 = open(filename_for_out2, 'w')\n net = copy.deepcopy(net)\n net = net.to(self.dl_studio.device)\n criterion1 = nn.CrossEntropyLoss()\n# criterion2 = self.dl_studio.DetectAndLocalize.IOULoss(self.dl_studio.batch_size)\n criterion2 = nn.BCELoss()\n optimizer = optim.SGD(net.parameters(), \n lr=self.dl_studio.learning_rate, momentum=self.dl_studio.momentum)\n for epoch in range(self.dl_studio.epochs): \n running_loss_labeling = 0.0\n running_loss_regression = 0.0 \n for i, data in enumerate(self.train_dataloader):\n gt_too_small = False\n inputs, bbox_gt, labels = data['image'], data['bbox'], data['label']\n if self.dl_studio.debug_train and i % 1000 == 999:\n print(\"\\n\\n[iter=%d:] Ground Truth: \" % (i+1) + \n ' '.join('%5s' % self.dataserver_train.class_labels[labels[j].item()] for j in range(self.dl_studio.batch_size)))\n inputs = inputs.to(self.dl_studio.device)\n labels = labels.to(self.dl_studio.device)\n bbox_gt = bbox_gt.to(self.dl_studio.device)\n optimizer.zero_grad()\n outputs = net(inputs)\n outputs_label = outputs[0]\n bbox_pred = outputs[1]\n if self.dl_studio.debug_train and i % 500 == 499:\n inputs_copy = inputs.detach().clone()\n inputs_copy = inputs_copy.cpu()\n bbox_pc = bbox_pred.detach().clone()\n bbox_pc[bbox_pc<0] = 0\n bbox_pc[bbox_pc>31] = 31\n _, predicted = torch.max(outputs_label.data, 1)\n print(\"[iter=%d:] Predicted Labels: \" % (i+1) + \n ' '.join('%10s' % self.dataserver_train.class_labels[predicted[j].item()] \n for j in range(self.dl_studio.batch_size)))\n for idx in range(self.dl_studio.batch_size):\n i1 = int(bbox_gt[idx][1])\n i2 = int(bbox_gt[idx][3])\n j1 = int(bbox_gt[idx][0])\n j2 = int(bbox_gt[idx][2])\n k1 = int(bbox_pc[idx][1])\n k2 = int(bbox_pc[idx][3])\n l1 = int(bbox_pc[idx][0])\n l2 = int(bbox_pc[idx][2])\n print(\" gt_bb: [%d,%d,%d,%d]\"%(j1,i1,j2,i2))\n print(\" pred_bb: [%d,%d,%d,%d]\"%(l1,k1,l2,k2))\n inputs_copy[idx,0,i1:i2,j1] = 255\n inputs_copy[idx,0,i1:i2,j2] = 255\n inputs_copy[idx,0,i1,j1:j2] = 255\n inputs_copy[idx,0,i2,j1:j2] = 255\n inputs_copy[idx,2,k1:k2,l1] = 255 \n inputs_copy[idx,2,k1:k2,l2] = 255\n inputs_copy[idx,2,k1,l1:l2] = 255\n inputs_copy[idx,2,k2,l1:l2] = 255\n self.dl_studio.display_tensor_as_image(\n torchvision.utils.make_grid(inputs_copy, normalize=True),\n \"see terminal for TRAINING results at iter=%d\" % (i+1))\n mask_regress = torch.zeros(self.dl_studio.batch_size,32,32,requires_grad=False)\n mask_gt = torch.zeros(self.dl_studio.batch_size, 32,32)\n for k,out_regres in enumerate(bbox_pred):\n x1,y1,x2,y2 = bbox_pred[k].tolist()\n x1_gt,y1_gt,x2_gt,y2_gt = bbox_gt[k].tolist()\n x1,y1,x2,y2 = [int(item) if item >0 else 0 for item in (x1,y1,x2,y2)]\n x1_gt,y1_gt,x2_gt,y2_gt = [int(item) if item>0 else 0 for item in (x1_gt,y1_gt,x2_gt,y2_gt)]\n if abs(x1_gt - x2_gt)<5 or abs(y1_gt-y2_gt) < 5: gt_too_small = True\n mask_regress_np = np.zeros((32,32), dtype=bool)\n mask_gt_np = np.zeros((32,32), dtype=bool)\n mask_regress_np[y1:y2,x1:x2] = 1\n mask_gt_np[y1_gt:y2_gt, x1_gt:x2_gt] = 1\n mask_regress[k,:,:] = torch.from_numpy(mask_regress_np)\n mask_regress.reqiures_grad=True\n mask_gt[k,:,:] = torch.from_numpy(mask_gt_np)\n mask_gt.reqiures_grad=True \n loss_labeling = criterion1(outputs_label, labels)\n loss_labeling.backward(retain_graph=True) \n loss_regression = criterion2(mask_regress, mask_gt)\n loss_regression.requires_grad = True\n loss_regression.backward()\n optimizer.step()\n 
running_loss_labeling += loss_labeling.item() \n running_loss_regression += loss_regression.item() \n if i % 1000 == 999: \n avg_loss_labeling = running_loss_labeling / float(1000)\n avg_loss_regression = running_loss_regression / float(1000)\n print(\"[epoch:%d, batch:%5d] loss_labeling: %.3f loss_regression: %.3f \" % (epoch + 1, i + 1, avg_loss_labeling, avg_loss_regression))\n FILE1.write(\"%.3f\\n\" % avg_loss_labeling)\n FILE1.flush()\n FILE2.write(\"%.3f\\n\" % avg_loss_regression)\n FILE2.flush()\n running_loss_labeling = 0.0\n running_loss_regression = 0.0\n print(\"\\nFinished Training\\n\")\n self.save_model(net)", "def _para_boosting(self, H):\n # print '----------------primal-dual boost-------------------'\n H = np.hstack((H, -H))\n # H_ft = np.asfortranarray((H.copy()))\n (n, p) = H.shape\n self.c = np.log(n*p)\n nu = int(n * self.ratio)\n\n if self.max_iter < 50:\n delta = 1\n else:\n delta = 40\n d = np.ones(n) / n\n d_bar = np.ones(n) / n\n a_bar = np.ones(p) / p\n a = np.ones(p) / p\n h_a = np.sum(H, axis=1) / p\n h_a_bar = h_a.copy()\n # a_bar = a\n # a_tilde = np.ones(p) / p\n h_a_tilde = h_a.copy()\n # d_tilde = np.zeros(p)\n theta = 1\n sig = 1\n tau = 1\n t = 0\n logscale = 0\n for t in range(self.max_iter):\n d = prox_mapping(h_a_tilde, d, tau, 2)\n if self.has_dcap:\n d2 = proj_cap_ent(d, 1.0 / nu)\n # d_new = d_new/d_new.sum()\n if np.abs(d.sum() - d2.sum()) > 0.0001:\n print 'error'\n d = d2\n d_tilde = d\n dtH = np.dot(d_tilde, H)\n # dtH = np.dot(H.T, d_tilde)\n a_new = prox_mapping(-dtH, a, sig, 2)\n h_a_new = np.dot(H, a_new)\n # a_tilde = a_new + theta * (a_new - a)\n h_a_tilde = (1+theta) * h_a_new - theta * h_a\n a = a_new\n h_a = h_a_new\n d_bar *= t / (t + 1.0)\n d_bar += 1.0 / (t + 1) * d\n a_bar *= t / (t + 1.0)\n a_bar += 1.0 / (t + 1) * a\n # h_a_bar = np.dot(H, a_bar)\n h_a_bar = t / (t + 1.0) * h_a_bar + 1.0/(t+1) * h_a\n if int(np.log(t+1)) == logscale:\n logscale += 1\n self.iter_num.append(t)\n if self.has_dcap:\n min_margin = ksmallest2(h_a_bar, nu)\n self.primal_obj.append(-np.mean(min_margin))\n else:\n self.primal_obj.append(- np.min(h_a_bar))\n self.margin.append(-self.primal_obj[-1])\n self.dual_obj.append(-np.max(np.dot(d_bar, H)))\n self.gap.append(self.primal_obj[-1] - self.dual_obj[-1])\n self.err_tr.append(np.mean(h_a_bar < 0))\n # if t % 100 == 0:\n # print 'iter ' + str(t) + ' ' + str(self.gap[-1])\n if self.gap[-1] < self.epsi:\n break\n self.alpha = a_bar[:p / 2] - a_bar[p / 2:]\n self.d = d_bar\n print \" pd-boosting(python): max iter#%d: , actual iter#%d\" % (self.max_iter, t)", "def run_my_stack():\r\n\r\n X_train, y_train, X_test = load_features()\r\n fit_funcs = list()\r\n predict_funcs = list()\r\n configs = list()\r\n MAX_ROUND = 3\r\n\r\n # lgb\r\n num_leaves = [31, 41, 51, 61, 71, 81, 91]\r\n feature_fractions = [0.4, 0.4, 0.4, 0.3, 0.3, 0.3, 0.3]\r\n for i in range(len(num_leaves)):\r\n lgb_config = LGB_Config()\r\n lgb_config.params['num_leaves'] = num_leaves[i]\r\n lgb_config.params['feature_fraction'] = feature_fractions[i]\r\n lgb_config.seed = np.random.randint(0, 10000)\r\n lgb_config.save_model_path = None\r\n # lgb_config.max_round = MAX_ROUND\r\n configs.append(lgb_config)\r\n fit_funcs.append(lgb_fit)\r\n predict_funcs.append(lgb_predict)\r\n\r\n max_depths = [6, 7]\r\n colsample_bytrees = [0.7, 0.6]\r\n for i in range(len(max_depths)):\r\n xgb_config = XGB_Config()\r\n xgb_config.params['max_depth'] = max_depths[i]\r\n xgb_config.params['colsample_bytree'] = colsample_bytrees[i]\r\n xgb_config.seed = 
np.random.randint(0, 10000)\r\n xgb_config.save_model_path = None\r\n # xgb_config.max_round = MAX_ROUND\r\n configs.append(xgb_config)\r\n fit_funcs.append(xgb_fit)\r\n predict_funcs.append(xgb_predict)\r\n\r\n # cgb\r\n max_depths = [8]\r\n for i in range(len(max_depths)):\r\n cgb_config = CGB_Config()\r\n cgb_config.params['depth'] = max_depths[i]\r\n cgb_config.seed = np.random.randint(0, 10000)\r\n cgb_config.save_model_path = None\r\n # cgb_config.max_round = MAX_ROUND\r\n configs.append(cgb_config)\r\n fit_funcs.append(cgb_fit)\r\n predict_funcs.append(cgb_predict)\r\n\r\n X_train_stack, y_train_stack, X_test_stack = my_stacking(fit_funcs, predict_funcs, configs, X_train, y_train,\r\n X_test)\r\n result_path = 'result/my_stack_result-{}.csv'.format(time.strftime(\"%m%d-%H%M%S\"))\r\n y_pred_prob = final_fit_predict(X_train_stack, y_train_stack, X_test_stack, save_result_path=result_path)\r\n return y_pred_prob", "def model_and_data(request, hyperparams, estep_conf):\n if tvo.get_run_policy() == \"mpi\":\n init_processes()\n\n precision, N, D, H, batch_size = get(hyperparams, \"precision\", \"N\", \"D\", \"H\", \"batch_size\")\n\n if request.param == \"BSC\":\n W_gt = generate_bars(H, bar_amp=10.0, precision=precision)\n sigma2_gt = to.ones((1,), dtype=precision, device=tvo.get_device())\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n\n sigma2_init = to.tensor([1.0], dtype=precision, device=tvo.get_device())\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = BSC(\n H=H, D=D, W_init=W_gt, sigma2_init=sigma2_gt, pies_init=pies_gt, precision=precision\n )\n\n fname = \"bars_test_data_bsc.h5\"\n\n write_dataset(fname, N, D, np.float32, model)\n\n model.theta[\"W\"] = W_init\n model.theta[\"sigma2\"] = sigma2_init\n model.theta[\"pies\"] = pies_init\n\n elif request.param == \"NoisyOR\":\n W_gt = generate_bars(H, bar_amp=0.8, bg_amp=0.1, precision=precision)\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = NoisyOR(H=H, D=D, W_init=W_gt, pi_init=pies_gt, precision=precision)\n\n fname = \"bars_test_data_nor.h5\"\n\n write_dataset(fname, N, D, np.uint8, model)\n\n model.theta[\"W\"] = W_init\n model.theta[\"pies\"] = pies_init\n\n if tvo.get_run_policy() == \"mpi\":\n dist.barrier()\n\n return model, fname", "def __init__(self, data_provider, growth, depth,\n total_blocks,stages, keep_prob,\n weight_decay, nesterov_momentum, model_type, dataset,\n should_save_logs, should_save_model,\n renew_logs=False,\n reduction=1.0,\n bc_mode=False,\n **kwargs):\n self.data_provider = data_provider\n self.data_shape = data_provider.data_shape # (W,H,C)\n self.n_classes = data_provider.n_classes\n self.depth = depth\n\n #self.growth_rate = growth_rate\n # how many features will be received after first convolution\n # value the same as in the original Torch code\n self.growth = growth\n self.first_output_features = growth[0] * 2\n self.total_blocks = total_blocks\n self.stages = stages\n self.group_1x1 = kwargs['group_1x1']\n self.group_3x3 = kwargs['group_3x3']\n self.condense_factor = kwargs['condense_factor']\n self.bottleneck = kwargs['bottleneck']\n 
self.group_lasso_lambda= kwargs['group_lasso_lambda']\n\n #self.layers_per_block = (depth - (total_blocks + 1)) // total_blocks\n self.bc_mode = bc_mode\n # compression rate at the transition layers\n self.reduction = reduction\n '''\n if not bc_mode:\n print(\"Build %s model with %d blocks, \"\n \"%d composite layers each.\" % (\n model_type, self.total_blocks, self.layers_per_block))\n if bc_mode:\n self.layers_per_block = self.layers_per_block // 2\n print(\"Build %s model with %d blocks, \"\n \"%d bottleneck layers and %d composite layers each.\" % (\n model_type, self.total_blocks, self.layers_per_block,\n self.layers_per_block))\n '''\n print(\"Reduction at transition layers: %.1f\" % self.reduction)\n\n self.keep_prob = keep_prob\n self.weight_decay = weight_decay\n self.nesterov_momentum = nesterov_momentum\n self.model_type = model_type\n self.dataset_name = dataset\n self.should_save_logs = should_save_logs\n self.should_save_model = should_save_model\n self.renew_logs = renew_logs\n self.batches_step = 0\n\n self._stage = 0\n self._define_inputs()\n self._build_graph()\n self._initialize_session()\n self._count_trainable_params()", "def train_and_score_bagging(network):\n\n train_predictions = pd.read_pickle('data/train_predictions.pkl.gz', compression='gzip')\n test_predictions = pd.read_pickle('data/test_predictions.pkl.gz', compression='gzip')\n\n train_actuals = pd.read_pickle('data/train_actuals.pkl.gz', compression='gzip')\n test_actuals = pd.read_pickle('data/test_actuals.pkl.gz', compression='gzip')\n\n\n train_x = np.array(train_predictions.values)\n train_y = train_actuals[0].values\n train_log_y = safe_log(train_y)\n test_x = np.array(test_predictions.values)\n test_y = test_actuals[0].values\n test_log_y = safe_log(test_y)\n\n model = compile_model(network)\n\n print('\\rNetwork')\n\n for property in network:\n print(property, ':', network[property])\n logging.info('%s: %s' % (property, network[property]))\n\n test = xgb.DMatrix(test_x)\n train = xgb.DMatrix(train_x, label=train_log_y)\n\n\n\n eval_set = [(test_x, test_log_y)]\n model.fit(train_x, train_log_y, early_stopping_rounds=20, eval_metric='mae', eval_set=eval_set,\n verbose=False)\n\n # eval_set = [(test, test_log_y)]\n # xgb.train(network, train, num_boost_round=5000, evals=eval_set, early_stopping_rounds=5)\n\n predictions = model.predict(test_x)\n # predictions = xgb.predict(test_x)\n inverse_predictions = safe_exp(predictions)\n score = mean_absolute_error(test_y, inverse_predictions)\n mape = safe_mape(test_y, inverse_predictions)\n\n print('\\rResults')\n\n best_round = xgb.best_iteration\n\n if np.isnan(score):\n score = 9999\n\n print('best round:', best_round)\n print('loss:', score)\n print('mape:', mape)\n print('-' * 20)\n\n logging.info('best round: %d' % best_round)\n logging.info('loss: %.4f' % score)\n logging.info('mape: %.4f' % mape)\n logging.info('-' * 20)\n\n eval_results({'xgb_predictions': {\n 'actual_y': test_y,\n 'y_predict': inverse_predictions\n }\n })\n\n range_results({\n 'xgb_predictions': inverse_predictions,\n }, test_y)", "def bd_init(trajectory, rep=10, method='Nelder-Mead', mp=True,\n fix_N=None, brute=True, fit_func=bd_nll):\n\n # Make sure that there are no DP values = 0 in dataset\n trajectory.loc[trajectory['DP'] == 0, 'DP'] = (\n trajectory[trajectory.DP != 0].DP.mean())\n trajectory['DP'] = trajectory['DP'].astype(int)\n\n # Select random parameter initiations\n fitness_range = np.linspace(fitness_bounds[0], fitness_bounds[1], rep+1)\n t0_range = np.linspace(0, 
trajectory.iloc[0].age-1, rep+1).astype('int')\n\n # the number of wild type stem cells can be fix using fix_N parameter\n if fix_N is None:\n # if fix_N is not set, then compute a range of valid stem cell counts\n N_w_range = np.linspace(N_w_bounds[0], N_w_bounds[1], rep+1, dtype=int)\n else:\n N_w_range = [fix_N]\n\n # Create all possible combinations of initial parameters\n params_init = list(product(fitness_range, N_w_range, t0_range))\n\n # Set fitting function\n if brute is False:\n partial_func = partial(bd_fit,\n trajectory=trajectory,\n method=method,\n fit_func=fit_func)\n else:\n # Set fit_func function to return parameters\n fit_func_return_params = partial(fit_func,\n return_params=True)\n partial_func = partial(fit_func_return_params,\n trajectory=trajectory)\n\n if mp is True:\n with Pool(8) as p:\n model_list = list(p.map(partial_func, params_init))\n else:\n model_list = list(map(partial_func, params_init))\n\n if brute is False:\n # Optimal model\n optimal_model = model_list[0]\n for model in model_list:\n if model.fit.fun < optimal_model.fit.fun:\n optimal_model = model\n\n else:\n # Unpack nll and parameter values from model_list\n nll, params = zip(*model_list)\n s, N_w, t0 = zip(*params)\n\n # Convert negative log-likelihoods to likelihood\n likelihood = np.exp(-np.array(nll))\n # Create DataFrame with the nll for each combination of parameters\n brute_force_df = pd.DataFrame({'fitness': s,\n 'N_w': N_w,\n 't0': t0,\n 'likelihood': likelihood})\n # find dataframe row of optimal nll\n optimal_idx = brute_force_df['likelihood'].idxmax()\n\n # Fit new model with optimal parameters as initial parameters\n model_fit = bd_fit(params=[brute_force_df.iloc[optimal_idx].fitness,\n brute_force_df.iloc[optimal_idx].N_w,\n brute_force_df.iloc[optimal_idx].t0],\n trajectory=trajectory,\n method='Nelder-Mead',\n fit_func=fit_func)\n\n # Create model class object from optimal model\n optimal_model = trajectory_model(model_type='bd_process',\n fit=model_fit.fit,\n data=trajectory)\n\n # Create heatmaps for each combination of 2 parameters\n heatmaps = likelihood_heatmaps(brute_force_df)\n conditional_distribution_plots = conditional_distributions(\n brute_force_df)\n\n # Add dataframe and heatmap as attribute of the class object.\n optimal_model.brute_force_df = brute_force_df\n optimal_model.heatmap = heatmaps\n optimal_model.conditional_dist = conditional_distribution_plots\n\n return optimal_model", "def split_full_4epochs_iter1 (params, ns):\n #29 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T0, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n 
fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def split_full_4epochs_iter5 (params, ns):\n #29 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T0, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def main():\n bee_model = md.BeeForagingModel(GRID_WIDTH, GRID_HEIGHT, 10, 30, 7)\n\n iteration_size = 1000\n\n for i in range(45):\n print(f'ITERATION {i*iteration_size}')\n\n print({k: len(v) for k, v in bee_model.grid.grids.items()})\n start_time = time.time()\n bee_model.run_model(iteration_size)\n print(time.time() - start_time)", "def ComputeRegenerativeBraking(self):\r\n pass" ]
[ "0.59412456", "0.5714792", "0.5617969", "0.558789", "0.55688375", "0.5551032", "0.5532042", "0.55194557", "0.5486247", "0.54783905", "0.54762954", "0.54687536", "0.54687536", "0.54687536", "0.54658645", "0.54642564", "0.5456077", "0.5421649", "0.54122037", "0.5380967", "0.53753185", "0.5358601", "0.5347967", "0.5335504", "0.5331857", "0.53310716", "0.53286254", "0.53272045", "0.5327074", "0.5318432" ]
0.59255534
1
Model with bottlegrowth, split, followed by second bottleneck and exp recovery in Eastern pop
nu, or ancestral population size defaults to 1.
nu= Ratio of contemporary to ancient population size
T = Time in the past at which growth began
def bottlegrow_split_bottleExpansion((nu,T,nuW,nuEF,nuEB,TE), (n1,n2), pts):
    #Define grid to use
    xx = yy = dadi.Numerics.default_grid(pts)

    #phi for equilibrium ancestral population
    phi = dadi.PhiManip.phi_1D(xx)

    # bottlegrowth in ancient population
    nu_func = lambda t: numpy.exp(numpy.log(nu) * t/T)

    phi = Integration.one_pop(phi, xx, T, nu_func)

    #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)

    #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present
    nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)

    # function for growth in west
    nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)

    # integrate the two populations
    phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)

    #Return frequency spectrum
    fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))
    return fs


def bottlegrow_split_bottleExpansion_mig((nu,T,nuW,nuEF,nuEB,TE,m12,m21), (n1,n2), pts):
    """
    Model with bottlegrowth, split, followed by second bottleneck and exp recovery in Eastern pop

    nu, or ancestral population size defaults to 1.

    nu= Ratio of contemporary to ancient population size
    T = Time in the past at which growth began
    nuW: The size of the western population after split
    nuEF: The final size for the eastern population
    nuEB: The size of the eastern population after the bottleneck
    TE: The time of the eastern-western split
    m12: Migration from pop 2 to pop 1 (2*Na*m12)
    m21: Migration from pop 1 to pop 2

    n1,n2: Size of fs to generate.
    pts: Number of points to use in grid for evaluation.
    """
    #Define grid to use
    xx = yy = dadi.Numerics.default_grid(pts)

    #phi for equilibrium ancestral population
    phi = dadi.PhiManip.phi_1D(xx)

    # bottlegrowth in ancient population
    nu_func = lambda t: numpy.exp(numpy.log(nu) * t/T)

    phi = Integration.one_pop(phi, xx, T, nu_func)

    #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)

    #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present
    nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)

    # function for growth in west
    nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)

    # integrate the two populations
    phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func,m12=m12, m21=m21)

    #Return frequency spectrum
    fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))
    return fs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bottle_split_bottleExpansion((B1,P1,nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n # Now do the population bottleneck event.\n phi = dadi.Integration.one_pop(phi, xx, B1, P1)\n\n # grow the ancient population\n\n\n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs", "def split_nomig_growthall(params, ns):\n #10 parameters\t\n nu10, nu1, nuA0, nuA, nu20, nu2, nu30, nu3, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function for T1\n nuA_func = lambda t: nuA0 * (nuA/nuA0)**(t/T1) \n nu_T1_func = lambda t: [nu1, nuA_func(t)]\n fs.integrate(nu_T1_func, T1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T2\n nu1_func = lambda t: nu10 * (nu1/nu10)**(t/T2) \n nu2_func = lambda t: nu20 * (nu2/nu20)**(t/T2) \n nu3_func = lambda t: nu30 * (nu3/nu30)**(t/T2) \n nu_T2_func = lambda t: [nu1_func(t), nu2_func(t), nu3_func(t)]\n fs.integrate(nu_T2_func, T2) \n return fs", "def split_bottleExpansion((nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n \n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs", "def split_nomig_growth(params, ns):\n #7 parameters\t\n nu1, nuA0, nuA, nu2, nu3, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function for T1\n nuA_func = lambda t: nuA0 * (nuA/nuA0)**(t/T1) \n nu_T1_func = lambda t: [nu1, nuA_func(t)]\n fs.integrate(nu_T1_func, T1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2) \n return fs", "def NuGrid_net(self,model_type='delay'):\n\n # Create list of masses and metallicites:\n self.masses = [12.0,15.0,20.0,25.0]\n self.metallicities = [0.02,0.01,0.006,0.001,0.0001]\t\t\n \n # First define names of yield tables and 
the remnant masses for each metallicity (in solar masses)\n if model_type == 'delay':\n filename=localpath+'input/yields/NuGrid/H NuGrid yields delay_total.txt'\n remnants = {}\n remnants[0.02] = [1.61,1.61,2.73,5.71] # This gives remnant masses for each mass\n remnants[0.01] = [1.61,1.61,2.77,6.05]\n remnants[0.006] = [1.62,1.62,2.79,6.18]\n remnants[0.001] = [1.62,1.62,2.81,6.35]\n remnants[0.0001] = [1.62,1.62,2.82,6.38]\n elif model_type == 'rapid':\n filename = localpath+'input/yields/NuGrid/H NuGrid yields rapid total.txt'\n remnants = {}\n remnants[0.02] = [1.44,1.44,2.70,12.81] # Define remnants from metallicities\n remnants[0.01] = [1.44,1.44,1.83,9.84]\n remnants[0.006] = [1.44, 1.44, 1.77, 7.84]\n remnants[0.001] = [1.44,1.44,1.76,5.88]\n remnants[0.0001] = [1.44,1.44,1.76,5.61]\n else:\n raise ValueError('Wrong type: must be delay or rapid')\n \n # Define which lines in the .txt files to use. \n # This defines cuts starting at each relevant table\n cuts={}\n for z in self.metallicities:\n cuts[z] = [] \n for mass in self.masses:\n txtfile=open(filename,\"r\")\n for line_no,line in enumerate(txtfile):\n if str(mass) in line and str(z) in line:\n cuts[z].append(line_no)\n \n line_end = line_no # Final line\n \n # Create list of elements taken from data-file (from first relevant table)\n data = np.genfromtxt(filename,skip_header=int(cuts[0.02][0])+4,\n skip_footer=line_end-int(cuts[0.02][0])-83,\n dtype=['<U8','<U15','<U15','<U15'])\n \n self.elements = [str(line[0][1:]) for line in data]\n \n self.table={} # Initialize final output\n \n for z in self.metallicities: # Produce subtable for each metallicity\n yield_subtable={}\n yield_subtable['Mass'] = self.masses\n yield_subtable['mass_in_remnants'] = np.divide(np.asarray(remnants[z]),self.masses) # Initialize lists\n for el in self.elements:\n yield_subtable[el] = []\n \n for m_index,mass in enumerate(self.masses): # Create data array for each mass\n unprocessed_mass = mass-remnants[z][m_index] # Mass not in remnants in Msun\n data = np.genfromtxt(filename,skip_header=int(cuts[z][m_index])+4,\n skip_footer=line_end-int(cuts[z][m_index])-83,dtype=['<U8','<U15','<U15','<U15']) # Read from data file\n \n # Now iterate over data-file and read in element names\n # NB: [1:]s are necessary as each element in txt file starts with & \t\t\n for line in data:\n el_name = str(line[0][1:]) # Name of element\n el_yield = float(line[1][1:]) # Yield in Msun\n el_init = float(line[2][1:]) # Initial mass fraction \n el_net = el_yield-el_init*unprocessed_mass\n yield_subtable[el_name].append(el_net/mass) # Net mass fraction\n \n # Calculate summed net yield - should be approximately 0\t\n summed_yields = np.zeros(len(self.masses))\n for el in self.elements:\n yield_subtable[el] = np.asarray(yield_subtable[el])\n summed_yields+=yield_subtable[el]\n \n # Compute mass not in remnants with summed net yield small correction\t\t\n yield_subtable['unprocessed_mass_in_winds'] = 1.0-yield_subtable['mass_in_remnants']-summed_yields\n \n # Restructure dictionary into record array for output\n all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements\n list_of_arrays = [yield_subtable[key] for key in all_keys]\n restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)\n \n self.table[z] = restructure_subtable # This is output table for specific z\n \n # Yield table output is self.table", "def split_full_4epochs_iter5 (params, ns):\n #29 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, 
m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T0, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def split_symmig_all_growthall(params, ns):\n #14 parameters \n nu10, nu1, nuA0, nuA, nu20, nu2, nu30, nu3, m1_1, m2_1, m2_2, m2_3, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nuA_func = lambda t: nuA0 * (nuA/nuA0)**(t/T1) \n ## Population function for T1\n nu_T1_func = lambda t: [nu1, nuA_func(t)]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1_func, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu1_func = lambda t: nu10 * (nu1/nu10)**(t/T2) \n nu2_func = lambda t: nu20 * (nu2/nu20)**(t/T2) \n nu3_func = lambda t: nu30 * (nu3/nu30)**(t/T2) \n nu_T2_func = lambda t: [nu1_func(t), nu2_func(t), nu3_func(t)]\n mig2 = numpy.array([[0, m2_1, m2_3],[m2_1, 0, m2_2], [m2_3, m2_2, 0]]) \n fs.integrate(nu_T2_func, T2, m=mig2) \n return fs", "def residual_bottleneck_unit(n,bottom, nout, s, newdepth = False, use_global_stats=False):\n \n# bottom = n.__dict__['tops'][n.__dict__['tops'].keys()[-1]]\n \n ns=dict()\n stride = newdepth if newdepth else 1\n\n ns[s + '_branch2conv1'], ns[s + '_branch2bn1'], ns[s + '_branch2scale1'] = conv_bn_scale(bottom, ks = 1, \n stride = stride, nout = nout, pad = 0,\n use_global_stats=use_global_stats)\n ns[s + '_branch2relu1'] = L.ReLU(ns[s + '_branch2scale1'], in_place=True)\n ns[s + '_branch2conv2'], ns[s + '_branch2bn2'], ns[s + '_branch2scale2'] = conv_bn_scale(ns[s + '_branch2relu1'], ks = 3,\n stride = 1, nout = nout, pad = 1,\n use_global_stats=use_global_stats)\n ns[s + '_branch2relu2'] = L.ReLU(ns[s + '_branch2scale2'], in_place=True)\n ns[s + '_branch2conv3'], ns[s + '_branch2bn3'], ns[s + '_branch2scale3'] = conv_bn_scale(ns[s + '_branch2relu2'], ks = 1,\n stride = 1, nout = nout*4, pad = 0,\n use_global_stats=use_global_stats)\n \n if newdepth:\n ns[s + '_branch1conv'], ns[s + '_branch1bn1'], ns[s + '_branch1scale1'] = conv_bn_scale(bottom, ks = 1, \n stride = stride, nout = nout*4, pad = 0,\n use_global_stats=use_global_stats)\n ns[s] = L.Eltwise(ns[s + '_branch1scale1'],ns[s + '_branch2scale3'])\n else:\n ns[s] = L.Eltwise(bottom, ns[s + '_branch2scale3'])\n\n ns[s + '_relu'] = L.ReLU(ns[s], 
in_place=True)\n \n dict2net(n,ns)\n return ns[s + '_relu']", "def TNG_net(self):\n import h5py as h5\n filename = localpath+'input/yields/TNG/AGB.hdf5'\n # Read H5 file\n f = h5.File(filename, \"r\")\n\n indexing = {}\n indexing['H'] = 'Hydrogen'\n indexing['He'] = 'Helium'\n indexing['C'] = 'Carbon'\n indexing['N']= 'Nitrogen'\n indexing['O'] = 'Oxygen'\n indexing['Ne'] = 'Neon'\n indexing['Mg'] = 'Magnesium'\n indexing['Si'] = 'Silicon'\n indexing['S'] = 'Sulphur' # Not used by TNG simulation\n indexing['Ca'] = 'Calcium' # Not used by TNG simulation\n indexing['Fe'] = 'Iron'\n\n self.elements = list(indexing.keys())\n \n self.table = {}\n \n self.metallicities = list(f['Metallicities'].value)\n self.masses = f['Masses'].value\n \n\n for z_index,z in enumerate(self.metallicities):\n\n yield_subtable = {}\n \n z_name = f['Yield_names'].value[z_index].decode('utf-8')\n z_data = f['Yields/'+z_name+'/Yield']\n \n ejecta_mass = f['Yields/'+z_name+'/Ejected_mass'].value\n \n yield_subtable['Mass'] = list(reversed(self.masses))\n remnants = self.masses-ejecta_mass\n yield_subtable['mass_in_remnants'] = np.divide(list(reversed(remnants)),yield_subtable['Mass'])\n for el in list(indexing.keys()):\n yield_subtable[el] = np.zeros(len(self.masses))\n \n summed_yields = np.zeros(len(self.masses))\n \n for m_index,mass in enumerate(yield_subtable['Mass']):\n for el_index,el in enumerate(self.elements):\n el_yield = z_data[el_index][len(self.masses)-m_index-1]\n el_yield_fraction = el_yield/mass\n yield_subtable[el][m_index] = el_yield_fraction\n summed_yields[m_index]+=el_yield_fraction\n \n yield_subtable['unprocessed_mass_in_winds'] = 1.-summed_yields-yield_subtable['mass_in_remnants']\n \n self.table[z.astype(float)] = yield_subtable\n \n # Restructure table\n all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements\n \n list_of_arrays = [yield_subtable[key] for key in all_keys]\n restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)\n \n self.table[z] = restructure_subtable", "def objective(params, n_folds=N_FOLDS):\n\n # Keep track of evals\n global ITERATION\n\n ITERATION += 1\n\n # Retrieve the subsample if present otherwise set to 1.0\n subsample = params['boosting_type'].get('subsample', 1.0)\n\n # Extract the boosting type\n params['boosting_type'] = params['boosting_type']['boosting_type']\n params['subsample'] = subsample\n\n # Make sure parameters that need to be integers are integers\n for parameter_name in ['max_depth', 'subsample_for_bin', 'min_child_samples','min_child_weight','num_parallel_tree']:\n params[parameter_name] = int(params[parameter_name])\n\n start = timer()\n\n print('params',params)\n # Perform n_folds cross validation\n cv_results = xgb.cv(params, train_set,\n num_boost_round=3000,\n nfold=n_folds,\n stratified=True,\n early_stopping_rounds=100,\n feval=tpr_weight_funtion_xgb_cv,\n seed=50,\n verbose_eval=True,\n\n )\n\n print('cv_results\\n',type(cv_results),'\\n',cv_results)\n\n run_time = timer() - start\n\n # Extract the best score\n best_score = np.min(cv_results['test-TPR-mean'])\n\n # Loss must be minimized\n loss = best_score\n\n TPR_std = cv_results[cv_results['test-TPR-mean']==best_score]['test-TPR-std'].values[0]\n print('TPR_stdv', TPR_std)\n\n\n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmin(cv_results['test-TPR-mean']) + 1)\n\n # Write to the csv file ('a' means append)\n of_connection = open(out_file, 'a')\n writer = csv.writer(of_connection)\n 
writer.writerow([loss,TPR_std, params, ITERATION, n_estimators, run_time])\n\n # Dictionary with information for evaluation\n return {'loss': loss,'TPR_std':TPR_std, 'params': params, 'iteration': ITERATION,\n 'estimators': n_estimators,\n 'train_time': run_time, 'status': STATUS_OK}", "def split_full_4epochs_iter2 (params, ns):\n #29 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T0, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def extra_trees_test(n_jobs=1):\n # model = models.RandomForest.ExtraTreesModel()\n # model.run('cv')\n\n # tune the model - 15 trees already gives .13 RMSE, I think that's slightly better than RF with that number of trees\n params = {\n 'n_estimators': [15, 50, 100, 250]\n }\n model = models.RandomForest.ExtraTreesModel(\n grid_search_parameters=params,\n grid_search_sample=0.5,\n n_jobs=n_jobs\n )\n model.run('grid_search', refit=True)\n # 2014-01-21 05:45:28 - Base - INFO - Found best parameters:\n # 2014-01-21 05:45:28 - Base - INFO - {'n_estimators': 250}\n # 2014-01-21 05:45:28 - Base - INFO - Predicting on holdout set\n # 2014-01-21 05:45:41 - classes - INFO - RMSE: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - RMSE on holdout set: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - Grid search completed in 8916.21896791\n # 2014-01-21 05:45:41 - Base - INFO - Model completed in 9332.45440102\n\n # As expected, more trees = better performance. 
Seems like the performance is on par/slightly better than random forest", "def TNG_net(self): \n \n import h5py as h5\n filename = localpath+'input/yields/TNG/SNII.hdf5'\n # Read H5 file\n f = h5.File(filename, \"r\")\n \n # Define element indexing\t\t\t\n indexing = {}\n indexing['H'] = 'Hydrogen'\n indexing['He'] = 'Helium'\n indexing['C'] = 'Carbon'\n indexing['N']= 'Nitrogen'\n indexing['O'] = 'Oxygen'\n indexing['Ne'] = 'Neon'\n indexing['Mg'] = 'Magnesium'\n indexing['Si'] = 'Silicon'\n indexing['S'] = 'Sulphur' # Not used by TNG simulation\n indexing['Ca'] = 'Calcium' # Not used by TNG simulation\n indexing['Fe'] = 'Iron'\n \n self.elements = list(indexing.keys())\n \n self.table = {}\n \n # Define masses / metallicities\n self.metallicities = list(f['Metallicities'].value)\n self.masses = f['Masses'].value\n\n \n for z_index,z in enumerate(self.metallicities):\n \n yield_subtable = {}\n \n z_name = f['Yield_names'].value[z_index].decode('utf-8')\n z_data = f['Yields/'+z_name+'/Yield']\n \n ejecta_mass = f['Yields/'+z_name+'/Ejected_mass'].value\n \n yield_subtable['Mass'] = self.masses\n remnants = self.masses-ejecta_mass\n yield_subtable['mass_in_remnants'] = np.divide(remnants,self.masses)\n for el in list(indexing.keys()):\n yield_subtable[el] = np.zeros(len(self.masses))\n \n summed_yields = np.zeros(len(self.masses))\n \n for m_index,mass in enumerate(self.masses):\n for el_index,el in enumerate(self.elements):\n el_yield_fraction = z_data[el_index][m_index]/mass #(mass-remnants[m_index]) # Find fraction of ejecta per element\n yield_subtable[el][m_index] = el_yield_fraction\t\t\t\t\t\n summed_yields[m_index]+=el_yield_fraction # Compute total yield\n \n yield_subtable['unprocessed_mass_in_winds'] = 1.-summed_yields-yield_subtable['mass_in_remnants']\n \n # Restructure table\n all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements\n \n list_of_arrays = [yield_subtable[key] for key in all_keys]\n restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)\n \n self.table[z] = restructure_subtable", "def split_full_4epochs_iter1 (params, ns):\n #29 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T0, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def check_BDT_simulations_slice_KS(bolo_name, analysis_type, mass):\n\n\tplt.ion()\n\n\tpop_path = \"../Analyse_\" + bolo_name + 
\"/Populations/Pop_for_scaling/\"\n\tBDT_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_BDT_simu_better/BDT_\" + bolo_name + \"/\" + analysis_type + \"/\"\n\n\tttrue,ftrue = PyRPl.open_ROOT_object(\"../Fond_ERA_merged/\" + bolo_name + \"_\" + analysis_type + \"_lowmass_fond.root\", \"t_merged\")\n\ttsimu, fsimu = PyRPl.open_ROOT_object(BDT_path +\"True_events/ROOT_files/\" + bolo_name + \"_true_events_tree.root\", \"t_new0\")\n\n\tprint \"true: \", ttrue.GetEntries(\"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\tprint \"simu: \", tsimu.GetEntries(\"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\tttrue.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist(1000,-2,15,1000,-2,15\", \"\")\n\ttsimu.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist2(1000,-2,15,1000,-2,15\", \"\")\n\n\t# ttrue.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# tsimu.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# ttrue.Draw(\"0.414*EIB+(1-0.414)*EID:0.574*EC1+(1-0.574)*EC2>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"0.414*EIB+(1-0.414)*EID:0.574*EC1+(1-0.574)*EC2>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\t# ttrue.Draw(\"EIB:EID>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"EIB:EID>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# ttrue.Draw(\"EC1:EC2>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"EC1:EC2>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\thist.SetMarkerColor(kRed)\n\thist.SetMarkerStyle(20)\n\thist2.SetMarkerStyle(20)\n\thist.Draw()\n\thist2.Draw(\"same\")\n\n\traw_input()\n\n\t#Open event files\n\tdata_types = {\"names\": (\"EC1\", \"EC2\", \"EIA\", \"EIB\", \"EIC\", \"EID\"), \"formats\": (\"f\", \"f\", \"f\", \"f\", \"f\", \"f\")}\n\n\tarr_true = np.loadtxt(pop_path + bolo_name + \"_true_events_all.txt\", delimiter=\",\", dtype=data_types)\n\tarr_simu = np.loadtxt(pop_path + bolo_name + \"_simu_events_all.txt\", delimiter=\",\", dtype=data_types)\n\n\tEI_true = 0.5*(arr_true[\"EIB\"]+arr_true[\"EID\"])\n\tEC_true = 0.5*(arr_true[\"EC1\"]+arr_true[\"EC2\"])\n\n\tEI_simu = 0.5*(arr_simu[\"EIB\"]+arr_simu[\"EID\"])\n\tEC_simu = 0.5*(arr_simu[\"EC1\"]+arr_simu[\"EC2\"])\n\n\th2Darr = TH2F(\"h2Darr\", \"h2Darr\", 1000, -2, 15, 1000, -2, 15)\n\th2Dsimu = TH2F(\"h2Dsimu\", \"h2Dsimu\", 1000, -2, 15, 1000, -2, 15)\n\n\tfor i in range(EI_true.shape[0]):\n\t\th2Darr.Fill(EC_true[i], EI_true[i])\n\tfor i in range(EI_simu.shape[0]):\n\t\th2Dsimu.Fill(EC_simu[i],EI_simu[i])\n\n\tPyRPl.process_TH2(h2Darr, X_title = \"EC\", Y_title = \"EI\", color = kRed)\n\tPyRPl.process_TH2(h2Dsimu, X_title = \"EC\", Y_title = \"EI\", color = kBlack)\n\n\th2Darr.Draw()\n\th2Dsimu.Draw(\"same\")\n\n\t#Slices on EC\n\tfor EC in range(2,15):\n\t\tl_true = np.where(np.logical_and(EC_true>EC-1 , EC_true<EC))\n\t\tl_simu = np.where(np.logical_and(EC_simu>EC-1 , EC_simu<EC))\n\n\t\tslice_EI_true = EI_true[l_true]\n\t\tslice_EI_simu = EI_simu[l_simu]\n\n\t\tprint scipy.stats.ks_2samp(slice_EI_true, 
slice_EI_simu),\" \", 1.36*sqrt(len(slice_EI_true) + len(slice_EI_simu))/sqrt(len(slice_EI_true) * len(slice_EI_simu))\n\n\t\ttrue_cdf = sm.distributions.ECDF(slice_EI_true)\n\t\tsimu_cdf = sm.distributions.ECDF(slice_EI_simu)\n\n\t\tx_true = np.linspace(min(slice_EI_true), max(slice_EI_true))\n\t\tx_simu = np.linspace(min(slice_EI_simu), max(slice_EI_simu))\n\t\ty_true = true_cdf(x_true)\n\t\ty_simu = simu_cdf(x_simu)\n\n\t\tplt.step(x_true, y_true, \"r\", label = \"True IonFid CDF @ EC in [\" + str(EC-1) + \",\" + str(EC) + \"]\" )\n\t\tplt.step(x_simu, y_simu, \"k\", label = \"Simu IonFid CDF @ EC in [\" + str(EC-1) + \",\" + str(EC) + \"]\")\n\t\tplt.legend(loc=\"upper left\", prop={\"size\":10})\n\n\t\tplt.show()\n\t\traw_input()\n\t\tplt.clf()\n\n\t#Slices on EI\n\tfor EI in range(1,15):\n\t\tl_true = np.where(np.logical_and(EI_true>EI-1 , EI_true<EI))\n\t\tl_simu = np.where(np.logical_and(EI_simu>EI-1 , EI_simu<EI))\n\n\t\tslice_EC_true = EC_true[l_true]\n\t\tslice_EC_simu = EC_simu[l_simu]\n\n\t\tprint scipy.stats.ks_2samp(slice_EC_true, slice_EC_simu),\" \", 1.36*sqrt(len(slice_EC_true) + len(slice_EC_simu))/sqrt(len(slice_EC_true) * len(slice_EC_simu))\n\n\t\ttrue_cdf = sm.distributions.ECDF(slice_EC_true)\n\t\tsimu_cdf = sm.distributions.ECDF(slice_EC_simu)\n\n\t\tx_true = np.linspace(min(slice_EC_true), max(slice_EC_true))\n\t\tx_simu = np.linspace(min(slice_EC_simu), max(slice_EC_simu))\n\t\ty_true = true_cdf(x_true)\n\t\ty_simu = simu_cdf(x_simu)\n\n\t\tplt.step(x_true, y_true, \"r\", label = \"True IonFid CDF @ EI in [\" + str(EI-1) + \",\" + str(EI) + \"]\" )\n\t\tplt.step(x_simu, y_simu, \"k\", label = \"Simu IonFid CDF @ EI in [\" + str(EI-1) + \",\" + str(EI) + \"]\")\n\t\tplt.legend(loc=\"upper left\", prop={\"size\":10})\n\n\t\tplt.show()\n\t\traw_input()\n\t\tplt.clf()", "def run(self,step=2,\n sizePop=100,\n infoFields=['migrate_to','fitness'],\n recombination_rate = 0.00375,\n migration_rate = 0.01,\n mutation_rate = [0.00000001],\n subPopNames = ['x','y','z','w'],\n alleleNames = ['A','B'],\n s1 = 0.1,\n burnin=50,\n **kargs):\n\n self.reset()\n pop=sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1,self.dist)), subPopNames=subPopNames, infoFields=infoFields)\n \n simu = sim.Simulator(pop)\n print(\"The simulation has started\")\n t1 = time.time()\n\n\n mutate_snps=range(0,50)+range(51,101)\n\n # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions\n snps=[0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]\n self.initFreq=[]\n\n \n for i in range(len(snps)):\n alpha=float(4*sizePop*migration_rate*snps[i])\n bhta=float(4*sizePop*migration_rate*(1-snps[i])) \n p=numpy.random.beta(alpha,bhta)\n while (p>=0.9 or p<=0.1):\n p=numpy.random.beta(alpha,bhta)\n \n print \" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}\".format(snp=i, alpha=alpha, bhta=bhta, p=p)\n 
self.initFreq.append(p)\n\n simu.evolve(\n \n initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],\n \n\n # initialize the sex and select the 50 loci (parents)\n preOps = [sim.InitSex(maleProp=0.5,at=[0]),\n\n # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)\n sim.PyOperator(self.genotypeBegin,at=[0]),\n \n # Wait 50 generations for the system to reach equilibrium\n # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual \n sim.PyOperator(self.genotypeAfter,at=[50]),\n\n # function that carries out the selection proccess\n sim.MaSelector(loci=50,wildtype=0,fitness=[1+s1, 1+s1/2, 1],begin=50, end=-1,subPops=1)],\n\n # recombination\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=recombination_rate)]),\n \n # mutation and migration of offsprings\n postOps = [\n\n \n sim.SNPMutator(u=mutation_rate,loci=mutate_snps),\n \n # call function to calculate Fst and check for equilibrium state\n sim.PyOperator(self.calcFst,step=step),\n\n #migration\n # Here we define an island model, but this can easily be changed.\n # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html\n sim.Migrator(sim.utils.migrIslandRates(migration_rate,self.numPop)),\n \n # call function to save the allele frequencies\n sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),\n \n \n # check if locus 50 is lost due to genetic drift. If yes, we terminate the simulation\n sim.Stat(alleleFreq=50,step=step,subPops=1,begin=50,end=-1),\n sim.TerminateIf('alleleFreq[50][0] == 0',step=step,begin=50,end=-1),\n \n # check the progress of the simulation\n sim.PyEval('\"Gen: %d\" % gen',step=step),\n sim.PyOutput('\\n',step=step),\n \n ],\n gen=self.Gen\n \n )\n \n \n t2 = time.time()\n print \"simulation took\", t2-t1, \"seconds.\"", "def split_symmig_all_growth(params, ns):\n #11 parameters \n nu1, nuA0, nuA, nu2, nu3, m1_1, m2_1, m2_2, m2_3, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nuA_func = lambda t: nuA0 * (nuA/nuA0)**(t/T1) \n ## Population function for T1\n nu_T1_func = lambda t: [nu1, nuA_func(t)]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1_func, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n mig2 = numpy.array([[0, m2_1, m2_3],[m2_1, 0, m2_2], [m2_3, m2_2, 0]]) \n fs.integrate(nu_T2, T2, m=mig2) \n return fs", "def bootstrap(model,state_dim,prior,ens_size,interval,nanl,tanl,obs,Q):\n \n # store the analysis times indices in the full integration interval\n a_time = np.array(range(0,len(interval),tanl))\n\n # storage dictionary for the trajectories and weights\n p_series = {}\n A = 'A_'\n\n # divergence safety check\n divergence = False\n \n # define the initial weights\n weights = (1.0/ens_size)*np.ones(ens_size) \n\n # loop through the analysis times starting at time zero\n for i in range(nanl):\n\n # store the prior weights and states\n\tprior_W = weights \n\tprior_S = np.reshape(prior,[ens_size,state_dim])\n\n # recompute the weights, and throw out neglible particles\n [analysis,weights,ens_size] = 
no_resample_update(weights,obs[i,:],Q,prior,ens_size,state_dim) \n\tpost_S = np.reshape(analysis,[ens_size,state_dim])\n\n # check for filter divergence\n if ens_size < 10:\n divergence = True\n A_i = A + str(i)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights}\n break\n \n # integrate the initial cloud to the next analysis time;\n # note integration interval starts at time 0, and slice notation goes to the last index - 1\n traj = odeint(model,analysis,interval[a_time[i]:a_time[i+1]+1])\n \n #create storage for next iteration\n A_i = A + str(i)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights,'traj':traj}\n \n #initialize the next forecast\n prior = traj[-1,:]\n \n # final analysis time weight update - no forward trajectory to store\n if not divergence:\n\tprior_W = weights\n\tprior_S = np.reshape(prior,[ens_size,state_dim])\n [analysis,weights,ens_size] = no_resample_update(weights,obs[i+1,:],Q,prior,ens_size,state_dim)\n\tpost_S = np.reshape(analysis,[ens_size,state_dim]) \n\tA_i = A + str(i+1)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights}\n \n return p_series", "def stiffenerBuckle(dim):\n bst = dim[0]\n tst = dim[1]\n tsk = dim[2]\n\n epsilont = kt * ((tst / bst)) ** 2\n Et = (Esk * tsk) + (Est * ((bst * tst) / bsk))\n Nst = Et*epsilont # Critical Load\n rsf = Nst/Nx\n return rsf - 1 # Using a target Reserve Factor of 1", "def model_and_data(request, hyperparams, estep_conf):\n if tvo.get_run_policy() == \"mpi\":\n init_processes()\n\n precision, N, D, H, batch_size = get(hyperparams, \"precision\", \"N\", \"D\", \"H\", \"batch_size\")\n\n if request.param == \"BSC\":\n W_gt = generate_bars(H, bar_amp=10.0, precision=precision)\n sigma2_gt = to.ones((1,), dtype=precision, device=tvo.get_device())\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n\n sigma2_init = to.tensor([1.0], dtype=precision, device=tvo.get_device())\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = BSC(\n H=H, D=D, W_init=W_gt, sigma2_init=sigma2_gt, pies_init=pies_gt, precision=precision\n )\n\n fname = \"bars_test_data_bsc.h5\"\n\n write_dataset(fname, N, D, np.float32, model)\n\n model.theta[\"W\"] = W_init\n model.theta[\"sigma2\"] = sigma2_init\n model.theta[\"pies\"] = pies_init\n\n elif request.param == \"NoisyOR\":\n W_gt = generate_bars(H, bar_amp=0.8, bg_amp=0.1, precision=precision)\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = NoisyOR(H=H, D=D, W_init=W_gt, pi_init=pies_gt, precision=precision)\n\n fname = \"bars_test_data_nor.h5\"\n\n write_dataset(fname, N, D, np.uint8, model)\n\n model.theta[\"W\"] = W_init\n model.theta[\"pies\"] = pies_init\n\n if tvo.get_run_policy() == \"mpi\":\n dist.barrier()\n\n return model, fname", "def lgb_hyperopt(data, labels, num_evals=1000, n_folds=5, diagnostic=False):\r\n LGBM_MAX_LEAVES = 2**11 #maximum number of leaves per tree for LightGBM\r\n LGBM_MAX_DEPTH = 25 #maximum tree depth for LightGBM \r\n EVAL_METRIC_LGBM_CLASS = 'f1'\r\n\r\n def lgb_f1_score(y_hat, data):\r\n y_true 
= data.get_label()\r\n y_hat = np.round(y_hat)\r\n return 'f1', f1_score(y_true, y_hat), True\r\n\r\n print('Running {} rounds of LightGBM parameter optimisation:'.format(num_evals))\r\n #clear space\r\n \r\n integer_params = ['max_depth',\r\n 'num_leaves',\r\n 'max_bin',\r\n 'min_data_in_leaf',\r\n 'min_data_in_bin']\r\n \r\n def objective(space_params):\r\n \r\n #cast integer params from float to int\r\n for param in integer_params:\r\n space_params[param] = int(space_params[param])\r\n \r\n #extract nested conditional parameters\r\n if space_params['boosting']['boosting'] == 'goss':\r\n top_rate = space_params['boosting'].get('top_rate')\r\n other_rate = space_params['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n space_params['top_rate'] = top_rate\r\n space_params['other_rate'] = other_rate\r\n \r\n subsample = space_params['boosting'].get('subsample', 1.0)\r\n space_params['boosting'] = space_params['boosting']['boosting']\r\n space_params['subsample'] = subsample\r\n \r\n cv_results = lgb.cv(space_params, train, nfold = n_folds, stratified=True,\r\n early_stopping_rounds=100, seed=42, feval=lgb_f1_score)\r\n \r\n best_loss = -cv_results['f1-mean'][-1]\r\n\r\n return{'loss':best_loss, 'status': STATUS_OK }\r\n \r\n train = lgb.Dataset(data, labels)\r\n \r\n #integer and string parameters, used with hp.choice()\r\n boosting_list = [{'boosting': 'gbdt',\r\n 'subsample': hp.uniform('subsample', 0.5, 1)},\r\n {'boosting': 'goss',\r\n 'subsample': 1.0,\r\n 'top_rate': hp.uniform('top_rate', 0, 0.5),\r\n 'other_rate': hp.uniform('other_rate', 0, 0.5)}] #if including 'dart', make sure to set 'n_estimators'\r\n\r\n objective_list_reg = ['huber', 'gamma', 'fair', 'tweedie']\r\n objective_list_class = ['binary', 'cross_entropy']\r\n objective_list = objective_list_class\r\n is_unbalance_list = [True]\r\n\r\n space ={'boosting' : hp.choice('boosting', boosting_list),\r\n 'num_leaves' : hp.quniform('num_leaves', 2, LGBM_MAX_LEAVES, 1),\r\n 'max_depth': hp.quniform('max_depth', 2, LGBM_MAX_DEPTH, 1),\r\n 'max_bin': hp.quniform('max_bin', 32, 255, 1),\r\n 'min_data_in_leaf': hp.quniform('min_data_in_leaf', 1, 256, 1),\r\n 'min_data_in_bin': hp.quniform('min_data_in_bin', 1, 256, 1),\r\n 'min_gain_to_split' : hp.quniform('min_gain_to_split', 0.1, 5, 0.01),\r\n 'lambda_l1' : hp.uniform('lambda_l1', 0, 5),\r\n 'lambda_l2' : hp.uniform('lambda_l2', 0, 5),\r\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.2)),\r\n 'metric' : None, \r\n 'objective' : hp.choice('objective', objective_list),\r\n 'feature_fraction' : hp.quniform('feature_fraction', 0.5, 1, 0.01),\r\n 'bagging_fraction' : hp.quniform('bagging_fraction', 0.5, 1, 0.01),\r\n 'is_unbalance' : hp.choice('is_unbalance', is_unbalance_list)\r\n }\r\n\r\n trials = Trials()\r\n best = fmin(fn=objective,\r\n space=space,\r\n algo=tpe.suggest,\r\n max_evals=num_evals, \r\n trials=trials)\r\n \r\n #fmin() will return the index of values chosen from the lists/arrays in 'space'\r\n #to obtain actual values, index values are used to subset the original lists/arrays\r\n #extract nested conditional parameters\r\n try:\r\n if best['boosting']['boosting'] == 'goss':\r\n top_rate = best['boosting'].get('top_rate')\r\n other_rate = best['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n 
other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n except:\r\n if boosting_list[best['boosting']]['boosting'] == 'goss':\r\n top_rate = best['top_rate']\r\n other_rate = best['other_rate']\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n best['boosting'] = boosting_list[best['boosting']]['boosting']#nested dict, index twice\r\n best['metric'] = metric_list[best['metric']]\r\n best['objective'] = objective_list[best['objective']]\r\n best['is_unbalance'] = is_unbalance_list[best['is_unbalance']]\r\n \r\n #cast floats of integer params to int\r\n for param in integer_params:\r\n best[param] = int(best[param])\r\n \r\n print('{' + '\\n'.join('{}: {}'.format(k, v) for k, v in best.items()) + '}')\r\n if diagnostic:\r\n return(best, trials)\r\n else:\r\n return(best)", "def split_simsplit_3epochs_iter5(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def test_burst_tmax(self):\n n = 25\n t_max = 50\n dt = 0.1\n\n G = HVCLikeLayer(n)\n G.burst_noise = 0.0\n G.spike_noise = 0.0\n\n M1 = simulation.EventMonitor(G)\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n\n G = HVCLikeLayer(n, burst_tmax=50)\n G.burst_noise = 0.0\n G.spike_noise = 0.0\n\n M2 = simulation.EventMonitor(G)\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(2*t_max)\n\n self.assertTrue(np.allclose(M1.t, M2.t))\n self.assertTrue(np.allclose(M1.i, M2.i))", "def test_burst_tmax(self):\n n = 10\n t_max = 25\n dt = 0.1\n G = RateHVCLayer(n)\n G.burst_noise = 0.0\n\n M1 = simulation.StateMonitor(G, 'out')\n\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n\n G = RateHVCLayer(n, burst_tmax=t_max)\n G.burst_noise = 0.0\n \n M2 = simulation.StateMonitor(G, 'out')\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(2*t_max)\n\n self.assertTrue(np.allclose(M1.out, M2.out[:, :M1.out.shape[1]]))", "def split_full_4epochs_iter3 (params, ns):\n #29 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T0, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition 
of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def __init__(self,b,u,v,hbls_old,hbbl_old,Kv_old,Kt_old,srflx,sustr,svstr,f,grid_dict,tstep_mode,dt):\n \n # INPUTS FROM TTTW SYSTEM\n self.b = b #buoyancy field: [Ly,N]\n self.u = u # x-component of velocity [Ly,N]\n self.v = v # y-component of velocity [Ly+1,N]\n self.hbls_old = hbls_old #boundary layer depth from previous time step [Ly]\n self.hbbl_old = hbbl_old # bottom boundary layer depth from previous time step [Ly]\n self.Kv_old = Kv_old # momentum mixing coefficeint from previous time step [Ly,N+1]\n self.Kt_old = Kt_old # tracer mixing coefficient from previous time step [Ly,N+1]\n self.srflx = srflx #solar heat flux [Ly] (degC * (m/s))\n self.sustr = sustr # x-component surface wind stress [Ly] (N/m^2) \n self.svstr = svstr # y-component surface wind stress [Ly+1] (N/m^2)\n self.grid_dict = grid_dict #gridded data\n self.f = f #coriolis parameter\n # KPP-SPECIFIC VARIABLES \n self.hbls = np.zeros([self.b.shape[0]])\n self.hbbl = np.zeros([self.b.shape[0]])\n self.ustar = []\n self.bvf = [] \n self.kmo = []\n self.C_h_MO = []\n self.kbl = []\n self.Cr = [] \n self.Fc = []\n self.ghat = [] #NONLOCAL TERM: TO BE USED IN TIME STEPPING\n self.tstep_mode = tstep_mode# if in time steppign mode, turn on HBL_RATE_LIMIT\n self.dt = dt", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting 
variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)", "def mso_separation_task():\n input_range = np.arange(3000) #np.array([range(2000)])\n timescale=10.0\n osc1 = np.sin(input_range/timescale)\n osc2 = np.sin(2.1*input_range/timescale)\n osc3 = np.sin(3.4*input_range/timescale)\n train_target = np.column_stack((osc1, osc2, osc3))\n train_input = osc1*np.cos(osc2+2.345*osc3)\n train_input = train_input[:, None] #1d->2d\n\n machine = ESN(1, 800, leak_rate=0.5)\n print 'Starting training...'\n start = time.time()\n trainer = LinearRegressionReadout(machine)\n trainer.train(train_input[:2000], train_target[:2000])\n print 'Training Time: ', time.time() - start, 's'\n prediction = trainer.predict(train_input[2000:])\n mse = error_metrics.mse(prediction,train_target[2000:])\n nrmse = error_metrics.nrmse(prediction,train_target[2000:])\n print 'MSE: ', mse, 'NRMSE:' , nrmse\n \n plt.subplot(3,1,1)\n plt.plot(train_input[2800:3000])\n plt.title('Input')\n plt.subplot(3,1,2)\n plt.plot(train_target[2800:3000])\n plt.title('Targets')\n plt.subplot(3,1,3)\n plt.plot(prediction[800:1000])\n plt.title('Predictions')\n plt.show()\n \n return nrmse", "def testSimpleTrendDGP(self):\n N1, N0_sim, N0_not = 1, 50, 50\n N0 = N0_sim + N0_not\n N = N1 + N0\n treated_units, control_units = range(N1), range(N1, N)\n T0, T1 = 2, 1\n T = T0 + T1 # unused\n proto_sim = np.array([1, 0] + [2], ndmin=2)\n proto_not = np.array([0, 1] + [1], ndmin=2)\n te = 2\n proto_tr = proto_sim + np.hstack((np.zeros((1, T0)), np.full((1, T1), te)))\n Y1 = np.matmul(np.ones((N1, 1)), proto_tr)\n Y0_sim = np.matmul(np.ones((N0_sim, 1)), proto_sim)\n Y0_sim = Y0_sim + np.random.normal(0,0.1,Y0_sim.shape)\n #Y0_sim = Y0_sim + np.hstack((np.zeros((N0_sim,1)), \n # np.random.normal(0,0.1,(N0_sim,1)),\n # np.zeros((N0_sim,T-2))))\n Y0_not = np.matmul(np.ones((N0_not, 1)), proto_not)\n Y0_not = Y0_not + np.random.normal(0,0.1,Y0_not.shape)\n Y = np.vstack((Y1, Y0_sim, Y0_not))\n\n unit_treatment_periods = np.full((N), -1)\n unit_treatment_periods[0] = T0\n\n # Y += np.random.normal(0, 0.01, Y.shape)\n\n # OPTIMIZE OVER THE V_PEN'S\n # for v_pen, w_pen in [(1,1), (1,1e-10), (1e-10,1e-10), (1e-10,1), (None, None)]: #\n # print(\"\\nv_pen=%s, w_pen=%s\" % (v_pen, w_pen))\n ret = SC.estimate_effects(\n Y,\n unit_treatment_periods,\n ret_CI=True,\n max_n_pl=200,\n #stopping_rule=4,\n **command_line_options,\n )\n TestDGPs.simple_summ(ret.fits[T0], Y)\n V_penalty = ret.fits[T0].fitted_v_pen\n\n Y_sc = ret.fits[T0].predict(Y)# [control_units, :]\n te_vec_est = (Y - Y_sc)[0:T0:]\n # weight_sums = np.sum(ret.fit.sc_weights, axis=1)\n\n # print(ret.fit.scores)\n p_value = ret.p_value\n #print(\"p-value: %s\" % p_value)\n #print( ret.CI)\n #print(np.diag(ret.fit.V))\n #import pdb; pdb.set_trace()\n # print(ret)\n assert te in ret.CI, \"Confidence interval does not include the true effect\"\n assert p_value is not None\n assert p_value < 0.1, \"P-value is larger than expected\"\n\n # [sc_raw, sc_diff] = ind_sc_plots(Y[0, :], Y_sc[0, :], T0, ind_ci=ret.ind_CI)\n # plt.figure(\"sc_raw\")\n # plt.title(\"Unit 0\")\n # ### SHOW() blocks!!!!\n # # plt.show()\n # plt.figure(\"sc_diff\")\n # plt.title(\"Unit 0\")\n # # plt.show()\n # [te] = te_plot(ret)\n # plt.figure(\"te\")\n # plt.title(\"Average 
Treatment Effect\")\n # # plt.show()", "def split_full_3epochs_iter5 (params, ns):\n #19 parameters \n nu1a, nuA, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_21, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nuA]\n mig1 = numpy.array([[0, m1_12],[m1_21, 0]])\n fs.integrate(nu_T1, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs" ]
[ "0.63740695", "0.5965839", "0.5927294", "0.58659667", "0.5702715", "0.566868", "0.5498249", "0.54930544", "0.542103", "0.5419258", "0.5402037", "0.5399739", "0.5398051", "0.53829664", "0.53793573", "0.53616595", "0.53513664", "0.5348822", "0.5341492", "0.5327674", "0.53178793", "0.52976376", "0.529191", "0.52803665", "0.52604043", "0.52465755", "0.5232298", "0.5209216", "0.52043724", "0.518551" ]
0.65301913
0
Compute the coefficient matrix of b, which is a Toeplitz matrix
def coeff_b(nrows, ncols) -> np.ndarray:
    coeff_array = np.zeros((nrows, ncols), dtype="complex_")
    for idx, _ in np.ndenumerate(coeff_array):
        coeff_array[idx] = 1j * (idx[0] - idx[1])
    return coeff_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def acoeff(self):\n return np.dot(self.mmi,np.dot(self.mmatrix.T,self.bvec))", "def _compute_b_matrix(self) -> None:\n self.b_matrix = self._kronecker_product(tf.eye(self.n_points_int,\n dtype=tf.float64),\n self.g_matrix)\n self.b_matrix = tf.reshape(self.b_matrix,\n [self.n_points, self.dimensionality,\n self.n_points, self.dimensionality])\n self.b_matrix = tf.transpose(self.b_matrix, [1, 0, 3, 2])\n self.b_matrix = tf.reshape(self.b_matrix,\n [self.n_points * self.dimensionality,\n self.n_points * self.dimensionality])\n return", "def complex_mul3d(a, b):\n op = partial(torch.einsum, \"bixyz,ioxyz->boxyz\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def multiply(self, b):\n assert(self.Dimension == b.Dimension)\n p = []\n for meb in b.Elements:\n for mea in self.Elements:\n if mea.j == meb.i:\n temp = mea.val * meb.val\n temp = MatrixElement(mea.i, meb.j, temp)\n p.append(temp)\n p = SparseMatrix(len(p), p)\n #print(p)\n return p", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def __calc_jacobian_matrix(self):\n\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n self.jacobian_matrix = [diff(tf_matrix_first_to_last[:3, -1], self.q[i]).reshape(1, 3) for i in range(len(self.q))]\n self.jacobian_matrix = Matrix(self.jacobian_matrix).T # .T returns the transpose of matrix.", "def row_matrix_col(a, b, A):\n\n\treturn (a[0]*A[0][0]*b[0] + a[1]*A[1][0]*b[0] + a[2]*A[2][0]*b[0] + \n\t a[0]*A[0][1]*b[1] + a[1]*A[1][1]*b[1] + a[2]*A[2][1]*b[1] + \n\t a[0]*A[0][2]*b[2] + a[1]*A[1][2]*b[2] + a[2]*A[2][2]*b[2])", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def choleski_solve(A, b, half_bandwidth=None):\n n = len(A[0])\n if half_bandwidth is None:\n elimination(A, b)\n else:\n elimination_banded(A, b, half_bandwidth)\n x = Matrix.empty(n, 1)\n back_substitution(A, x, b)\n return x", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def cartan_matrix(self):\n # as soon as CartanMatrix is implemented we should use it here:\n # from sage.combinat.root_system.cartan_matrix import CartanMatrix\n cmat = copy(self.b_matrix())\n for i,j in cmat.nonzero_positions():\n a = cmat[i,j]\n if a > 0: cmat[i,j] = -a\n for i in range(self._rank):\n cmat[i,i] = 2\n # return CartanMatrix(cmat)\n return cmat", "def _get_jacobian(tris_pts):\n a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])\n b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])\n J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],\n [b[:, 0], b[:, 1]]])\n return J", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def complex_mul1d(a, b):\n op = partial(torch.einsum, \"bix,iox->box\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def _C(self):\n\n # Find 
the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def it_matrixpower(p,t,n,root_field=RR):\n assert n>=2, \"Carleman matrix must at least be of size 2 to retrieve the coefficients. But given was \" + repr(n)\n CM = p.carleman_matrix(n)\n ev = CM.charpoly().roots(root_field)\n assert len(ev) == n, \"Carleman matrix must have exactly \" + repr(n) + \"eigenvalues, but has \" + repr(len(ev))\n\n Char = [0]*n\n for k in range(n):\n #here is possibility for improvement of precision\n #to separate the fractional from the root parts\n #expanding the product\n Char[k] = CM - ev[k][0]*identity_matrix(n)\n\n #we want to have the first row of the product of the matrices\n #thatswhy we mulitply in front with:\n prod = vector(p.K,[0,1]+[0]*(n-2))\n prodwo = [0]*n\n for k in range(n):\n prodwo[k]=prod #these are the first terms until k-1\n\n #no need to continue\n if k == n-1:\n break\n\n #and we add the terms starting with k+1\n for i in range(k+1,n):\n prodwo[k] = prodwo[k] * Char[i]\n\n prod = prod * Char[k]\n\n sprodwo = [0]*n\n for k in range(n):\n if k==0:\n sprodwo[k] = ev[k][0] - ev[1][0]\n start = 2\n else:\n sprodwo[k] = ev[k][0] - ev[0][0]\n start = 1\n\n for i in range(start,n):\n if i != k:\n sprodwo[k] = sprodwo[k] * (ev[k][0] - ev[i][0])\n\n res = ev[0][0]**t/sprodwo[0] * prodwo[0]\n for k in range(1,n):\n res += ev[k][0]**t/sprodwo[k]*prodwo[k]\n\n return res.list()", "def solve(self,b):\n nrows = self.nrows\n ncols = self.ncols\n newmatrix = Matrix(nrows,ncols+b.ncols) #Account for b not being just a column vector\n for i in range(nrows):\n for j in range(ncols):\n newmatrix[i,j]= self[i,j]\n for j in range(b.ncols):\n newmatrix[i,ncols+j] = b[i,j]\n newmatrix.gaussianelimination()\n x = Matrix(nrows,b.ncols)\n for i in range(x.nrows):\n for j in range(b.ncols):\n x[i,j] = newmatrix[i,j+ncols]\n return x", "def new_basis(abc, lattice):\n return np.dot(abc.T, lattice.inv_matrix.T)", "def forward_substitution(self, b):\n if not self.is_lower_triangular():\n raise ValueError(\"Not a lower triangular matrix\")\n if b.cols != 1:\n raise IndexError(\"Require an Nx1 Matrix: (%i, %i)\"%\n (b.rows, b.cols))\n if b.rows != self.rows:\n raise IndexError(\"Row/column mismatch: (%i, %i) x (%i, %i)\"%\n (self.rows, self.cols, b.rows, b.cols))\n\n L = self\n N = self.rows\n\n y = make_matrix(N, 1)\n for i in range(N):\n y[i, 0] = (b[i, 0] - 
sum(L[i, k] * y[k, 0] for k in range(i))) / L[i, i]\n\n return y", "def cheby_coeff2(m,s):\r\n c = np.zeros(m+1)\r\n for j in range(m+1):\r\n c[j] = 2*np.exp(-s)*j1(-s)\r\n \r\n return c", "def cbacksolve(A, b, d):\n A = matrix(A)\n n = len(b)\n for k in range(n - 1,0,-1):\n b[k] /= A[k, k]\n lk = array([0, k - d]).max()\n b[lk:k] -= (A[lk:k, k]*b[k])\n b[0] /= A[0,0]", "def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor", "def abc_matrix(a, b, c):\n ax = np.linalg.norm(a)\n a_hat = a/ax\n bx = np.dot(b, a_hat)\n by = np.linalg.norm(np.cross(a_hat, b))\n cx = np.dot(c, a_hat)\n axb = np.cross(a,b)\n axb_hat = axb / np.linalg.norm(axb)\n cy = np.dot(c, np.cross(axb_hat, a_hat))\n cz = np.dot(c, axb_hat)\n return np.array([[ax, bx, cx],[0, by, cy],[0 , 0, cz]])", "def project_oblique(B, C):\n r = C.shape[0]\n F = block([[C @ C.T, C @ B.T], [B @ C.T, B @ B.T]])\n return block([C.T, B.T]) @ (np.linalg.pinv(F)[:, :r]) @ C", "def _zchi2_one(Tb, weights, flux, wflux, zcoeff, solve_matrices_algorithm=\"PCA\"):\n\n M = Tb.T.dot(np.multiply(weights[:,None], Tb))\n y = Tb.T.dot(wflux)\n\n try:\n zcoeff[:] = solve_matrices(M, y, solve_algorithm=solve_matrices_algorithm, use_gpu=False)\n except np.linalg.LinAlgError:\n return 9e99\n except NotImplementedError:\n return 9e99\n\n model = Tb.dot(zcoeff)\n\n zchi2 = np.dot( (flux - model)**2, weights )\n\n return zchi2", "def conv_layer_as_matrix_op(self, W, b, x, n, k):\n\n i = b.shape[0]\n\n output_im_size = n - k + 1\n\n Wx = (W @ x).reshape(i, output_im_size, output_im_size)\n return np.maximum(Wx + b.reshape(i, 1, 1), 0)", "def get_reactant_coeffs(reactants,product):\n specorder = product.specorder\n b_vec = np.array([ float(bi) for bi in product.natm_per_species()])\n #...Construct A_mat\n nsp = len(specorder)\n nreact = len(reactants)\n A_mat = np.zeros((nsp,nreact))\n for ir,reactant in enumerate(reactants):\n specorder_ir = reactant.specorder\n natm_per_sp = reactant.natm_per_species()\n for isp,sp in enumerate(specorder):\n if not sp in specorder_ir:\n A_mat[isp,ir] = 0.0\n else:\n sp_in_spir = specorder_ir.index(sp)\n A_mat[isp,ir] = float(natm_per_sp[sp_in_spir])\n print(' A_mat = ',A_mat)\n print(' b_vec = ',b_vec)\n #...Since nreact could be nsp, A_mat may not have inverse, \n #...so solve minimization of |Ax-b|^2 to obtain x^* vector, x^*=(A^T*A)*A^T*b.\n AA = np.dot(A_mat.T,A_mat)\n AAinv = np.linalg.inv(AA)\n x = np.dot(AAinv,np.dot(A_mat.T,b_vec))\n #...For check\n Ax = np.dot(A_mat,x)\n if len(Ax) != len(b_vec):\n raise ValueError('len(Ax) != len(b_vec)')\n wrong = False\n for i in range(len(b_vec)):\n if abs(Ax[i] -b_vec[i]) > 0.01:\n wrong = True\n if wrong:\n print(' WARNING: Exact solution was not obtained.')\n print(' Result maybe wrong: i,Ax[i],b[i].')\n for i in range(len(b_vec)):\n print(' {0:2d} {1:5.1f} {2:5.1f}'.format(i,Ax[i],b_vec[i]))\n else:\n print(' Ax=b is satisfied, which means the exact number relationship between LHS and RHS is found.')\n \n return x", "def jaccard_coeff(self):\n a, c, _, b = self.to_ccw()\n return _div(a, a + b + c)", "def b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients):\n\tBCoefficients = np.array([\t((y2-y1)/(x2-x1)-CCoefficients[0]*(x2-x1) - 
DCoefficients[0]*((x2-x1)**2)), \\\n\t\t\t\t\t\t\t\t((y3-y2)/(x3-x2)-CCoefficients[1]*(x3-x2) - DCoefficients[1]*((x3-x2)**2)) \t]).astype(float)\n\treturn(BCoefficients)", "def solve(self, A, b):\n if is_sparse(A) or is_sparse(b):\n A, b = A.tocsc(), b.tocsc()\n x = sparse.COO(scipy.sparse.linalg.spsolve(A, b))\n else:\n x = np.linalg.solve(A, b)\n\n return x" ]
[ "0.6314898", "0.6010264", "0.5852862", "0.57264954", "0.5714183", "0.57140785", "0.56770253", "0.56455976", "0.564191", "0.5627044", "0.56211114", "0.5616878", "0.5616609", "0.5612154", "0.5594928", "0.55856484", "0.5559769", "0.55383754", "0.55161476", "0.55161124", "0.5503608", "0.5501817", "0.5499013", "0.549152", "0.5490477", "0.5490114", "0.5468006", "0.54675066", "0.5456946", "0.5449764" ]
0.6335417
0
Initialise arrays that will hold the momentum and position matrices in their respective bases (diagonal entries only) under unitary time-evolution, compute the momentum and position operators after each iteration, and store them in tensors. Additionally, compute the microcanonical OTOC (c)
def init_compute(params):
    # unzip dictionary and assign parameters
    N, M, K, T = params.values()
    p0 = np.fft.fftfreq(N, 1.0 / N)
    x0 = np.arange(0, 2 * np.pi, 2 * np.pi / N)
    p_time_evolution = np.zeros((N, T), dtype="complex_")
    x_time_evolution = np.zeros((N, T), dtype="complex_")
    p_time_evolution[:, 0] = p0
    x_time_evolution[:, 0] = x0
    # Initialise b, c tensors to hold the b, c matrices iteratively
    b_tensor = np.zeros((N, N, T), dtype="complex_")
    c_tensor = np.zeros((N, N, T), dtype="complex_")
    c_trace = np.zeros(T, dtype="complex_")
    for i in range(T - 1):
        # Evolve
        b = coeff_b(N, N) * p_time_evolution[:, i]
        c = b @ b.T.conj()
        print(c.trace())
        c_trace[i] = c.trace()
        p_time_evolution[:, i + 1] = evolve(
            p_time_evolution[:, i],
            "momentum",
            x_time_evolution[:, i],
            p_time_evolution[:, i],
            params,
        )
        x_time_evolution[:, i + 1] = evolve(
            x_time_evolution[:, i],
            "position",
            x_time_evolution[:, i],
            p_time_evolution[:, i],
            params,
        )
        b_tensor[:, :, i], c_tensor[:, :, i] = b, c
    return b_tensor, c_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(M):\n\n t = T.scalar()\n dgamma = T.matrix() # velocity of Euclidean curve\n dsm = T.matrix() # derivative of Euclidean semimartingale\n u = M.FM_element()\n d = M.dim\n\n # Deterministic development\n def ode_development(dgamma,t,u):\n x = u[0:d]\n nu = u[d:].reshape((d,-1))\n m = nu.shape[1]\n\n det = T.tensordot(M.Horizontal(u)[:,0:m], dgamma, axes = [1,0])\n \n return det\n\n M.development = lambda u,dgamma: integrate(ode_development,u,dgamma)\n M.developmentf = theano.function([u,dgamma], M.development(u,dgamma))\n\n # Stochastic development\n def sde_development(dsm,t,u):\n x = u[0:d]\n nu = u[d:].reshape((d,-1))\n m = nu.shape[1]\n\n sto = T.tensordot(M.Horizontal(u)[:,0:m], dsm, axes = [1,0])\n \n return (T.zeros_like(sto), sto, M.Horizontal(u)[:,0:m])\n\n M.sde_development = sde_development\n M.sde_developmentf = theano.function([dsm,t,u], M.sde_development(dsm,t,u), on_unused_input = 'ignore') \n M.stochastic_development = lambda u,dsm: integrate_sde(sde_development,integrator_stratonovich,u,dsm)\n M.stochastic_developmentf = theano.function([u,dsm], M.stochastic_development(u,dsm))", "def u_t(self):\n\t\tdim = self.dim \n\t\ttim_all = self.tim_all\n\t\t#ctrl = self.ctrl\n\t\tH0 = self.H0\n\t\tHctrl = self.Hctrl\n\n\t\tu_all = np.zeros((tim_all+1,dim,dim),dtype = complex)\n\t\tu_all[0,:,:] = np.eye(dim)\n\n\t\tfor tim in xrange(tim_all):\n\t\t\tH = H0 + Hctrl[tim]#np.matrix( ctrl[i,tim] * np.array(Hctrl[i]))\n\t\t\tu_all[tim+1,:,:] = np.dot(self.u_dt(H,tim), u_all[tim,:,:])\n\n\n\t\treturn u_all", "def timlinucb(\n df_edges,\n df_feats,\n times,\n nodes,\n num_seeds=5,\n sigma=4,\n c=0.1,\n epsilon=0.4,\n num_repeats_oim=10,\n num_repeats_oim_reward=10,\n style=\"additive\",\n persist=False,\n hide_tqdm=False,\n):\n results = []\n # For persistent parameters - making the b and M matrices\n if persist:\n b = np.zeros((df_feats.shape[1], 1))\n m_inv = np.eye(df_feats.shape[1], df_feats.shape[1])\n else:\n b = None\n m_inv = None\n\n times_iter = (\n times\n if hide_tqdm\n else tqdm(times, desc=f\"TOIM iters\", leave=False, file=sys.stderr)\n )\n\n for t in times_iter:\n if style == \"additive\":\n df_t = df_edges[df_edges[\"day\"] <= t].sort_values(\"source\").reset_index()\n elif style == \"dynamic\":\n df_t = df_edges[df_edges[\"day\"] == t].sort_values(\"source\").reset_index()\n df_feats_t = df_t[\"index\"].apply(lambda x: df_feats.loc[x])\n result_oim = oim_node2vec(\n df_t,\n df_feats_t,\n nodes,\n num_inf=num_seeds,\n sigma=sigma,\n c=c,\n epsilon=epsilon,\n num_repeats=num_repeats_oim,\n num_repeats_reward=num_repeats_oim_reward,\n persist=persist,\n m_inv=m_inv,\n b=b,\n )\n result_oim[\"time\"] = t\n if persist:\n m_inv = result_oim.pop(\"m_inv\")\n b = result_oim.pop(\"b\")\n results.append(result_oim)\n return pd.DataFrame(results)", "def matrices_TC(l, omega, S, cn, csn, rhos, rho):\n MN = (np.linalg.inv(matrix_M1(l, omega, S, cn, csn, rhos, rho))\n * matrix_N1(l, omega, S, cn)\n )\n KL = (np.linalg.inv(matrix_K1(l, omega, S, cn, csn, rhos, rho))\n * matrix_L1(l, omega, S, cn)\n )\n T = np.zeros((3,3))\n C = np.zeros((3,3))\n T[:2,:2] = MN[:2]\n T[ 3, 3] = KL[0]\n C[:2,:2] = MN[2:]\n C[ 3, 3] = KL[1]\n return T, C", "def timlinucb_parallel_t(\n df_edges,\n df_feats,\n times,\n nodes,\n num_seeds=5,\n sigma=4,\n c=0.1,\n epsilon=0.4,\n num_repeats_oim=10,\n num_repeats_oim_reward=10,\n style=\"additive\",\n process_id=1,\n persist=False,\n):\n if \"tim\" not in os.listdir():\n logger_tlu.warning(\"Couldn't find TIM in the program directory\")\n return 
False\n\n tim_name = f\"tim_tlu_{process_id}\"\n dir_name = f\"{tim_name}_dir\"\n logger_tlu.debug(f\"Name of the new TIM file: {tim_name}\")\n shutil.copyfile(\"tim\", tim_name)\n\n # Making the new tim file executable\n st = os.stat(tim_name)\n os.chmod(tim_name, st.st_mode | stat.S_IEXEC)\n\n results = []\n\n # For persistent parameters - making the b and M matrices\n if persist:\n b = np.zeros((df_feats.shape[1], 1))\n m_inv = np.eye(df_feats.shape[1], df_feats.shape[1])\n else:\n b = None\n m_inv = None\n\n for t in times:\n if style == \"additive\":\n df_t = df_edges[df_edges[\"day\"] <= t].sort_values(\"source\").reset_index()\n elif style == \"dynamic\":\n df_t = df_edges[df_edges[\"day\"] == t].sort_values(\"source\").reset_index()\n\n df_feats_t = df_t[\"index\"].apply(lambda x: df_feats.loc[x])\n\n result_oim = oim_node2vec(\n df_t,\n df_feats_t,\n nodes,\n num_inf=num_seeds,\n sigma=sigma,\n c=c,\n epsilon=epsilon,\n num_repeats=num_repeats_oim,\n num_repeats_reward=num_repeats_oim_reward,\n oracle=partial(tim_parallel, tim_file=tim_name, temp_dir=dir_name),\n hide_tqdm=True,\n persist=persist,\n m_inv=m_inv,\n b=b,\n )\n\n result_oim[\"time\"] = t\n\n if persist:\n m_inv = result_oim.pop(\"m_inv\")\n b = result_oim.pop(\"b\")\n\n results.append(result_oim)\n\n logger_tlu.debug(\n f\"Removing the new TIM files {tim_name} and the temp directories {dir_name}\"\n )\n os.remove(tim_name)\n shutil.rmtree(dir_name)\n\n return pd.DataFrame(results)", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! \n \"\"\"In the following +[[]] and [:-1] are added to keep thing 1dim array of objects and still multiply it elemtwisely\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n #Use correct broadcasting?\n w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = 
scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = (0, np.inf), tol=1e-5).x \n return u_next", "def ADT_QCD_LEPTON():\n\n # As input for the quark-mass ratios, we use the quark masses at MZ and the lepton masses\n ip = Num_input()\n\n mu = ip.mu_at_MZ\n md = ip.md_at_MZ\n ms = ip.ms_at_MZ\n me = ip.me\n mmu = ip.mmu\n mtau = ip.mtau\n\n # Create the ADT:\n\n gamma_hat_P63eu_Q81u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n gamma_hat_P63muu_Q81u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P63tauu_Q81u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P63ed_Q81d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P63mud_Q81d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P63taud_Q81d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P63es_Q81s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P63mus_Q81s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P63taus_Q81s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P63eu_Q82u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P63muu_Q82u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P63tauu_Q82u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P63ed_Q82d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P63mud_Q82d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P63taud_Q82d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P63es_Q82s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P63mus_Q82s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P63taus_Q82s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_P62ue_Q83u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n gamma_hat_P62umu_Q83u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P62utau_Q83u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P62de_Q83d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P62dmu_Q83d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P62dtau_Q83d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P62se_Q83s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P62smu_Q83s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P62stau_Q83s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P62ue_Q84u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P62umu_Q84u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P62utau_Q84u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P62de_Q84d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P62dmu_Q84d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P62dtau_Q84d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P62se_Q84s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P62smu_Q84s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P62stau_Q84s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_Q81u = np.vstack((gamma_hat_P63eu_Q81u, gamma_hat_P63muu_Q81u, gamma_hat_P63tauu_Q81u, np.zeros((15,6))))\n gamma_hat_Q81d = np.vstack((np.zeros((3,6)), gamma_hat_P63ed_Q81d, gamma_hat_P63mud_Q81d, gamma_hat_P63taud_Q81d, np.zeros((12,6))))\n 
gamma_hat_Q81s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q81s, gamma_hat_P63mus_Q81s, gamma_hat_P63taus_Q81s, np.zeros((9,6))))\n\n gamma_hat_Q82u = np.vstack((gamma_hat_P63eu_Q82u, gamma_hat_P63muu_Q82u, gamma_hat_P63tauu_Q82u, np.zeros((15,6))))\n gamma_hat_Q82d = np.vstack((np.zeros((3,6)), gamma_hat_P63ed_Q82d, gamma_hat_P63mud_Q82d, gamma_hat_P63taud_Q82d, np.zeros((12,6))))\n gamma_hat_Q82s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q82s, gamma_hat_P63mus_Q82s, gamma_hat_P63taus_Q82s, np.zeros((9,6))))\n\n gamma_hat_Q83u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q83u, gamma_hat_P62umu_Q83u, gamma_hat_P62utau_Q83u, np.zeros((6,6))))\n gamma_hat_Q83d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q83d, gamma_hat_P62dmu_Q83d, gamma_hat_P62dtau_Q83d, np.zeros((3,6))))\n gamma_hat_Q83s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q83s, gamma_hat_P62smu_Q83s, gamma_hat_P62stau_Q83s))\n\n gamma_hat_Q84u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q84u, gamma_hat_P62umu_Q84u, gamma_hat_P62utau_Q84u, np.zeros((6,6))))\n gamma_hat_Q84d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q84d, gamma_hat_P62dmu_Q84d, gamma_hat_P62dtau_Q84d, np.zeros((3,6))))\n gamma_hat_Q84s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q84s, gamma_hat_P62smu_Q84s, gamma_hat_P62stau_Q84s))\n\n\n\n\n gamma_hat = np.array([gamma_hat_Q81u, gamma_hat_Q81d, gamma_hat_Q81s, gamma_hat_Q82u, gamma_hat_Q82d, gamma_hat_Q82s,\n gamma_hat_Q83u, gamma_hat_Q83d, gamma_hat_Q83s, gamma_hat_Q84u, gamma_hat_Q84d, gamma_hat_Q84s])\n\n\n # Return the tensor\n\n # tensor, zeile, spalte\n\n return gamma_hat", "def simulate(\n init_pos, init_en, model, pcas, desc_scalers, en_scaler, soap,\n steps=100000, delta=0.04, T=100, dataset='zundel_100k'):\n\n hartree = 4.3597443419e-18\n kb = 1.381e-23 / hartree\n beta = 1 / (kb * T)\n\n n_atoms = data.get_n_atoms(dataset)\n atoms = data.get_atoms_list(dataset)\n symbols = data.get_symbols(dataset)\n\n acceptance = 0\n cur_pos = np.copy(init_pos)\n cur_en = init_en\n\n pos_history = np.empty((steps, n_atoms, 3))\n en_history = np.empty(steps)\n\n for i in tqdm(range(steps)):\n dr = np.random.random((n_atoms, 3)) * 2 * delta - delta\n try_pos = np.copy(cur_pos) + dr\n\n molec = np.empty(1, dtype=object)\n molec[0] = Atoms(symbols, positions=try_pos)\n\n desc = data.compute_desc(\n molec, dataset=dataset, soap_params=soap\n )\n\n desc = preprocessing.transform_set(\n atoms=atoms, descriptors=desc, transformers=pcas\n )\n\n desc = preprocessing.transform_set(\n atoms=atoms, descriptors=desc, transformers=desc_scalers\n )\n\n desc = preprocessing.convert_to_inputs(desc)\n\n try_en = model.predict(desc)\n try_en = en_scaler.inverse_transform(try_en)[0, 0]\n \n if try_en < cur_en or exp(-beta * (try_en - cur_en)) >= random():\n acceptance += 1\n cur_pos, cur_en = try_pos, try_en\n\n pos_history[i] = cur_pos\n en_history[i] = cur_en\n\n return pos_history, en_history, float(acceptance) / steps", "def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 # clock2 variable\n th = 11.5 # The reactor temperature\n\n # You need vtol here, because of floating point error.\n loc0_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_th = ODE(env, S.sympify('diff(th(t))'), 
S.sympify(vr),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc0_FT = False\n\n loc1_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v1),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc1_FT = False\n\n loc2_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v2),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc2_FT = False\n\n # Location 3 is reactor shutdown\n loc3_FT = False\n\n # Location 0\n def location0(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thM and x >= T1:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 1, 0, x, y, th, None, True, None, None, curr_time\n elif th == thM and y >= T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 2, 0, x, y, th, None, None, True, None, curr_time\n elif th == thM and x < T1 and y < T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 3, 0, x, y, th, None, None, None, True, curr_time\n # The invariant\n elif th <= thM:\n if not loc0_FT:\n x = loc0_ode_x.compute(vals, curr_time-prev_time)\n y = loc0_ode_y.compute(vals, curr_time-prev_time)\n th = loc0_ode_th.compute(vals, curr_time-prev_time)\n loc0_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thM) > loc0_ode_th.vtol:\n deltath = loc0_ode_th.delta(vals, quanta=(thM-th))\n else:\n th = thM\n deltath = 0\n return 0, deltath, x, y, th, False, None, None, None, curr_time\n else:\n # print('th:', th)\n raise RuntimeError('Reached unreachable branch'\n ' in location 0')\n\n def location1(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n x = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc1_FT:\n x = loc1_ode_x.compute(vals, curr_time-prev_time)\n y = loc1_ode_y.compute(vals, curr_time-prev_time)\n th = loc1_ode_th.compute(vals, curr_time-prev_time)\n loc1_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc1_ode_th.vtol:\n deltath = loc1_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 1, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 1')\n\n def location2(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n y = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc2_FT:\n x = loc2_ode_x.compute(vals, curr_time-prev_time)\n y = loc2_ode_y.compute(vals, curr_time-prev_time)\n th = loc2_ode_th.compute(vals, 
curr_time-prev_time)\n loc2_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc2_ode_th.vtol:\n deltath = loc2_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 2, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 2')\n\n def location3(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n global step\n # print('total steps: ', step)\n # Done\n print(time.time()-start)\n sys.exit(1)\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location0,\n 1: location1,\n 2: location2,\n 3: location3\n }\n\n prev_time = env.now\n while(True):\n (cstate, delta, x, y, th,\n loc0_FT, loc1_FT, loc2_FT, loc3_FT,\n prev_time) = switch_case[cstate](x, y, th,\n loc0_FT,\n loc1_FT,\n loc2_FT,\n loc3_FT,\n prev_time)\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)", "def simulate_quantities_of_interest_superoperator(tlist, c_ops, noise_parameters_CZ, fluxlutman,\n fluxbias_q1, amp,\n sim_step,\n verbose: bool=True):\n\n H_0=calc_hamiltonian(0,fluxlutman,noise_parameters_CZ) # computed at 0 amplitude\n # NOTE: parameters of H_0 could be not exactly e.g. the bare frequencies\n\n # We change the basis from the standard basis to the basis of eigenvectors of H_0\n # The columns of S are the eigenvectors of H_0, appropriately ordered\n if noise_parameters_CZ.dressed_compsub():\n S = qtp.Qobj(matrix_change_of_variables(H_0),dims=[[3, 3], [3, 3]])\n else:\n S = qtp.tensor(qtp.qeye(3),qtp.qeye(3)) # line here to quickly switch off the use of S\n H_0_diag = S.dag()*H_0*S\n\n #w_q0 = fluxlutman.q_freq_01()\n w_q0 = (H_0_diag[1,1]-H_0_diag[0,0]) / (2*np.pi)\n #w_q1 = fluxlutman.q_freq_10()\n w_q1 = (H_0_diag[3,3]-H_0_diag[0,0]) / (2*np.pi)\n\n # H_rotateaway = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n\n w_q1_sweetspot = noise_parameters_CZ.w_q1_sweetspot()\n # Correction up to second order of the frequency due to flux noise, computed from w_q0(phi) = w_q0^sweetspot * sqrt(cos(pi * phi/phi_0))\n w_q1_biased = w_q1 - np.pi/2 * (w_q1_sweetspot**2/w_q1) * np.sqrt(1 - (w_q1**4/w_q1_sweetspot**4)) * fluxbias_q1 - \\\n - np.pi**2/2 * w_q1_sweetspot * (1+(w_q1**4/w_q1_sweetspot**4)) / (w_q1/w_q1_sweetspot)**3 * fluxbias_q1**2\n # with sigma up to circa 1e-3 \\mu\\Phi_0 the second order is irrelevant\n correction_to_H = coupled_transmons_hamiltonian_new(w_q0=0, w_q1=np.real(w_q1_biased-w_q1), alpha_q0=0, alpha_q1=0, J=0)\n\n\n t0 = time.time()\n\n exp_L_total=1\n for i in range(len(amp)):\n H=calc_hamiltonian(amp[i],fluxlutman,noise_parameters_CZ) + correction_to_H\n H=S.dag()*H*S\n if c_ops != []:\n c_ops_temp=[]\n for c in range(len(c_ops)):\n if isinstance(c_ops[c],list):\n c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis\n else:\n c_ops_temp.append(c_ops[c])\n liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()\n else:\n liouville_exp_t=(-1j*H*sim_step).expm()\n exp_L_total=liouville_exp_t*exp_L_total\n\n t1 = time.time()\n #print('\\n alternative propagator',t1-t0)\n\n\n U_final = exp_L_total\n #U_final=rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_0_diag)\n\n phases = phases_from_superoperator(U_final) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phi_cond = phases[-1]\n L1 = leakage_from_superoperator(U_final)\n population_02_state = 
calc_population_02_state(U_final)\n L2 = seepage_from_superoperator(U_final)\n avgatefid = pro_avfid_superoperator_phasecorrected(U_final,phases)\n avgatefid_compsubspace = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L1,phases) # leakage has to be taken into account, see Woods & Gambetta\n #print('avgatefid_compsubspace',avgatefid_compsubspace)\n\n \n \n #H_twoqubits = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n #U_final_new = rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_twoqubits) ### old method rotating away also the phase of the |2> state\n\n t = tlist[-1]+sim_step\n U_final_new = correct_reference(U=U_final,w_q1=w_q1,w_q0=w_q0,t=t)\n\n ### Script to check that we are correctly removing the single qubit phases in the rotating frame\n # cz_length = fluxlutman.cz_length()\n # U_check = (1j*H_twoqubits*cz_length).expm() * (-1j*H_0_diag*cz_length).expm()\n # phases_check = phases_from_superoperator(U_check)\n # print(phases_check)\n\n \n avgatefid_compsubspace_notphasecorrected = pro_avfid_superoperator_compsubspace(U_final_new,L1)\n # NOTE: a single qubit phase off by 30 degrees costs 5.5% fidelity\n\n ### Script to check that leakage and phi_cond are not affected by the phase correction, as it should be\n # L1_bis = leakage_from_superoperator(U_final_new)\n # phi_cond_bis = phases_from_superoperator(U_final_new)[-1]\n # print('leakage',L1-L1_bis)\n # print('phi_cond',phi_cond-phi_cond_bis)\n\n phases = phases_from_superoperator(U_final_new) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phase_q0 = (phases[1]-phases[0]) % 360\n phase_q1 = (phases[2]-phases[0]) % 360\n\n\n # We now correct only for the phase of qubit left (q1), in the rotating frame\n avgatefid_compsubspace_pc_onlystaticqubit = pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U_final_new,L1,phases)\n \n\n return {'phi_cond': phi_cond, 'L1': L1, 'L2': L2, 'avgatefid_pc': avgatefid,\n 'avgatefid_compsubspace_pc': avgatefid_compsubspace, 'phase_q0': phase_q0, 'phase_q1': phase_q1,\n 'avgatefid_compsubspace': avgatefid_compsubspace_notphasecorrected,\n 'avgatefid_compsubspace_pc_onlystaticqubit': avgatefid_compsubspace_pc_onlystaticqubit, 'population_02_state': population_02_state,\n 'U_final_new': U_final_new}", "def construct_basis_tensors(self):\n\t\n\tu = np.array([self.cth*self.cphi, self.cth*self.sphi, -self.sth])\n\tv = np.array([self.sphi, -self.cphi, 0.0])\n\n\tep = np.outer(u,u) - np.outer(v,v)\n\tec = np.outer(u,v) + np.outer(v,u)\n\t\n\tself.ep = self.c2psi*ep - self.s2psi*ec\n\tself.ec = self.s2psi*ep + self.c2psi*ec\n\t\t\n\treturn", "def _compute_t_matrix(self):\n self.t_matrix = self._kronecker_product(\n tf.diag(tf.reshape(self.likelihood_variances, [-1])),\n tf.eye(self.n_points_int, dtype=tf.float64))\n return", "def calc_tau(z_array_reion_allmodels, cosmology_allmodels, helium_allmodels,\n mass_frac_allmodels):\n\n def integrand(z, h, OM):\n H = av.Hubble_Param(z, h, OM) / (av.pc_to_m * 1.0e6 / 1.0e3)\n return (((1 + z)**2) / H)\n\n tau = []\n for model_number in range(len(mass_frac_allmodels)):\n\n # Set up some things for the model cosmology etc.\n model_mass_frac = mass_frac_allmodels[model_number]\n model_helium = helium_allmodels[model_number]\n model_h = cosmology_allmodels[model_number].H(0).value/100.0\n model_OM = cosmology_allmodels[model_number].Om0\n model_OB = cosmology_allmodels[model_number].Ob0\n model_z = z_array_reion_allmodels[model_number]\n\n 
model_tau = np.zeros(len(model_mass_frac))\n\n # First determine optical depth for redshift 0 to 4.\n tau_04 = integrate.quad(integrand, 0, 4, args=(model_h, model_OM,))[0] \n tau_04 *= (1 + 2*model_helium/(4 * (1-model_helium)))\n\n # Then determine optical depth from z = 4 to lowest z of model.\n tau_46 = integrate.quad(integrand, 4, model_z[-1], args=(model_h, model_OM,))[0]\n tau_46 *= (1 + model_helium/(4* (1-model_helium)))\n\n tau_06 = tau_04 + tau_46\n\n model_tau[-1] = tau_06\n\n # Then loop down through snapshots (low z to high z) and calculate tau.\n for snapnum in np.arange(len(model_mass_frac) - 2, -1, -1):\n\n this_z = model_z[snapnum]\n prev_z = model_z[snapnum + 1]\n\n # Hubble Parameter in Mpc/s/Mpc.\n H = av.Hubble_Param(this_z, model_h, model_OM) / (av.pc_to_m * 1.0e6 / 1.0e3)\n numerator = ((1 + this_z) **2) * (1.0 - model_mass_frac[snapnum])\n \n model_tau[snapnum] = model_tau[snapnum+1] + (( numerator / H) * (this_z - prev_z) * (1 + model_helium/(4 * (1-model_helium)))) \n\n model_tau *= av.n_HI(0, model_h, model_OB, model_helium) * av.c_in_ms * av.Sigmat\n\n tau.append(model_tau)\n\n return tau", "def correlation_4op_3t(self, rho0, oplist, signature, tau):\n\n\n if len(oplist) != 4:\n raise ValueError('Number of operators is not 4.')\n\n a = operator_to_superoperator(oplist[0], signature[0])\n b = operator_to_superoperator(oplist[1], signature[1])\n c = operator_to_superoperator(oplist[2], signature[2])\n d = operator_to_superoperator(oplist[3], signature[3])\n\n for _ in oplist:\n if issparse(_):\n _ = _.toarray()\n\n\n # nmax = max(len(tau3), len(tau2), len(tau1))\n\n # g = -1j * (tau > 0) * self.propagator(tau)\n # G = np.zeros((self.dim, self.dim, len(tau)))\n\n # for n, elem in enumerate(g):\n # G[:,:, n] = g[n]\n\n if self.G is None:\n self.propagator(tau)\n\n G = self.G\n\n # unit operator in Liouville space\n N = self.dim\n idm = self.idm(sp=False)\n\n # print(d.shape, dm2vec(rho0.toarray()).shape)\n if issparse(rho0):\n rho = d.dot(dm2vec(rho0.toarray()))\n else:\n rho = d.dot(dm2vec(rho0))\n\n # print(type(rho), rho.shape)\n\n if issparse(rho): rho = rho.toarray()\n\n tmp = np.tensordot(G, rho, axes=((1), (0)))\n tmp = c.dot(tmp)\n tmp = np.tensordot(G, tmp, axes=([1], [0])) # ajk\n\n # tmp = np.einsum('dcj, ca, abk, b -> djk', G, c, G, rho)\n\n\n '''\n Scipy sparse matrix does not support dimensions more than 2, so\n ndarray has to be used for the tensor products.\n\n This can be improved by using sparse package.\n '''\n tmp = np.tensordot(b.todense(), tmp, axes=([1], [0]))\n # tmp = b.todense().dot(tmp)\n\n tmp = np.tensordot(G, tmp, axes=([1], [0]))\n\n return oe.contract('a, ab, bijk -> ijk', idm, a.todense(), tmp)\n\n # corr = np.einsum('a, ab, bck, bc, cdj, de, efi, f ->kji', idm, \\\n # left(a), G, left(b), left(c), G, left(d).dot(dm2vec(rho0))\n\n # return", "def comp_vext_tem_pyth(self, ao_log=None, numba_parallel=True):\n\n def c2r_lm(conv, clm, clmm, m):\n \"\"\"\n clm: sph harmonic l and m\n clmm: sph harmonic l and -m\n convert from real to complex spherical harmonic\n for an unique value of l and m\n \"\"\"\n rlm = 0.0\n if m == 0:\n rlm = conv._c2r[conv._j, conv._j]*clm\n else:\n rlm = conv._c2r[m+conv._j, m+conv._j]*clm +\\\n conv._c2r[m+conv._j, -m+conv._j]*clmm\n\n if rlm.imag > 1e-10:\n print(rlm)\n raise ValueError(\"Non nul imaginary paert for c2r conversion\")\n return rlm.real\n\n def get_index_lm(l, m):\n \"\"\"\n return the index of an array ordered as \n [l=0 m=0, l=1 m=-1, l=1 m=0, l=1 m=1, ....]\n \"\"\"\n return (l+1)**2 -1 -l + 
m\n\n warnings.warn(\"Obselete routine use comp_vext_tem\")\n\n if use_numba:\n get_time_potential = nb.jit(nopython=True, parallel=numba_parallel)(get_tem_potential_numba)\n V_time = np.zeros((self.time.size), dtype=np.complex64)\n\n aome = ao_matelem_c(self.ao_log.rr, self.ao_log.pp)\n me = ao_matelem_c(self.ao_log) if ao_log is None else aome.init_one_set(ao_log)\n atom2s = np.zeros((self.natm+1), dtype=np.int64)\n for atom,sp in enumerate(self.atom2sp): \n atom2s[atom+1]= atom2s[atom] + me.ao1.sp2norbs[sp]\n\n R0 = self.vnorm*self.time[0]*self.vdir + self.beam_offset\n rr = self.ao_log.rr\n dr = (np.log(rr[-1])-np.log(rr[0]))/(rr.size-1)\n dt = self.time[1]-self.time[0]\n dw = self.freq_symm[1] - self.freq_symm[0]\n wmin = self.freq_symm[0]\n tmin = self.time[0]\n nff = self.freq.size\n ub = self.freq_symm.size//2 - 1\n l2m = [] # list storing m value to corresponding l\n fact_fft = np.exp(-1.0j*self.freq_symm[ub:ub+nff]*tmin)\n pre_fact = dt*np.exp(-1.0j*wmin*(self.time-tmin))\n\n for l in range(me.jmx+1):\n lm = []\n for m in range(-l, l+1):\n lm.append(m)\n l2m.append(np.array(lm))\n\n for atm, sp in enumerate(self.atom2sp):\n rcut = self.ao_log.sp2rcut[sp]\n center = self.atom2coord[atm, :]\n rmax = find_nearrest_index(rr, rcut)\n\n si = atom2s[atm]\n fi = atom2s[atm+1]\n\n for mu, l in enumerate(self.pb.prod_log.sp_mu2j[sp]):\n s = self.pb.prod_log.sp_mu2s[sp][mu]\n f = self.pb.prod_log.sp_mu2s[sp][mu+1]\n\n fr_val = self.pb.prod_log.psi_log[sp][mu, :]\n inte1 = np.sum(fr_val[0:rmax+1]*rr[0:rmax+1]**(l+2)*rr[0:rmax+1]*dr)\n\n for k in range(s, f):\n V_time.fill(0.0)\n\n m = l2m[l][k-s]\n ind_lm = get_index_lm(l, m)\n ind_lmm = get_index_lm(l, -m)\n\n if use_numba:\n get_time_potential(self.time, R0, self.vnorm, self.vdir, center, rcut, inte1,\n rr, dr, fr_val, me._c2r, l, m, me._j, ind_lm, ind_lmm, V_time)\n else:\n for it, t in enumerate(self.time):\n R_sub = R0 + self.vnorm*self.vdir*(t - self.time[0]) - center\n norm = np.sqrt(np.dot(R_sub, R_sub))\n\n if norm > rcut:\n I1 = inte1/(norm**(l+1))\n I2 = 0.0\n else:\n rsub_max = find_nearrest_index(rr, norm)\n\n I1 = np.sum(fr_val[0:rsub_max+1]*\n rr[0:rsub_max+1]**(l+2)*rr[0:rsub_max+1])\n I2 = np.sum(fr_val[rsub_max+1:]*\n rr[rsub_max+1:]/(rr[rsub_max+1:]**(l-1)))\n\n I1 = I1*dr/(norm**(l+1))\n I2 = I2*(norm**l)*dr\n clm_tem = csphar(R_sub, l)\n clm = (4*np.pi/(2*l+1))*clm_tem[ind_lm]*(I1 + I2)\n clmm = (4*np.pi/(2*l+1))*clm_tem[ind_lmm]*(I1 + I2)\n rlm = c2r_lm(me, clm, clmm, m)\n V_time[it] = rlm + 0.0j\n \n V_time *= pre_fact\n \n\n FT = fft(V_time)\n\n self.V_freq[:, si + k] = FT[ub:ub+nff]*fact_fft", "def setup_UT_te(self):\n self.setup_O()\n self.setup_T()\n # diagonalizing T\n ET, LT, RT = eig(self.T, b=self.O, left=True, right=True)\n LT = LT.transpose().conjugate()\n exp_T = np.exp(-1j*ET / self.hbar)\n # order according to absolute value:\n i_sort = np.argsort(-abs(exp_T))\n exp_T = exp_T[i_sort]\n RT = RT[:,i_sort]\n LT = LT[i_sort,:]\n # normalize RL to O and test the decomposition\n RT, LT = self.normalize_RL_to_O(RT, LT)\n # test the quality of the decomposition -------------------------\n # we exclude directions of evals below 10**(-15) by hand\n max_mode = len(np.where(abs(exp_T)>10**(-15))[0])\n ET_red = ET[:max_mode]\n RT_red = RT[:,:max_mode]\n LT_red = LT[:max_mode,:]\n # 1) test of orthogonality on the reduced space\n unity = np.dot(LT_red, np.dot(self.O, RT_red))\n ortho_error = abs(unity - np.diag(np.ones(max_mode))).max()\n print(\"Orthogonality errors\", ortho_error)\n # 1) test difference between the 
full and the reduced te-operator\n UT_red = np.dot(RT_red, np.dot(np.diag(exp_T[:max_mode]),\n np.dot(LT_red, self.O)))\n UT = np.dot(RT, np.dot(np.diag(exp_T), np.dot(LT, self.O)))\n print(\"Propagator error\", abs(UT_red - UT).max())\n self.UT = UT", "def tcs2(self):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n \n U,T = self.save(U, T, S)\n\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n H = self.M.propagate(S, 1, fin, observe=range(fin))\n \n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n\n for i in range(fin-1):\n U,T = self.save(U, T, H[i][1])\n\n return U,T", "def _compute_omega_matrix(self, t: tf.Tensor, t_mean: tf.Tensor,\n t_std_dev: tf.Tensor) -> None:\n t_original = t_std_dev * t + t_mean\n diff_matrix = t_original - tf.transpose(t_original)\n sum_matrix = t_original + tf.transpose(t_original)\n self.omega_k = 0.5 * tf.exp(-tf.abs(diff_matrix))\\\n - 0.5 * tf.exp(- sum_matrix)\n self.omega_matrix = self._kronecker_product(\n tf.eye(self.dimensionality_int, dtype=tf.float64), self.omega_k)\n return", "def initialize_system(self):\n self.mfd.set_mesh(self.mesh)\n [[div_data, div_row, div_col], \n [div_t_data, div_t_row, div_t_col]] = self.mfd.build_div()\n [self.m_x_coo_data, \n m_x_coo_row, \n m_x_coo_col] = self.mfd.build_m(save_update_info=True)\n\n self.m_x_coo_length = len(self.m_x_coo_data)\n \n # The data for the bottom right should be zeros. \n [c_data, c_row, c_col] = self.mfd.build_bottom_right()\n \n [coupling_data, coupling_row, coupling_col] = self.mfd.build_coupling_terms()\n\n self.div = sparse.coo_matrix((np.array(div_data), \n (np.add(np.array(div_row), \n -self.mesh.get_number_of_faces()), \n np.array(div_col))))\n self.div = self.div.tocsr()\n\n lhs_data = self.m_x_coo_data\n lhs_row = m_x_coo_row\n lhs_col = m_x_coo_col\n \n lhs_data += div_data\n lhs_row += div_row\n lhs_col += div_col\n\n lhs_data += div_t_data\n lhs_row += div_t_row\n lhs_col += div_t_col \n \n self.c_start = len(lhs_data)\n \n lhs_data += c_data\n lhs_row += c_row\n lhs_col += c_col \n\n self.c_end = len(c_data)\n\n lhs_data += coupling_data\n lhs_row += coupling_row\n lhs_col += coupling_col\n\n # Convert m_x_coo_data to numpy array. \n self.m_x_coo_data = np.array(self.m_x_coo_data)\n\n self.lhs_coo = sparse.coo_matrix((np.array(lhs_data), \n (np.array(lhs_row), \n np.array(lhs_col))))\n\n # RHS construction is for Neumann and Dirichlet \n # boundaries specified by the mesh. 
\n self.rhs_mfd = self.mfd.build_rhs()", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def compute(self): \n Ex=np.zeros((self.nx,self.ny+1))\n Ey=np.zeros((self.nx+1,self.ny))\n Hz=np.zeros((self.nx,self.ny))\n Hzx=np.zeros((self.nx,self.ny))\n Hzy=np.zeros((self.nx,self.ny))\n \n imx = []\n #eps, mu = self.makeenv()\n mu=np.ones((self.nx,self.ny))*const.mu_0\n eps = self.luneberg(int(self.nx/2), int(self.ny*2/3), self.R)\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n\n c = self.dt/(eps*self.ds)\n d = self.dt/(mu* self.ds)\n \n sigma = self.pml(eps, mu, 20)\n cax = 1 - (sigma[0] * self.dt / eps)\n cay = 1 - (sigma[1] * self.dt / eps)\n dax = 1 - (sigma[2] * self.dt / mu) \n day = 1 - (sigma[3] * self.dt / mu)\n \n bar = progressbar.ProgressBar()\n for n in bar(range(self.nt+1)):\n Ex[:,1:-1] = (cay[:,1:]+cay[:,:-1])/2*Ex[:,1:-1] + (c[:,1:]+c[:,:-1])/2*(Hz[:,1:]-Hz[:,:-1])\n Ey[1:-1,:] = (cax[1:,:]+cax[:-1,:])/2*Ey[1:-1,:] - (c[1:,:]+c[:-1,:])/2*(Hz[1:,:]-Hz[:-1,:])\n \n Hzx = dax*Hzx - d*(Ey[1:,:] - Ey[:-1,:])\n Hzy = day*Hzy + d*(Ex[:,1:] - Ex[:,:-1]) \n Hz = Hzx + Hzy + self.actualsource(self.source, self.f, n, self.dt) \n \n if(n%self.interval == 0): imx.append(Ex[:self.nx,:self.ny]**2 + Ey[:self.nx, :self.ny]**2)\n\n return imx", "def motion_model(particle_poses, speed_command, odom_pose, odom_pose_prev, dt):\n \n M = particle_poses.shape[0]\n \n # TODO. For each particle calculate its predicted pose plus some\n # additive error to represent the process noise. With this demo\n # code, the particles move in the -y direction with some Gaussian\n # additive noise in the x direction. 
Hint, to start with do not\n # add much noise.\n\n #time is in ns 1e-9\n dt = dt * 1e-9\n \n if dt ==0:\n return particle_poses\n\n for m in range(M):\n\n theta = particle_poses[m, 2]\n\n v = speed_command[0]\n omega = speed_command[1]\n \n if motion_model_velocity: #Velocity\n\n if omega == 0: #straight\n vel_dx = v * cos(theta) * dt\n vel_dy = v * sin(theta) * dt\n vel_dtheta = 0\n\n else:\n vel_dx = -v / omega * sin(theta) + v / omega * sin(theta + omega * dt)\n vel_dy = v / omega * cos(theta) - v / omega * cos(theta + omega * dt)\n vel_dtheta = omega * dt\n \n\n\n if motion_model_odom:\n odom_mov = rev_odm(odom_pose, odom_pose_prev)\n\n #particle_poses[m] = fwd_odm(particle_poses[m], odom_mov)\n\n #odom_dpose = fwd_odm2(particle_poses[m], odom_mov)\n (odom_dx, odom_dy, odom_dtheta) = fwd_odm2(particle_poses[m], odom_mov)\n\n\n\n\n #fusion\n w = motion_weighting\n dx = w * odom_dx + (1-w) * vel_dx\n dy = w * odom_dy + (1-w) * vel_dy\n dtheta = w * odom_dtheta + (1-w) * vel_dtheta\n \n \n\n \n \n #process noise\n if motion_model_noise:\n noise_x= np.random.normal(0, motion_sigma_x)\n noise_y= np.random.normal(0, motion_sigma_y)\n noise_theta= np.random.normal(0, motion_sigma_theta)\n \n #local noise\n if motion_model_noise_alt:\n localnoise_x = np.random.normal(0, motion_sigma_x)\n localnoise_y = np.random.normal(0, motion_sigma_y)\n\n noise_x = localnoise_x * cos(theta) - localnoise_y * sin(theta)\n noise_y = localnoise_y * sin(theta) + localnoise_y * cos(theta)\n noise_theta = np.random.normal(0, motion_sigma_theta)\n\n\n\n particle_poses[m, 0] += dx + noise_x\n particle_poses[m, 1] += dy + noise_y\n particle_poses[m, 2] = wraptopi(theta + dtheta + noise_theta)\n\n \n return particle_poses", "def exercise1d():\n\n # Defination of muscles\n muscle_parameters = MuscleParameters()\n print(muscle_parameters.showParameters())\n\n mass_parameters = MassParameters()\n print(mass_parameters.showParameters())\n\n # Create muscle object\n muscle = Muscle(muscle_parameters)\n\n # Create mass object\n mass = Mass(mass_parameters)\n\n pylog.warning(\"Isotonic muscle contraction to be implemented\")\n\n # Instatiate isotonic muscle system\n sys = IsotonicMuscleSystem()\n\n # Add the muscle to the system\n sys.add_muscle(muscle)\n\n # Add the mass to the system\n sys.add_mass(mass)\n\n # You can still access the muscle inside the system by doing\n # >>> sys.muscle.L_OPT # To get the muscle optimal length\n\n # Evalute for a single load\n load = 100.\n\n # Evalute for a single muscle stimulation\n muscle_stimulation = 1.\n\n # Set the initial condition\n x0 = [0.0, sys.muscle.L_OPT,\n sys.muscle.L_OPT + sys.muscle.L_SLACK, 0.0]\n \n # x0[0] - -> activation\n # x0[1] - -> contractile length(l_ce)\n # x0[2] - -> position of the mass/load\n # x0[3] - -> velocity of the mass/load\n \n\n # Set the time for integration\n t_start = 0.0\n t_stop = 0.5\n time_step = 0.001\n time_stabilize = 0.2\n\n time = np.arange(t_start, t_stop, time_step)\n \n loads = np.arange(20, 351, 10)\n \n velocities = []\n\n for index, load in enumerate(loads):\n \n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n time_stabilize=time_stabilize,\n stimulation=muscle_stimulation,\n load=load) \n\n if (result.l_mtc[-1] < sys.muscle.L_OPT + sys.muscle.L_SLACK):\n velocities.append(np.max(result.v_ce))\n print('max')\n else:\n velocities.append(np.min(result.v_ce))\n print('min')\n\n\n #Muscle contracile Velocity - Tension (load) relationship\n \n plt.figure('Isotonic muscle experiment')\n 
plt.title('Isotonic muscle experiment')\n plt.xlabel('Muscle Contractile Velocity [m/s]')\n plt.ylabel('Tension (load) [N]')\n plt.plot(velocities, loads)\n plt.grid()\n \n #For different stimulations 1.f\n \n muscle_stimulation = np.arange(0,1.1,0.2)\n plt.figure('Isotonic muscle exp with different stimulations')\n plt.title('Isotonic muscle experiment with different stimulations')\n\n for stim in muscle_stimulation:\n velocities = []\n for index, load in enumerate(loads):\n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n time_stabilize=time_stabilize,\n stimulation=stim,\n load=load) \n\n if (result.l_mtc[-1] < sys.muscle.L_OPT + sys.muscle.L_SLACK):\n velocities.append(np.max(result.v_ce))\n else:\n velocities.append(np.min(result.v_ce))\n plt.xlabel('Muscle Contractile Velocity [m/s]')\n plt.ylabel('Tension (load) [N]')\n plt.plot(velocities, loads)\n \n plt.legend(('0','0.2','0.4','0.6','0.8','1.0'))\n plt.grid()", "def _calc_R_T_amp(self, polarization, n, delta):\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n# # debugging statement\n# print(\"\\nr_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nt_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n for i in range(len(self.structure)-1):\n t_amp[i,i+1] = self._t_at_interface(polarization, n[i], n[i+1])\n r_amp[i,i+1] = self._r_at_interface(polarization, n[i], n[i+1])\n# # debugging statement\n# print(\"\\nmod r_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nmod t_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n M = np.zeros((len(self.structure),2,2),dtype=complex)\n# # debugging statement\n# print(\"\\nThe 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n m_r_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n for i in range(1,len(self.structure)-1):\n m_t_amp[i] = self._make_2x2(np.exp(-1j*delta[i]), 0., 0., np.exp(1j*delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1., r_amp[i,i+1], r_amp[i,i+1], 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_r_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_r_amp{}{}{} ---> {}\".format(i,j,k,m_r_amp[i][j][k]))\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_t_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_t_amp{}{}{} ---> {}\".format(i,j,k,m_t_amp[i][j][k]))\n\n m_temp = np.dot(m_t_amp, m_r_amp)\n\n# # debugging statement\n# print(\"\\nThe 'm_temp' matrix is:\")\n# for i in m_temp:\n# print i\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_temp{}{}{} ---> {}\".format(i,j,k,m_temp[i][j][k]))\n\n for i in range(1,len(self.structure)-1):\n M[i] = 1/t_amp[i,i+1] * np.dot(self._make_2x2(np.exp(-1j*delta[i]),\n 0., 
0., np.exp(1j*delta[i]),\n dtype=complex),\n self._make_2x2(1., r_amp[i,i+1], \\\n r_amp[i,i+1], 1., \\\n dtype=complex))\n# # debugging statement\n# print(\"\\nThe modified 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"mod M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n M_prime = self._make_2x2(1., 0., 0., 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe first modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"1st mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n for i in range(1, len(self.structure)-1):\n# print(\"\\n'M_prime' #{} is:\\n{}\".format(i,M_prime))\n M_prime = np.dot(M_prime, M[i])\n\n# # debugging statement\n# print(\"\\nThe second modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"2nd mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n# print(\"\\nr_amp01 is ---> {}\".format(r_amp[0,1]))\n# print(\"t_amp01 is ---> {}\".format(t_amp[0,1]))\n\n mod_M_prime = self._make_2x2(1.,r_amp[0,1], r_amp[0,1], 1., dtype=complex)/t_amp[0,1]\n\n# # debugging statement\n# print(\"\\nThe third modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"3rd mod M_prime{}{} ---> {}\".format(i, j, mod_M_prime[i][j]))\n\n M_prime = np.dot(self._make_2x2(1., r_amp[0,1], r_amp[0,1], 1., \\\n dtype=complex)/t_amp[0,1], M_prime)\n\n# # debugging statement\n# print(\"\\nThe 'M_final' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"M_final{}{} ---> {}\".format(i, j, M_prime[i][j]))\n\n t = 1/M_prime[0,0]\n r = M_prime[0,1]/M_prime[0,0]\n\n# # debugging statement\n# print(\"\\n't' ---> {}\".format(t))\n# print(\"'r' ---> {}\".format(r))\n\n return (r, t)", "def build_system(u, dt, dx, D=4, C=1, time_diff='FD', space_diff='FD', width_x=None,\n width_t=None, deg_x=5, deg_t=None):\n\n n, m = u.shape\n\n if width_x == None: width_x = n / 10\n if width_t == None: width_t = m / 10\n if deg_t == None: deg_t = deg_x\n\n # If we're using polynomials to take derviatives, then we toss the data around the edges.\n if time_diff == 'poly':\n m2 = m - 2 * width_t\n offset_t = width_t\n else:\n m2 = m\n offset_t = 0\n if space_diff == 'poly':\n n2 = n - 2 * width_x\n offset_x = width_x\n else:\n n2 = n\n offset_x = 0\n\n ########################\n # First take the time derivaitve for the left hand side of the equation\n ########################\n ut = np.zeros((n2, m2), dtype=np.complex64)\n\n if time_diff == 'poly':\n T = np.linspace(0, (m - 1) * dt, m)\n for i in range(n2):\n ut[i, :] = PolyDiff(u[i + offset_x, :], T, diff=1, width=width_t, deg=deg_t)[:, 0]\n\n else:\n for i in range(n2):\n ut[i, :] = FiniteDiff(u[i + offset_x, :], dt, 1)\n\n ut = np.reshape(ut, (n2 * m2, 1), order='F')\n\n ########################\n # Now form the rhs one column at a time, and record what each one is\n ########################\n\n u2 = u[offset_x:n - offset_x, offset_t:m - offset_t]\n Theta = np.zeros((n2 * m2, (D + 1) * C), dtype=np.complex64)\n ux = np.zeros((n2, m2), dtype=np.complex64)\n rhs_description = ['' for i in range((D + 1) * C)]\n\n if space_diff == 'poly':\n Du = {}\n for i in range(m2):\n Du[i] = PolyDiff(u[:, i + offset_t], np.linspace(0, (n - 1) * dx, n), diff=D, width=width_x, deg=deg_x)\n if space_diff == 'Fourier': ik = 1j * np.fft.fftfreq(n) * n\n\n for d in range(D + 1):\n # compute derivatives of d degree\n if d > 0:\n for i in range(m2):\n if space_diff == 'FD':\n ux[:, i] = FiniteDiff(u[:, i + 
offset_t], dx, d)\n elif space_diff == 'poly':\n ux[:, i] = Du[i][:, d - 1]\n else:\n ux = np.array(u2, dtype=np.complex64)\n # if d == 1: print(ux)\n\n # compute polynomials of all terms, c used as c+1\n for c in range(C):\n Theta[:, d * C + c] = np.reshape(np.power(ux, c+1), (n2 * m2), order='F')\n # print('d:{}, c:{}, mean:{}'.format(d, c, np.mean(Theta[:, d * C + c])))\n\n if d > 0:\n rhs_description[d * C + c] = rhs_description[d * C + c] + \\\n 'u_{' + ''.join(['x' for _ in range(d)]) + '}'\n else:\n rhs_description[d * C + c] = rhs_description[d * C + c] + 'u'\n\n if c > 0:\n rhs_description[d * C + c] = rhs_description[d * C + c] + '^' + str(c+1)\n\n # print(rhs_description)\n features, rhs = create_cross_features(Theta, rhs_description)\n features = np.concatenate((Theta, features), 1)\n rhs = np.concatenate((rhs_description, rhs), 0)\n\n return ut, features, rhs", "def Cost_function_OO_OneNorm(Rot_param_values, verbose=False): \n t1 = time.time()\n K = md.K_matr(Rot_param_values,\n nmo,\n active_indices,\n occupied_indices,\n optimize_occ)\n \n U_OO = scipy.linalg.expm( - K )\n \n if localize:\n C_MO = C_locPM @ U_OO\n elif OAO_start:\n C_MO = C_OAO @ U_OO\n else:\n C_MO = C_nonloc @ U_OO\n \n C_CMO_LMO = C_nonloc[:,ncore:ntot].T @ ovlp @ C_MO[:,ncore:ntot]\n \n if consider_cas:\n one_body_integrals_MO = np.einsum('ip,ij,jq->pq', C_CMO_LMO,\n one_body_integrals_CAS,C_CMO_LMO,\n optimize=True)\n two_body_integrals_MO = np.einsum('ip,jq,ijkl,kr,ls->pqrs', C_CMO_LMO,\n C_CMO_LMO,\n two_body_integrals_CAS,\n C_CMO_LMO,C_CMO_LMO,optimize=True)\n else:\n one_body_integrals_MO = np.einsum('ip,ij,jq->pq', C_CMO_LMO,\n one_body_integrals_CMO,C_CMO_LMO,\n optimize=True)\n two_body_integrals_MO = np.einsum('ip,jq,ijkl,kr,ls->pqrs', C_CMO_LMO,\n C_CMO_LMO,\n two_body_integrals_CMO,\n C_CMO_LMO,C_CMO_LMO,optimize=True)\n \n \n \n OneNorm = JW1norm_wo_const(constant+CASconstant,\n one_body_integrals_MO,\n two_body_integrals_MO) \n \n if verbose: print('1-Norm =', OneNorm)\n \n \n \n # if verbose: print('Calculating 1norm took:', time.time()-t1)\n return OneNorm", "def compute_autocorrelation_rlzn_ensemble(fopen_list, te):\n print 'Compute the autocorrelation'\n\n # initialize components of rho\n sumuu = 0.0\n sumvv = 0.0\n\n psiuu = 0.0\n psivv = 0.0\n\n sumup2 = 0.0\n sumvp2 = 0.0\n\n # get characteristics of mean velocity field\n fbs = netCDF4.Dataset('buoyancySurface.nc','r')\n lonCell = fbs.variables['lonCell']\n latCell = fbs.variables['latCell']\n lon = np.degrees(np.mod(lonCell[:]+np.pi,2*np.pi)-np.pi)\n lat = np.degrees(latCell[:])\n hull = spatial.ConvexHull(np.vstack((lon,lat)).T) \n triang = Triangulation(lon,lat)\n buoy_surf_zonal = fbs.variables['buoyancySurfaceVelocityZonal']\n buoy_surf_merid = fbs.variables['buoyancySurfaceVelocityMeridional']\n\n \n # build up layers for interpolation of particle layers\n interp_zonal = []\n interp_merid = []\n nlayers = len(fbs.dimensions['nBuoyancySurfaces'])\n for alayer in np.arange(nlayers):\n interp_zonal.append(LinearTriInterpolator(triang, buoy_surf_zonal[0,:,alayer]))\n interp_merid.append(LinearTriInterpolator(triang, buoy_surf_merid[0,:,alayer]))\n\n for num, afile in enumerate(fopen_list):\n print 'working on %d' % num\n # interpolate mean velocities onto points for the computation\n x = afile.variables['xParticle'][:te,:]\n y = afile.variables['yParticle'][:te,:]\n z = afile.variables['zParticle'][:te,:]\n latr, lonr = proj_lat_long(x,y,z)\n latr = np.degrees(latr)\n lonr = np.degrees(lonr)\n\n ubar = np.zeros(x.shape)\n vbar = 
np.zeros(x.shape)\n nparticle_layer = x.shape[1]/nlayers\n for alayer in np.arange(nlayers):\n ps = np.arange(alayer*nparticle_layer,(alayer+1)*nparticle_layer)\n ubar[:,ps] = interp_zonal[alayer](lonr[:,ps],latr[:,ps])\n vbar[:,ps] = interp_merid[alayer](lonr[:,ps],latr[:,ps])\n\n # compute portions of autocorrelation\n u = afile.variables['lonVel'][:te,:]\n up = u - ubar\n up0 = up[0,:]\n\n v = afile.variables['latVel'][:te,:]\n vp = v - vbar\n vp0 = vp[0,:]\n\n sumuu += up0*up\n sumvv += vp0*vp\n\n psiuu += up0*up0\n psivv += vp0*vp0\n \n sumup2 += np.nanmean(up**2.0, axis=0)\n sumvp2 += np.nanmean(vp**2.0, axis=0)\n \n\n fbs.close()\n\n # note division by psi removes need to divide the sums by the number of realizations\n sumuu /= psiuu \n sumvv /= psivv\n\n sumup2 /= len(fopen_list)\n sumvp2 /= len(fopen_list)\n\n print 'done'\n\n return sumuu, sumvv, sumup2, sumvp2, lonr[0,:], latr[0,:], lon, lat, hull", "def jacdelta_xi_tot(xis, cco2, n_alts = 40):\n\n J = np.empty((len(allatms), len(xis)))\n jacall = jacdelta_xi_all_x0s_fast(xis, cco2)\n delta = delta_xi_tot(xis, cco2)\n alldeltas = []\n for ialt in range(n_alts):\n alldeltas.append(delta_xi_at_x0(xis, cco2, ialt))\n\n for i in range(len(allatms)):\n for k in range(len(xis)):\n #print(i,k)\n J[i,k] = 1/(delta[i]) * np.sum([alldeltas[ialt][i]*jacall[i,k,ialt] for ialt in range(n_alts)])\n\n #print(np.mean(J))\n return J", "def create_deltas_tensor(self, deltas):\n T = self.T\n N = self.N\n neighs = self.neighs\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.get_empty_matrix((len(neighs[n]), T))\n for cc in deltas:\n t = int(cc[0])\n if t >= T:\n raise ValueError(\"Contact time above T!\")\n i = int(cc[1])\n j = int(cc[2])\n delta = cc[3]\n #lam = np.clip(lam, 0, 1 - self.err_max_lambda)\n #print(t,i,j,lam)\n index_i = neighs[j].index(i)\n self.deltas[j][index_i][t] = delta\n\n '''def create_delta_tensor(self, gamma):\n \"\"\"\n Deltas values for the computation of parameters of rate of contagion\n \"\"\"\n N = self.N\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.logp_lam[n]/gamma\n '''", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = 
self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation" ]
[ "0.5968374", "0.58851063", "0.5832688", "0.57955104", "0.5791791", "0.5745432", "0.5741553", "0.5735688", "0.57326496", "0.57203805", "0.56878024", "0.5681646", "0.56741714", "0.5659518", "0.56522816", "0.56369907", "0.5602459", "0.5594687", "0.55834895", "0.55351895", "0.5519467", "0.551474", "0.5514066", "0.5485939", "0.5485182", "0.5480458", "0.5480224", "0.546969", "0.5443741", "0.54424655" ]
0.63071626
0
Tags a photo. Returns true if the photo was tagged, false otherwise. If the photo already has the tag, this is considered a success.
def tag_photo(photo_id, tag_name, user_id=None):
	db = get_database()

	photo = get_photo(photo_id, user_id)
	if not photo:
		return False

	tag_name = tag_name.strip().lower()
	tag = Tag.query.filter_by(tag=tag_name).first()
	if not tag:
		tag = Tag(tag=tag_name)
		db.session.add(tag)
		db.session.commit()
		photo = get_photo(photo_id, user_id)

	photo_tag = PhotoTag(tag_id=tag.id, photo_id=photo.id)
	db.session.add(photo_tag)
	db.session.commit()

	return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_photo_tag(self, tag_name):\n data = self.db.make_query(\n '''select * from photo_tag where tag_name = \"{}\" '''\n .format(tag_name))\n\n if len(data) > 0:\n return True\n return False", "def tag(self, repository: str, tag: Optional[str], force: bool = False) -> bool:\n _ = force\n\n params = {\"repo\": repository}\n if tag is not None:\n params[\"tag\"] = tag\n\n response = self.client.post(f\"/images/{self.id}/tag\", params=params)\n\n if response.status_code == 201:\n return True\n\n error = response.json()\n if response.status_code == 404:\n raise ImageNotFound(error[\"cause\"], response=response, explanation=error[\"message\"])\n raise APIError(error[\"cause\"], response=response, explanation=error[\"message\"])", "def untag_photo(photo_id, tag_name, user_id=None):\n\tdb = get_database()\n\n\tphoto = get_photo(photo_id, user_id)\n\tif not photo:\n\t\treturn False\n\n\ttag_name = tag_name.strip().lower()\n\ttag = Tag.query.filter_by(tag=tag_name).first()\n\tif not tag:\n\t\treturn True\n\n\tphoto_tag = PhotoTag.query.filter_by(tag_id=tag.id, photo_id=photo.id).first()\n\n\tif photo_tag:\n\t\tdb.session.delete(photo_tag)\n\n\treturn True", "def add_tags_to_photo(self, photo_id, tag_list):\n print('\\nHello from add_tags_to_photo, the tag list is: ', tag_list)\n\n # for each tag\n # check if the tag is in the database already\n # if it is not then add it to the tag table\n for tag in tag_list:\n\n # will return None if the tag is not in the tag table\n # tag_name is the column name\n data = self.db.get_row('tag', 'tag_name', tag)\n\n print('data is', data)\n\n if data is None:\n\n print('\\nthat value {} is not in the db\\n'.format(tag))\n\n self.db.make_query(\n '''\n insert into tag (tag_name, user_id, photos)\n values (\"{}\", \"{}\", {})\n '''.format(\n tag,\n '28035310@N00',\n self.get_photo_count_by_tag(tag)\n )\n )\n\n print('\\nshould be added now...\\n')\n\n if self.db.get_row('tag', 'tag_name', tag):\n print('\\nadded tag, ', tag, '\\n')\n\n # UNIQUE constraint can cause problems here\n # so catch any exceptions\n try:\n # The tag is now in the database.\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n except Exception as e:\n print('Problem adding tag to photo_tag ', e)\n\n data = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n tags_in_data = []\n if len(data) > 0:\n for tag in data:\n tags_in_data.append(tag[1])\n\n print(tags_in_data)\n for tag in tag_list:\n if tag not in tags_in_data:\n return False\n else:\n self.update_photo_count(tag)\n\n return True", "def image_exists(self, id=None, tag=None):\n exists = False\n if id and self.image_by_id(id):\n exists = True\n elif tag and self.image_by_tag(tag):\n exists = True\n\n return exists", "def confirmTag(owner_id=None, photo_id=None, tag_id=None):\n params = {\n 'owner_id': owner_id,\n 'photo_id': photo_id,\n 'tag_id': tag_id\n }\n result = call('photos.confirmTag', **params)\n return parse_response(result)", "def tag_image(self, owner_userid, tag_userid, image_id, tag_name):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_userid = validation.cast_integer(tag_userid, 'userid')\n\t\t\timage_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tvalidation.required(tag_name, 'tag_name')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\t\ttag_name = tag_name.strip()\n\t\t@stack\n\t\tdef 
tag_txn(txn, owner, tagger, image, tag):\n\t\t\ttag = tag.lower()\n\t\t\ttxn.execute(\"\"\"\n\t\t\t\tselect zoto_insert_user_image_tag(%s, %s, %s, %s)\n\t\t\t\"\"\", (owner, image, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_id, tag_name)", "def check_image(self, tag):\n image_name = self.build_image_name(tag)\n try:\n self.client.images.get_registry_data(image_name)\n return True\n except Exception as ex:\n print('Image {} does not exist: '.format(image_name), str(ex))\n return False", "def check_all_tag_photo_counts(self):\n data = self.db.get_query_as_list(\n '''\n select * from tag\n '''\n )\n\n for tag in data:\n print()\n print(tag)\n # query for the number of photos using the tag\n # compare it to the number in the photos column\n # update if necessary\n query_count = self.db.get_query_as_list(\n '''\n select count(tag_name)\n from photo_tag\n where tag_name = \"{}\"\n '''.format(tag['tag_name'])\n )\n\n if query_count[0]['count(tag_name)'] == tag['photos']:\n print('OK', 'actual photos number with tag',\n query_count[0]['count(tag_name)'], 'in photos column', tag['photos'])\n else:\n print('MISSMATCH IN PHOTOS AND PHOTOS WITH TAG\\n', 'actual photos number with tag',\n query_count[0]['count(tag_name)'], 'in photos column', tag['photos'])\n\n tag_name = tag['tag_name']\n count = query_count[0]['count(tag_name)']\n break\n\n print('\\nDONE NO PROBLEMS!')", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def tag(self, image, repo, tag):\n check_blacklist(repo)\n logger.info(\"Tagging Docker image {} as {}:{}\".format(image, repo, tag))\n if not self.client.tag(image, repo, tag=tag, force=True):\n raise docker.errors.DockerException(\"tagging failed\")", "def is_tagged(self, instance_id, tag_name):\n tag_value = self.get_tag_for_instance(instance_id, tag_name)\n if tag_value is not None and tag_value == 'true':\n return True\n else:\n return False", "def putTag(owner_id=None, photo_id=None, user_id=None, x=None, y=None, x2=None,\\\n y2=None):\n params = {\n 'owner_id': owner_id,\n 'photo_id': photo_id,\n 'user_id': user_id,\n 'x': x,\n 'y': y,\n 'x2': x2,\n 'y2': y2\n }\n result = call('photos.putTag', **params)\n return parse_response(result)", "def has_image(self, container_name):\n name, tag = split_container_name(container_name)\n images = self._client.images(all=True)\n return any(container_name in image[\"RepoTags\"] for image in images)", "def update_photo_count(self, tag_name):\n count = self.get_photo_count_by_tag(tag_name)\n\n self.db.make_query(\n '''\n update tag\n set photos = {}\n where tag_name = \"{}\"\n '''.format(count, tag_name)\n )", "def add_tag(request):\n if request.method != 'POST':\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n img_id = request.POST['id']\n try:\n img = Image.objects.get(pk=img_id)\n except:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n tag_val = request.POST['tag']\n try:\n tag = tag_utils.TagsFromText(tag_val)[0]\n added = True\n img.tags.add(tag)\n img.save()\n except:\n added = False\n resp = rest.rest_success(request, img_id)\n respJson = json.loads(resp.content)\n respJson['added'] = added\n resp.content = json.dumps(respJson)\n return resp", "def tag_image(image):\n\n headers = {\n \"Content-Type\": \"application/octet-stream\",\n \"Ocp-Apim-Subscription-Key\": COMPUTER_VISION_KEY\n }\n\n endpoint = COMPUTER_VISION_URL + \"/vision/v3.1/tag\"\n if DEBUG:\n print(\"Calling endpoint %s\" % endpoint)\n\n 
response = requests.post(endpoint, data=image, headers=headers)\n if response.status_code == 200:\n tags = json.loads(response.content)\n return tags['tags']\n else:\n if DEBUG:\n print(\"Call to endpoint '%s' returned status code %s. Reason: %s\" % (endpoint, str(response.status_code), response.content))\n return None", "def test_update_multiple(self):\n tag_id = \"update_photo_tag\"\n # Get a couple of photos\n photos = self.photos[:2]\n\n # Add the tag using a list of photo objects\n self.client.photos.update(photos, tagsAdd=tag_id)\n\n # Check that it's there\n for photo in self.client.photos.list()[:2]:\n self.assertIn(tag_id, photo.tags)\n\n # Remove the tags using a list of photo ids\n self.client.photos.update([photo.id for photo in photos],\n tagsRemove=tag_id)", "def has_tag(self, tag):\n return tag in self.tags", "def has_tag(self, tag):\n return tag in self.tags", "def has_tags(self):\n return bool(self.tags)", "def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])", "def replace_tags(self, photo_id, tag_list):\n # get all the tags attached to the photo\n current_tags = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n print(current_tags)\n\n # remove the current tags\n self.db.make_query(\n '''\n delete from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n for tag in tag_list:\n # add tags in the tag_list\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n\n self.update_photo_count(tag)", "def add_photo_of_pet(self,auth_key: json, pet_id: str, pet_photo: str) -> json:\n\n data = MultipartEncoder(\n fields={\n 'pet_photo': (pet_photo, open(pet_photo, 'rb'), 'image/jpeg')\n })\n headers = {'auth_key': auth_key['key'], 'Content-Type': data.content_type}\n\n res = requests.post(self.base_url + f'api/pets/set_photo/{pet_id}', headers=headers, data=data)\n status = res.status_code\n\n result = \"\"\n try:\n result = res.json()\n except:\n result = res.text\n\n return status, result", "def test_tag_image_duplicate(self):\n\n message = {\n \"method\": \"build_image\",\n \"params\": {\"url\": self.url,\n \"tag_image\": self.tag_image}\n }\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"error\")", "def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)", "def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in self._tags:\n if tag == desired_tag:\n return True\n\n return False", "def add_tag(tag):\n check_call(['git', 'tag', tag])", "def check_tag(self, session, tag):\n if not tag:\n return False\n\n try:\n self._tag(session.get, key=tag, session=session)\n return True\n except exceptions.NotFound:\n return False", "def modificar_tags(self, nota_id, tags):\n nota = self._encontrar_nota(nota_id)\n if nota:\n nota.tags = tags\n return True\n return False" ]
[ "0.69589335", "0.6736868", "0.6084362", "0.6025986", "0.5851356", "0.5627572", "0.56024647", "0.55849546", "0.5533153", "0.5501501", "0.5438295", "0.53037816", "0.52874833", "0.5228985", "0.5188661", "0.5185491", "0.5161937", "0.51610476", "0.51548713", "0.51548713", "0.5135326", "0.51214176", "0.50712913", "0.50643843", "0.5063921", "0.5020382", "0.50195205", "0.5016574", "0.5011289", "0.50034577" ]
0.75611085
0
Untags a photo. Returns true if the photo was untagged, false otherwise. If the photo already does not have the tag, this is considered a success.
def untag_photo(photo_id, tag_name, user_id=None): db = get_database() photo = get_photo(photo_id, user_id) if not photo: return False tag_name = tag_name.strip().lower() tag = Tag.query.filter_by(tag=tag_name).first() if not tag: return True photo_tag = PhotoTag.query.filter_by(tag_id=tag.id, photo_id=photo.id).first() if photo_tag: db.session.delete(photo_tag) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag_photo(photo_id, tag_name, user_id=None):\n\tdb = get_database()\n\n\tphoto = get_photo(photo_id, user_id)\n\tif not photo:\n\t\treturn False\n\n\ttag_name = tag_name.strip().lower()\n\ttag = Tag.query.filter_by(tag=tag_name).first()\n\tif not tag:\n\t\ttag = Tag(tag=tag_name)\n\t\tdb.session.add(tag)\n\t\tdb.session.commit()\n\n\tphoto = get_photo(photo_id, user_id)\n\tphoto_tag = PhotoTag(tag_id=tag.id, photo_id=photo.id)\n\tdb.session.add(photo_tag)\n\tdb.session.commit()\n\n\treturn True", "def check_photo_tag(self, tag_name):\n data = self.db.make_query(\n '''select * from photo_tag where tag_name = \"{}\" '''\n .format(tag_name))\n\n if len(data) > 0:\n return True\n return False", "def untag_image(self, owner_userid, tag_userid, image_id, tag_name):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_userid = validation.cast_integer(tag_userid, 'owner_userid')\n\t\t\tmedia_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tvalidation.required(tag_name, 'tag_name')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\t\ttag_name = tag_name.strip()\n\n\t\td = self.app.db.runOperation(\"\"\"\n\t\t\t\tselect zoto_remove_user_image_tag(\n\t\t\t\t\t%s,\n\t\t\t\t\t%s,\n\t\t\t\t\t%s,\n\t\t\t\t\t%s\n\t\t\t\t)\n\t\t\t\t\"\"\", (owner_userid, image_id, tag_name, tag_userid))\n\t\td.addCallback(lambda _: (0, 'tag [%s] removed from [%s]' % (tag_name, image_id)))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d", "def confirmTag(owner_id=None, photo_id=None, tag_id=None):\n params = {\n 'owner_id': owner_id,\n 'photo_id': photo_id,\n 'tag_id': tag_id\n }\n result = call('photos.confirmTag', **params)\n return parse_response(result)", "def use_tag(self, tag):\n try:\n self.available_tags.remove(tag)\n except ValueError:\n return False\n return True", "def remove_tag(self, name):\n eh = SimpleErrorHandler()\n\n self._client.execute('tag', name, remove=True, eh=eh)\n\n return bool(eh)", "def tag(self, repository: str, tag: Optional[str], force: bool = False) -> bool:\n _ = force\n\n params = {\"repo\": repository}\n if tag is not None:\n params[\"tag\"] = tag\n\n response = self.client.post(f\"/images/{self.id}/tag\", params=params)\n\n if response.status_code == 201:\n return True\n\n error = response.json()\n if response.status_code == 404:\n raise ImageNotFound(error[\"cause\"], response=response, explanation=error[\"message\"])\n raise APIError(error[\"cause\"], response=response, explanation=error[\"message\"])", "def removeTag(owner_id=None, photo_id=None, tag_id=None):\n params = {\n 'owner_id': owner_id,\n 'photo_id': photo_id,\n 'tag_id': tag_id\n }\n result = call('photos.removeTag', **params)\n return parse_response(result)", "def completely_remove_tag(self, owner_userid, tag_name):\n\t\tself.log.debug(\"completely_remove_tag()\")\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\n\t\td = self.app.db.runOperation(\"SELECT zoto_remove_tag_from_all_user_images(%s, %s)\",\n\t\t\t\t (owner_userid, tag_name))\n\t\td.addCallback(lambda _: (0, \"success\"))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d", "def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])", "def multi_untag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t@stack\n\t\tdef 
delete_txn(txn, owner, tags, ids, tagger):\n\t\t\tfor tag in tags:\n\t\t\t\tid_list = []\n\t\t\t\tfor id in ids:\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_remove_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(delete_txn, owner_userid, tag_names, image_ids, tag_userid)", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def has_tags(self):\n return bool(self.tags)", "def test_delete_image_by_wrong_tag(self, test_image):\n tag = f\"{TEST_IMAGE_NAME}:wrong_tag\"\n assert image_exists(TEST_IMAGE_NAME)\n assert not delete_image(tag, force=True)\n assert image_exists(TEST_IMAGE_NAME)\n\n # now delete using that tag, both tags will be gone because it's the same image.\n build_test_image(tag=tag)\n assert image_exists(TEST_IMAGE_NAME)\n assert image_exists(tag)\n assert delete_image(tag, force=True)\n assert not image_exists(TEST_IMAGE_NAME)\n assert not image_exists(tag)", "def clean(self):\n tags = self.get_tags()\n for tag in tags:\n image_name = self.build_image_name(tag)\n try:\n self.client.images.remove(image_name, force=True)\n except Exception as ex:\n print('Cannot remove {}: {}'.format(tag, str(ex)))", "def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)", "def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def untag():\n version = git.prompt_tag('Which tag 
to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)", "def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)", "def replace_tags(self, photo_id, tag_list):\n # get all the tags attached to the photo\n current_tags = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n print(current_tags)\n\n # remove the current tags\n self.db.make_query(\n '''\n delete from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n for tag in tag_list:\n # add tags in the tag_list\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n\n self.update_photo_count(tag)", "def test_update_multiple(self):\n tag_id = \"update_photo_tag\"\n # Get a couple of photos\n photos = self.photos[:2]\n\n # Add the tag using a list of photo objects\n self.client.photos.update(photos, tagsAdd=tag_id)\n\n # Check that it's there\n for photo in self.client.photos.list()[:2]:\n self.assertIn(tag_id, photo.tags)\n\n # Remove the tags using a list of photo ids\n self.client.photos.update([photo.id for photo in photos],\n tagsRemove=tag_id)", "def delete_image(image_url):\n done = False\n\n if os.path.exists(image_url):\n try:\n os.remove(image_url)\n done = True\n except Exception:\n pass\n return done", "def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])", "def test_add_remove_tag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n self.assertIsNotNone(id)\n before = self.images.find_one({'_id': id})\n self.assertIsNotNone(before)\n # Add a tag a make sure it worked\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertIn('testtag', after['tag'])\n self.assertIn(self.tag, after['tag'])\n # Remove a tag and make sure it worked\n status = self.m.remove_tag(self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertNotIn('testtag', after['tag'])", "def is_tag(tag_name, user_path, current_user) -> bool:\n user = current_user[0]\n tag_list = os.listdir((user_path + '\\\\' + user).encode('unicode_escape'))\n temp = list(map(bytes.decode, tag_list))\n if tag_name in temp:\n return True\n else:\n return False", "def image_exists(self, id=None, tag=None):\n exists = False\n if id and self.image_by_id(id):\n exists = True\n elif tag and self.image_by_tag(tag):\n exists = True\n\n return exists", "def done(self, remove=False, verbose=True):\n return _image.image_done(self, remove, verbose)", "def modificar_tags(self, nota_id, tags):\n nota = self._encontrar_nota(nota_id)\n if nota:\n nota.tags = tags\n return True\n return False", "def remove_images(self, images):\n url = (\"https://api.imgur.com/3/album/{0}/\"\n \"remove_images\".format(self._delete_or_id_hash))\n # NOTE: Returns True and everything seem to be as it should in testing.\n # Seems most likely to be upstream bug.\n params = {'ids': images}\n return self._imgur._send_request(url, params=params, method=\"DELETE\")" ]
[ "0.6170899", "0.60096574", "0.59105897", "0.580249", "0.5790244", "0.57739645", "0.5647035", "0.56177795", "0.54015493", "0.53528", "0.5332508", "0.52831495", "0.5233669", "0.51115745", "0.5084609", "0.50655323", "0.5036791", "0.4999244", "0.4991585", "0.4976553", "0.4951583", "0.4928146", "0.49257377", "0.49197364", "0.48751953", "0.48492882", "0.48387992", "0.48334363", "0.48314574", "0.48217812" ]
0.73501134
0
Gets an existing photo. Returns None if the photo_id does not belong to user_id. If user_id is not provided, defaults to the currently logged in user.
def get_photo(photo_id, user_id=None): if user_id == None: user_id = current_user.get_user().id return Photo.query.filter_by(id=photo_id, user_id=user_id).first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image(image_id, user_id):\n\n with image_backend(user_id) as backend:\n return backend.get_image(image_id)", "def get_photo(self, photo_id):\n uri = 'photos/' + photo_id\n return self.make_request(uri)", "def get_photo_path(photo_id, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\tphoto = Photo.query.filter_by(id=photo_id, user_id=user_id).first()\n\tif photo:\n\t\tpath = os.path.join(current_app.instance_path, photo.url)\n\t\treturn path\n\n\treturn None", "def photo(self):\n person = self.get_person()\n if person is None:\n return None\n return person.photo", "def photo(self):\n if \"photo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"photo\"], OneDriveObjectBase):\n return self._prop_dict[\"photo\"]\n else :\n self._prop_dict[\"photo\"] = ProfilePhoto(self._prop_dict[\"photo\"])\n return self._prop_dict[\"photo\"]\n\n return None", "def get_picture_for_model(user_id, image_id):\n path = f'images/{user_id}/{image_id}'\n s3_resource.Object(bucket_name, path).download_file(f'{image_id}')\n return None", "def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"user_id\",\n type=int, location=\"args\", required=True)\n args = parser.parse_args()\n\n try:\n #get user from database\n user = User.query.filter(User.id==args.user_id).first()\n if not user:\n return Response(status=404,\n message=\"User not found.\").__dict__,404\n\n return Response(status=200, message=\"Pictures found.\",\n value=[p.dict_repr() for p in user.pictures.all()])\\\n .__dict__, 200\n except Exception as e:\n app.logger.error(e)\n return Response(status=500, message=\"Internal server error.\").__dict__,500", "def get_img(self, img=None):\n\n if self.img is None: #No image specified to the ROI object\n\n # If no image is saved, check if an image was passed. 
If so, return the ROI of that image.\n if img is None:\n print('no image provided')\n else:\n return img[self.coords[0]:self.coords[1], self.coords[2]:self.coords[3]]\n else:\n return self.img", "def create_photo(name, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\tdb = get_database()\n\tphoto = Photo(name=name, user_id=user_id)\n\tdb.session.add(photo)\n\tdb.session.flush()\n\n\treturn photo", "def fetch_photo_id_image(self):\r\n if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):\r\n return\r\n\r\n self.photo_id_key = self.original_verification(self.user).photo_id_key\r\n self.save()", "def read_one(id):\n # Get the photo requested\n photo = Photo.query.filter(Photo.id == id).one_or_none()\n\n # Did we find a photo?\n if photo is not None:\n\n # Serialize the data for the response\n photo_schema = PhotoSchema()\n data = photo_schema.dump(photo)\n return data\n\n # Otherwise, nope, didn't find that photo\n else:\n abort(\n 404,\n \"Photo not found for Id: {id}\".format(id=id),\n )", "def get_photos(self, user_id):\n\n json_photos = self._receive_photos_from_vk(user_id)\n return self._parse_photos(json_photos)", "def profile_pic(request):\n if request.user.is_authenticated:\n profile_obj = CustomUser.objects.get(id__exact=request.user.id)\n pic = profile_obj.avatar\n return {'picture': pic}\n return {}", "def get_image(self, image_id):\n url = self.get_url(image_id)\n return image_util.load_image_from_url(url) if url else None", "def get_image_id(self, user_id, ts):\n return self.user_id_ts_to_image_id[(user_id, ts)]", "def photos_by_user(user_id):\n photos = Photo.query.filter(Photo.user_id == user_id).all()\n return photos", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def get_image(self, image_id):\r\n try:\r\n return self.get_all_images(image_ids=[image_id])[0]\r\n except IndexError: # None of those images available\r\n return None", "def get_queryset(self):\n return Photo.objects.filter(user=self.request.user)", "def resolve_photo(self, info):\n if self.photo:\n return info.context.build_absolute_uri(self.photo.url)\n return None", "def get_object(self):\n return get_object_or_404(User, pk__iexact=self.request.user.id)", "def send_photo(photo_id, user_id=None):\n\n\tpath = get_photo_path(photo_id, user_id)\n\tif path:\n\t\treturn send_file(path)", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def get_image():\n return models.Image.objects.all()[0]", "def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):\n\n # If we don't have a user then just return\n if user is None:\n return\n\n # Save photo from FB\n if kwargs['backend'].name == \"facebook\":\n try:\n image_url = \"https://graph.facebook.com/%s/picture?type=large\" % uid\n result = urllib.urlretrieve(image_url)\n\n def save_image(user, uid, result):\n user.original_photo.save(\"%s.jpg\" % uid, File(open(result[0])))\n user.save(update_fields=['original_photo'])\n\n retry_cloudfiles(save_image, user, uid, result)\n except URLError:\n pass\n elif kwargs['backend'].name == \"twitter\" and social:\n try:\n # Get profile image to save\n if response['profile_image_url'] != '':\n image_result = urllib.urlretrieve(response['profile_image_url'])\n\n def save_image(user, uid, 
image_result):\n user.original_photo.save(\"%s.jpg\" % uid, File(open(image_result[0])))\n user.save(update_fields=['original_photo'])\n\n retry_cloudfiles(save_image, user, uid, image_result)\n except URLError:\n pass", "def get_object(self, url_id, user_id):\n try:\n return Link.objects.get(id=url_id, user=user_id)\n except Link.DoesNotExist:\n return None", "def get_user(self, username=None,id=None,include_detail=False,include_pics=False,pic_page_size=None,last_pic_id=False):\n \n if (id is None and username is None):\n if self.authenticator.access_token is not None:\n id = 'self'\n else:\n raise PicplzError(\"get_user method requires one of a pic id, longurl_id or shorturl_id\")\n \n parameters = {}\n if id is not None:\n parameters['id']=id\n if username is not None:\n parameters['username']=username\n if include_detail:\n parameters['include_detail']=1\n if include_pics:\n parameters['include_pics']=1\n if last_pic_id:\n parameters['last_pic_id']=last_pic_id\n if pic_page_size is not None:\n parameters['pic_page_size']=pic_page_size\n \n if id == 'self':\n returned_json = self.__make_authenticated_get__(self.user_endpoint, parameters)\n returned_json = self.__make_unauthenticated_get__(self.user_endpoint, parameters)\n returned_data = simplejson.loads(returned_json)\n data = returned_data['value']['users'][0]\n user = PicplzUser.from_dict(self, data)\n try:\n has_more_pics = returned_data['value']['users'][0]['more_pics']\n if has_more_pics:\n user.__has_more_pics__ = True\n else:\n user.__has_more_pics__ = False\n except:\n user.__has_more_pics__ = False\n try:\n last_pic_id = returned_data['value']['users'][0]['last_pic_id']\n user.__last_pic_id__ = last_pic_id\n except:\n user.__last_pic_id__ = False\n \n return user", "def photo(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n photo_file = update.message.photo[-1].get_file()\n x = \".jpg\"\n z = user.first_name + x\n photo_file.download(z)\n logger.info(\"Photo of %s: %s\", user.first_name, 'user_photo.jpg')\n update.message.reply_text(\n 'Che bella foto! ora, mandami la tua posizione se puoi, o scrivi /skip se non vuoi farlo.'\n )\n\n return LOCATION", "def get_photo(self, i):\r\n return self.__photos[i]" ]
[ "0.6505524", "0.6067706", "0.59980583", "0.58206105", "0.56867594", "0.55746037", "0.5572319", "0.5527223", "0.54421145", "0.5316648", "0.53055197", "0.5293172", "0.52736866", "0.5254762", "0.5250345", "0.52422106", "0.5229346", "0.5229346", "0.52186954", "0.5199094", "0.5197291", "0.517998", "0.51723146", "0.51312226", "0.5127891", "0.51127404", "0.5104139", "0.50905865", "0.5089578", "0.5063307" ]
0.7884641
0
Gets the path (filename) of an existing photo. Returns None if the photo_id does not belong to user_id. If user_id is not provided, defaults to the currently logged in user.
def get_photo_path(photo_id, user_id=None): if user_id == None: user_id = current_user.get_user().id photo = Photo.query.filter_by(id=photo_id, user_id=user_id).first() if photo: path = os.path.join(current_app.instance_path, photo.url) return path return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_photo(photo_id, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\treturn Photo.query.filter_by(id=photo_id, user_id=user_id).first()", "def profile_photo(self):\n images_directory_index = 6\n filepath = None\n photo = self.profile_photo_path\n if photo is not None:\n photo_dir = photo.split(\"/\")[images_directory_index:]\n filepath = \"/\".join(photo_dir)\n return filepath", "def get_image_path(self) -> Optional[str]:\n if not self.image or not self.image.file_path:\n return None\n return self.image.file_path", "def photo_path(instance, filename):\n return 'users/{0}/{1}'.format(instance.user.username, '.'.join(['profile-photo', filename.split('.')[-1]]))", "def get_image_id(self, user_id, ts):\n return self.user_id_ts_to_image_id[(user_id, ts)]", "def get_image_path(self) -> Optional[str]:\n try:\n return self.localised_faces.all()[0].get_image_path()\n except IndexError:\n logging.exception(\"Failed to find an image for %s\", self)\n return None", "def photo(self):\n person = self.get_person()\n if person is None:\n return None\n return person.photo", "def get_pathname(self):\n return self.image_data.path", "def get_image(image_id, user_id):\n\n with image_backend(user_id) as backend:\n return backend.get_image(image_id)", "def photo(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n photo_file = update.message.photo[-1].get_file()\n x = \".jpg\"\n z = user.first_name + x\n photo_file.download(z)\n logger.info(\"Photo of %s: %s\", user.first_name, 'user_photo.jpg')\n update.message.reply_text(\n 'Che bella foto! ora, mandami la tua posizione se puoi, o scrivi /skip se non vuoi farlo.'\n )\n\n return LOCATION", "def get_picture_for_model(user_id, image_id):\n path = f'images/{user_id}/{image_id}'\n s3_resource.Object(bucket_name, path).download_file(f'{image_id}')\n return None", "def photo(self):\n if \"photo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"photo\"], OneDriveObjectBase):\n return self._prop_dict[\"photo\"]\n else :\n self._prop_dict[\"photo\"] = ProfilePhoto(self._prop_dict[\"photo\"])\n return self._prop_dict[\"photo\"]\n\n return None", "def send_photo(photo_id, user_id=None):\n\n\tpath = get_photo_path(photo_id, user_id)\n\tif path:\n\t\treturn send_file(path)", "def resolve_photo(self, info):\n if self.photo:\n return info.context.build_absolute_uri(self.photo.url)\n return None", "def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"pedestrian\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def create_photo(name, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\tdb = get_database()\n\tphoto = Photo(name=name, user_id=user_id)\n\tdb.session.add(photo)\n\tdb.session.flush()\n\n\treturn photo", "def file(self):\n if photos_settings.FORMATED_PHOTO_FILENAME is not None:\n return photos_settings.FORMATED_PHOTO_FILENAME(self)\n source_file = path.split(self.photo.image.name)\n return path.join(source_file[0], str(self.format.id) + '-' + source_file[1])", "def get_profile_picture(user):\n b = boto_init_s3(settings.BUCKET_NAME)\n if b:\n try:\n p = ProfilePicture.objects.get(is_current=True, user_id=user)\n s3_file_path = b.get_key(p.path)\n return s3_file_path.generate_url(expires_in=600)\n except:\n return \"\"\n return \"\"", "def 
image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"].strip().lower() == 'homeobject':\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"pcb\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def imagePath(self):\n return self.path", "def get_img_file(image, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img')\n # get location title.\n loc_id = db.get_img_loc(int(image))\n if loc_id == None:\n raise ValueError('The image %s could not be found' % image)\n loc = db.get_location(loc_id)\n title = loc['title']\n # add to file name\n img_dir = path.join(img_dir, title, str(image) + '.jpg')\n return img_dir", "def profile_photo(request, username):\n try:\n user = User.objects.get(username__iexact=username)\n return utility.serve_file(user.profile.photo.path)\n except ObjectDoesNotExist:\n raise Http404()", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n return info['path']", "def UserImage(self, service_user_image=None):\n if not self.image:\n return None\n if '@' not in self.image:\n return self.image\n user_image = (\n self.annotations.get(USER_IMAGE_ANNOTATION) or service_user_image)\n if not user_image:\n return self.image\n # The image should be in the format base@sha256:hashhashhash\n match = USER_IMAGE_PATTERN.match(self.image)\n if not match:\n return self.image\n (base, h) = match.group(1, 2)\n if not user_image.startswith(base):\n # The user-image is out of date.\n return self.image\n if len(h) > 8:\n h = h[:8] + '...'\n return user_image + ' at ' + h", "def _get_filepath(self, name=None, use_timestamp=True):\n current_time = str(int(time.time()))\n if not name and not use_timestamp:\n raise Exception(\"Name or timestamp is required\")\n if name:\n self.fname = \"%s\" % name\n current_time = \"_%s\" % current_time\n if use_timestamp:\n self.fname = \"%s%s\" % (self.fname, current_time)\n if len(self.fname) > 0:\n self.fname = \"%s/%s.jpg\" % (self.picture_directory, self.fname)\n return self.fname", "def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path", "def get_photo_url(photo, handler):\n id = photo.key().name().split(':')[1]\n return handler.get_url('/photo', id=id)", "def fetch_photo_id_image(self):\r\n if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):\r\n return\r\n\r\n self.photo_id_key = self.original_verification(self.user).photo_id_key\r\n self.save()" ]
[ "0.6530011", "0.6450243", "0.62919366", "0.5966291", "0.5716511", "0.57012457", "0.5618505", "0.54797435", "0.54769915", "0.54615694", "0.5402259", "0.5347171", "0.53299916", "0.53222126", "0.52954847", "0.52600044", "0.5242613", "0.5219361", "0.5200383", "0.5189885", "0.51472807", "0.51433396", "0.5127819", "0.5109086", "0.5104971", "0.5101124", "0.50869334", "0.50853723", "0.50784737", "0.50520223" ]
0.77551186
0
Sends the full photo to the user. Returns None if the photo_id does not belong to user_id. If user_is not provided, defaults to the currently logged in user.
def send_photo(photo_id, user_id=None): path = get_photo_path(photo_id, user_id) if path: return send_file(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_photo(self, bot, update, photo, **kwargs):\n\n bot.sendChatAction(update.message.chat_id, action=\"upload_photo\")\n return bot.sendPhoto(update.message.chat_id, photo=photo, **kwargs)", "def get_photo(photo_id, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\treturn Photo.query.filter_by(id=photo_id, user_id=user_id).first()", "def photo(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n photo_file = update.message.photo[-1].get_file()\n x = \".jpg\"\n z = user.first_name + x\n photo_file.download(z)\n logger.info(\"Photo of %s: %s\", user.first_name, 'user_photo.jpg')\n update.message.reply_text(\n 'Che bella foto! ora, mandami la tua posizione se puoi, o scrivi /skip se non vuoi farlo.'\n )\n\n return LOCATION", "def sendPhoto(self, chat_id, photo, photo_url=True):\n files = {'photo': photo}\n if photo_url:\n params = {'chat_id': chat_id, 'photo': photo}\n response = requests.post(self.api_url + '/sendPhoto', data=params)\n else:\n params = {'chat_id': chat_id}\n response = requests.post(self.api_url + '/sendPhoto', data=params, files=files)\n return response.json()", "def main(uid, password, photo, users_service, auth_service, photos_service):\n user = users_service.get_user_by_id(uid)\n auth_service.authenticate(user, password)\n photos_service.upload_photo(user['uid'], photo)", "def _upload_photo(self, user, file_path):\n data = {\n \"full_name\": user.userprofile.full_name,\n \"email\": user.email,\n \"username\": user.username,\n \"lat\": 40.005814,\n \"lng\": -3.42071,\n \"photo\": open(file_path, \"rb\"),\n \"externalaccount_set-MAX_NUM_FORMS\": \"1000\",\n \"externalaccount_set-INITIAL_FORMS\": \"0\",\n \"externalaccount_set-TOTAL_FORMS\": \"0\",\n \"language_set-MAX_NUM_FORMS\": \"1000\",\n \"language_set-INITIAL_FORMS\": \"0\",\n \"language_set-TOTAL_FORMS\": \"0\",\n \"basic_section\": \"\",\n }\n data.update(_get_privacy_fields(MOZILLIANS))\n with override_script_prefix(\"/en-US/\"):\n url = reverse(\"phonebook:profile_edit\")\n with self.login(user) as client:\n response = client.post(url, data=data, follow=True)\n eq_(response.status_code, 200)", "async def send_photo(self, chat_id: typing.Union[base.Integer, base.String],\n photo: typing.Union[base.InputFile, base.String],\n caption: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_to_message_id: typing.Union[base.Integer, None] = None,\n reply_markup: typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup,\n types.ReplyKeyboardRemove,\n types.ForceReply, None] = None) -> types.Message:\n reply_markup = prepare_arg(reply_markup)\n payload = generate_payload(**locals(), exclude=['photo'])\n result = await self.send_file('photo', api.Methods.SEND_PHOTO, photo, payload)\n\n return types.Message(**result)", "def on_photo(self, update, context):\n user = update.effective_user\n photo_count = len(update.message.photo)\n log.info(\n \"PIC from %s, %s, @%s, #%i\",\n user.username,\n user.full_name,\n update.effective_chat.id,\n photo_count,\n )\n\n if context.user_data[\"state\"] != c.State.EXPECTING_RECEIPT:\n # Got an image from someone we weren't expecting to send any. 
We log this, and TODO decide what\n log.debug(\"Got image when I was not expecting one\")\n return\n\n # Process each photo\n for entry in update.message.photo:\n raw_image = entry.get_file().download_as_bytearray()\n\n # At this point the image is in the memory\n with NamedTemporaryFile(delete=False, prefix=str(update.effective_chat.id)) as pic:\n pic.write(raw_image)\n log.debug(\"Image written to %s\", pic.name)\n\n # Note: you can disable this line when testing locally, if you don't have an actual backend that will\n # serve this request\n self.backend.upload_shopping_receipt(raw_image, context.user_data[\"current_request\"])\n\n # if we got this far it means that we're ready to proceed to the exit survey and ask some additional questions\n # about this request\n self.send_exit_survey(update, context)\n context.user_data[\"state\"] = c.State.EXPECTING_EXIT_SURVEY", "def send_private_photo(bot, user_id, url, caption):\n\n # Truncate caption if it's too long...\n if len(caption) >= telegram.constants.MAX_CAPTION_LENGTH:\n token = \"[...]\"\n caption = caption[:-len(token)] + token\n try:\n bot.sendPhoto(user_id, photo=url, caption=caption)\n return True\n except Unauthorized:\n return False\n except TelegramError as e:\n # Todo try to send failed to send photo...\n pass", "def save_user_photo(user_id, access_token):\n MYURL = 'https://graph.facebook.com/me/picture?redirect=false&height=500&access_token=' + access_token\n r = requests.get(MYURL).json()\n if r[\"data\"]:\n image_url = r[\"data\"][\"url\"]\n forsave = urllib.request.urlopen(image_url)\n data = forsave.read()\n image = Image.open(io.BytesIO(data))\n path = './web_front/static/images/fb_' + user_id + '.png'\n image.save(path)\n succ_dict = {}\n succ_dict[\"mycode\"] = \"ok\"\n\n my_user = storage.get(User, user_id)\n db_path = '/static/images/fb_' + user_id + '.png'\n my_user.update_attr(\"user_avatar\", db_path)\n\n return redirect('https://0.0.0.0:5000/login')\n err_dict = {}\n err_dict[\"mycode\"] = \"notok\"\n return make_response(jsonify(err_dict), 200)", "def send_image(self, img_path) -> object:\n method = 'sendPhoto?' 
+ 'chat_id=' + str(self.__chat_id_response())\n if img_path[-4:] not in ['.jpg', '.png']:\n print('Invalid File Format, please use .jpg or .png format')\n sys.exit(1)\n try:\n files = {'photo': open(img_path, 'rb')}\n return requests.post(self.api_url + method, files = files)\n except FileNotFoundError as fl_err:\n print(fl_err)\n sys.exit(1)", "def fetch_photo_id_image(self):\r\n if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):\r\n return\r\n\r\n self.photo_id_key = self.original_verification(self.user).photo_id_key\r\n self.save()", "def photo(self):\n person = self.get_person()\n if person is None:\n return None\n return person.photo", "def photo(self):\n if \"photo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"photo\"], OneDriveObjectBase):\n return self._prop_dict[\"photo\"]\n else :\n self._prop_dict[\"photo\"] = ProfilePhoto(self._prop_dict[\"photo\"])\n return self._prop_dict[\"photo\"]\n\n return None", "def create_photo(name, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\tdb = get_database()\n\tphoto = Photo(name=name, user_id=user_id)\n\tdb.session.add(photo)\n\tdb.session.flush()\n\n\treturn photo", "def get_photo(self, photo_id):\n uri = 'photos/' + photo_id\n return self.make_request(uri)", "def command_photo(self, bot, update):\n\n self.send_message(bot, update, \"Not implemented yet.\")", "def take_photo(self):\n self.photo = self.frame\n self.send_photo_to_model.emit(self.photo)", "def get_photo_path(photo_id, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\tphoto = Photo.query.filter_by(id=photo_id, user_id=user_id).first()\n\tif photo:\n\t\tpath = os.path.join(current_app.instance_path, photo.url)\n\t\treturn path\n\n\treturn None", "def saveWallPhoto(user_id=None, group_id=None, photo=None, server=None,\\\n hash=None, latitude=None, longitude=None, caption=None):\n params = {\n 'user_id': user_id,\n 'group_id': group_id,\n 'photo': photo,\n 'server': server,\n 'hash': hash,\n 'latitude': latitude,\n 'longitude': longitude,\n 'caption': caption\n }\n result = call('photos.saveWallPhoto', **params)\n return parse_response(result)", "def add_profile_photo():\n pass", "def partial_update(self, request, pk=None):\n\n user_to_update = WhoYouUser.objects.get(pk=pk)\n\n requester = WhoYouUser.objects.get(user=request.auth.user)\n if requester != user_to_update:\n return Response({\"message\": \"Permission denied\"}, status=status.HTTP_401_UNAUTHORIZED)\n\n if \"profile_image_path\" in request.data:\n format, imgstr = request.data[\"profile_image_path\"].split(';base64,')\n ext = format.split('/')[-1]\n data = ContentFile(base64.b64decode(imgstr), name=f'{pk}-{uuid.uuid4()}.{ext}')\n user_to_update.profile_image_path = data\n\n if \"cover_image_path\" in request.data:\n user_to_update.cover_image_path = request.data[\"cover_image_path\"]\n\n user_to_update.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def upload_new_photo(name, file, user_id=None):\n\t# Create photo entry\n\tphoto = create_photo(name)\n\n\t# Save photo\n\tupload_existing_photo(photo, file)\n\n\treturn photo", "def profile_photo(request, username):\n try:\n user = User.objects.get(username__iexact=username)\n return utility.serve_file(user.profile.photo.path)\n except ObjectDoesNotExist:\n raise Http404()", "def oper(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"Operation of %s: %s\", user.first_name, update.message.text)\n 
update.message.reply_text(\n 'Capisco! Se ti va mandami una foto da allegare alla segnalazione, '\n 'se per caso non ti va scrivi /skip e proseguiamo.',\n reply_markup=ReplyKeyboardRemove(),\n )\n\n return PHOTO", "def send_photo_url(self, bot, update, url):\n\n resp = requests.get(url)\n return self.send_photo(bot, update, StringIO(resp.content))", "def profile_pic(self, client_file_storage):\n\n # If we already have a profile picture, remove it\n if self.profile_pic_filename:\n filepath = os.path.join(\n current_app.config['UPLOADED_IMAGES_DEST'],\n self.profile_pic_filename)\n os.remove(filepath)\n self.profile_pic_filename = None\n self.profile_pic_url = None\n\n # This uploads & saves the file on the server\n # NOTE: It uses the secure_filename function...\n server_filename = images.save(client_file_storage)\n\n # Generate the URL to this file\n url = images.url(server_filename)\n\n # Store information with the user\n self.profile_pic_filename = server_filename\n self.profile_pic_url = url", "def get_image(image_id, user_id):\n\n with image_backend(user_id) as backend:\n return backend.get_image(image_id)", "def put_photo(self, image, album_path=\"me/photos\", **kwargs):\n return self.request(\n \"{0}/{1}\".format(self.version, album_path),\n post_args=kwargs,\n files={\"source\": image},\n method=\"POST\",\n )", "def UserImage(self, service_user_image=None):\n if not self.image:\n return None\n if '@' not in self.image:\n return self.image\n user_image = (\n self.annotations.get(USER_IMAGE_ANNOTATION) or service_user_image)\n if not user_image:\n return self.image\n # The image should be in the format base@sha256:hashhashhash\n match = USER_IMAGE_PATTERN.match(self.image)\n if not match:\n return self.image\n (base, h) = match.group(1, 2)\n if not user_image.startswith(base):\n # The user-image is out of date.\n return self.image\n if len(h) > 8:\n h = h[:8] + '...'\n return user_image + ' at ' + h" ]
[ "0.63316363", "0.62885314", "0.626324", "0.6196697", "0.6100797", "0.6003126", "0.58738434", "0.58505243", "0.57199496", "0.5655445", "0.551319", "0.54837596", "0.54803497", "0.54596967", "0.54332525", "0.53843236", "0.5357086", "0.5357048", "0.53344876", "0.5292925", "0.5277275", "0.52032536", "0.5190274", "0.51899785", "0.5154209", "0.515167", "0.510714", "0.51027757", "0.5055779", "0.50511175" ]
0.74784166
0
Create a photo without any file data with the given name. The photo is assigned to the currently logged in user if user_id is not specified.
def create_photo(name, user_id=None): if user_id == None: user_id = current_user.get_user().id db = get_database() photo = Photo(name=name, user_id=user_id) db.session.add(photo) db.session.flush() return photo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_new_photo(name, file, user_id=None):\n\t# Create photo entry\n\tphoto = create_photo(name)\n\n\t# Save photo\n\tupload_existing_photo(photo, file)\n\n\treturn photo", "def sample_photo(user, title='Lovely Photo'):\n return Photo.objects.create(user=user, title=title)", "def create_image(user_id, image_name, tag1, tag2, tag3):\n\n image = Image(user_id=user_id, image_name=image_name, tag1=tag1, tag2=tag2, tag3=tag3)\n\n db.session.add(image)\n db.session.commit()\n\n return image", "def create_picture(self, album_obj, file_obj, mimetype, filename,\n user, request, display_name=\"\", tags=''):\n file_lib = FileLib(request)\n aio_obj = file_lib.write(filename, file_obj, mimetype, True)\n if not aio_obj.is_picture and not aio_obj.is_video:\n file_lib.delete(aio_obj)\n raise InvalidPicture\n picture = GalleryPicture()\n picture.album_obj = album_obj\n picture.file_obj = aio_obj\n picture.display_name = display_name\n picture.user = user\n self.t.set_tags(picture, tags)\n DBSession.add(picture)\n album_obj.pictures.append(picture)\n DBSession.flush()\n return picture.id", "def save(self, **kwargs):\n self.remove_file()\n if not self.image:\n self.generate(save=False)\n else:\n self.image.name = self.file()\n super(FormatedPhoto, self).save(**kwargs)", "def create():\n # Make `InputRequired` work on `FileField`.\n form_fields = request.form.copy()\n if request.files:\n form_fields.update(request.files)\n\n form = CreateForm(form_fields)\n\n if not form.validate():\n abort(400, 'Form validation failed.')\n\n party_id = form.party_id.data\n creator_id = form.creator_id.data\n image = request.files.get('image')\n\n party = party_service.find_party(party_id)\n if not party:\n abort(400, 'Unknown party ID')\n\n avatar = _create(party.id, creator_id, image)\n\n return avatar.url_path", "def makeProfile(request):\n upr = UserProfile()\n upr.user = request.user\n upr.image = \"images/no-pic.png\"\n upr.save()", "def upload_user_photo(instance, fname):\n root_dir = CustomUser.USER_PHOTO_PATH\n ext = fname.split(os.path.sep)[-1]\n output_fname = '{}-{}.{}'.format(time.time(), instance.id, ext)\n return os.path.join(root_dir, output_fname)", "def add_profile_photo():\n pass", "def test_create_photo(self, api_client, test_user):\n photo_1 = sample_photo(user=test_user, title='Home')\n\n payload = {\n 'title': 'Home'\n }\n api_client.force_authenticate(test_user)\n res = api_client.post(PHOTO_URL, payload)\n assert res.status_code == status.HTTP_201_CREATED", "def test_user_photo_creation_succeeds(self):\n\t\timage_name = 'test1.png'\n\t\twith open(self.img_url, 'rb') as image:\t\t\t\n\t\t\tdata = {'image': image, 'name':image_name}\t\t\t\n\t\t\tresponse = self.client.post(reverse('photos'), data)\n\t\t\timage.close()\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data.get('name'), image_name)", "def upload_existing_photo(photo, file):\n\t_, extension = os.path.splitext(file.filename)\n\tlocal_filename = \"photos/%d%s\" % (photo.id, extension)\n\tfilename = \"%s/%s\" % (current_app.instance_path, local_filename)\n\n\ttry:\n\t\troot_path = \"%s/photos\" % current_app.instance_path\n\t\tif not os.path.exists(root_path):\n\t\t\tos.makedirs(root_path)\n\texcept:\n\t\t# os.path.exists -> os.makedirs is not atomic.\n\t\t# The directory can be created between those calls.\n\t\t# So we really don't care if the directory exists.\n\t\tpass\n\n\tfile.save(filename)\n\n\t# Update tags from Google\n\tupdate_photo_tags(photo, filename)\n\n\t# Update 
photo\n\tphoto.url = local_filename\n\n\tdb = get_database()\n\tdb.session.add(photo)\n\tdb.session.commit()", "def _upload_photo(self, user, file_path):\n data = {\n \"full_name\": user.userprofile.full_name,\n \"email\": user.email,\n \"username\": user.username,\n \"lat\": 40.005814,\n \"lng\": -3.42071,\n \"photo\": open(file_path, \"rb\"),\n \"externalaccount_set-MAX_NUM_FORMS\": \"1000\",\n \"externalaccount_set-INITIAL_FORMS\": \"0\",\n \"externalaccount_set-TOTAL_FORMS\": \"0\",\n \"language_set-MAX_NUM_FORMS\": \"1000\",\n \"language_set-INITIAL_FORMS\": \"0\",\n \"language_set-TOTAL_FORMS\": \"0\",\n \"basic_section\": \"\",\n }\n data.update(_get_privacy_fields(MOZILLIANS))\n with override_script_prefix(\"/en-US/\"):\n url = reverse(\"phonebook:profile_edit\")\n with self.login(user) as client:\n response = client.post(url, data=data, follow=True)\n eq_(response.status_code, 200)", "def create_and_submit(self, username):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = username\r\n attempt.upload_face_image(\"Fake Data\")\r\n attempt.upload_photo_id_image(\"More Fake Data\")\r\n attempt.mark_ready()\r\n attempt.submit()\r\n return attempt", "def set_user_profile_picture(user_id, file_name):\n\n user = User.query.get(user_id)\n \n user.profile_picture = file_name\n db.session.commit()", "def test_user_photo_retrieval_by_name_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/image/?name={}'.format(self.created_image.name))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\t\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def picture_create(request):\n form = PictureForm(request.POST, request.FILES)\n\n if form.is_valid():\n data = form.cleaned_data\n user = get_object_or_404(get_user_model(), id=request.user.id)\n\n obj = Picture.objects.create(\n author=user,\n description=data['description'],\n file=data['file']\n )\n\n data = {\n 'status': 'success',\n 'data': PictureDetailSerializer(obj).data\n }\n return JsonResponse(data, status=201)\n else:\n data = {\n 'status': 'failed',\n 'details': 'Not correct field(s)'\n }\n return JsonResponse(data, status=422)", "def main(uid, password, photo, users_service, auth_service, photos_service):\n user = users_service.get_user_by_id(uid)\n auth_service.authenticate(user, password)\n photos_service.upload_photo(user['uid'], photo)", "def make_image(self, path):\n\t\treturn None", "def create(photo):\n sample_id = photo.get(\"sample_id\")\n\n existing_photo = (\n Photo.query.filter(Photo.sample_id == sample_id)\n .one_or_none()\n )\n\n # Can we insert this photo?\n if existing_photo is None:\n\n # Create a photo instance using the schema and the passed in photo\n schema = PhotoSchema()\n new_photo = schema.load(photo, session=db.session)\n # Add the photo to the database\n print(new_photo)\n db.session.add(new_photo)\n db.session.commit()\n\n # Serialize and return the newly created photo in the response\n data = schema.dump(new_photo)\n\n return data, 201\n\n # Otherwise, nope, photo exists already\n else:\n abort(\n 409,\n \"Photo {sample_id} exists already\".format(sample_id=sample_id)\n )", "def save_image_for_training(unique_id, name):\n\n # Check if valid image file was 
uploaded\n if request.method == 'POST':\n if 'file' not in request.files:\n return redirect(request.url)\n\n file = request.files['file']\n print(file)\n\n if file.filename == '':\n return redirect(request.url)\n\n # If valid, save the image\n if file and allowed_file(file.filename):\n new_path = f\"static/train_test/train/{unique_id}\"\n os.makedirs(new_path, exist_ok=True)\n img_path = f\"{new_path}/{name}.png\"\n im = Image.open(file)\n im.save(img_path)", "def test_user_photo_retrieval_by_id_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def send_photo(photo_id, user_id=None):\n\n\tpath = get_photo_path(photo_id, user_id)\n\tif path:\n\t\treturn send_file(path)", "def _save(self, name, content):\n if not self.oauth_token:\n raise AuthenticationError(\"You must be authenticated with oAuth \"\n \"for upload files.\")\n params = {\n 'title': name,\n # 'description': description,\n # 'tags': tags,\n # 'is_public': is_public,\n 'is_public': 1,\n # 'is_friend': is_friend,\n # 'is_family': is_family,\n # 'safety_level': safety_level,\n # 'content_type': content_type,\n # 'hidden': hidden\n }\n response = self.oauth_session.post(self.API_POST_URL, params=params,\n files={'photo': content.file})\n xmldoc = minidom.parseString(response.content)\n rsp = xmldoc.getElementsByTagName('rsp')[0]\n if rsp.getAttribute('stat') == 'fail':\n msg = xmldoc.getElementsByTagName('err')[0].getAttribute('msg')\n raise FileSaveError(msg)\n photo_id = xmldoc.getElementsByTagName('photoid')[0].firstChild.nodeValue\n return photo_id", "def image_create_and_upload(self, upload=True, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(self.__name__ + \"-image\")\n kwargs['name'] = name\n\n params = dict(kwargs)\n image = self.create_image(**params)\n self.assertEqual('queued', image['status'])\n if not upload:\n return image\n\n file_content = data_utils.random_bytes()\n image_file = io.BytesIO(file_content)\n self.client.store_image_file(image['id'], image_file)\n\n image = self.client.show_image(image['id'])\n return image", "def save_image_to_db(filename, user_id):\n\n s3_path = f\"https://shopifyimagerepository.s3-us-west-1.amazonaws.com/{filename}\"\n\n image = Image(user_id=user_id, name=filename, s3_path=s3_path)\n db.session.add(image)\n db.session.commit()\n return image.image_id", "def test_upload_photo(self):\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)\n\n url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])\n\n photo_file = self.generate_photo_file()\n\n data = {\n 'photo':photo_file\n }\n\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = 
self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def new_image(path, attendance, data):\n try:\n return Images.objects.get_or_create(path=path.replace(IMG_FOLDER, '', 1),\n attendance=attendance, data=json.dumps(data))\n except:\n return None", "def upload_file():\n if request.method == 'POST':\n user_details = request.form\n print(user_details)\n file = request.files['myfile']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n image = cv2.imread(filename)\n emb = singleimg_embedding(image)\n\n add_person = MissingPerson(id=user_details['id'], first_name=user_details[\n 'first_name'], last_name=user_details['last_name'],\n last_seen=user_details['last_seen'],\n embedding=emb)\n print(add_person)\n db.session.add(add_person)\n db.session.commit()\n return 'Success'\n return render_template('index.html')" ]
[ "0.77914506", "0.67458564", "0.6556521", "0.6267879", "0.6160799", "0.61451906", "0.61112785", "0.60596704", "0.6024128", "0.60183185", "0.59945184", "0.59926754", "0.5988621", "0.59582233", "0.59421927", "0.59314686", "0.58844525", "0.58587587", "0.5837511", "0.574639", "0.5742859", "0.5734651", "0.5712807", "0.5710268", "0.5685882", "0.56596535", "0.56561756", "0.56123185", "0.5570073", "0.5555637" ]
0.82998407
0
Creates a new photo and uploads it. The photo is assigned to 'user_id' if provided. Otherwise, the currently logged in user is assigned the photo. Returns the newly created photo.
def upload_new_photo(name, file, user_id=None): # Create photo entry photo = create_photo(name) # Save photo upload_existing_photo(photo, file) return photo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_photo(name, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\tdb = get_database()\n\tphoto = Photo(name=name, user_id=user_id)\n\tdb.session.add(photo)\n\tdb.session.flush()\n\n\treturn photo", "def sample_photo(user, title='Lovely Photo'):\n return Photo.objects.create(user=user, title=title)", "def test_create_photo(self, api_client, test_user):\n photo_1 = sample_photo(user=test_user, title='Home')\n\n payload = {\n 'title': 'Home'\n }\n api_client.force_authenticate(test_user)\n res = api_client.post(PHOTO_URL, payload)\n assert res.status_code == status.HTTP_201_CREATED", "def create():\n # Make `InputRequired` work on `FileField`.\n form_fields = request.form.copy()\n if request.files:\n form_fields.update(request.files)\n\n form = CreateForm(form_fields)\n\n if not form.validate():\n abort(400, 'Form validation failed.')\n\n party_id = form.party_id.data\n creator_id = form.creator_id.data\n image = request.files.get('image')\n\n party = party_service.find_party(party_id)\n if not party:\n abort(400, 'Unknown party ID')\n\n avatar = _create(party.id, creator_id, image)\n\n return avatar.url_path", "def create(photo):\n sample_id = photo.get(\"sample_id\")\n\n existing_photo = (\n Photo.query.filter(Photo.sample_id == sample_id)\n .one_or_none()\n )\n\n # Can we insert this photo?\n if existing_photo is None:\n\n # Create a photo instance using the schema and the passed in photo\n schema = PhotoSchema()\n new_photo = schema.load(photo, session=db.session)\n # Add the photo to the database\n print(new_photo)\n db.session.add(new_photo)\n db.session.commit()\n\n # Serialize and return the newly created photo in the response\n data = schema.dump(new_photo)\n\n return data, 201\n\n # Otherwise, nope, photo exists already\n else:\n abort(\n 409,\n \"Photo {sample_id} exists already\".format(sample_id=sample_id)\n )", "def picture_create(request):\n form = PictureForm(request.POST, request.FILES)\n\n if form.is_valid():\n data = form.cleaned_data\n user = get_object_or_404(get_user_model(), id=request.user.id)\n\n obj = Picture.objects.create(\n author=user,\n description=data['description'],\n file=data['file']\n )\n\n data = {\n 'status': 'success',\n 'data': PictureDetailSerializer(obj).data\n }\n return JsonResponse(data, status=201)\n else:\n data = {\n 'status': 'failed',\n 'details': 'Not correct field(s)'\n }\n return JsonResponse(data, status=422)", "def main(uid, password, photo, users_service, auth_service, photos_service):\n user = users_service.get_user_by_id(uid)\n auth_service.authenticate(user, password)\n photos_service.upload_photo(user['uid'], photo)", "def create_and_submit(self):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = u\"Rust\\u01B4\"\r\n\r\n attempt.upload_face_image(\"Just pretend this is image data\")\r\n attempt.upload_photo_id_image(\"Hey, we're a photo ID\")\r\n attempt.mark_ready()\r\n attempt.submit()\r\n\r\n return attempt", "def _upload_photo(self, user, file_path):\n data = {\n \"full_name\": user.userprofile.full_name,\n \"email\": user.email,\n \"username\": user.username,\n \"lat\": 40.005814,\n \"lng\": -3.42071,\n \"photo\": open(file_path, \"rb\"),\n \"externalaccount_set-MAX_NUM_FORMS\": \"1000\",\n \"externalaccount_set-INITIAL_FORMS\": \"0\",\n \"externalaccount_set-TOTAL_FORMS\": \"0\",\n \"language_set-MAX_NUM_FORMS\": \"1000\",\n \"language_set-INITIAL_FORMS\": \"0\",\n \"language_set-TOTAL_FORMS\": \"0\",\n 
\"basic_section\": \"\",\n }\n data.update(_get_privacy_fields(MOZILLIANS))\n with override_script_prefix(\"/en-US/\"):\n url = reverse(\"phonebook:profile_edit\")\n with self.login(user) as client:\n response = client.post(url, data=data, follow=True)\n eq_(response.status_code, 200)", "def upload_existing_photo(photo, file):\n\t_, extension = os.path.splitext(file.filename)\n\tlocal_filename = \"photos/%d%s\" % (photo.id, extension)\n\tfilename = \"%s/%s\" % (current_app.instance_path, local_filename)\n\n\ttry:\n\t\troot_path = \"%s/photos\" % current_app.instance_path\n\t\tif not os.path.exists(root_path):\n\t\t\tos.makedirs(root_path)\n\texcept:\n\t\t# os.path.exists -> os.makedirs is not atomic.\n\t\t# The directory can be created between those calls.\n\t\t# So we really don't care if the directory exists.\n\t\tpass\n\n\tfile.save(filename)\n\n\t# Update tags from Google\n\tupdate_photo_tags(photo, filename)\n\n\t# Update photo\n\tphoto.url = local_filename\n\n\tdb = get_database()\n\tdb.session.add(photo)\n\tdb.session.commit()", "def create_picture(self, album_obj, file_obj, mimetype, filename,\n user, request, display_name=\"\", tags=''):\n file_lib = FileLib(request)\n aio_obj = file_lib.write(filename, file_obj, mimetype, True)\n if not aio_obj.is_picture and not aio_obj.is_video:\n file_lib.delete(aio_obj)\n raise InvalidPicture\n picture = GalleryPicture()\n picture.album_obj = album_obj\n picture.file_obj = aio_obj\n picture.display_name = display_name\n picture.user = user\n self.t.set_tags(picture, tags)\n DBSession.add(picture)\n album_obj.pictures.append(picture)\n DBSession.flush()\n return picture.id", "def put_photo(self, image, album_path=\"me/photos\", **kwargs):\n return self.request(\n \"{0}/{1}\".format(self.version, album_path),\n post_args=kwargs,\n files={\"source\": image},\n method=\"POST\",\n )", "def create_image(user_id, image_name, tag1, tag2, tag3):\n\n image = Image(user_id=user_id, image_name=image_name, tag1=tag1, tag2=tag2, tag3=tag3)\n\n db.session.add(image)\n db.session.commit()\n\n return image", "def create_and_submit(self, username):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = username\r\n attempt.upload_face_image(\"Fake Data\")\r\n attempt.upload_photo_id_image(\"More Fake Data\")\r\n attempt.mark_ready()\r\n attempt.submit()\r\n return attempt", "def test_upload_photo(self):\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)\n\n url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])\n\n photo_file = self.generate_photo_file()\n\n data = {\n 'photo':photo_file\n }\n\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def get_photo(photo_id, user_id=None):\n\n\tif user_id == None:\n\t\tuser_id = current_user.get_user().id\n\n\treturn Photo.query.filter_by(id=photo_id, user_id=user_id).first()", "def get_form(self, *args, **kwargs):\n form = super(PhotoCreateView, self).get_form(*args, **kwargs)\n form.instance = self.model(user=self.request.user)\n return form", "def image_create_and_upload(self, upload=True, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(self.__name__ + \"-image\")\n kwargs['name'] = name\n\n params = dict(kwargs)\n image = self.create_image(**params)\n self.assertEqual('queued', image['status'])\n if not upload:\n return image\n\n file_content = data_utils.random_bytes()\n image_file = io.BytesIO(file_content)\n 
self.client.store_image_file(image['id'], image_file)\n\n image = self.client.show_image(image['id'])\n return image", "def send_photo(photo_id, user_id=None):\n\n\tpath = get_photo_path(photo_id, user_id)\n\tif path:\n\t\treturn send_file(path)", "def upload_user_photo(instance, fname):\n root_dir = CustomUser.USER_PHOTO_PATH\n ext = fname.split(os.path.sep)[-1]\n output_fname = '{}-{}.{}'.format(time.time(), instance.id, ext)\n return os.path.join(root_dir, output_fname)", "def upload_image(user, car_id):\n image_handler = ImageApiHandler.image_handler\n # check if the post request has the file part\n if 'file' not in request.files:\n out = {'status': 'No file part!'}\n return jsonify(out), 400\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n out = {'status': 'No selected file!'}\n return jsonify(out), 400\n if file and image_handler.allowed_file(file.filename):\n car = Car.query.filter_by(id=car_id).first()\n if car is None:\n out = {'status': 'Invalid car id!'}\n return jsonify(out), 400\n if user.id != car.user_id:\n out = {'status': 'Not allowed!'}\n return jsonify(out), 400\n image_handler.add(car, file)\n out = {'status': 'OK'}\n return jsonify(out), 200\n else:\n out = {'status': 'File type not allowed!'}\n return jsonify(out), 400", "def upload_photo(self, photo, caption=None, upload_id=None, from_video=False, force_resize=False, options={}):\n return upload_photo(self, photo, caption, upload_id, from_video, force_resize, options)", "def create(self, validated_data):\n\n image = models.Image(\n url = validated_data['url'],\n uploaded_by = self.context['request'].user\n )\n\n image.save()\n\n return image", "def post(self, request):\n profile = Profile.get_by_id(request.user.id)\n if not profile:\n return HttpResponse(status=403)\n if not imageValidator(request.FILES.get('name')):\n return HttpResponse(status=400)\n imageToUpload = request.FILES.get('name')\n key = 'avatar=' + imageToUpload.name\n url = upload(key, imageToUpload)\n profile.update(avatar=url)\n return JsonResponse(profile.to_dict(), status=200)", "def perform_create(self, serializer):\n\n attachment = serializer.save()\n attachment.user = self.request.user\n attachment.save()", "def create_picture_api(self, album_obj, api_file_obj, user, request,\n display_name, description, tags=''):\n picture = GalleryPicture()\n picture.album_obj = album_obj\n picture.file_obj = api_file_obj\n picture.display_name = display_name\n picture.description = description\n picture.user = user\n self.t.set_tags(picture, tags)\n DBSession.add(picture)\n album_obj.pictures.append(picture)\n DBSession.flush()\n return picture", "def test_user_photo_creation_succeeds(self):\n\t\timage_name = 'test1.png'\n\t\twith open(self.img_url, 'rb') as image:\t\t\t\n\t\t\tdata = {'image': image, 'name':image_name}\t\t\t\n\t\t\tresponse = self.client.post(reverse('photos'), data)\n\t\t\timage.close()\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data.get('name'), image_name)", "def upload_image(self, request, pk=None):\n book = self.get_object()\n serializer = self.get_serializer(\n book,\n data=request.data\n )\n\n if serializer.is_valid():\n serializer.save()\n return Response(\n serializer.data,\n status=status.HTTP_200_OK\n )\n\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )", "def create_photo(image, handler):\n if image == False: # False means it wasn't valid (see 
validate_image)\n raise FormatUnrecognizedError()\n\n if max(image.width, image.height) <= MAX_IMAGE_DIMENSION:\n # No resize needed. Keep the same size but add a transformation to\n # force re-encoding.\n image.resize(image.width, image.height)\n elif image.width > image.height:\n image.resize(MAX_IMAGE_DIMENSION,\n image.height * MAX_IMAGE_DIMENSION / image.width)\n else:\n image.resize(image.width * MAX_IMAGE_DIMENSION / image.height,\n MAX_IMAGE_DIMENSION)\n\n try:\n image_data = image.execute_transforms(output_encoding=images.PNG)\n except RequestTooLargeError:\n raise SizeTooLargeError()\n except Exception:\n # There are various images.Error exceptions that can be raised, as well\n # as e.g. IOError if the image is corrupt.\n raise PhotoError()\n\n photo = model.Photo.create(handler.repo, image_data=image_data)\n photo_url = get_photo_url(photo, handler)\n return (photo, photo_url)", "def add_profile_photo():\n pass" ]
[ "0.7942907", "0.6692077", "0.648388", "0.64003956", "0.63190323", "0.62627196", "0.62396616", "0.6177775", "0.6172136", "0.61542666", "0.61384165", "0.6137287", "0.6108287", "0.6089447", "0.6007912", "0.60071534", "0.5872195", "0.5827818", "0.58083963", "0.5771478", "0.57705754", "0.5737176", "0.5635387", "0.56277037", "0.56225073", "0.5616021", "0.56011236", "0.5533027", "0.552676", "0.55248076" ]
0.7882051
1
Deletes a photo. The photo is removed from storage as well.
def delete_photo(photo): filename = "%s/%s" % (current_app.instance_path, photo.url) try: os.remove(filename) except: # The file doesn't exist. pass db = get_database() db.session.delete(photo) db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def photo_delete(sender, instance, **kwargs):\n\tinstance.photo.delete(False)", "def delete(self, *args, **kwargs):\n self.image.storage.delete(self.image.name)\n delete(self.image)\n super().delete(*args, **kwargs)", "def delete_file(sender, instance, *args, **kwargs):\n if instance.photo:\n _delete_file(instance.photo.path)", "def delete_image(self):\n Image.objects.get(id = self.id).delete()", "def delete(id):\n # Get the photo requested\n photo = Photo.query.filter(Photo.id == id).one_or_none()\n\n # Did we find a photo?\n if photo is not None:\n db.session.delete(photo)\n db.session.commit()\n return make_response(\n \"Photo {id} deleted\".format(id=id), 200\n )\n\n # Otherwise, nope, didn't find that photo\n else:\n abort(\n 404,\n \"Photo not found for Id: {id}\".format(id=id),\n )", "def delete_photo(request, object_id):\n character = get_character_from_ob(object_id)\n user = request.user\n if not (user == character.player_ob or user.is_staff):\n raise Http404(\"Only owners or staff may delete photos.\")\n try:\n photo = Photo.objects.get(pk=request.POST[\"select_photo\"])\n except Exception as err:\n raise Http404(err)\n cloudinary.api.delete_resources([photo.image.public_id])\n photo.delete()\n return HttpResponseRedirect(reverse(\"character:gallery\", args=(object_id,)))", "def delete(self, name):\n params = {\n 'method': 'flickr.photos.delete',\n 'photo_id': name,\n }\n response = self.oauth_session.post(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail' and json_response['code'] != 1:\n raise FlickrError(json_response['message'])", "def delete(self, *args, **kwargs):\n super(Image, self).delete(*args, **kwargs)", "def __on_delete(self):\n self.image.delete()", "def __on_delete(self):\n self.image.delete()", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n to_delete = [\n instance.photo,\n instance.photo2,\n instance.photo3\n ]\n for photo in to_delete:\n if photo:\n if os.path.isfile(photo.path):\n os.remove(photo.path)", "def delete(self, image_path=None):\n current_app.mnt_mutex.acquire()\n unmount_image(image_path)\n current_app.mnt_mutex.release()", "def delete_image(request, image_id):\n\n if not request.user.is_superuser:\n messages.error(\n request,\n \"Access denied! 
Only store admin can delete a image.\")\n return redirect(reverse(\"home\"))\n\n image = get_object_or_404(GalleryImages, pk=image_id)\n image.delete()\n messages.success(request, \"Image deleted!\")\n\n return redirect(reverse(\"gallery\"))", "def delete(self, *args, **kwargs):\n self.image.delete()\n super(Recipe, self).delete(*args, **kwargs)", "def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)", "def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return", "def delete_file(sender, instance, *args, **kwargs):\n if instance.image:\n _delete_file(instance.image.path)", "def delete(self, *args, **kwargs):\n self.file.storage.delete(self.file.name)\n super().delete(*args, **kwargs)", "def delete_img(self, img_id):\n logger.debug('Function delete_img start')\n\n logger.info(\"Deleting img: \"+str(img_id))\n os.remove(self.img_path+str(img_id)+\".jpg\")\n\n logger.debug('Function delete_img end')", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def delete(self, trash=True, **kwargs):\n if self.is_trashed or not trash:\n super(Picture, self).delete()\n return\n\n self.trashed_time = datetime.now()\n self.save()", "def delete_image(self, image_id):\n\t\timage = self.session.query(Image).filter_by(id=image_id).one_or_none()\n\n\t\tif not image:\n\t\t\treturn None\n\n\t\tself.session.delete(image)\n\t\tself.session.commit()\n\n\t\treturn image.image_path", "def delete(self, req, id):\n context = None\n try:\n db_api.image_destroy(context, id)\n except exception.NotFound:\n return exc.HTTPNotFound()", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def delete_image(Name=None):\n pass", "def destroy(self):\n url = \"/images/%s/destroy\" % (str(self.id))\n\n data = self._conn.request(url)\n\n log.debug(data)", "def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass", "def picture_delete(request, pk):\n picture = get_object_or_404(Picture, pk=pk)\n\n if picture.author != request.user:\n data = {\n 'status': 'failed',\n 'details': 'Not allowed'\n }\n return JsonResponse(data, status=403)\n\n data = {\n 'status': 'success',\n 'data': PictureDetailSerializer(picture).data\n }\n picture.delete()\n\n return JsonResponse(data, status=200)", "def delete(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)", "def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)" ]
[ "0.79391176", "0.75871855", "0.7356168", "0.7239739", "0.72220904", "0.7189573", "0.71327937", "0.71310925", "0.69216067", "0.69216067", "0.6832628", "0.6759267", "0.67505664", "0.67434686", "0.67333627", "0.6647772", "0.6622356", "0.6614504", "0.6593143", "0.65862256", "0.6517697", "0.6514753", "0.64956737", "0.64931166", "0.644782", "0.64394706", "0.64355624", "0.63800913", "0.6354858", "0.6351846" ]
0.8375348
0
Rank infinite list of trips using lib trip rank.
def rank_trips(trips:List[Trip]) -> List[int]: trips_dict = {trip.trip_id: trip for trip in trips} trips_ranked_window_five = [] final_ids_ranked = [] for i in range(0, len(trips) - 4, 5): five_ranked_ids = fixtures.rank_trips(trips[i:(i + 5)]) if i+5<len(trips) else fixtures.rank_trips(trips[i:]) trips_ranked_window_five.extend([trips_dict.get(id) for id in five_ranked_ids]) for i in range(0, len(trips)-5 if len(trips)>25 else len(trips), 25): for j in range(i, i+5): five_ranked = fixtures.rank_trips(trips_ranked_window_five[j:(j+25):5]) final_ids_ranked.extend(five_ranked) return final_ids_ranked
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rank_trips_v2(trips:List[Trip]) -> List[int]:\n if len(trips) < 2:\n return trips\n elif len(trips) <= 5:\n return fixtures.rank_trips(trips) \n else:\n ranked_trips = merge_sort_trips(trips)\n return [trip.trip_id for trip in ranked_trips]", "def __rank__(self) -> int:", "def rank():\n return 0", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def get_rank(points: int, cutoffs: List[int]) -> int:\n rank = 0\n for i, cutoff in enumerate(cutoffs):\n if points < cutoff:\n if i == 0:\n break\n else:\n rank = i - 1\n break\n else:\n rank = RANK_COUNT - 1\n\n return rank", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r", "def multiple_ranks(our_data,start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_rank(count))\n count += 1", "def get_ranked_points(zpoints, dsq):\n pos_map = calc_positions(zpoints, dsq)\n rpoints = calc_ranked_points(pos_map, dsq)\n return rpoints", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def rerank_candidates(s, pred2sub_rank, all_predictions, rerank_top=20):\n predicted_smiles = []\n model_input = []\n for (predict_smi, label), _ in Counter(all_predictions).most_common(rerank_top):\n if predict_smi == s:\n continue\n features = get_all_features(\n get_all_ranking_info(pred2sub_rank[predict_smi]))\n predicted_smiles.append((predict_smi, label))\n model_input.append(features)\n\n model = RankingModel()\n model.load_state_dict(torch.load('./models/ranker/rank_model.pt', map_location='cpu'))\n model.eval()\n\n test_loader = DataLoader(RankingTestDataset(\n model_input), batch_size=1000, shuffle=False, num_workers=2)\n ranking_scores = []\n for data in test_loader:\n outputs = model(data)[0]\n ranking_scores.extend(outputs.detach().cpu().numpy())\n\n assert len(predicted_smiles) == len(ranking_scores)\n pred_smi2score = {k: v[1]\n for k, v in zip(predicted_smiles, ranking_scores)}\n return pred_smi2score", "def test_lsr_rankings():\n for case in iter_testcases('rankings'):\n n_items = case[\"n_items\"]\n data = case[\"data\"]\n assert np.allclose(\n case[\"lsr_est\"], lsr_rankings(n_items, data),\n atol=ATOL, rtol=RTOL)", "def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:", "def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks", "def test_rank(self):\n self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)\n self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)", "def run_pagerank(tag_table, unique_tags, targetNum):\n id2tag = {i: tag for i, tag in enumerate(unique_tags)}\n tag2id = {tag: i for i, tag in id2tag.items()}\n\n co_occurence = dict()\n for tag_list in tag_table:\n indices = [tag2id[tag] for tag in tag_list]\n for pair in combinations(indices, 2):\n co_occurence[pair] = co_occurence.get(pair, 0) + 1\n\n nodes = range(len(unique_tags))\n edges = [(pair[0], pair[1], weight) for pair, weight in co_occurence.items()]\n G = nx.Graph()\n G.add_nodes_from(nodes)\n G.add_weighted_edges_from(edges)\n pr = nx.pagerank(G, weight='weight')\n\n top_indices, 
top_scores = zip(*sorted(pr.items(), key=operator.itemgetter(1), reverse=True)[:targetNum])\n topTags = [id2tag[i] for i in top_indices]\n return topTags", "def ranked_list_gen(infr, use_refresh=True):\n infr.print('============================', color='white')\n infr.print('--- RANKED LIST LOOP ---', color='white')\n n_prioritized = infr.refresh_candidate_edges()\n if n_prioritized == 0:\n infr.print('RANKING ALGO FOUND NO NEW EDGES')\n return\n if use_refresh:\n infr.refresh.clear()\n yield from infr._inner_priority_gen(use_refresh)", "def get_active_ranks(self,fine=False):\n \n if fine:\n nqpt = self.nqpt_fine\n else:\n nqpt = self.nqpt\n \n #max_nqpt_per_worker = (self.nqpt // size\n # + min(self.nqpt % size, 1))\n #n_active_workers = (self.nqpt // max_nqpt_per_worker\n # + min(self.nqpt % max_nqpt_per_worker, 1))\n max_nqpt_per_worker = (nqpt // size\n + min(nqpt % size, 1))\n n_active_workers = (nqpt // max_nqpt_per_worker\n + min(nqpt % max_nqpt_per_worker, 1))\n return np.arange(n_active_workers)", "def rank(self) -> tskit.Rank:\n return combinatorics.RankTree.from_tsk_tree(self).rank()", "def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0", "def calculate_page_rank(self, iterations=5):\n # clear out the current page rank tables\n self.con.execute('drop table if exists pagerank')\n self.con.execute('create table pagerank(urlid primary key,score)')\n\n # initialize every url with a page rank of 1\n for (urlid,) in self.con.execute('select rowid from urllist'):\n self.con.execute('insert into pagerank(urlid,score) values (%d,1.0)' % urlid)\n self.dbcommit()\n\n for i in range(iterations):\n # Need multiple iterations, as the page ranks of pages linked to this\n # one will be consistently updated on each iteration\n print(\"Iteration %d\" % i)\n for (urlid,) in self.con.execute('select rowid from urllist'):\n # Default page rank\n page_rank = 0.15\n\n # Loop through all the pages that link to this one\n for (linker,) in self.con.execute('select distinct fromid from link where toid=%d'\n % urlid):\n # Get the page rank of the linker\n linkingpr = self.con.execute('select score from pagerank where urlid=%d'\n % linker).fetchone()[0]\n\n # Get the total number of links from the linker\n linkingcount = self.con.execute('select count(*) from link where fromid=%d'\n % linker).fetchone()[0]\n # add to page rank, accounting for the link count\n page_rank += 0.85 * (linkingpr / linkingcount)\n self.con.execute('update pagerank set score=%f where urlid=%d'\n % (page_rank, urlid))\n self.dbcommit()", "def recip_rank(recs, truth):\n good = recs['item'].isin(truth.index)\n npz, = np.nonzero(good)\n if len(npz):\n return 1.0 / (npz[0] + 1.0)\n else:\n return 0.0", "def unrank(self, rank, n):\n n, rank = as_int(n), as_int(rank)\n L = defaultdict(int)\n for i in range(n - 3, -1, -1):\n L[i] = rank % n\n rank = (rank - L[i])//n\n return Prufer([L[i] for i in range(len(L))])", "def generate_ranks(maximum: int, steps: int) -> List[int]:\n ranks = []\n\n for i in range(steps):\n ranks += [maximum]\n maximum = int(maximum * 0.75)\n\n RANK_CUTOFFS = list(reversed(ranks))\n return RANK_CUTOFFS", "def _update_ranks(sample_count):\n raise NotImplementedError", "def get_all_rankings(session: CondorSession) -> List[sc.Ranking]:\n return [sc.Ranking(matrix) for matrix in RankingMatrix.list(session)]", "def ranking(orig_data):\n data = np.copy(orig_data)\n values = np.sort(data)\n rank = np.zeros(data.shape)\n r = 0\n for i in range(values.shape[0]):\n for j in range(data.shape[0]):\n if 
data[j] == values[i]:\n rank[j] = r\n data[j] = 9223372036854775807 # MaxInt\n break\n if i < values.shape[0]-1 and values[i] < values[i+1]:\n r = i + 1\n return rank", "def get_ranks(w_vector):\n tmp = np.flip(w_vector.argsort())\n ranks = np.empty_like(tmp)\n ranks[tmp] = np.arange(len(w_vector))\n return ranks", "def rank(self):\n rank = 0\n rho = self.array_form[:]\n n = self.size - 1\n size = n + 1\n psize = int(ifac(n))\n for j in xrange(size - 1):\n rank += rho[j]*psize\n for i in xrange(j + 1, size):\n if rho[i] > rho[j]:\n rho[i] -= 1\n psize //= n\n n -= 1\n return rank", "def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist" ]
[ "0.70010245", "0.61141443", "0.60002804", "0.58735156", "0.585651", "0.57537925", "0.5745264", "0.5699377", "0.5666956", "0.56610733", "0.55326897", "0.5523778", "0.54365784", "0.5432164", "0.5396764", "0.53788555", "0.5371973", "0.5337888", "0.531217", "0.5311249", "0.53088933", "0.5292738", "0.52736855", "0.52709776", "0.5260574", "0.5232052", "0.5210456", "0.52046144", "0.51929945", "0.51919085" ]
0.7134149
0
Encode a 'Signing' message into bytes.
def encode(msg: Message) -> bytes: msg = cast(SigningMessage, msg) signing_msg = signing_pb2.SigningMessage() signing_msg.message_id = msg.message_id dialogue_reference = msg.dialogue_reference signing_msg.dialogue_starter_reference = dialogue_reference[0] signing_msg.dialogue_responder_reference = dialogue_reference[1] signing_msg.target = msg.target performative_id = msg.performative if performative_id == SigningMessage.Performative.SIGN_TRANSACTION: performative = signing_pb2.SigningMessage.Sign_Transaction_Performative() # type: ignore skill_callback_ids = msg.skill_callback_ids performative.skill_callback_ids.extend(skill_callback_ids) skill_callback_info = msg.skill_callback_info performative.skill_callback_info.update(skill_callback_info) terms = msg.terms Terms.encode(performative.terms, terms) raw_transaction = msg.raw_transaction RawTransaction.encode(performative.raw_transaction, raw_transaction) signing_msg.sign_transaction.CopyFrom(performative) elif performative_id == SigningMessage.Performative.SIGN_MESSAGE: performative = signing_pb2.SigningMessage.Sign_Message_Performative() # type: ignore skill_callback_ids = msg.skill_callback_ids performative.skill_callback_ids.extend(skill_callback_ids) skill_callback_info = msg.skill_callback_info performative.skill_callback_info.update(skill_callback_info) terms = msg.terms Terms.encode(performative.terms, terms) raw_message = msg.raw_message RawMessage.encode(performative.raw_message, raw_message) signing_msg.sign_message.CopyFrom(performative) elif performative_id == SigningMessage.Performative.SIGNED_TRANSACTION: performative = signing_pb2.SigningMessage.Signed_Transaction_Performative() # type: ignore skill_callback_ids = msg.skill_callback_ids performative.skill_callback_ids.extend(skill_callback_ids) skill_callback_info = msg.skill_callback_info performative.skill_callback_info.update(skill_callback_info) signed_transaction = msg.signed_transaction SignedTransaction.encode( performative.signed_transaction, signed_transaction ) signing_msg.signed_transaction.CopyFrom(performative) elif performative_id == SigningMessage.Performative.SIGNED_MESSAGE: performative = signing_pb2.SigningMessage.Signed_Message_Performative() # type: ignore skill_callback_ids = msg.skill_callback_ids performative.skill_callback_ids.extend(skill_callback_ids) skill_callback_info = msg.skill_callback_info performative.skill_callback_info.update(skill_callback_info) signed_message = msg.signed_message SignedMessage.encode(performative.signed_message, signed_message) signing_msg.signed_message.CopyFrom(performative) elif performative_id == SigningMessage.Performative.ERROR: performative = signing_pb2.SigningMessage.Error_Performative() # type: ignore skill_callback_ids = msg.skill_callback_ids performative.skill_callback_ids.extend(skill_callback_ids) skill_callback_info = msg.skill_callback_info performative.skill_callback_info.update(skill_callback_info) error_code = msg.error_code ErrorCode.encode(performative.error_code, error_code) signing_msg.error.CopyFrom(performative) else: raise ValueError("Performative not valid: {}".format(performative_id)) signing_bytes = signing_msg.SerializeToString() return signing_bytes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_message(message: bytes, sender_private_key: RsaKey) -> bytes:\n return pkcs1_15.new(sender_private_key).sign(SHA256.new(message))", "def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig", "def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature", "def sign_transaction(self):\n private_key=RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer=PKCS1_v1_5.new(private_key)\n h=SHA.new(str(self.to_dict()).encode('utf8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')", "def sign(self, data: bytes) -> bytes:\n return self._signing_key.sign(data).signature", "def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)", "def raw(self) -> bytes:\n return bytes(self._signing_key)", "def serialize(self, data):\n content_type, content_encoding, body = encode(\n data, serializer=self._serializer)\n # What we sign is the serialized body, not the body itself.\n # this way the receiver doesn't have to decode the contents\n # to verify the signature (and thus avoiding potential flaws\n # in the decoding step).\n #body = ensure_bytes(body)\n return self._pack(body, content_encoding=content_encoding, content_type=content_type)", "def sign(self, data):\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return b64encode(signature)", "def sign(self, message):\n\n # if not already a byte string turn it to making sure\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(self.privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature", "def sign(message: bytes, private_key: str) -> str:\n return (\n _get_module_from_key(private_key)\n .sign(\n message,\n private_key,\n )\n .hex()\n .upper()\n )", "def sign(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.sign(msg)", "def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()", "def Sign(self, bytes_to_sign, logf=None):\r\n # Implements PKCS1-v1_5 w/SHA256 over the bytes, and returns\r\n # the result as a base64url encoded bignum.\r\n\r\n self._Log(logf, 'bytes_to_sign = [%s]' % bytes_to_sign.encode('hex'))\r\n\r\n self._Log(logf, 'keypair size : %s' % self.keypair.size())\r\n\r\n # Generate the PKCS1-v1_5 compatible message, which includes\r\n # magic ASN.1 bytes and padding:\r\n emsa_msg = self._MakeEmsaMessageSha256(bytes_to_sign, self.keypair.size(), logf)\r\n # TODO(jpanzer): Check whether we need to use max keysize above\r\n # or just keypair.size\r\n\r\n self._Log(logf, 'emsa_msg = [%s]' % emsa_msg.encode('hex'))\r\n\r\n # Compute the signature:\r\n signature_long = self.keypair.sign(emsa_msg, None)[0]\r\n\r\n # Encode the signature as armored text:\r\n signature_bytes = 
number.long_to_bytes(signature_long)\r\n\r\n self._Log(logf, 'signature_bytes = [%s]' % signature_bytes.encode('hex'))\r\n\r\n return base64.urlsafe_b64encode(signature_bytes).encode('utf-8')", "def sign(self, encoded):\n signature = self._hmac.copy()\n signature.update(encoded)\n return signature.hexdigest().encode('utf-8')", "def sign(self, msg, key):\n\n if not isinstance(key, ec.EllipticCurvePrivateKey):\n raise TypeError(\"The private key must be an instance of \" \"ec.EllipticCurvePrivateKey\")\n\n self._cross_check(key.public_key())\n num_bits = key.curve.key_size\n num_bytes = (num_bits + 7) // 8\n asn1sig = key.sign(msg, ec.ECDSA(self.hash_algorithm()))\n # Cryptography returns ASN.1-encoded signature data; decode as JWS\n # uses raw signatures (r||s)\n (r, s) = decode_dss_signature(asn1sig)\n return int.to_bytes(r, num_bytes, \"big\") + int.to_bytes(s, num_bytes, \"big\")", "def serialize(self, data):\n assert self._key is not None\n assert self._cert is not None\n try:\n data = self._serialize(data)\n signature = b64encode(self._key.sign(data, self._digest))\n signer = self._cert.get_id()\n return self._serialize(dict(data=data,\n signer=signer,\n signature=signature))\n except Exception, exc:\n raise SecurityError(\"Unable to serialize: %r\" % (exc, ))", "def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()", "def sign(self, account_name: str, msg: bytes) -> bytes:\n\n private_key = self._get_key(account_name)\n return private_key.sign(msg)", "def sign(self, data):\n from base64 import urlsafe_b64encode\n\n if self.sign_private == \"\":\n raise ValueError(\"Error signing: No private signing key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return urlsafe_b64encode(signature)", "def sign_message(self, message):\n if self.private_key:\n if isinstance(message, str):\n utf8 = message.encode('utf-8')\n else:\n raise TypeError(\"message must be a string.\")\n signature = self.private_key.sign(utf8).to_base64()\n return signature\n else:\n return None", "def sign(key, data):\n h = hmac.new(key, data.encode('utf8'), sha1)\n return base64.b64encode(h.digest())", "def sign(self, message):\n return Signature(self._sk.sign(message))", "def base64sign(plaintext, private_key):\n shahash = SHA256.new(plaintext.encode('utf8'))\n signer = PKCS1_v1_5.new(private_key)\n signature_bytes = signer.sign(shahash)\n return base64.b64encode(signature_bytes)", "def Sign(self, msg):\n emsa_encoded = util.MakeEmsaMessage(msg, self.size)\n return util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0])", "def gpgSignMessage(message):\n sig = gpg.sign(message, default_key=primary, passphrase=passphrase)\n if sig and sig.data:\n return sig.data", "def signed(self, encoded):\n signature = self.sign(encoded)\n return encoded + signature", "def wallet_sign(wallet_privkey, message):\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(wallet_privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature", "def sign(private_key: RsaKey, content: dict) -> None:\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return 
binascii.hexlify(signature).decode('ascii')", "def to_binary(self):\n c = containerize(exclude_fields(self))\n self.payload = MsgEcdsaSignature._parser.build(c)\n return self.pack()" ]
[ "0.71510535", "0.68013865", "0.67559075", "0.67180926", "0.6701305", "0.66629297", "0.662252", "0.66030496", "0.6597975", "0.6575549", "0.65619016", "0.6554209", "0.6548688", "0.650956", "0.6498251", "0.64480174", "0.63779855", "0.63758785", "0.63627595", "0.6349911", "0.6349461", "0.6317112", "0.6305967", "0.6293458", "0.62670565", "0.62234277", "0.6207132", "0.61530966", "0.6146832", "0.6130831" ]
0.73898894
0
Decode bytes into a 'Signing' message.
def decode(obj: bytes) -> Message: signing_pb = signing_pb2.SigningMessage() signing_pb.ParseFromString(obj) message_id = signing_pb.message_id dialogue_reference = ( signing_pb.dialogue_starter_reference, signing_pb.dialogue_responder_reference, ) target = signing_pb.target performative = signing_pb.WhichOneof("performative") performative_id = SigningMessage.Performative(str(performative)) performative_content = dict() # type: Dict[str, Any] if performative_id == SigningMessage.Performative.SIGN_TRANSACTION: skill_callback_ids = signing_pb.sign_transaction.skill_callback_ids skill_callback_ids_tuple = tuple(skill_callback_ids) performative_content["skill_callback_ids"] = skill_callback_ids_tuple skill_callback_info = signing_pb.sign_transaction.skill_callback_info skill_callback_info_dict = dict(skill_callback_info) performative_content["skill_callback_info"] = skill_callback_info_dict pb2_terms = signing_pb.sign_transaction.terms terms = Terms.decode(pb2_terms) performative_content["terms"] = terms pb2_raw_transaction = signing_pb.sign_transaction.raw_transaction raw_transaction = RawTransaction.decode(pb2_raw_transaction) performative_content["raw_transaction"] = raw_transaction elif performative_id == SigningMessage.Performative.SIGN_MESSAGE: skill_callback_ids = signing_pb.sign_message.skill_callback_ids skill_callback_ids_tuple = tuple(skill_callback_ids) performative_content["skill_callback_ids"] = skill_callback_ids_tuple skill_callback_info = signing_pb.sign_message.skill_callback_info skill_callback_info_dict = dict(skill_callback_info) performative_content["skill_callback_info"] = skill_callback_info_dict pb2_terms = signing_pb.sign_message.terms terms = Terms.decode(pb2_terms) performative_content["terms"] = terms pb2_raw_message = signing_pb.sign_message.raw_message raw_message = RawMessage.decode(pb2_raw_message) performative_content["raw_message"] = raw_message elif performative_id == SigningMessage.Performative.SIGNED_TRANSACTION: skill_callback_ids = signing_pb.signed_transaction.skill_callback_ids skill_callback_ids_tuple = tuple(skill_callback_ids) performative_content["skill_callback_ids"] = skill_callback_ids_tuple skill_callback_info = signing_pb.signed_transaction.skill_callback_info skill_callback_info_dict = dict(skill_callback_info) performative_content["skill_callback_info"] = skill_callback_info_dict pb2_signed_transaction = signing_pb.signed_transaction.signed_transaction signed_transaction = SignedTransaction.decode(pb2_signed_transaction) performative_content["signed_transaction"] = signed_transaction elif performative_id == SigningMessage.Performative.SIGNED_MESSAGE: skill_callback_ids = signing_pb.signed_message.skill_callback_ids skill_callback_ids_tuple = tuple(skill_callback_ids) performative_content["skill_callback_ids"] = skill_callback_ids_tuple skill_callback_info = signing_pb.signed_message.skill_callback_info skill_callback_info_dict = dict(skill_callback_info) performative_content["skill_callback_info"] = skill_callback_info_dict pb2_signed_message = signing_pb.signed_message.signed_message signed_message = SignedMessage.decode(pb2_signed_message) performative_content["signed_message"] = signed_message elif performative_id == SigningMessage.Performative.ERROR: skill_callback_ids = signing_pb.error.skill_callback_ids skill_callback_ids_tuple = tuple(skill_callback_ids) performative_content["skill_callback_ids"] = skill_callback_ids_tuple skill_callback_info = signing_pb.error.skill_callback_info skill_callback_info_dict = dict(skill_callback_info) 
performative_content["skill_callback_info"] = skill_callback_info_dict pb2_error_code = signing_pb.error.error_code error_code = ErrorCode.decode(pb2_error_code) performative_content["error_code"] = error_code else: raise ValueError("Performative not valid: {}.".format(performative_id)) return SigningMessage( message_id=message_id, dialogue_reference=dialogue_reference, target=target, performative=performative, **performative_content )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(self, crypto):", "def decode_message(self, raw):\n return raw.decode('utf-8')", "def sign(self, bytes):\r\n if not self.hasPrivateKey():\r\n raise AssertionError()\r\n paddedBytes = self._addPKCS1Padding(bytes, 1)\r\n m = bytesToNumber(paddedBytes)\r\n if m >= self.n:\r\n raise ValueError()\r\n c = self._rawPrivateKeyOp(m)\r\n sigBytes = numberToByteArray(c, numBytes(self.n))\r\n return sigBytes", "def parse_signature(data: bytes):\n return base58_encode(data, b'sig').decode()", "def encode(msg: Message) -> bytes:\n msg = cast(SigningMessage, msg)\n signing_msg = signing_pb2.SigningMessage()\n signing_msg.message_id = msg.message_id\n dialogue_reference = msg.dialogue_reference\n signing_msg.dialogue_starter_reference = dialogue_reference[0]\n signing_msg.dialogue_responder_reference = dialogue_reference[1]\n signing_msg.target = msg.target\n\n performative_id = msg.performative\n if performative_id == SigningMessage.Performative.SIGN_TRANSACTION:\n performative = signing_pb2.SigningMessage.Sign_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_transaction = msg.raw_transaction\n RawTransaction.encode(performative.raw_transaction, raw_transaction)\n signing_msg.sign_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGN_MESSAGE:\n performative = signing_pb2.SigningMessage.Sign_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_message = msg.raw_message\n RawMessage.encode(performative.raw_message, raw_message)\n signing_msg.sign_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_TRANSACTION:\n performative = signing_pb2.SigningMessage.Signed_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n signed_transaction = msg.signed_transaction\n SignedTransaction.encode(\n performative.signed_transaction, signed_transaction\n )\n signing_msg.signed_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_MESSAGE:\n performative = signing_pb2.SigningMessage.Signed_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n signed_message = msg.signed_message\n SignedMessage.encode(performative.signed_message, signed_message)\n signing_msg.signed_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.ERROR:\n performative = signing_pb2.SigningMessage.Error_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n 
performative.skill_callback_info.update(skill_callback_info)\n error_code = msg.error_code\n ErrorCode.encode(performative.error_code, error_code)\n signing_msg.error.CopyFrom(performative)\n else:\n raise ValueError(\"Performative not valid: {}\".format(performative_id))\n\n signing_bytes = signing_msg.SerializeToString()\n return signing_bytes", "def parse(sigBytes, der):\n # minimal message is when both numbers are 1 bytes. adding up to:\n # 0x30 + len + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>\n if len(sigBytes) < 8:\n raise DecredError(\"malformed signature: too short\")\n\n # 0x30\n index = 0\n if sigBytes[index] != 0x30:\n raise DecredError(\"malformed signature: no header magic\")\n index += 1\n # length of remaining message\n siglen = sigBytes[index]\n index += 1\n if siglen + 2 > len(sigBytes):\n raise DecredError(\"malformed signature: bad length\")\n # trim the slice we're working on so we only look at what matters.\n sigBytes = sigBytes[: siglen + 2]\n\n # 0x02\n if sigBytes[index] != 0x02:\n raise DecredError(\"malformed signature: no 1st int marker\")\n index += 1\n\n # Length of signature r.\n rLen = sigBytes[index]\n # must be positive, must be able to fit in another 0x2, <len> <s>\n # hence the -3. We assume that the length must be at least one byte.\n index += 1\n if rLen <= 0 or rLen > len(sigBytes) - index - 3:\n raise DecredError(\"malformed signature: bogus r length\")\n\n # Then r itself.\n rBytes = sigBytes[index : index + rLen]\n if der:\n try:\n canonicalPadding(rBytes)\n except Exception as e:\n raise DecredError(\n \"malformed signature: bogus r padding or sign: {}\".format(e)\n )\n\n index += rLen\n # 0x02. length already checked in previous if.\n if sigBytes[index] != 0x02:\n raise DecredError(\"malformed signature: no 2nd int marker\")\n index += 1\n\n # Length of signature s.\n sLen = sigBytes[index]\n index += 1\n # s should be the rest of the bytes.\n if sLen <= 0 or sLen > len(sigBytes) - index:\n raise DecredError(\"malformed signature: bogus S length\")\n\n # Then s itself.\n sBytes = sigBytes[index : index + sLen]\n if der:\n try:\n canonicalPadding(rBytes)\n except Exception as e:\n raise DecredError(\n \"malformed signature: bogus s padding or sign: {}\".format(e)\n )\n\n index += sLen\n # sanity check length parsing\n if index != len(sigBytes):\n raise DecredError(\n f\"malformed signature: bad final length {index} != {len(sigBytes)}\"\n )\n\n signature = Signature(rBytes, sBytes)\n\n # FWIW the ecdsa spec states that r and s must be | 1, N - 1 |\n if signature.r.int() < 1:\n raise DecredError(\"signature r is less than one\")\n if signature.s.int() < 1:\n raise DecredError(\"signature s is less than one\")\n if signature.r.int() >= Curve.N:\n raise DecredError(\"signature r is >= curve.N\")\n if signature.s.int() >= Curve.N:\n raise DecredError(\"signature s is >= curve.N\")\n\n return signature", "def Verify(self, signed_bytes, signature_b64):\r\n # Generate the PKCS1-v1_5 compatible message, which includes\r\n # magic ASN.1 bytes and padding:\r\n emsa_msg = self._MakeEmsaMessageSha256(signed_bytes,\r\n self.keypair.size())\r\n\r\n # Get putative signature:\r\n putative_signature = base64.urlsafe_b64decode(signature_b64.encode('utf-8'))\r\n putative_signature = number.bytes_to_long(putative_signature)\r\n\r\n # Verify signature given public key:\r\n return self.keypair.verify(emsa_msg, (putative_signature,))", "def sign_message(message: bytes, sender_private_key: RsaKey) -> bytes:\n return 
pkcs1_15.new(sender_private_key).sign(SHA256.new(message))", "def _decode(self, data):\n if not data:\n return None\n try:\n return self.signer.unsign_object(data, serializer=MessageSerializer)\n except (signing.BadSignature, binascii.Error, json.JSONDecodeError):\n pass\n # Mark the data as used (so it gets removed) since something was wrong\n # with the data.\n self.used = True\n return None", "def decode_sig(sig):\n table = maketrans(\"-._\", \"+/=\")\n sig = str(sig).translate(table)\n try:\n return b64decode(sig)\n except TypeError:\n raise MalformedResponseError(\"Signature is not a valid base-64 \"\n \"encoded string\")", "def sign_message(self, message):\n if self.private_key:\n if isinstance(message, str):\n utf8 = message.encode('utf-8')\n else:\n raise TypeError(\"message must be a string.\")\n signature = self.private_key.sign(utf8).to_base64()\n return signature\n else:\n return None", "def from_bytes(cls, bytes_in, output_path):\n\n pki_type = LEUnsigned.unpack(bytes_in[0])\n pki_id = LEUnsigned.unpack(bytes_in[1])\n content_len = LEUnsigned.unpack(bytes_in[2:4])\n raw_data = bytes_in[4:4+content_len]\n cls.output_file(pki_type, pki_id, raw_data, output_path)\n return bytes_in[4 + content_len + ((4 - (content_len % 4)) % 4):]", "def decode(self, data: bytes) -> bytes:\n ...", "def decode(raw_bytes, *, serialization=None, subtypes=tuple()):\n raise NotImplementedError", "def unsigned(self, encoded):\n message, _ = self.split(encoded)\n return message", "def sign(priv_key: rsa.RSAPrivateKey, msg: bytes) -> Signature:\n return priv_key.sign(msg, PADDING, HASH)", "def deserialize_from_signature(cls, payload):\n return payload", "def split(self, encoded):\n maxlen = len(encoded) - self.sig_size\n message = encoded[:maxlen]\n signature = encoded[-self.sig_size:]\n return message, signature", "def Verify(self, msg, sig_bytes):\n return self.VerifySignedData(self.Sign(msg), sig_bytes)", "def from_bytes(cls, bytes):\n construct = _constructs.Handshake.parse(bytes)\n return cls(\n msg_type=enums.HandshakeType(construct.msg_type),\n length=construct.length,\n body=cls._get_handshake_message(\n enums.HandshakeType(construct.msg_type), construct.body\n ),\n )", "def parse_Bytes(serialized_bytes, msg_class):\n if msg_class is None or msg_class is bytes:\n return serialized_bytes\n msg = msg_class()\n msg.ParseFromString(serialized_bytes)\n return msg", "def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))", "def sign_and_verify(self, msg):\n ciphertext, tag = self.signer.encrypt_and_digest(msg.encode('utf-8'))\n plaintext = self.verifier.decrypt(ciphertext)\n try:\n self.verifier.verify(tag)\n print(\"The message is authentic: \", plaintext)\n except ValueError:\n print(\"Key incorrect or message corrupted\")", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def sign(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.sign(msg)", "def sign(self, message):\n\n # if not already a byte string turn it to making 
sure\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(self.privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature", "def dePem(s, name):\r\n prefix = \"-----BEGIN %s-----\" % name\r\n postfix = \"-----END %s-----\" % name \r\n start = s.find(prefix)\r\n if start == -1:\r\n raise SyntaxError(\"Missing PEM prefix\")\r\n end = s.find(postfix, start+len(prefix))\r\n if end == -1:\r\n raise SyntaxError(\"Missing PEM postfix\")\r\n s = s[start+len(\"-----BEGIN %s-----\" % name) : end]\r\n retBytes = a2b_base64(s) # May raise SyntaxError\r\n return retBytes", "def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if tbsCertificateP.value[0]==0xA0:\r\n subjectPublicKeyInfoIndex = 6\r\n else:\r\n subjectPublicKeyInfoIndex = 5\r\n\r\n #Get the subject\r\n self.subject = tbsCertificateP.getChildBytes(\\\r\n subjectPublicKeyInfoIndex - 1)\r\n\r\n #Get the subjectPublicKeyInfo\r\n subjectPublicKeyInfoP = tbsCertificateP.getChild(\\\r\n subjectPublicKeyInfoIndex)\r\n\r\n #Get the algorithm\r\n algorithmP = subjectPublicKeyInfoP.getChild(0)\r\n rsaOID = algorithmP.value\r\n if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:\r\n raise SyntaxError(\"Unrecognized AlgorithmIdentifier\")\r\n\r\n #Get the subjectPublicKey\r\n subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)\r\n\r\n #Adjust for BIT STRING encapsulation\r\n if (subjectPublicKeyP.value[0] !=0):\r\n raise SyntaxError()\r\n subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])\r\n\r\n #Get the modulus and exponent\r\n modulusP = subjectPublicKeyP.getChild(0)\r\n publicExponentP = subjectPublicKeyP.getChild(1)\r\n\r\n #Decode them into numbers\r\n n = bytesToNumber(modulusP.value)\r\n e = bytesToNumber(publicExponentP.value)\r\n\r\n #Create a public key instance\r\n self.publicKey = _createPublicRSAKey(n, e)", "def decode(self, bytes_, errors='strict'):\n decoder = self.IncrementalDecoder(errors=errors)\n return (\n decoder.decode(bytes_, final=True),\n len(bytes_),\n )", "def decode_packet(self, bytes):\n b64 = False\n if not isinstance(bytes, six.binary_type):\n bytes = bytes.encode('utf-8')\n\n packet_type = six.byte2int(bytes[0:1])\n if packet_type == ord('b'):\n binary = True\n bytes = bytes[1:]\n packet_type = int(chr(six.byte2int(bytes[0:1])))\n b64 = True\n elif packet_type >= ord('0'):\n packet_type = int(chr(packet_type))\n binary = False\n else:\n binary = True\n\n packet_data = None\n if len(bytes) > 1:\n if binary:\n if b64:\n packet_data = base64.b64decode(bytes[1:])\n else:\n packet_data = bytes[1:]\n else:\n packet_data = bytes[1:].decode('utf-8')\n\n return Packet(packet_type, packet_data, binary)" ]
[ "0.61183023", "0.59744656", "0.59650993", "0.595037", "0.5905722", "0.57893497", "0.5763979", "0.57533914", "0.5739624", "0.5720419", "0.5714346", "0.57035404", "0.56965363", "0.5655842", "0.5627187", "0.5618108", "0.56013894", "0.5586349", "0.5549213", "0.55436", "0.55422884", "0.55149424", "0.5511826", "0.5509764", "0.54942584", "0.5474168", "0.54666126", "0.54516256", "0.54491043", "0.5448054" ]
0.63024235
0
Tests that the authentication backend returns None if the token does not exist
def test_returns_none_if_no_such_token(self): result = PasswordlessAuthenticationBackend().authenticate('no-such-token') self.assertIsNone(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def test_no_token_auth_required(self, client):\n assert_hook_status(client, status=401)", "def test_returns_none_if_no_user(self):\r\n self.assertIsNone(PasswordlessAuthenticationBackend().get_user('[email protected]'))", "def test_api_call_without_token(self):\n res = self.client().get('/actors')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Authentication error.\")", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_no_token_get_all(self):\n response = self.app.get('/api/v3/users')\n self.assertEqual(response.status_code, 401)", "def test_get_non_existing_token_authenticated_user(self):\r\n\r\n user_no_tokens = UserFactory.create_batch(2)[1]\r\n\r\n res = self.app.get('/api/token/twitter?api_key=' + user_no_tokens.api_key)\r\n error = json.loads(res.data)\r\n\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'GET', error\r\n assert error['target'] == 'token', error\r\n assert error['exception_cls'] == 'NotFound', error", "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])", "def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])", "def check_auth():", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def noauth(self):\n try:\n # some endpoints dont return json\n return self.json['response'].get('error_id') == 'NOAUTH'\n except:\n return False", "def test_create_token_missing_field(self):\n res = self.client.post(TOKEN_URL, {'email':'', 'password':\"\"})\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_token(authToken):\n url = endpoint('test')\n r = requests.get(url, headers={'authorizationToken': authToken}) \n if r.status_code == 403:\n print(\"403\")\n return False\n response = json.loads( r.content.decode() )\n return response", "def test_get_without_token(self):\n client = Client()\n response = client.get('/reviews/')\n self.assertEqual(response.status_code, 401)", "def test_create_token_missing_field(self):\n res = self.client.post(TOKEN_URL, {\n **self.mock_user,\n 'password': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)", "def is_missing_token_service(request):\n if request.json == {}:\n return True\n schema = schema_utils.get_auth_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request.json)\n if validator.errors:\n logging.error(str(validator.errors))\n return 
not result", "def test_create_token_no_user(self):\n res = self.client.post(TOKEN_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)", "def test_create_token_missing_field(self):\r\n res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})\r\n\r\n self.assertNotIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_get_specific_token_anonymous_user(self):\r\n\r\n res = self.app.get('/api/token/twitter')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err", "def test_create_token_missing_field(self):\n\n invalid_credentials = {'email': '[email protected]', 'password': ''}\n response = self.client.post(URL_TOKEN, invalid_credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_token_no_user(self):\n payload = {'email': '[email protected]', 'password': 'password'}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create_token_for_not_user(self):\n\n credentials = {'email': '[email protected]', 'password': 'Testpass12'}\n response = self.client.post(URL_TOKEN, credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_access_token_empty(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)", "def test_returns_existing_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n existing_user = User.objects.create(email=email)\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n self.assertEquals(user, existing_user)", "def check_token(token):\n return conn.hget('login:', token)" ]
[ "0.72637933", "0.7229849", "0.72107416", "0.71965224", "0.7169361", "0.7169361", "0.71229583", "0.7099967", "0.7085434", "0.70067036", "0.70067036", "0.70030606", "0.69727105", "0.6956898", "0.69200426", "0.6906309", "0.69053644", "0.68937737", "0.68885046", "0.6881696", "0.6877969", "0.68692815", "0.6850384", "0.6850342", "0.68442756", "0.68324673", "0.682942", "0.6825729", "0.68169224", "0.6813638" ]
0.8749138
0
Tests that the user with the correct email is returned if the token exists
def test_returns_new_user_with_correct_email_if_token_exists(self): email = '[email protected]' token = Token.objects.create(email=email) user = PasswordlessAuthenticationBackend().authenticate(token.uid) new_user = User.objects.get(email=email) self.assertEquals(user, new_user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_returns_existing_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n existing_user = User.objects.create(email=email)\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n self.assertEquals(user, existing_user)", "def validate_email(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n self.user_in_db = User.users_db.get(self.email)\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if 'secret_token' not in decoded_token or decoded_token['secret_token'] != self.user_in_db['secret_token']:\n return {'error': 'Token is invalid'}\n\n self.user_in_db['secret_token'] = ''\n self.user_in_db['verified'] = True\n\n User.users_db.put(self.user_in_db)\n\n return decoded_token", "def verify_auth_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except BadSignature:\n return None # invalid token\n user = User.query.get(data['email'])\n return user", "def test_verifyEmailToken(self, testUser):\n test_token = testUser._get_email_verification_token()\n resulting_user, error = User.verify_email_verification_token(test_token)\n assert resulting_user == testUser\n assert error is None", "def verify_email(uid, token):\n return True", "def test_get_user_by_emailuser_email_get(self):\n pass", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "def verify_token(*token): # pragma: no cover\n\n if current_app.config.get('IGNORE_AUTH') is True:\n return True\n\n g.user = APITokenModel.verify_token(token[0])\n\n if g.user is None:\n return False\n\n return g.user", "def request_verification(data):\n if 'email' in data:\n if user_exists(data['email']):\n return get_user_id(data['email'])\n else:\n return 401\n else:\n return 400", "def test_successful_email_verification(self):\n self.signup_a_user(self.user_data)\n time = datetime.now() + timedelta(hours=24)\n token = jwt.encode({\n \"email\": self.user_data['user']['email'],\n \"username\": self.user_data['user']['username'],\n \"exp\": int(time.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256').decode('utf-8')\n verification_url = reverse(\n 'authentication:verify_email', kwargs={'token': token})\n\n response = self.client.get(\n verification_url,\n HTTP_AUTHORIZATION=f'token {token}'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except (SignatureExpired, BadSignature):\n return None\n else:\n user = User.get(User.id == data['id'])\n return user", "def test_already_validated_email(self):\n token = self.authenticate_user(self.auth_user_data).data[\"token\"]\n verification_url = reverse(\n 'authentication:verify_email', 
kwargs={'token': token})\n\n response = self.client.get(\n verification_url,\n HTTP_AUTHORIZATION=f'token {token}'\n )\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def verify_auth_token(cls, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return None\n user = User.query.get(data['id'])\n if user and user.session_token == token:\n return user\n return None", "def email_exist():\n if request.method == 'POST' and request.get_json():\n data = request.get_json()\n user = ecomap_user.get_user_by_email(data['email'])\n return jsonify(isValid=bool(user))", "def get(self, request):\n token = request.GET.get('token')\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms='HS256')\n print(f'payload {payload}')\n user = User.objects.get(id = payload['user_id'])\n \n if not user.email_verified:\n user.email_verified = True\n user.save()\n return response.Response({'email': \"successful email verification\"}, status = status.HTTP_200_OK)\n return response.Response({'error': \"unsuccessful email verification\"}, status = status.HTTP_400_BAD_REQUEST)", "def email_from_invitation_token(token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n user_email = data.get('user_email')\n if user_email is None:\n return False\n if User.query.filter_by(email=user_email).first() is not None:\n return False\n return user_email", "def verify_auth_token(token):\n\n s = Serializer(current_app.config['SECRET_KEY'])\n\n try:\n data = s.loads(token)\n except SignatureExpired:\n print \"EXP\", token\n return None\n except BadSignature:\n print \"BAD\", token\n return None\n\n user = User.query.get(data['id'])\n return user", "def verify_auth_token(token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token)\n except (BadSignature, SignatureExpired):\n return None\n return User.query.get(data['id'])", "def user_exists(self, email):\n user = UserModels.fetch_user_by_email(email)\n if user:\n return {\n \"status\": 400,\n \"error\": \"That email already exists\"\n }", "def verify_user(uid, token_value):\n db = api.db.get_conn()\n\n token_user = api.token.find_key_by_token(\"email_verification\", token_value)\n if token_user is None:\n return False\n current_user = api.user.get_user(uid=uid)\n\n if token_user[\"uid\"] == current_user[\"uid\"]:\n db.users.find_one_and_update(\n {\"uid\": current_user[\"uid\"]}, {\"$set\": {\"verified\": True}}\n )\n api.token.delete_token({\"uid\": current_user[\"uid\"]}, \"email_verification\")\n return True\n else:\n return False", "def get(self, request, token):\n user, token = self._authenticate_credentials(request, token)\n\n if not user.is_valid:\n user.is_valid = True\n user.save()\n return Response({\"message\": \"youve been verified\",\n \"status\": 200}, status=status.HTTP_200_OK)\n else:\n return Response({'msg': 'account has already been verified'},\n status=status.HTTP_400_BAD_REQUEST)", "def validate_email(self, data):\n user = account_models.User.objects.filter(username__iexact=data, is_active=True)\n if user:\n return data\n raise serializers.ValidationError(\"Email address not verified for any user account\")", "def validate_user_existence():\n from sfa_api.utils.storage import get_storage\n storage = get_storage()\n if not storage.user_exists():\n try:\n info = request_user_info()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n return 
False\n else:\n if not info.get('email_verified', False):\n # User has a valid token, but their email\n # is yet to be verified\n return False\n storage.create_new_user()\n return True", "def test_get_token_with_invalid_email(self):\n payload = {'email': '[email protected]', 'password': 'password231'}\n create_user(**payload)\n\n invalid_credentials = {'email': '[email protected]', 'password': 'password231'}\n token = self.client.post(TOKEN_URL, invalid_credentials)\n\n self.assertEqual(token.status_code, status.HTTP_400_BAD_REQUEST)", "def test_gets_user_by_email(self):\r\n User.objects.create(email='[email protected]')\r\n desired_user = User.objects.create(email='[email protected]')\r\n found_user = PasswordlessAuthenticationBackend().get_user('[email protected]')\r\n self.assertEqual(desired_user, found_user)", "def test_user_existing_email(self):\n data = json.dumps({\n \"username\" : \"john\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n res = self.app.post( # pylint: disable=W0612\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_manage_user_with_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook',\r\n email='[email protected]', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg", "def test_token_verification(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertEqual(self.user.verify_auth_token(user_token), self.user)\n self.assertIsNone(self.\n user.verify_auth_token('jdjdje230920093944334j'))", "def get_user_from_token(token):\n try:\n jwt_decode = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])\n except:\n return None\n email = jwt_decode['user']\n try:\n user = User.get(User.email == email)\n except:\n return None\n return user", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)" ]
[ "0.8013532", "0.74645233", "0.7451442", "0.74479705", "0.73216754", "0.7300814", "0.72548413", "0.72535205", "0.72382295", "0.704606", "0.699168", "0.693598", "0.6931547", "0.6927095", "0.6908113", "0.68925977", "0.6881654", "0.6880594", "0.68702275", "0.6869365", "0.68570554", "0.6856913", "0.68497396", "0.6849124", "0.684579", "0.68301314", "0.6828805", "0.68159205", "0.68093693", "0.6795774" ]
0.79686344
1
Tests that the correct user for the token is returned if the user already exists
def test_returns_existing_user_with_correct_email_if_token_exists(self): email = '[email protected]' existing_user = User.objects.create(email=email) token = Token.objects.create(email=email) user = PasswordlessAuthenticationBackend().authenticate(token.uid) self.assertEquals(user, existing_user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_returns_new_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n new_user = User.objects.get(email=email)\r\n self.assertEquals(user, new_user)", "def correct_token(name, token):\n if not User.created(name):\n return False\n user = User.get_user(name)\n return user.info['token'] == token", "def test_token_only_for_1_user(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertNotEqual(self.user.verify_auth_token(user_token),\n self.user2)", "def validate_user_existence():\n from sfa_api.utils.storage import get_storage\n storage = get_storage()\n if not storage.user_exists():\n try:\n info = request_user_info()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n return False\n else:\n if not info.get('email_verified', False):\n # User has a valid token, but their email\n # is yet to be verified\n return False\n storage.create_new_user()\n return True", "def test_get_existing_token_authenticated_user(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n user.info = create_tokens_for(user)\r\n\r\n # If the token exists, it should be retrieved\r\n res = self.app.get('/api/token/twitter?api_key=' + user.api_key)\r\n data = json.loads(res.data)\r\n\r\n assert data.get('twitter_token') is not None, data\r\n assert data.get('twitter_token')['oauth_token'] == 'token-for-%s' % user.name\r\n assert data.get('twitter_token')['oauth_token_secret'] == 'secret-for-%s' % user.name\r\n # And no other tokens should\r\n assert data.get('facebook_token') is None, data", "def test_registration_when_user_already_exists(self):\n # register the user the first time\n self.register_user()\n # register the same user the second time\n result = self.client().post(AuthTestCase.registration, data=self.user)\n response_result = json.loads(result.data.decode())\n self.assertEqual(result.status_code, 409)\n self.assertEqual(response_result['message'], \"user already exists\")", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(id='1', name='google',\r\n email='[email protected]')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id='10', name=self.name,\r\n email=self.email_addr)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"User should be the same\"\r\n print user.google_user_id\r\n assert user.google_user_id == '10', err_msg", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(user_id=1, screen_name='twitter')\r\n token = dict(oauth_token='token', oauth_token_secret='secret')\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert 
user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(user_id=10, screen_name=self.name)\r\n token = dict(oauth_token='token2', oauth_token_secret='secret2')\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.twitter_user_id == 10, err_msg", "def test_get_user_if_exists(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details, user=user)\n self.assertDictEqual(actual, {'is_new': False})", "def test_bad_token(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n self.assertIsNone(self.\n user.verify_auth_token('jdjdje230920093944334j'))", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def test_token_verification(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertEqual(self.user.verify_auth_token(user_token), self.user)\n self.assertIsNone(self.\n user.verify_auth_token('jdjdje230920093944334j'))", "def test_user_already_exists(self):\n User.objects.create_user(\n '[email protected]',\n '[email protected]',\n '123existing'\n )\n response = self.client.post('/o/register', {\n 'email': '[email protected]',\n 'password': '123existing',\n 'terms_acceptance': True,\n })\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'auth/login.html')\n self.assertContains(\n response,\n 'Użytkownik o podanym emailu już istnieje',\n )\n self.assertNotIn('_auth_user_id', self.client.session)\n self.assertEqual(User.objects.all().count(), 1)", "def test_manage_user_without_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def get_one_user():", "def test_manage_user_with_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook',\r\n email='[email 
protected]', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg", "def test_registered_with_already_registered_user(self):\n\n print(\" ------------ Test 2 - Registration an user already registed ------------------\")\n\n user_id = uuid.uuid4()\n account = Account(user_id=user_id, password=\"my-precious\", currency=Currency(\"EUR\"))\n db.session.add(account)\n db.session.commit()\n\n response = register_user(user_id, \"my-precious\", \"EUR\")\n data = response.json()['message']\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'User already exists. Please Log in')\n self.assertTrue(response.headers['Content-Type'] == 'application/json')\n self.assertEqual(response.json()['code'], 202)", "def test_register_already_registered_user(self):\n register_1 = client.post(REGISTER_URL, json = self.post_data2)\n user_expected = db_session.query(User).filter_by(username=self.post_data2[\"username\"]).first()\n\n self.assertEqual(register_1.status_code, 201)\n self.assertIsNotNone(user_expected)\n register_2 = client.post(REGISTER_URL, json = self.post_data2)\n self.assertEqual(register_2.status_code, 401)\n self.assertEqual(register_2.get_json()[\"msg\"],\n \"User already registered\")", "def test_already_existing_user(self):\n self.user.registration(\n \"Githeri\", \"[email protected]\", \"iwantgitheri\", \"iwantgitheri\")\n msg = self.user.registration(\"Githeri\",\n \"[email protected]\",\n \"iwantgitheri\",\n \"iwantgitheri\")\n self.assertEqual(msg, \"Your Account Already Active. 
Proceed to login\")", "def test_user_cannot_register_twice(self):\n self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n response2 = self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n self.assertEqual(response2.status_code, 203)\n result = json.loads(response2.data.decode())\n self.assertEqual(result[\"message\"], \"User already exists\")", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "def test_created_user_has_token_in_cache():\n _request_create_user(SEED_USER_DATA)\n token = SEED_USER_DATA['token']\n cache_key = f'access_token:{token}'\n\n expected_data = {\n 'email': SEED_USER_DATA['email'],\n 'sso_email_user_id': SEED_USER_DATA['sso_email_user_id'],\n }\n assert cache.get(cache_key) == expected_data", "def get_user(self, token: str) -> Optional[User]:", "def get_user(self, token: str) -> Optional[User]:", "def test_check_user(self):\n self.new_users = User('Dennis', 'Kiplangat', 'kiplangat18')\n self.new_users.save_user()\n user2 = User('Enock', 'kip', 'mankip')\n user2.save_user()\n\n for user in User.users_list:\n if user.first_name == user2.first_name and user.password == user2.password:\n current_user = user.first_name\n return current_user", "def test_already_signedup_user(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_2)\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_2)\n self.assertEqual(response.status_code, 409)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"The email address has been used try another one.\")", "def same_user(user_id):\n return user_id == login_session['user_id']", "def authenticate_user(data):\n \n try:\n auth_token = data[\"auth_token\"]\n user_token = Token.objects.get(username=data[\"username\"])\n if user_token.token == auth_token:\n return True\n except:\n return False\n return False", "def test_registered_with_already_registered_user(self):\n self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n username='joe',\n password='123456'\n )),\n content_type='application/json'\n )\n response = self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n username='joe',\n password='123456'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(\n data['message'] == 'User already exists. Please Log in.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 202)" ]
[ "0.7521314", "0.7278974", "0.7100507", "0.70321304", "0.6936832", "0.6907738", "0.6905078", "0.68837535", "0.68488425", "0.6830636", "0.6752784", "0.6733311", "0.66693807", "0.666195", "0.665537", "0.66236097", "0.66234034", "0.6618708", "0.6615607", "0.6612222", "0.66108066", "0.6594045", "0.65710604", "0.65517324", "0.65517324", "0.6536248", "0.649842", "0.6477071", "0.64766216", "0.64761305" ]
0.7682538
0
Tests that the authentication class get_user function returns None if no user was found
def test_returns_none_if_no_user(self): self.assertIsNone(PasswordlessAuthenticationBackend().get_user('[email protected]'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user():\n try:\n userId = request.args.get('login_as')\n return users[int(userId)]\n except Exception:\n return None", "def test_get_user(self):\n user = User(self.client, \"test-user\", {})\n\n self.assertEqual(user.username, \"test-user\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertTrue(user.restricted)\n self.assertTrue(user.tfa_enabled)\n self.assertIsNotNone(user.ssh_keys)", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user", "def get_user(self):\n return None", "def test_get_website_user_returns_user(self):\n user = get_website_user()\n self.assertTrue(user)", "def user(self):\n user = None\n if not 'user' in self.session and os.environ['APPLICATION_ID'].startswith('dev'):\n if self.request.get('paToken'):\n user = Github({'paToken': self.request.get('paToken')}).user()\n if user:\n logging.info(\"Read user data %s\" % json.dumps(user))\n user['paToken'] = self.request.get('paToken')\n self.session['user'] = user\n return user\n # No user for now\n return None\n \n if 'user' in self.session: \n return self.session['user']\n \n logging.info('No user detected; redirecting to /login')\n self.redirect('/login?%s' % urllib.urlencode({'r': self.request.path}), abort=True)", "def getUser():\n username = post_param('username', '')\n if username == '':\n username = get_param('username', '')\n password = get_param('password', '')\n else:\n password = post_param('password', '')\n \n if username == '':\n return None\n else:\n return User(username, password)", "def get(self):\r\n return get_user(request)", "def test_get(self):\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n\n self.assertEqual(api.user.get('chuck'), user)", "def get_one_user():", "def get_user(cls, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def test_get_user(self):\n print('(' + self.test_get_user.__name__+')', self.test_get_user.__doc__)\n # test for patient\n self.assertDictContainsSubset(\n self.connection.get_user(PATIENT_USERNAME), PATIENT)\n # test for doctor\n self.assertDictContainsSubset(\n self.connection.get_user(DOCTOR_USERNAME), DOCTOR)", "def get_user(self):\n\n user_session = self.get()\n if not user_session:\n return None\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n return us.single(user_session.login)", "def get_user():\n try:\n if 'gauth_token' in session:\n response = authenticate_with_users_service(\n session['gauth_token'])\n if response.status_code == 201:\n return response.json()\n return None # Not signed in\n except requests.exceptions.ConnectionError:\n return None # Can't connect to users service", "def get_user(self, user_id):\n try:\n User = get_user_model()\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(id):\n pass", "def getUser(self):\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n return user", "def get_user(self, user_id):\n User = get_user_model()\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, token: str) -> Optional[User]:", "def get_user(self, token: str) -> Optional[User]:", "def get_user(self, user_id):\n return None # noqa: WPS324", "def get_user(user: User) -> User:\n if user.is_authenticated:\n return user\n else:\n return get_anonymous_user()", "def get_user(self):\n raise NotImplementedError", "def 
get_user(self, username, password) -> bool:\n\t\tuser = self.users.find_one({\"username\":username})\n\t\treturn bcrypt.checkpw(password, user.get(\"password\"))", "def get_user(self, user_id=None):\n raise NotImplementedError", "def test_user_authentication(self):\n response = self.client.get(self.home_url)\n user = response.context.get('user')\n self.assertTrue(user.is_authenticated)", "def get_user(self, user_id):\n try:\n return Account.objects.get(pk=user_id)\n except Account.DoesNotExist:\n return None", "def api_auth():\n form = request.get_json(force=True)\n userdata = None\n if form['register']:\n userdata = userProvider.register_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n else:\n userdata = userProvider.load_authenticated_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n if userdata:\n user = userProvider.userdata_to_user(userdata)\n flask_login.login_user(user)\n return \"true\"\n raise Exception(\"No user loaded\")", "def test_get_user_by_username(self):\n\t\tusername_in_db = server.get_user_by_username('Natasha')\n\t\tself.assertTrue(username_in_db, 'Query did not fetch user object.')\n\t\tusername_not_in_db = server.get_user_by_username('xyz')\n\t\tself.assertFalse(username_not_in_db, 'Query fetched user that did not exist (xyz).')", "def test_api_user_get(self):\n pass" ]
[ "0.75293857", "0.74691415", "0.73994565", "0.72023684", "0.7175075", "0.7165074", "0.7147005", "0.714595", "0.7141002", "0.7135205", "0.70909756", "0.7087847", "0.7063624", "0.7053719", "0.7041671", "0.7037213", "0.6999133", "0.6980474", "0.6974275", "0.6974275", "0.6932183", "0.6929245", "0.691958", "0.691744", "0.6873316", "0.6853968", "0.6841784", "0.6827147", "0.67972803", "0.6772297" ]
0.7773204
0
Generates the QC_Trim commands
def generate_trim_commands(forward_seqs, reverse_seqs, map_file, out_dir, parameters): # we match filenames, samples, and run prefixes samples = make_read_pairs_per_sample(forward_seqs, reverse_seqs, map_file) cmds = [] param_string = _format_params(parameters, ATROPOS_PARAMS) for run_prefix, sample, f_fp, r_fp in samples: if r_fp is None: cmds.append("atropos trim %s -o %s -se %s" % ( param_string, join(out_dir, '%s.R1.fastq.gz' % run_prefix), f_fp)) else: cmds.append('atropos trim %s -o %s -p %s -pe1 %s -pe2 %s' % (param_string, join(out_dir, '%s.R1.fastq.gz' % run_prefix), join(out_dir, '%s.R2.fastq.gz' % run_prefix), f_fp, r_fp)) return cmds, samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trim(qclient, job_id, parameters, out_dir):\n # Step 1 get the rest of the information need to run Atropos\n qclient.update_job_step(job_id, \"Step 1 of 4: Collecting information\")\n artifact_id = parameters['input']\n del parameters['input']\n\n # Get the artifact filepath information\n artifact_info = qclient.get(\"/qiita_db/artifacts/%s/\" % artifact_id)\n fps = artifact_info['files']\n\n # Get the artifact metadata\n prep_info = qclient.get('/qiita_db/prep_template/%s/'\n % artifact_info['prep_information'][0])\n qiime_map = prep_info['qiime-map']\n\n # Step 2 generating command atropos\n qclient.update_job_step(job_id, \"Step 2 of 4: Generating\"\n \" QC_Trim commands\")\n rs = fps['raw_reverse_seqs'] if 'raw_reverse_seqs' in fps else []\n commands, samples = generate_trim_commands(fps['raw_forward_seqs'],\n rs, qiime_map, out_dir,\n parameters)\n\n # Step 3 execute atropos\n len_cmd = len(commands)\n msg = \"Step 3 of 4: Executing QC_Trim job (%d/{0})\".format(len_cmd)\n success, msg = _run_commands(qclient, job_id, commands, msg, 'QC_Trim')\n if not success:\n return False, None, msg\n\n # Step 4 generating artifacts\n msg = \"Step 4 of 4: Generating new artifacts (%d/{0})\".format(len_cmd)\n suffixes = ['%s.R1.fastq.gz', '%s.R2.fastq.gz']\n prg_name = 'Atropos'\n file_type_name = 'Adapter trimmed files'\n ainfo = _per_sample_ainfo(\n out_dir, samples, suffixes, prg_name, file_type_name, bool(rs))\n\n return True, ainfo, \"\"", "def trimReads(commands_list, fastq, fastqPaired = None, minQual = 5):\n \n leftMinQual = minQual\n rightMinQual = minQual\n sampleFolder = os.path.join(experimentFolder, genSampleID(fastq))\n outputFile = os.path.join(sampleFolder, genBaseName(fastq) + \".cleaned\") # Incomplete prefix of pprinseq to take in\n \n if fastqPaired is not None:\n trimCommand = \"prinseq-lite.pl -trim_qual_left {leftMinQual} -trim_qual_right {rightMinQual} \\\n -out_good {outputFileName} -fastq {fastq1} -fastq2 {fastqPaired}\".format(leftMinQual = leftMinQual, \n rightMinQual = rightMinQual, outputFileName = outputFile, fastq1= fastq, fastqPaired = fastqPaired)\n commands_list.append(trimCommand)\n return genCleanedOutputName(outputFile, paired = False)\n else:\n trimCommand = \"prinseq-lite.pl -trim_qual_left {leftMinQual} -trim_qual_right {rightMinQual} \\\n -out_good {outputFileName} -fastq {fastq1}\".format(leftMinQual = leftMinQual, \n rightMinQual = rightMinQual, outputFileName = outputFile, fastq1= fastq)\n commands_list.append(trimCommand)\n return genCleanedOutputName(outputFile, paired = True)\n # head, tail = ntpath.split(outputFile)\n # fileName = tail or ntpath.basename(head)\n # return os.path.join(sampleFolder, fileName)", "def trimAdapters(options):\n input_filename=options.input_library\n output_filename=options.adapter_trimmed_filename\n cmd=\"java -jar lib/trimmomatic/trimmomatic-0.38.jar SE -threads \"+str(options.CPU)+\" \"+input_filename\n cmd+=\" \"+output_filename\n cmd+=\" ILLUMINACLIP:Other/adapters.fasta:2:30:10 \"\n os.system(cmd)\n cmd=\"sed -n '1~4s/^@/>/p;2~4p' \"+options.adapter_trimmed_filename+\" > \"+options.output_directory+\"/\"+options.input_library.split(\"/\")[-1].split(\".\")[0]+\".fa\"\n os.system(cmd)", "def __build_cmd(self, infname, outdir):\n self._outdirname = os.path.join(outdir, \"trimmomatic_output\")\n cmd = [\"trimmomatic\",\n infname,\n \"-o\", self._outdirname]\n self._cmd = ' '.join(cmd)", "def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)", "def qc_trimmed_reads(input_fastq, report):\n 
produce_fastqc_report(input_fastq, os.path.dirname(report))", "def trimming(input_file, threshold =30):\n output_file = \"trimmed{}.fq\".format(threshold)\n if os.path.exists(output_file): \n return\n command = 'fastq_quality_trimmer -Q64 -t {} -i {} -o{}'\\\n .format(threshold,input_file,output_file)\n e =subprocess.check_output(command,shell = True)\n return output_file", "def exo_qaqc(ds):\n\n varlist = [\n \"S_41\",\n \"C_51\",\n \"SpC_48\",\n \"T_28\",\n \"Turb\",\n \"fDOMRFU\",\n \"fDOMQSU\",\n \"CHLrfu\",\n \"Fch_906\",\n \"BGAPErfu\",\n \"BGAPE\",\n \"TALPErfu\",\n \"TALPE\",\n \"OST_62\",\n \"DO\",\n \"pH_159\",\n \"P_1ac\",\n \"P_1\",\n ]\n\n [varlist.append(k) for k in ds.data_vars if k not in varlist]\n\n for var in varlist:\n ds = qaqc.trim_min(ds, var)\n\n ds = qaqc.trim_max(ds, var)\n\n ds = qaqc.trim_min_diff(ds, var)\n\n ds = qaqc.trim_min_diff_pct(ds, var)\n\n ds = qaqc.trim_max_diff(ds, var)\n\n ds = qaqc.trim_max_diff_pct(ds, var)\n\n ds = qaqc.trim_med_diff(ds, var)\n\n ds = qaqc.trim_med_diff_pct(ds, var)\n\n ds = qaqc.trim_bad_ens(ds, var)\n\n for var in varlist:\n ds = qaqc.trim_by_any(\n ds, var\n ) # re-run and trim by other variables as necessary\n\n return ds", "def __createBaselineSeparationCommands(self):\n \n # Get the available baselines in MS (it will take selections into account)\n baselineList = self.__getBaselineList()\n\n # Make sure we have enough baselines to create the needed number of\n # subMSs. If not change the total expected.\n numSubMS = self._arg['numsubms']\n if isinstance(numSubMS,str) and numSubMS == 'auto':\n # Create the best load balance based on the number of nodes\n numSubMS = self.getNumberOfServers()\n if numSubMS == None:\n numSubMS = 8\n\n numSubMS = min(len(baselineList),numSubMS)\n\n # Create a map of the baselines to distribute in each subMS\n # Example of baselinePartitions\n # {0: [[0, 0]], 1: [[0, 1], [0, 2]], 2: [[0, 3]], 3: [[1, 1]], 4: [[1, 2]]}\n baselinePartitions = self.__partition1(baselineList, numSubMS) \n \n # Use the above list of baselines to construct a TaQL expression for each subMS\n submsBaselineMap = {}\n for subms in baselinePartitions.keys():\n submsBaselineMap[subms] = {}\n mytaql = []\n submsPair = baselinePartitions[subms]\n ant1ant2 = []\n for idx in range(len(submsPair)):\n ant1ant2 = submsPair[idx]\n if type(ant1ant2) == list:\n ant1 = ant1ant2[0]\n ant2 = ant1ant2[1] \n mytaql.append(('(ANTENNA1==%i && (ANTENNA2 IN %i))') % (ant1, ant2))\n \n mytaql = ' OR '.join(mytaql)\n submsBaselineMap[subms]['taql'] = mytaql\n \n # Create the commands for each SubMS (each engine)\n for output in xrange(numSubMS):\n mmsCmd = copy.copy(self._arg)\n mmsCmd['createmms'] = False\n mmsCmd['taql'] = submsBaselineMap[output]['taql']\n\n mmsCmd['outputvis'] = self.dataDir+'/%s.%04d.ms' \\\n % (self.outputBase, output)\n \n if not self._mpi_cluster:\n self._executionList.append(JobData(self._taskName, mmsCmd))\n else:\n self._executionList.append([self._taskName + '()',mmsCmd])", "def run(parser, args):\n if not args.basename:\n if '_' in os.path.basename(args.left):\n args.basename = os.path.basename(args.left).split('_')[0]\n elif '.' 
in os.path.basename(args.left):\n args.basename = os.path.basename(args.left).split('.')[0]\n else:\n args.basename = os.path.basename(args.left)\n\n total = countfastq(args.left)\n if args.right:\n total = total*2\n status(f'Loading {total:,} total reads')\n\n DEVNULL = open(os.devnull, 'w')\n if args.method == 'bbduk':\n if args.memory:\n MEM = f'-Xmx{args.memory}g'\n else:\n MEM = f'-Xmx{round(0.6*getRAM())}g'\n\n status('Adapter trimming using BBDuk')\n cmd = ['bbduk.sh', MEM,\n 'ref=adapters',\n f't={args.cpus}',\n 'ktrim=r',\n 'k=23',\n 'mink=11',\n f'minlen={args.minlen}',\n 'hdist=1',\n f'maq={args.avgqual}',\n 'ftm=5',\n 'tpe',\n 'tbo',\n 'overwrite=true']\n if args.left and args.right:\n cmd += [f'in1={args.left}',\n f'in2={args.right}',\n f'out1={args.basename}_1P.fastq.gz',\n f'out2={args.basename}_2P.fastq.gz']\n elif args.left:\n cmd += [f'in={args.left}',\n f'out={args.basename}_1U.fastq.gz']\n\n printCMD(cmd)\n if args.debug:\n subprocess.run(cmd)\n else:\n subprocess.run(cmd, stderr=DEVNULL)\n\n if args.right:\n clean = countfastq(f'{args.basename}_1P.fastq.gz')\n clean = clean*2\n status(f'{clean:,} reads remaining and writing to file')\n status('Trimming finished:\\n\\tFor: {:}\\n\\tRev {:}'.format(\n args.basename + '_1P.fastq.gz',\n args.basename + '_2P.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -r {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz',\n args.basename,\n args.cpus))\n else:\n clean = countfastq(f'{args.basename}_1U.fastq.gz')\n status(f'{clean:,} reads remaining and writing to file')\n status('Trimming finished:\\n\\tSingle: {:}'.format(\n args.basename+'_1U.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1U.fastq.gz',\n args.basename,\n args.cpus))\n\n elif args.method == 'trimmomatic':\n # find path\n trimmomatic_path = find_trimmomatic()\n if trimmomatic_path:\n jarfile = trimmomatic_path\n elif args.trimmomatic:\n jarfile = args.trimmomatic\n else:\n status('Trimmomatic cannot be found - ' +\n 'please provide location of trimmomatic.jar file.')\n sys.exit(1)\n\n if jarfile:\n path_to_adaptors = args.trimmomatic_adaptors\n leadingwindow = \"LEADING:%d\" % (args.trimmomatic_leadingwindow)\n trailingwindow = \"TRAILING:%d\" % (args.trimmomatic_trailingwindow)\n slidingwindow = \"SLIDINGWINDOW:%s\" % (\n args.trimmomatic_slidingwindow)\n\n quality = args.trimmomatic_quality\n quality = \"-%s\" % (quality) # add leading dash\n\n if not os.path.exists(path_to_adaptors):\n if args.right:\n path_to_adaptors = os.path.join(dirname(jarfile),\n TRIMMOMATIC_TRUSEQPE)\n else:\n path_to_adaptors = os.path.join(dirname(jarfile),\n TRIMMOMATIC_TRUSEQSE)\n\n if not os.path.exists(path_to_adaptors):\n findpath = dirname(jarfile)\n path_to_adaptors = \"\"\n while findpath:\n if os.path.exists(findpath + \"/share\"):\n if args.right:\n path_to_adaptors = os.path.join(\n findpath,\n \"/share/trimmomatic\",\n TRIMMOMATIC_TRUSEQPE)\n else:\n path_to_adaptors = os.path.join(\n findpath,\n \"/share/trimmomatic\",\n TRIMMOMATIC_TRUSEQSE)\n break\n findpath = dirname(findpath)\n\n if not os.path.exists(path_to_adaptors):\n status(\"Cannot find adaptors file please specify manually\")\n return\n\n clipstr = args.trimmomatic_clip % (path_to_adaptors)\n\n cmd = []\n\n if args.left and args.right:\n cmd = ['java', '-jar', jarfile, 'PE',\n '-threads', str(args.cpus), quality,\n 
args.left, args.right,\n args.basename+'_1P.fastq',\n args.basename+'_1U.fastq',\n args.basename+'_2P.fastq',\n args.basename+'_2U.fastq',\n clipstr, leadingwindow, trailingwindow, slidingwindow,\n \"MINLEN:%d\" % (args.minlen)]\n elif args.left and not args.right:\n cmd = ['java', '-jar', jarfile, 'SE',\n '-threads', str(args.cpus),\n quality, args.left,\n args.basename+'_1U.fastq',\n clipstr, leadingwindow, trailingwindow, slidingwindow,\n \"MINLEN:%d\" % (args.minlen)]\n else:\n status(\"Must provide left and right pairs or single read set\")\n return\n\n status('Running trimmomatic adapter and quality trimming')\n printCMD(cmd)\n if args.debug:\n subprocess.run(cmd)\n else:\n subprocess.run(cmd, stderr=DEVNULL)\n if args.right:\n status('Compressing trimmed PE FASTQ files')\n Fzip_inplace(args.basename+'_1P.fastq', args.cpus)\n Fzip_inplace(args.basename+'_2P.fastq', args.cpus)\n SafeRemove(args.basename+'_1U.fastq')\n SafeRemove(args.basename+'_2U.fastq')\n status('Trimming finished:\\n\\tFor: {:}\\n\\tRev {:}'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -r {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz',\n args.basename,\n args.cpus))\n else:\n status('Compressing trimmed SE FASTQ file')\n Fzip_inplace(args.basename + '_1U.fastq', args.cpus)\n status('Trimming finished:\\n\\tSingle: {:}'.format(\n args.basename + '_1U.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1U.fastq.gz',\n args.basename,\n args.cpus))\n\n elif args.method == 'fastp':\n status('Adapter trimming using fastp')\n cmd = ['fastp', '--low_complexity_filter',\n '-l', f'{args.minlen}',\n '--average_qual', f'{args.avgqual}',\n '-w', f'{args.cpus}']\n\n# '-wref=adapters', 't={:}'.format(args.cpus), 'ktrim=r',\n# 'k=23', 'mink=11', 'minlen={:}'.format(args.minlen), 'hdist=1',\n# 'ftm=5', 'tpe', 'tbo', 'overwrite=true']\n if args.left and args.right:\n # could add merging ...\n cmd += [f'--in1={args.left}',\n f'--in2={args.right}',\n f'--out1={args.basename}_1P.fastq.gz',\n f'--out2={args.basename}_2P.fastq.gz'\n ]\n if args.merge:\n cmd += ['--merge',\n f'--merged_out={args.basename}_MG.fastq.gz']\n\n elif args.left:\n cmd += [f'--in={args.left}',\n f'--out={args.basename}_1U.fastq.gz']\n if args.dedup:\n cmd += ['--dedup']\n if args.cutfront:\n cmd += ['--cut_front']\n if args.cuttail:\n cmd += ['--cut_tail']\n if args.cutright:\n cmd += ['--cut_right']\n\n cmd += [f'--html={args.basename}.fastp.html',\n f'--json={args.basename}.fastp.json']\n printCMD(cmd)\n if args.debug:\n subprocess.run(cmd)\n else:\n subprocess.run(cmd, stderr=DEVNULL)\n\n if args.right:\n clean = countfastq(f'{args.basename}_1P.fastq.gz')\n clean = clean*2\n status(f'{clean:,} reads remaining and writing to file')\n status('Trimming finished:\\n\\tFor: {:}\\n\\tRev {:}'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -r {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz',\n args.basename,\n args.cpus))\n else:\n clean = countfastq(f'{args.basename}_1U.fastq.gz')\n status(f'{clean:,} reads remaining and writing to file')\n status('Trimming finished:\\n\\tSingle: {:}'.format(\n args.basename + '_1U.fastq.gz'))\n if not args.pipe:\n 
status('Your next command might be:\\n\\t' +\n 'AAFTF filter --left {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1U.fastq.gz',\n args.basename, args.cpus))\n\n else:\n status(f'Uknown trimming method: {args.method}')", "def trim_adaptor(in_file, config):\n adaptor = config[\"algorithm\"][\"adaptor\"]\n min_size = config[\"algorithm\"][\"adaptor_min_size\"]\n max_errors = config[\"algorithm\"][\"adaptor_max_errors\"]\n trim_out = \"%s-trim%s\" % os.path.splitext(in_file)\n if not os.path.exists(trim_out):\n cl = [config[\"programs\"][\"python\"], config[\"programs\"][\"trim_adaptor\"],\n in_file, trim_out, adaptor, str(max_errors),\n \"--min_size=%s\" % min_size]\n subprocess.check_call(cl)\n return trim_out", "def __createScanSeparationCommands(self):\n \n scanList = self.__selectionScanList\n if scanList is None:\n self.__selectMS()\n scanList = self.__getScanList()\n\n # Make sure we have enough scans to create the needed number of\n # subMSs. If not change the total expected.\n numSubMS = self._arg['numsubms']\n if isinstance(numSubMS,str) and numSubMS == 'auto':\n # Create the best load balance based on the number of nodes\n numSubMS = self.getNumberOfServers()\n if numSubMS == None:\n numSubMS = 8\n numSubMS = min(len(scanList),numSubMS)\n \n partitionedScans = self.__partition(scanList, numSubMS) \n for output in xrange(numSubMS):\n mmsCmd = copy.copy(self._arg)\n mmsCmd['createmms'] = False\n mmsCmd['scan']= ParallelTaskHelper.\\\n listToCasaString(partitionedScans[output]) \n mmsCmd['outputvis'] = self.dataDir+'/%s.%04d.ms' \\\n % (self.outputBase, output)\n if not self._mpi_cluster:\n self._executionList.append(JobData(self._taskName, mmsCmd))\n else:\n self._executionList.append([self._taskName + '()',mmsCmd])", "def _cleanup_command(self, crawl_id):\n cmd_line = (\"qsub -V -b y -cwd python {}/spidercleaner.py -r host:{},port:{} -c {}\"\n ).format(_spdr_engine_location(), self.engine_redis_host,\n self.engine_redis_port, crawl_id)\n if self.psuedo_dist:\n cmd_line += \" -d\"\n return cmd_line", "def fastq_qc(demultiplex_result, out_dir, config):\n\n pigz_cores = int(config['fastqTrim']['pigz_cores'])\n cutadapt_cores = int(config['fastqTrim']['cutadapt_cores'])\n\n r1_adapter = config['fastqTrim']['r1_adapter']\n r2_adapter = config['fastqTrim']['r1_adapter']\n length_threshold = config['fastqTrim']['length_threshold']\n quality_threshold = config['fastqTrim']['quality_threshold']\n r1_left_cut = config['fastqTrim']['r1_left_cut']\n r1_right_cut = config['fastqTrim']['r1_right_cut']\n r2_left_cut = config['fastqTrim']['r2_left_cut']\n r2_right_cut = config['fastqTrim']['r2_right_cut']\n overlap = config['fastqTrim']['overlap']\n total_reads_threshold = int(config['fastqTrim']['total_reads_threshold'])\n\n results = []\n for (uid, index_name), sub_df in demultiplex_result.groupby(['uid', 'index_name']):\n sample_demultiplex_total = sub_df['Trimmed'].sum()\n if sample_demultiplex_total < total_reads_threshold:\n log.info(f'In uid {uid}: index {index_name} skipped '\n f'due to too less reads: {sample_demultiplex_total}')\n continue\n # process R1\n r1_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R1.fq.gz'\n r1_out = f'{out_dir}/{uid}_{index_name}_R1.trimed.fq.gz'\n r1_cmd = f'pigz -cd -p {pigz_cores} {r1_path_pattern} | ' \\\n f'cutadapt -j {cutadapt_cores} --report=minimal -O {overlap} ' \\\n f'-q {quality_threshold} -u {r1_left_cut} ' \\\n f'-u -{r1_right_cut} -m {length_threshold} ' \\\n f'-a {r1_adapter} -o {r1_out} -'\n r1_result = subprocess.run(r1_cmd, 
stdout=subprocess.PIPE,\n encoding='utf8', shell=True, check=True)\n\n # get R1 result stat\n lines = []\n for line in r1_result.stdout.split('\\n'):\n ll = line.split('\\t')\n if len(ll) > 1:\n lines.append(ll)\n s = pd.Series({name: number for name, number in zip(*lines)})\n s['uid'] = uid\n s['index_name'] = index_name\n s['read_type'] = 'R1'\n results.append(s)\n\n # process R2\n r2_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R2.fq.gz'\n r2_out = f'{out_dir}/{uid}_{index_name}_R2.trimed.fq.gz'\n r2_cmd = f'pigz -cd -p {pigz_cores} {r2_path_pattern} | ' \\\n f'cutadapt -j {cutadapt_cores} --report=minimal -O {overlap} ' \\\n f'-q {quality_threshold} -u {r2_left_cut} ' \\\n f'-u -{r2_right_cut} -m {length_threshold} ' \\\n f'-a {r2_adapter} -o {r2_out} -'\n r2_result = subprocess.run(r2_cmd, stdout=subprocess.PIPE,\n encoding='utf8', shell=True, check=True)\n # get R2 result stat\n lines = []\n for line in r2_result.stdout.split('\\n'):\n ll = line.split('\\t')\n if len(ll) > 1:\n lines.append(ll)\n s = pd.Series({name: number for name, number in zip(*lines)})\n s['uid'] = uid\n s['index_name'] = index_name\n s['read_type'] = 'R2'\n results.append(s)\n\n fastq_final_result = pd.DataFrame(results)\n if len(results) == 0:\n # all sample skipped\n return fastq_final_result\n fastq_final_result['out_reads_rate'] = \\\n fastq_final_result['out_reads'].astype(int) / fastq_final_result['in_reads'].astype(int)\n fastq_final_result['out_bp_rate'] = \\\n fastq_final_result['out_reads'].astype(int) / fastq_final_result['in_reads'].astype(int)\n\n # clean up\n for (uid, index_name), sub_df in demultiplex_result.groupby(['uid', 'index_name']):\n r_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R*.fq.gz'\n r_rm_cmd = f'rm -f {r_path_pattern}'\n subprocess.run(r_rm_cmd, shell=True)\n # remove unknown reads\n r_path_pattern = f'{out_dir}/{uid}_L*_unknown_R*.fq.gz'\n r_rm_cmd = f'rm -f {r_path_pattern}'\n subprocess.run(r_rm_cmd, shell=True)\n\n return fastq_final_result", "def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n return '\\n'.join(command)", "def test_trim_fasta(self):\r\n expected = [\"\"\">HWUSI-EAS552R_0357:8:1:10040:6364#0/1\r\nGACGAG\r\n\"\"\",\r\n \"\"\">HWUSI-EAS552R_0357:8:1:10184:6365#0/1\r\nGTCTGA\r\n\"\"\"]\r\n\r\n self.assertEqual(list(trim_fasta(self.fasta_barcodes, 6)), expected)", "def test_adaptor_trimming(configuration_module, sample_id):\n config, _ = configuration_module\n expected_output = os.path.join(\n riboviz.test.SIMDATA_DIR,\n fastq.FASTQ_FORMAT.format(sample_id + \"_umi\"))\n actual_output = os.path.join(\n config[params.TMP_DIR],\n sample_id,\n workflow_files.ADAPTER_TRIM_FQ)\n fastq.equal_fastq(expected_output, actual_output)", "def _get_fastq_and_run_cutadapt_trim(\n analysis_info,analysis_description,analysis_name,run_id,\n fastq_input_dir_tag,fastq_output_dir_tag,singularity_image,\n use_ephemeral_space=False,r1_length=0,r2_length=0,\n cutadapt_exe='cutadapt',dry_run=False,\n cutadapt_options=('--cores=1',)):\n try:\n sample_info = \\\n analysis_info.get(analysis_name)\n if sample_info is None:\n raise ValueError(\n 'No feature 
{0} found in analysis_info'.\\\n format(analysis_info))\n sample_igf_id = sample_info.get('sample_igf_id')\n analysis_description = pd.DataFrame(analysis_description).fillna(0)\n analysis_entry = \\\n analysis_description[analysis_description['sample_igf_id']==sample_igf_id].copy()\n if 'r1-length' in analysis_entry.columns:\n r1_length = analysis_entry['r1-length'].values[0] # reset r1-length\n if 'r2-length' in analysis_entry.columns:\n r2_length = analysis_entry['r2-length'].values[0] # reset r2-length\n run = sample_info.get('runs').get(str(run_id))\n if run is None:\n raise ValueError(\n 'No run {0} found for feature {1} in analysis_info'.\\\n format(run,analysis_name))\n if isinstance(cutadapt_options,tuple):\n cutadapt_options = list(cutadapt_options)\n input_fastq_dir = run.get(fastq_input_dir_tag)\n output_fastq_dir = run.get(fastq_output_dir_tag)\n temp_output_dir = \\\n get_temp_dir(use_ephemeral_space=use_ephemeral_space)\n r1_file_name_pattern = \\\n re.compile(r'(\\S+)_S\\d+_L00\\d_R1_001\\.fastq\\.gz')\n r2_file_name_pattern = \\\n re.compile(r'(\\S+)_S\\d+_L00\\d_R2_001\\.fastq\\.gz')\n index_file_name_pattern = \\\n re.compile(r'(\\S+)_S\\d+_L00\\d_I(\\d)_001\\.fastq\\.gz')\n for fastq in os.listdir(input_fastq_dir):\n if fnmatch.fnmatch(fastq,'*.fastq.gz'):\n input_fastq_file = \\\n os.path.join(input_fastq_dir,fastq)\n output_fastq_file = \\\n os.path.join(output_fastq_dir,fastq)\n temp_fastq_file = \\\n os.path.join(temp_output_dir,fastq)\n if re.match(r1_file_name_pattern,fastq):\n # trim R1\n if r1_length > 0:\n cutadapt_options_r1 = None\n if len(cutadapt_options) > 0 :\n cutadapt_options_r1 = copy(cutadapt_options)\n cutadapt_options_r1.\\\n append('-l {0}'.format(int(r1_length)))\n _ = \\\n run_cutadapt(\n read1_fastq_in=input_fastq_file,\n read1_fastq_out=temp_fastq_file,\n cutadapt_options=cutadapt_options_r1,\n cutadapt_exe=cutadapt_exe,\n dry_run=dry_run,\n singularity_image_path=singularity_image)\n if not dry_run:\n copy_local_file(\n temp_fastq_file,\n output_fastq_file,\n force=True)\n else:\n copy_local_file(\n input_fastq_file,\n output_fastq_file,\n force=True)\n if re.match(r2_file_name_pattern,fastq):\n # trim R2\n if r2_length > 0:\n cutadapt_options_r2 = None\n if len(cutadapt_options) > 0 :\n cutadapt_options_r2 = copy(cutadapt_options)\n cutadapt_options_r2.\\\n append('-l {0}'.format(int(r2_length)))\n _ = \\\n run_cutadapt(\n read1_fastq_in=input_fastq_file,\n read1_fastq_out=temp_fastq_file,\n cutadapt_options=cutadapt_options_r2,\n cutadapt_exe=cutadapt_exe,\n dry_run=dry_run,\n singularity_image_path=singularity_image)\n if not dry_run:\n copy_local_file(\n temp_fastq_file,\n output_fastq_file,\n force=True)\n else:\n copy_local_file(\n input_fastq_file,\n output_fastq_file,\n force=True)\n if re.match(index_file_name_pattern,fastq):\n # copy I1 or I2\n copy_local_file(\n input_fastq_file,\n output_fastq_file,\n force=True)\n except Exception as e:\n raise ValueError(\n 'Failed to trim or copy reads, error: {0}'.\\\n format(e))", "def test_trim_filter(self):\n expected_filter = (\n \"{inspec} {trim}=start={pi}:duration={d},{setpts}=PTS-STARTPTS \"\n \"{outspec}\".format(\n inspec=self.segment.input_stream_specifier(),\n trim=self.segment._TRIM, setpts=self.segment._SETPTS,\n pi=self.EXPECTED_PUNCH_IN.total_seconds(),\n d=self.EXPECTED_DURATION,\n outspec=self.segment.output_stream_specifier()))\n self.assertEqual(self.segment.trim_filter(), expected_filter)", "def test_evaluate_trim_expression(self):\n value = self.evaluate_common(\"trim(' 
Steve\\t\\n\\r \\r\\n')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == ul(\"Steve\"))\n value = self.evaluate_common(ul(\"trim(' C a f \\xe9 ')\"))\n self.assertTrue(value.value == ul('C a f \\xe9'))\n try:\n value = self.evaluate_common(\"trim(3.14F)\")\n self.fail(\"floating trim\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"trim('Steve','John')\")\n self.fail(\"2 parameters\")\n except odata.EvaluationError:\n pass", "def trim(self, start, end):\n for _var in self.inputs:\n _var.trim(start, end)", "def trimsplit(args):\n from jcvi.utils.cbook import SummaryStats\n\n p = OptionParser(trimsplit.__doc__)\n p.add_option(\n \"--minlength\", default=1000, type=\"int\", help=\"Min length of contigs to keep\"\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n minlength = opts.minlength\n\n fw = must_open(fastafile.rsplit(\".\", 1)[0] + \".split.fasta\", \"w\")\n ntotal = 0\n removed = []\n Ns = []\n for name, seq in parse_fasta(fastafile):\n stretches = []\n ntotal += len(seq)\n for lower, stretch in groupby(seq, key=lambda x: x.islower()):\n stretch = \"\".join(stretch)\n if lower or len(stretch) < minlength:\n removed.append(len(stretch))\n continue\n for isN, s in groupby(stretch, key=lambda x: x in \"Nn\"):\n s = \"\".join(s)\n if isN or len(s) < minlength:\n Ns.append(len(s))\n continue\n stretches.append(s)\n for i, seq in enumerate(stretches):\n id = \"{0}_{1}\".format(name.split(\"|\")[0], i)\n s = SeqRecord(Seq(seq), id=id, description=\"\")\n SeqIO.write([s], fw, \"fasta\")\n fw.close()\n\n # Reporting\n if removed:\n logging.debug(\n \"Total bases removed: {0}\".format(percentage(sum(removed), ntotal))\n )\n print(SummaryStats(removed), file=sys.stderr)\n if Ns:\n logging.debug(\"Total Ns removed: {0}\".format(percentage(sum(Ns), ntotal)))\n print(SummaryStats(Ns), file=sys.stderr)", "def test_delete_spaces(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.2\", \"3.2\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"delete-spaces\",\n )", "def trim(args):\n\n from jcvi.algorithms.maxsum import max_sum\n\n p = OptionParser(trim.__doc__)\n p.add_option(\n \"-c\",\n dest=\"min_length\",\n type=\"int\",\n default=64,\n help=\"minimum sequence length after trimming\",\n )\n p.add_option(\"-s\", dest=\"score\", default=QUAL, help=\"quality trimming cutoff\")\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(p.print_help())\n\n fastafile, newfastafile = args\n qualfile = get_qual(fastafile)\n newqualfile = get_qual(newfastafile, check=False)\n\n logging.debug(\n \"Trim bad sequence from fasta file `%s` to `%s`\" % (fastafile, newfastafile)\n )\n\n fw = must_open(newfastafile, \"w\")\n fw_qual = open(newqualfile, \"w\")\n\n dropped = trimmed = 0\n\n for rec in iter_fasta_qual(fastafile, qualfile, modify=True):\n qv = [x - opts.score for x in rec.letter_annotations[\"phred_quality\"]]\n msum, trim_start, trim_end = max_sum(qv)\n score = trim_end - trim_start + 1\n\n if score < opts.min_length:\n dropped += 1\n continue\n\n if score < len(rec):\n trimmed += 1\n rec = rec[trim_start : trim_end + 1]\n\n write_fasta_qual(rec, fw, fw_qual)\n\n print(\"A total of %d sequences modified.\" % 
trimmed, file=sys.stderr)\n print(\n \"A total of %d sequences dropped (length < %d).\" % (dropped, opts.min_length),\n file=sys.stderr,\n )\n\n fw.close()\n fw_qual.close()", "def main():\n\n parser = argparse.ArgumentParser(prog='clean_questions')\n\n parser.add_argument('-f', '--force', dest='force_write',\n action='store_true', default=False,\n help=\"If true, rsmtool will not check if the \"\n \"output directory already contains the \"\n \"output of another rsmtool experiment. \")\n\n parser.add_argument('input_file',\n help=\"The path to the input file (.txt). \"\n \"Should have one line per question.\")\n\n parser.add_argument('output_file',\n help=\"The path to the output file (.txt or .xlsx). \"\n \"If .txt, then write out the cleaned text. \"\n \"If .xlsx, then write out both original and cleaned text.\")\n\n parser.add_argument('-c', '--min_char_len', dest='min_char_len',\n type=int, default=2,\n help=\"The minimum number of characters to consider, \"\n \"when determining whether to remove text\")\n\n parser.add_argument('-t', '--min_word_threshold', dest='min_word_threshold',\n type=float, default=0.20,\n help=\"The proportion of actual words to consider, \"\n \"when determining whether to remove text.\")\n\n parser.add_argument('-w', '--max_word_len', dest='max_word_len',\n type=int, default=100,\n help=\"The maximum number of words before we decided \"\n \"to take the last sentence.\")\n\n parser.add_argument('-r', '--removed_token', dest='removed_token',\n default=\"<REMOVED>\",\n help=\"A token to use in place of removed text.\")\n\n args = parser.parse_args()\n\n path_to_questions = Path(args.input_file)\n\n error_msg = f'The `input_file`, {args.input_file}, does not exist.'\n assert path_to_questions.exists(), error_msg\n\n with open(path_to_questions) as fb:\n questions = [line.strip() for line in fb.readlines()]\n\n # loop through all of the questions and perform cleaning\n cleaned_questions = []\n for question in tqdm(questions):\n cleaned_questions.append(clean_text(question,\n min_char_len=args.min_char_len,\n min_word_threshold=args.min_word_threshold,\n max_word_len=args.max_word_len,\n removed_token=args.removed_token))\n\n # if the output file is Excel, then write out a formatted file\n # with both the original and cleaned questions in separate columns\n if args.output_file.lower().endswith('xlsx'):\n\n df_output = pd.DataFrame({'original': questions, 'cleaned': cleaned_questions})\n\n writer = pd.ExcelWriter(args.output_file, engine='xlsxwriter')\n df_output.to_excel(writer, sheet_name='questions', index=False, encoding='utf-8')\n\n workbook = writer.book\n worksheet = writer.sheets['questions']\n formatting = workbook.add_format({'text_wrap': True, 'font_size': 18})\n\n worksheet.set_column('A:A', 100, formatting)\n worksheet.set_column('B:B', 100, formatting)\n worksheet.freeze_panes(1, 0)\n\n writer.save()\n\n # otherwise, just put the cleaned questions in each row\n else:\n\n df_output = pd.DataFrame({'questions': cleaned_questions})\n df_output.to_csv(args.output_file, index=False, header=False)", "def trim_fastq_files_to_length(desired_read_length = 36):\n\n mkdir(READ_LENGTH_TRIMMED_FASTQ_DIR)\n\n template = \"\"\"zcat {input_fastq} | fastx_trimmer -l {desired_read_length} -z -o {output_fastq}\"\"\"\n\n printp(\"\"\"\\n#\\n# trim reads to a certain length\\n#\"\"\")\n printp(\"\"\"\\n# drmr:label read-length-trimming\"\"\")\n printp(\"\"\"\\n# drmr:job time_limit=2h working_directory={}\"\"\".format(READ_LENGTH_TRIMMED_FASTQ_DIR))\n\n for sample, info in 
DATA.items():\n for x in ['treatment', 'control']:\n input_fastq = get_fastq(get_srr(sample)) if x == 'treatment' else get_fastq(get_input_control_srr(sample))\n output_fastq = get_read_length_trimmed_fastq(get_srr(sample)) if x == 'treatment' else get_read_length_trimmed_fastq(get_input_control_srr(sample))\n printp(template.format(**locals()))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "def trim_fasta(fasta_lines, output_length):\r\n for seq_id, seq in parse_fasta(fasta_lines):\r\n yield '>%s\\n%s\\n' % (seq_id, seq[:output_length])", "def at_ftrim(seq):\n at(\"FTRIM\", seq, [])", "def test_clean_lines(self):\n before_b = \"\"\"\\\n # Should remove all trailing whitespace.\n\n a = 2 \n \n b = 3\n c = 4 \n d = 5\n e = 6 \n x\n \"\"\"\n after_b = \"\"\"\\\n # Should remove all trailing whitespace.\n\n a = 2\n\n b = 3\n c = 4\n d = 5\n e = 6\n x\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"1.0\", \"1.0\"),\n command_name=\"clean-lines\",\n )", "def _to_trim(self):\n self._status = 2\n fm.seg2trim(self._basename)\n self._status = 3" ]
[ "0.683585", "0.63266486", "0.6288418", "0.6186976", "0.58510095", "0.5763907", "0.5670237", "0.56626654", "0.55723935", "0.55261225", "0.5313643", "0.5313415", "0.5306112", "0.52896935", "0.528762", "0.5241077", "0.52357", "0.52154654", "0.5210487", "0.51883096", "0.5178333", "0.5166874", "0.51648986", "0.514855", "0.51461864", "0.51285446", "0.5128513", "0.5098319", "0.50926423", "0.5071349" ]
0.66786444
1
r"""Download a file from the `url` if it is not in the cache. The file at the `url` is downloaded to the `~/.jhML`.
def get_file(url, file_name=None):
    cache_dir = os.path.join(os.path.expanduser("~"), ".jhML")
    if file_name is None:
        file_name = url[url.rfind('/') + 1:]
    file_path = os.path.join(cache_dir, file_name)

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)

    if os.path.exists(file_path):
        return file_path

    print("Downloading: " + file_name)
    try:
        urllib.request.urlretrieve(url, file_path, show_progress)
    except (Exception, KeyboardInterrupt) as e:
        if os.path.exists(file_path):
            os.remove(file_path)
        raise
    print(" Done")
    return file_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _download(url):\n \n filename = url.split('/')[-1]\n if os.path.isfile(filename):\n info('Using pre-existed file {} from local system.'.format(filename))\n else:\n info('Downloading {} from OMA Database.'.format(url.split('/')[-1]))\n filename, _ = urlretrieve(url, filename)\n return filename", "def download_from_url(path, url):\n filename = url.split(\"/\")[-1]\n found_file = find_file(path, filename, max_depth=0)\n if found_file is None:\n filename = os.path.join(path, filename)\n logging.info(\"Downloading from %s to %s.\" % (url, filename))\n inprogress_filepath = filename + \".incomplete\"\n inprogress_filepath, _ = urllib.request.urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress.\n print()\n tf.gfile.Rename(inprogress_filepath, filename)\n return filename\n else:\n logging.info(\"Already downloaded: %s (at %s).\" % (url, found_file))\n return found_file", "def download_if_needed(url, filename):\n if os.path.exists(filename):\n print \"already exists\"\n else:\n wget.download(url)", "def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def download_file_cached(file_url: str, location: str) -> None:\n\titem = os.path.basename(location)\n\n\tlocal = cache_find(item)\n\n\tif local is None:\n\t\t# Cached item doesn't exist\n\t\tcache_create()\n\t\tdownload_file(file_url, \"Cached/\" + item)\n\t\tcopy_file(\"Cached/\" + item, location)\n\t\treturn\n\n\t# Copy file from cache to location\n\tuux.show_debug(\"Cache hit for \" + item)\n\tcopy_file(local, location)", "def _maybe_download(self, url):\n filename = os.path.basename(url)\n download_path = os.path.join(self._model_dir, filename)\n if os.path.exists(download_path):\n return download_path\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n urllib.request.urlretrieve(url, download_path, _progress)\n statinfo = os.stat(download_path)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n return download_path", "def download_url(filename, url):\n latest_package_url = request.urlopen(url).read().decode(\"utf-8\")\n print(\"Downloading latest package:\\n{}\".format(latest_package_url))\n request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback)", "def download(url, filename=None):\n\t# requirements os, shutil, urllib.parse, urllib.request\n\tif not filename:\n\t\turl_parts = urllib.parse.urlparse(url)\n\t\tfilename = os.path.basename(url_parts.path)\n\turl_h = urllib.request.urlopen(url)\n\twith open(filename, 'wb') as file_h:\n\t\tshutil.copyfileobj(url_h, file_h)\n\turl_h.close()\n\treturn", "def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = 
urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname", "def download_file(url, local_filename, update=False):\n if os.path.isfile(local_filename):\n if not update:\n return\n else:\n os.remove(local_filename)\n\n r = requests.get(url, stream=True)\n # http://stackoverflow.com/questions/15352668/download-and-decompress-gzipped-file-in-memory\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)", "def getfile(url):\n try:\n return urlreq.urlopen(url)\n except urlreq.HTTPError as e:\n safeprint(\"Sever returned with response code \" + str(e.getcode()) + \", download failed.\")", "def download_file(self, url, path):\n print('\\tDownloading: ', path)\n with open(path, 'w') as outfile:\n try:\n response = self._http_client.get(url)\n outfile.write(response.text)\n finally:\n response.close()\n outfile.close()\n gc.collect()", "def download(url: str, to_dir: str) -> str:\n to_file = os.path.join(to_dir, get_filename_from_url(url))\n logger.debug(\"Download %s to %s\", url, to_file)\n\n h = httplib2.Http(\".cache\")\n (_, content) = h.request(url, \"GET\")\n with open(to_file, 'wb') as f:\n f.write(content)\n return to_file", "def download(self, url):\n try:\n logging.info(self.log_format((\"downloading \" + url)))\n webFile = urllib.urlopen(url)\n localFile = open(self.paths['workspace'] + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n logging.error(self.log_format((\"could not get url \" + url)))", "def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False", "def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False", "def download_file(url, file_name):\n conn = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n with conn.request('GET', url, preload_content=False) as resp, open(file_name, 'wb') as out:\n shutil.copyfileobj(resp, out)", "def download_file(self, url, filename):\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n with open(filename, 'wb') as f:\n for chunk in r.iter_content():\n if chunk:\n f.write(chunk)\n f.flush()", "def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename", "def cache_file(url, prefix):\n cache_filepath = _get_cached_filepath(\n prefix=prefix,\n url=url,\n )\n # If the file exists, return path.\n if os.path.isfile(cache_filepath):\n logger.info('Returning cached file for {}.'.format(url))\n return cache_filepath\n # If the file does not exist, download and return path.\n else:\n r = requests.get(url, verify=False)\n\n with open(cache_filepath, 'wb') as f:\n f.write(r.content)\n\n logger.info('Caching file for {}.'.format(url))\n return cache_filepath", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, 
stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def maybe_download(filepath, url):\n\n if os.path.exists(filepath):\n logger.info(\"Not downloading, file already found: %s\" % filepath)\n return filepath\n\n logger.info(\"Downloading %s to %s\" % (url, filepath))\n try:\n tf.gfile.Copy(url, filepath)\n except tf.errors.UnimplementedError:\n try:\n inprogress_filepath = filepath + \".incomplete\"\n # r = requests.get(url)\n # with open(inprogress_filepath, 'wb') as outfile:\n # outfile.write(r.content)\n\n inprogress_filepath, _ = urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress\n print()\n os.rename(inprogress_filepath, filepath)\n except HTTPError:\n if url.startswith(\"http\"):\n os.system('wget --no-check-certificat ' + url+\" -O \"+filepath.replace(\" \", \"\\ \"))\n\n else:\n raise ValueError(\"Unrecognized URI: \" + filepath)\n statinfo = os.stat(filepath)\n logger.info(\"Successfully downloaded %s, %s bytes.\" %\n (os.path.basename(filepath), statinfo.st_size))\n return filepath", "def maybe_download(url, file_name, work_directory):\n\tif not os.path.exists(work_directory):\n\t\tos.mkdir(work_directory)\n\t\t\n\tfile_path = os.path.join(work_directory, file_name)\n\n\tif not os.path.exists(file_path):\n\t\tfile_path, _ = urllib.request.urlretrieve(url, file_path)\n\t\tstatinfo = os.stat(file_path)\n\t\tprint('Successfully downloaded', file_name, statinfo.st_size, 'bytes.')\n\t\n\tprint(\"{} existed\".format(file_path))\n\n\treturn file_path", "def download_file (url):\n\n '''\n Try and download the file given in the url,\n throw up an error if not possible.\n '''\n try:\n ret = urllib2.urlopen (url)\n except urllib2.HTTPError:\n return None\n except urllib2.URLError:\n return None\n\n print \"Downloaded \" + url\n\n return ret", "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path", "def download_file(url, fname):\n urllib.request.urlretrieve(url, fname)", "def download(self, url, filename):\n print(\"url\", url)\n print(\"filename\", filename)\n # open in binary mode\n with open(filename, \"wb\") as file:\n # get request\n try:\n r = requests.get(url)\n if r.status_code == 404:\n raise NotFoundException(\n \"URL: \", url, \" is not working. Status code 404\")\n # write to file\n file.write(r.content)\n print(\"file downloaded\")\n except ConnectionError as ex:\n print(ex)\n except NotFoundException as ex:\n print(ex)\n except Exception as ex:\n print(ex)", "def download_from_url(url, output_path):\n\n print('Pulling data from {} to {}'.format(url, output_path))\n wget.download(url, output_path)\n print('done')" ]
[ "0.7525367", "0.75082433", "0.74431115", "0.7382158", "0.7351716", "0.7316157", "0.7303105", "0.72857004", "0.72617024", "0.7246924", "0.7242186", "0.7224468", "0.7082853", "0.706728", "0.7057179", "0.70281047", "0.70252407", "0.70252407", "0.70156753", "0.70078397", "0.70072395", "0.6994987", "0.6994516", "0.6979833", "0.69762254", "0.69730693", "0.69633985", "0.6960344", "0.69599664", "0.6948128" ]
0.82472557
0
Creates a JWT token for account activation.
def create_jwt_for_account_activation(email: str) -> str:
    logging.debug('Creating a JWT for account activation - %s', email)
    return jwt.encode(
        key=config.JWT_ACTIVATION_SECRET_KEY,
        algorithm='HS256',
        payload={
            'sub': 'activation',
            'email': email,
            'exp': dt.datetime.now(pytz.timezone(config.TIMEZONE)) + dt.timedelta(
                minutes=config.JWT_ACTIVATION_MAX_AGE_MINUTES),
            'nbf': dt.datetime.now(pytz.timezone(config.TIMEZONE))
        }
    ).decode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def generate_jwt(self):\n\n # Generate a random token\n random_token = secrets.token_hex(12)\n\n # Update database\n self.user_in_db.update({'token': random_token})\n User.users_db.put(self.user_in_db)\n\n # Create timestamps for the token\n generated = time.time()\n expires = generated + TWO_WEEKS\n\n # Return the generated jwt\n return manage_tokens.encode({\n 'email': self.email,\n 'token': random_token,\n 'generated': generated,\n 'expires': expires,\n })", "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def create_jwt(self, audience: List[str], additional_claims=None) -> str:\n iat = time.time()\n exp = iat + self.lifetime\n payload = additional_claims or {}\n payload.update({'iss': self.credentials[\"client_email\"],\n 'sub': self.credentials[\"client_email\"],\n 'aud': audience,\n 'iat': iat,\n 'exp': exp,\n 'scope': ['email', 'openid', 'offline_access'],\n 'email': self.credentials[\"client_email\"]\n })\n additional_headers = {'kid': self.credentials[\"private_key_id\"]}\n token = jwt.encode(\n payload,\n self.credentials[\"private_key\"],\n headers=additional_headers,\n algorithm='RS256').decode()\n return token", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days= 3)\n payload = {\n 'user': user.username,\n 'exp': int (exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm= 'HS256')\n return token", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, 
current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def generate_token(dictionary: dict, expiration: datetime.timedelta):\n\n dictionary['expiration'] = (datetime.datetime.utcnow() + expiration).timestamp()\n\n return jwt.encode(dictionary, current_app.config['TOKEN_SECRET_KEY'], algorithm='HS256')", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token.decode()", "def generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def gen_verification_token(self, user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n # Generacion del token\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def _create_jwt(request, user, expires_in):\n oauth_application = _get_login_oauth_client()\n access_token = create_dot_access_token(\n # Note: Scopes for JWT cookies do not require additional permissions\n request, user, oauth_application, expires_in=expires_in, scopes=['user_id', 'email', 'profile'],\n )\n return create_jwt_from_token(access_token, DOTAdapter(), use_asymmetric_key=True)", "def token_gen_call(username, password, exp=None):\n #pdb.set_trace()\n \n #username_set = params['AUTH']['username_set']\n #password_set = params['AUTH']['password_set']\n username_set = username\n password_set = password\n \"\"\"\n Creates JWT Token\n :return:\n \"\"\"\n if exp is None:\n exp = datetime.utcnow() + timedelta(seconds=3600)\n _token = {\n 'aud': JWT_AUDIENCE,\n 'exp': exp,\n 'iss': JWT_ISSUER,\n 'user': username,\n 'role': 'admin',\n 'time':time.time()\n }\n _token.update(_token)\n \n if password_set == password and username_set == username: # example, don't do this in production\n return {\"token\" : jwt.encode(_token, SECRET_KEY, algorithm=JWT_OPTIONS_ALGORITHM).decode('utf-8') }\n return 'Invalid username and/or password for user: {0}'.format(username)", "def generate_token(exp=None):\n\n secret_key = getenv('JWT_SECRET_KEY')\n user = {\n 'first_name': fake.name(),\n 'last_name': 
fake.name(),\n 'email': fake.email(),\n 'is_admin': IsAdmin.yes,\n 'password': fake.password()\n }\n\n payload = {'id': str(User.find_or_create(user, email=user['email']).id)}\n payload.__setitem__('exp', exp) if exp is not None else ''\n token = jwt.encode(payload, secret_key, algorithm='HS256').decode(CHARSET)\n return 'Bearer {0}'.format(token)", "def activate():\n try:\n body = request.get_json()\n\n activate_token = body[\"activate_token\"]\n password = body[\"password\"]\n\n if len(password) < 3 or len(password) > 50:\n return bad_request()\n\n if not models.token_exists(activate_token):\n\n return bad_request()\n\n student_hash = models.create_hash(password)\n models.save_hash(student_hash, activate_token)\n\n except KeyError:\n return bad_request()\n except Exception as e:\n print(e)\n return server_error()\n\n return created()", "def create_token(user):\n payload = {\n 'sub': user.id,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)\n }\n token = jwt.encode(payload, config.SECRET_KEY, algorithm='HS256')\n return token.decode('unicode_escape')", "def create_jwt(user, secret):\n logger.debug(\"Create JWT with secret %s\" % secret)\n # username = request.POST['username']\n # password = request.POST['password'\n\n expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)\n expiry_s = time.mktime(expiry.timetuple())\n if user.is_authenticated():\n internalid = user.authprofile.internalid\n payload = {'username': user.username, 'expiry': expiry_s, 'type': \"AuthenticatedUser\", 'internalid': internalid, 'email': user.email}\n token = jws.sign(payload, secret, algorithm='HS256')\n else:\n payload = {'expiry':expiry_s, 'type': \"AnonymousUser\", 'internalid': None, 'email': None}\n token = jws.sign(payload, secret, algorithm='HS256')\n logger.debug(\"Payload: %s\" % payload)\n # logger.info(\"Token: %s\" % token)\n return token", "def create_access_token(identity: Union[str,int], type_token: str, fresh: Optional[bool] = False) -> bytes:\n return AuthJWT.create_token(\n identity=identity,\n type_token=type_token,\n fresh=fresh,\n exp_time=timedelta(minutes=AuthJWT._ACCESS_TOKEN_EXPIRES)\n )", "def generate_token(user):\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(user)\n return JWT_ENCODE_HANDLER(payload)\n else:\n token = Token.objects.create(user=user)\n token.save()\n return token", "def create_token(identity: int, type_token: str, exp_time: timedelta, fresh: Optional[bool] = False) -> bytes:\n if type_token not in ['access','refresh']:\n raise ValueError(\"Type token must be between access or refresh\")\n\n payload = {\n \"iat\": AuthJWT.get_int_from_datetime(datetime.now(timezone.utc)),\n \"nbf\": AuthJWT.get_int_from_datetime(datetime.now(timezone.utc)),\n \"jti\": AuthJWT.get_jwt_id(),\n \"exp\": AuthJWT.get_int_from_datetime(datetime.now(timezone.utc) + exp_time),\n \"identity\": identity,\n \"type\": type_token\n }\n\n # for access_token only fresh needed\n if type_token == 'access':\n payload['fresh'] = fresh\n\n return jwt.encode(payload,AuthJWT._SECRET_KEY,algorithm=AuthJWT._ALGORITHM)", "def create_auth_token(\n username: str,\n admin: t.Optional[bool] = False,\n spotify: t.Optional[bool] = False,\n) -> JWT:\n auth_token: JWT = auth_manager.auth_token(\n username, {\"admin\": admin, \"spotify\": spotify}\n )\n return auth_token", "def create_fake_JWT_token(userEmail):\n pass", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': 
self.id})" ]
[ "0.7311987", "0.71036845", "0.6983588", "0.68776363", "0.68750894", "0.68676454", "0.6818633", "0.6811045", "0.67879117", "0.67571187", "0.6742507", "0.6725463", "0.6708632", "0.66643095", "0.66524696", "0.6650436", "0.66484785", "0.6637267", "0.6597703", "0.65425956", "0.6527231", "0.647436", "0.64741075", "0.6468241", "0.6444107", "0.6443181", "0.64424753", "0.6442308", "0.6429265", "0.6382977" ]
0.8025082
0
Retrieves an email from the payload of a JWT token. Raises an exception if the JWT token is not valid.
def retrieve_email_from_jwt_for_account_activation(token: str) -> str:
    logging.debug("Retrieving email from JWT for account activation.")
    try:
        payload = jwt.decode(token.encode('utf-8'),
                             key=config.JWT_ACTIVATION_SECRET_KEY,
                             algorithm='HS256')
    except Exception as e:
        # TODO
        logging.debug('JWT issue!')
        raise e
    logging.debug("Retrieved user - %s", payload['email'])
    return payload['email']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email():\n headers = request.headers\n token = headers['Authorization'].split()[1]\n return Token.objects(access_token=token).first().email", "def get_email(self, token, uid):\n\n email_info_resp = get_remote(get_config('login.weibo.email_info_url') + token)\n email_info_resp_json = json.loads(email_info_resp)\n\n if email_info_resp_json.get(\"error\") is not None:\n raise Exception(email_info_resp_json)\n\n return email_info_resp_json['email']", "def get_email(self, token):\n resp = requests.get(self.emails_url, params={\"access_token\": token.token})\n emails = resp.json().get(\"values\", [])\n email = \"\"\n try:\n email = emails[0].get(\"email\")\n primary_emails = [e for e in emails if e.get(\"is_primary\", False)]\n email = primary_emails[0].get(\"email\")\n except (IndexError, TypeError, KeyError):\n return \"\"\n finally:\n return email", "async def signup_email(self, token: str) -> Optional[str]:\n db = self['db_engine']\n async with db.acquire() as connection:\n query = select([RegToken]).where(RegToken.token == token).where(\n RegToken.expired_at > datetime.now(timezone.utc))\n if reg_token := await(await connection.execute(query)).first():\n return reg_token.email\n raise web.HTTPBadRequest(reason=\"Wrong token\")", "def get_user_from_token(token):\n try:\n jwt_decode = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])\n except:\n return None\n email = jwt_decode['user']\n try:\n user = User.get(User.email == email)\n except:\n return None\n return user", "def email_from_invitation_token(token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n user_email = data.get('user_email')\n if user_email is None:\n return False\n if User.query.filter_by(email=user_email).first() is not None:\n return False\n return user_email", "def email(self):\n return self._dict.get('email')", "def customer_email(customer):\n return customer.get(\"email\")", "def get_email(obj):\r\n return obj.user.email", "def get(self, request, token):\n try:\n primary_key = signing.loads(token, max_age=self.token_expires, salt=self.salt)\n user = get_user_data(primary_key)\n if user:\n return render_to_response(\n 'registration/email.html',\n {\n 'in_browser': True,\n 'site': RequestSite(request),\n 'user': user,\n 'token': signing.dumps(user.pk, salt=self.salt),\n 'secure': request.is_secure(),\n })\n except signing.BadSignature:\n return render_to_response(\n 'registration/show_message.html',\n {\n 'title': \"invalid token\",\n 'message': \"sorry invalid token try again to recover password\"})", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def googleapis_email(url, params):\n from urllib2 import Request, urlopen\n from django.utils import simplejson\n\n request = Request(url + '?' 
+ params, headers={'Authorization': params})\n try:\n return simplejson.loads(urlopen(request).read())\n except (ValueError, KeyError, IOError):\n return None", "def parse_password_reset_token(token):\n try:\n token = base64.urlsafe_b64decode(str(token))\n except TypeError:\n return None\n cred = _parse_security_token(token)\n return cred and cred.email", "def getRefreshToken( email):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=ApiJWTAuthentication.expirationTime_Refresh),\n 'email': email\n }\n jwtToken=jwt.encode(\n\n payload= payload, key=ApiJWTAuthentication.secretKey_Refresh, algorithm='HS256'\n );\n token=jwtToken.decode('utf-8')\n return {\"message\": \"success\", \"refresh_token\": token}\n except Exception as e:\n return {\"message\": \"exception\",\"Exception\": str(e)}", "def get(self):\n user_id = request.args.get('user_id')\n return get_email(user_id)", "def get(self, request):\n token = request.GET.get('token')\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms='HS256')\n print(f'payload {payload}')\n user = User.objects.get(id = payload['user_id'])\n \n if not user.email_verified:\n user.email_verified = True\n user.save()\n return response.Response({'email': \"successful email verification\"}, status = status.HTTP_200_OK)\n return response.Response({'error': \"unsuccessful email verification\"}, status = status.HTTP_400_BAD_REQUEST)", "def get_email(self):\n return self.email", "def verify_password_reset_token(token: str) -> tp.Optional[str]:\n try:\n decoded_token = jwt.decode(token, config.SECRET_KEY,\n algorithms=[ALGORITHM])\n except InvalidTokenError:\n return None\n if decoded_token[\"sub\"] != PASSWORD_RESET_SUBJECT:\n return None\n return decoded_token[\"email\"]", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def get_email(self):\n return self._email", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def validate_email(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n self.user_in_db = User.users_db.get(self.email)\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if 'secret_token' not in decoded_token or decoded_token['secret_token'] != self.user_in_db['secret_token']:\n return {'error': 'Token is invalid'}\n\n self.user_in_db['secret_token'] = ''\n self.user_in_db['verified'] = True\n\n User.users_db.put(self.user_in_db)\n\n return decoded_token", "def get_account_for_email(cls, email):\n assert email\n key = '<%s>' % email\n return cls.get_by_key_name(key)", "def getEmail(self):\n return self.email" ]
[ "0.7630464", "0.7101012", "0.70788604", "0.6519509", "0.6437453", "0.6377152", "0.6299448", "0.62386537", "0.6229537", "0.61632866", "0.6106897", "0.60842735", "0.6023732", "0.60121953", "0.60055405", "0.5980921", "0.5976147", "0.5867186", "0.585634", "0.585634", "0.585634", "0.58465683", "0.584218", "0.5825681", "0.5825681", "0.5825681", "0.5825681", "0.5793493", "0.5793178", "0.57720506" ]
0.7602766
1
Changes the settings that indicate what the solver should solve for next, and saves `sol` correctly. `sol` is the solution vector from the last step; on the first step, `sol` can be anything.
def _advance_settings(self, sol):
    if self.cond == True:
        # Save last solution...
        self.lst_tmp = sol
        # Check if all timesteps are complete.
        self.current_T += self.d_T
        self.step += 1
        if self.current_T > self.max_T:
            return False
        # Set to not be conduction any more
        self.cond = False
        if len(self.fq_list) > 0:
            self.rad = 0
        else:
            # There are radiation steps to do.
            self.cond = True
        return True

    # If we're here, we're either not done anything yet or have
    # just done a radiation step.
    if self.rad != None:
        # Save last solution
        self.lst_rad[self.rad] = sol
        # Advance to next radiation stage if one exists. Else cond.
        if self.rad + 1 != len(self.fq_list):
            self.rad += 1
        else:
            self.rad = None
            self.cond = True
        return True

    # If we've made it to here, we must just setting the simulation
    # going.
    assert (len(self.fq_list) == len(self.lst_rad))
    if len(self.lst_rad) > 0:
        assert (len(self.fq_list) == len(self.absorb_coeffs))
        assert (self.refr_idx_vol >= 0.0)
    # Could set to zero, but that might limit restarts. Just check
    # Validity....
    assert (self.step != None)
    assert (self.d_T > 0.0)
    assert (self.current_T != None)
    assert (self.max_T != None)
    assert (self.max_T > self.current_T)
    assert (self.diff_scale >= 0.0)
    assert (self.diff_scale <= 1.0)
    assert (self.thermal_conductivity > 0.0)
    assert (self.alpha >= 0.0)
    assert (self.refr_idx_background >= 0.0)

    # Set the ball rolling:
    if len(self.fq_list) > 0:
        # We can set solver for frequencies first...
        self.rad = 0
    else:
        self.cond = True
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advance(self, sol):\r\n self.data_saving(sol)\r\n simulation_continues = self._advance_settings(sol)\r\n self.redef_vars()\r\n self.reporting(sol)\r\n self.norm_reporting()\r\n return simulation_continues", "def update_current_sol_and_cost(self,sol=None):\n\n # Update current sol if argument given\n if sol is not None:\n self.current_sol = sol\n \n # Update residual and cost\n try:\n self.residual = self.sketch_reweighted - self.sketch_of_solution(self.current_sol)\n self.current_sol_cost = np.linalg.norm(self.residual)\n except AttributeError: # We are here if self.current_sol does not exist yet\n self.current_sol, self.residual = None, self.sketch_reweighted\n self.current_sol_cost = np.inf", "def solve(self):\n new_puzzle = self._puzzle.clone()\n self._solution = new_puzzle.solve_puzzle()\n del new_puzzle\n pass", "def solve(self, solver):\n solver.solve()", "def solve(self,**kwargs):\n if kwargs.pop('restart',False):\n self.nopt = 0\n savefile = kwargs.pop('savebase',os.path.abspath(self.filename)+('_%02d.cysolve.pkl' % self.nloop))\n\n if kwargs.has_key('savedir'):\n savedir = kwargs['savedir']\n for isub in range(self.nspec):\n kwargs['isub'] = isub\n self.loop(**kwargs)\n print \"Saving after nopt:\", self.nopt\n self.saveState(savefile)\n \n self.pp_ref = self.pp_int\n self.nloop += 1", "def __solve(self) -> None:\n pyo.TransformationFactory(\"contrib.detect_fixed_vars\").apply_to(self.model) # type: ignore\n pyo.TransformationFactory(\"contrib.deactivate_trivial_constraints\").apply_to(self.model) # type: ignore\n\n # initialise the solver object\n self._logger.debug(\"[ModelSolver] Solver object initiated...\")\n solver = Config.OPTIMISATION_MODEL_CONFIG['SOLVER_TYPE']\n opt = pyo.SolverFactory(solver)\n if Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver) is not None:\n for k, v in Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver).items():\n opt.options[k] = v\n\n try:\n start_time = datetime.now()\n self._logger.debug(\"[ModelSolver] Solver starting...\")\n results = opt.solve(self.model, tee=True)\n self.results = results\n end_time = datetime.now()\n self._logger.info(f\"[ModelSolver] Solver completed in {end_time - start_time}.\")\n except Exception as e:\n raise Exception(f\"Model optimisation failed with {solver} with error message {e}.\")\n\n if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):\n self._logger.info(\"Solution is feasible and optimal\")\n results.write()\n elif results.solver.termination_condition == TerminationCondition.infeasible:\n raise ValueError(\"Model optimisation resulted into an infeasible solution\")\n\n self.model.optimised = True", "def updatesolutioninfo(self,whichsol_):\n res = __library__.MSK_XX_updatesolutioninfo(self.__nativep,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def sketch_of_solution(self,sol=None):\n raise NotImplementedError", "def solve():\n game_state.is_solving = ~game_state.is_solving\n\n if game_state.is_solving:\n solve_button.set_label(\"Pause\")\n else:\n solve_button.set_label(\"Solve\")\n\n game_state.is_dirty = True\n\n return solve", "def Solve(self,iter_val=0):\n\n ### Save Files before solve ###\n self.fprint(\"Saving Input Data\",special=\"header\")\n if \"mesh\" in self.params.output:\n self.problem.dom.Save(val=iter_val)\n if \"initial_guess\" in self.params.output:\n self.problem.bd.SaveInitialGuess(val=iter_val)\n if \"height\" in self.params.output and 
self.problem.dom.dim == 3:\n self.problem.bd.SaveHeight(val=iter_val)\n if \"turbine_force\" in self.params.output:\n self.problem.farm.SaveRotorDisks(val=iter_val)\n self.fprint(\"Finished\",special=\"footer\")\n\n ####################################################################\n ### This is the better way to define a nonlinear problem but it \n ### doesn't play nice with dolfin_adjoint\n # ### Define Jacobian ###\n # dU = TrialFunction(self.problem.fs.W)\n # J = derivative(self.problem.F, self.problem.up_next, dU)\n\n # ### Setup nonlinear solver ###\n # nonlinear_problem = NonlinearVariationalProblem(self.problem.F, self.problem.up_next, self.problem.bd.bcs, J)\n # nonlinear_solver = NonlinearVariationalSolver(nonlinear_problem)\n\n # ### Set some parameters ###\n # solver_parameters = nonlinear_solver.parameters\n # solver_parameters[\"nonlinear_solver\"] = \"snes\"\n # solver_parameters[\"snes_solver\"][\"linear_solver\"] = \"mumps\"\n # solver_parameters[\"snes_solver\"][\"maximum_iterations\"] = 50\n # solver_parameters[\"snes_solver\"][\"error_on_nonconvergence\"] = False\n # solver_parameters[\"snes_solver\"][\"line_search\"] = \"bt\" # Available: basic, bt, cp, l2, nleqerr\n\n ### Solve the problem ###\n # self.fprint(\"Solving\",special=\"header\")\n # start = time.time()\n # iters, converged = nonlinear_solver.solve()\n # stop = time.time()\n # self.fprint(\"Total Nonlinear Iterations: {:d}\".format(iters))\n # self.fprint(\"Converged Successfully: {0}\".format(converged))\n ####################################################################\n\n\n nonlinear_solver = self.params[\"solver\"].get(\"nonlinear_solver\", \"snes\")\n relaxation = self.params[\"solver\"].get(\"newton_relaxation\", 1.0)\n\n self.fprint(\"Solving with {0}\".format(nonlinear_solver))\n if nonlinear_solver == \"newton\":\n self.fprint(\"Relaxation parameter = {: 1.2f}\".format(relaxation))\n\n newton_options = {\"relaxation_parameter\": relaxation,\n \"maximum_iterations\": 40,\n \"linear_solver\": \"mumps\",\n \"absolute_tolerance\": 1e-6,\n \"relative_tolerance\": 1e-5}\n \n solver_parameters = {\"nonlinear_solver\": \"newton\",\n \"newton_solver\": newton_options}\n\n elif nonlinear_solver == \"snes\":\n # ### Add some helper functions to solver options ###\n solver_parameters = {\"nonlinear_solver\": \"snes\",\n \"snes_solver\": {\n \"linear_solver\": \"mumps\", \n \"maximum_iterations\": 40,\n \"error_on_nonconvergence\": True,\n \"line_search\": \"bt\",\n }}\n \n else:\n raise ValueError(\"Unknown nonlinear solver type: {0}\".format(nonlinear_solver))\n\n ### Start the Solve Process ###\n self.fprint(\"Solving\",special=\"header\")\n start = time.time()\n \n # ### Solve the Baseline Problem ###\n # solve(self.problem.F_sans_tf == 0, self.problem.up_next, self.problem.bd.bcs, solver_parameters=solver_parameters, **self.extra_kwarg)\n\n # ### Store the Baseline and Assign for the real solve ###\n # self.up_baseline = self.problem.up_next.copy(deepcopy=True)\n # self.problem.up_next.assign(self.up_baseline)\n\n ### Solve the real problem ###\n solve(self.problem.F == 0, self.problem.up_next, self.problem.bd.bcs, solver_parameters=solver_parameters)\n stop = time.time()\n self.fprint(\"Solve Complete: {:1.2f} s\".format(stop-start),special=\"footer\")\n # self.u_next,self.p_next = self.problem.up_next.split(True)\n self.u_next,self.p_next = split(self.problem.up_next)\n # self.nu_T = project(self.problem.nu_T,self.problem.fs.Q,solver_type='mumps',**self.extra_kwarg)\n self.nu_T = None\n\n\n ### 
Save solutions ###\n if \"solution\" in self.params.output:\n self.fprint(\"Saving Solution\",special=\"header\")\n self.Save(val=iter_val)\n self.fprint(\"Finished\",special=\"footer\")\n\n ### calculate the power for each turbine ###\n ###################################\n ### Fix how angle is transfered ###\n ###################################\n if self.optimizing or self.save_power:\n self.J += -self.CalculatePowerFunctional((iter_val-self.problem.dom.init_wind)) \n\n # self.fprint(\"Speed Percent of Inflow Speed\")\n # ps = []\n # for i in range(6):\n # HH = self.problem.farm.HH[0]\n # RD = self.problem.farm.RD[0]\n # x_val = (i+1)*RD\n # vel = self.problem.up_next([x_val,0,HH])\n # vel = vel[0:3]\n # nom = np.linalg.norm(vel)\n # perc = nom/self.problem.bd.HH_vel\n # ps.append(perc)\n # self.fprint(\"Speed Percent at (\"+repr(int(x_val))+\", 0, \"+repr(HH)+\"): \"+repr(perc))\n # print(ps)", "def solve_optimisation(model, exe_path, project_dir, poses) -> None:\n opt = SolverFactory(\n 'ipopt',\n executable=exe_path\n )\n\n # solver options\n opt.options[\"print_level\"] = 5\n opt.options[\"max_iter\"] = 400\n opt.options[\"max_cpu_time\"] = 3600\n opt.options[\"tol\"] = 1e-1\n opt.options[\"OF_print_timing_statistics\"] = \"yes\"\n opt.options[\"OF_print_frequency_iter\"] = 10\n opt.options[\"OF_hessian_approximation\"] = \"limited-memory\"\n #opt.options[\"linear_solver\"] = \"ma86\"\n\n LOG_DIR = 'C://Users//user-pc//Documents//Scripts//FYP//logs'\n\n # --- This step may take a while! ---\n results = opt.solve(\n model, tee=True, \n keepfiles=True, \n logfile=os.path.join(LOG_DIR, \"solver.log\")\n )\n\n result_dir = os.path.join(project_dir, \"results\")\n save_data(model, file_path=os.path.join(result_dir, 'traj_results.pickle'), poses=poses)", "def Save(self,val=0):\n u,p = self.problem.up_next.split(True,**self.extra_kwarg)\n if self.first_save:\n self.u_file = self.params.Save(u,\"velocity\",subfolder=\"solutions/\",val=val)\n self.p_file = self.params.Save(p,\"pressure\",subfolder=\"solutions/\",val=val)\n # self.nuT_file = self.params.Save(self.nu_T,\"eddy_viscosity\",subfolder=\"solutions/\",val=val)\n self.first_save = False\n else:\n self.params.Save(u,\"velocity\",subfolder=\"solutions/\",val=val,file=self.u_file)\n self.params.Save(p,\"pressure\",subfolder=\"solutions/\",val=val,file=self.p_file)\n # self.params.Save(self.nu_T,\"eddy_viscosity\",subfolder=\"solutions/\",val=val,file=self.nuT_file)", "def notify_solution(self, sol):\n pass # pragma: no cover", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n 
self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def solved(self, solvetype=\"solve\", debug=False, debugfunc=print):\n if debugfunc is print:\n e.setreturned()\n out = getcopy(self)\n if solvetype in self.solvetypes:\n self.solvetypes[solvetype](out, debug, debugfunc)\n else:\n raise ExecutionError(\"KeyError\", \"Unrecognized solve type \"+solvetype)\n return out", "def edit_settings(self):\n while True:\n os.system('cls' if os.name == 'nt' else 'clear')\n valid_numbers, number_setting_corr = self.print_settings()\n print('Which setting you want to change? Enter \"number, new value\" to modify, or \"done\" to exit.')\n print('Observe the possible values for each setting! They are case sensitive. '\n 'Inputting wrong values might break the program. \\n')\n choice = input('Input:')\n if choice == 'done':\n break\n if ',' not in choice:\n print('Invalid input. Place the number, followed by a comma, followed by its value. Eg: 1,TRUE')\n continue\n if len(choice.split(',')) != 2:\n print('Invalid input, must have only one comma')\n continue\n\n var, val = choice.split(',')\n if var not in valid_numbers:\n print('Invalid number.')\n continue\n real_var = number_setting_corr[var] # Changes from a number to the actual parameter\n if val.lower() == 'true':\n setattr(self, real_var, True)\n continue\n elif val.lower() == 'false':\n setattr(self, real_var, False)\n continue\n else:\n setattr(self, real_var, val)\n\n # todo: check for all possible values to avoid inputting wrong settings and messing everything up.\n # if val not in valid_options_nl_sorting:\n # print('Invalid nonlinear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in valid_options_lin_sorting:\n # print('Invalid linear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in models:\n # print('Invalid nonlinear fitting model. Case sensitive! 
Be very precise.')\n # continue\n\n print('===Final settings===')\n _, _ = self.print_settings()\n self.save_settings()\n return", "def updatesolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.updatesolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solve(puzzle, verbose=False):\n sol = puzzle.extensions()\n s = puzzle\n for i in sol:\n if verbose == True:\n print(i)\n if not i.is_solved():\n s = solve(i)\n if s != puzzle:\n break\n else:\n s = i\n break\n return s", "def writesolution(self,whichsol_,filename_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.writesolution(whichsol_,filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solve(self) -> Dict:\n solution = self.opt.decision_variables.vec2dict(self._solve())\n\n if self._error_on_fail and (not self.did_solve()):\n raise RuntimeError(\"Solver failed!\")\n\n # Add full model state to the solution dictionary\n for model in self.opt.models:\n for d in model.time_derivs:\n n_s = model.state_name(d)\n n_s_x = model.state_optimized_name(d)\n if isinstance(model, RobotModel):\n if model.num_param_joints > 0:\n n_s_p = model.state_parameter_name(d)\n t = solution[n_s_x].shape[1]\n solution[n_s] = cs.DM.zeros(model.dim, t)\n solution[n_s][model.optimized_joint_indexes, :] = solution[\n n_s_x\n ]\n solution[n_s][model.parameter_joint_indexes, :] = self._p_dict[\n n_s_p\n ]\n else:\n solution[n_s] = solution[n_s_x]\n else:\n solution[n_s] = solution[n_s_x]\n\n return solution", "def solve(self):\n ...", "def setup_solver(self):\n option = Options()\n if logger.getEffectiveLevel() == logging.DEBUG:\n # option.printLevel = PrintLevel.HIGH\n option.printLevel = PrintLevel.NONE\n else:\n option.printLevel = PrintLevel.NONE\n self.solver_minimizing = SQProblem(self.nV, self.nC)\n self.solver_minimizing.setOptions(option)\n self.solver_maximizing = SQProblem(self.nV, self.nC)\n self.solver_maximizing.setOptions(option)\n\n self.solver_minimizing_recent_index = -2\n self.solver_maximizing_recent_index = -2", "def notify_solution(self, sol):\n self._solutions.append(sol)", "def add_solution(self, solution):\n if self.check_solution(solution):\n self._solution = solution\n self.solution_status = 'OK'\n else:\n self._solution = None\n self.solution_status = 'X'", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solutiondef(self,whichsol_):\n isdef_ = ctypes.c_int32()\n res = __library__.MSK_XX_solutiondef(self.__nativep,whichsol_,ctypes.byref(isdef_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n isdef_ = isdef_.value\n _isdef_return_value = isdef_\n return (_isdef_return_value)", "def solve(self):\n if not self.solvable:\n print('Suduko not Solvable')\n return False\n res=self.back(0, 0)\n # if 
self.a[0][0]!=0:\n # res=self.back(0, 1)\n # else:\n # for i in range(1, 10):\n # self.a[0][0]=i\n # res=self.back(0, 1)\n # if res:\n # break\n if res:\n self.check_if_solvable()\n print(\"Sudoku Solved!\")\n print(self.a)\n return self.a\n else: print(\"Not Solvable\")\n return False", "def set_solver(self, solver):\n self.solver = solver\n solver.associate_with_model(self)\n self.set_free_variable(0) # A reasonable initial assumption; can be overridden by simulations" ]
[ "0.66913146", "0.62749034", "0.6065411", "0.60483533", "0.590894", "0.5823087", "0.57761306", "0.5760221", "0.5688091", "0.5680677", "0.56508654", "0.56463283", "0.56280655", "0.56020236", "0.55638695", "0.55544627", "0.55511516", "0.55404", "0.5522256", "0.54638934", "0.5443103", "0.5435673", "0.54294086", "0.54289424", "0.5401213", "0.5401213", "0.5401213", "0.5390414", "0.53599554", "0.5347863" ]
0.68487495
0
Zero step counter and current time
def zero_timings(self): self.step = 0 self.current_T = 0.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self._timestep = np.array([0])", "def zero(self):\n real_dt = self.clock.tick(self.framerate)\n #self.notify(GameClock.REAL_TIME_ADVANCE, real_dt)", "def pre_step(self,status):\n self.t0 = time.time()\n pass", "def _reset(self) -> ts.TimeStep:", "def reset(self):\n self.cumtime = 0\n self.start_time = self.time()", "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def test_counter_start_at_zero(self):\n pass", "def reset() -> None:\n global t0\n\n t0 = time.perf_counter()", "def reset(self, time):\n for key in self.data['step']:\n self.data['step'][key] = None\n\n self.time = time", "def reset_timer(self):\r\n self.time_minutes = 0\r\n self.time_seconds = 0", "def onTimeStepStart(self, timeStep):\n self.queuedInTimeStep = 0\n self.dequeuedInTimeStep = 0\n \n pass", "def time_step_output(self, current_time, time_step):\n pass", "def _reset(self):\n self.spot_supervisor.reset()\n return ts.TimeStep(ts.StepType.FIRST, np.float32(0.0), DISCOUNT,\n np.zeros(23, dtype=np.float32))", "def reset_counter(self) -> None:", "def reset(self):\n\t\tself._initial = None\n\t\tself._start = None\n\t\tself._time = 0\n\t\tself._total = 0\n\t\treturn self", "def t0(self):\n return self._time_axis.start", "def __init__(self) -> None:\n self.time_passed = -1.0", "def reset(self):\n\n self.elapsed_time = 0", "def __init__(self, step_time, step=None):\n self.step_vector = step\n self.step_time = step_time\n self.ref_timer = None", "def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def initialTime(self):\n return self.params['t0']", "def test_last_is_zero(self):\n countdown = [x for x in generators.countdown(10)]\n self.assertEqual(countdown[::-1][0], 0)", "def test_last_is_zero(self):\n countdown = [x for x in iterators.CountdownIterator(10)]\n self.assertEqual(countdown[::-1][0], 0)", "def reset_timer():\n resetTimer = time.time()\n target_time.clear()\n target_time.append(resetTimer)", "def reset(self):\n self._start_time = None\n self.time_left = None", "def zero(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def Reset():\n #if timer.is_running():\n timer.stop()\n global n,message\n global total_stop\n global success_stop\n n = 0\n message = \"0:00.0\"\n total_stop=0\n success_stop=0", "def reset(self):\n self.steps = 0\n self.state = 0\n self.trajectory = []", "def reset(self):\n self.t = 0.0\n self.last_t = None\n self.current_y = np.copy(self.start_y)\n self.current_yd = np.copy(self.start_yd)" ]
[ "0.7400027", "0.7304187", "0.71039325", "0.6968158", "0.6873948", "0.66307056", "0.6536727", "0.65258205", "0.6445208", "0.6304525", "0.6256326", "0.6243078", "0.62241364", "0.62081534", "0.62007654", "0.618742", "0.61800855", "0.60946125", "0.6082498", "0.6034064", "0.592827", "0.5919725", "0.591376", "0.590946", "0.58297044", "0.58131164", "0.5812579", "0.58119625", "0.58075315", "0.58031803" ]
0.8421047
0
Use matplotlib to spy a matrix
def matrix_spy(self, mtrx): import matplotlib.pylab as pl pl.spy(mtrx,precision=0.01, markersize=1) pl.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_matrix_method(pulse, trap, ToP):\n n0, d = trap.matrix_method(pulse)\n for k in range(len(d)):\n ave_list = []\n timestep = np.arange(0, trap.N+1, 1)\n for i in range(len(d[k])):\n sum2 = 0\n for j in range(len(d[k][i])):\n sum2 += (j) * d[k][i][j]\n ave_list.append(sum2)\n if ToP == 'a':\n plt.plot(timestep * pulse.t * 1e3, ave_list, label = pulse.t)\n if ToP == 'b':\n plt.plot(timestep * pulse.t * 1e3, ave_list, color = 'black', label = 'Matrix')\n if ToP == 'c':\n plt.plot(timestep * pulse.t * 1e3, ave_list, color = 'b')\n # plt.legend()\n # plt.xlabel('time (ms)')\n # plt.ylabel('n')\n #plt.xlim(0, 10) ", "def test_make_sparam_plot_2plots(self):\n print(sys._getframe().f_code.co_name)\n try:\n s11_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s22_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s21_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s12_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s11_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n s22_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n s21_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n s12_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n freq = np.arange(0,6)\n s11 = [s11_1,s11_2]\n s22 = [s22_1,s22_2]\n s21 = [s21_1,s21_2]\n s12 = [s12_1,s12_2]\n pp.make_sparam_plot(freq,s11,s22,s21,s12)\n except Exception as e:\n raise\n plt.close('all')", "def plot_matrix(loc_list):\n x_list = [x[0] for x in loc_list]\n y_list = [y[1] for y in loc_list]\n\n # print(x_list, y_list)\n # plt.figure()\n\n plt.plot(x_list, y_list)", "def plot_mat(self, parameter='s', fig=None, ylim=1.1, label=None):\n if parameter not in ['s', 'y']:\n raise Exception('Invalid parameter.')\n matrix = getattr(self, parameter)\n if fig is None:\n fig = plt.figure(figsize=(15.0, 10.0))\n for i in range(2):\n for j in range(2):\n subplotnum = 2*i+j+1 # add_subplot needs the +1 as indexing starts with 1\n ax = fig.add_subplot(2,2,subplotnum)\n ax.plot(self.f/1e9, matrix[:,i,j].real, label=('Re '+label if label else None))\n ax.plot(self.f/1e9, matrix[:,i,j].imag, label=('Im '+label if label else None))\n ax.set_xlabel('f [GHz]')\n ax.set_ylabel(parameter.upper()+r'$_{%d%d}$'%(i+1,j+1))\n ax.set_ylim([-ylim,ylim]) \n ax.set_xlim([min(self.f/1e9), max(self.f/1e9)])\n fig.tight_layout() \n\n return fig", "def scatterplot_matrix():\r\n\r\n # load data\r\n iris_dataset = load_iris()\r\n data = iris_dataset\r\n setosa = data['data'][data['target'] == 0]\r\n versicolor = data['data'][data['target'] == 1]\r\n virginica = data['data'][data['target'] == 2]\r\n\r\n # set picture frame\r\n num = 4\r\n fig, axes = plt.subplots(nrows=num, ncols=num, figsize=(18, 18))\r\n fig.subplots_adjust(hspace=0.5, wspace=0.25)\r\n\r\n # set scatter plot\r\n for i in range(0, num):\r\n for j in range(0, num):\r\n if i == j:\r\n continue\r\n axes[j, i].plot(setosa[:, j], setosa[:, i], color='navy', marker='o', linestyle='none')\r\n axes[j, i].plot(versicolor[:, j], versicolor[:, i], color='purple', marker='*', linestyle='none')\r\n axes[j, i].plot(virginica[:, j], virginica[:, i], color='pink', marker='s', linestyle='none')\r\n\r\n # set histgram on the diagram\r\n for i in range(0, num):\r\n axes[i, i].hist(setosa[:, i], color='navy')\r\n axes[i, i].hist(versicolor[:, i], color='purple')\r\n axes[i, i].hist(virginica[:, i], color='pink')\r\n\r\n axes[0, 0].set_title('Sepal length')\r\n axes[1, 1].set_title('Sepal width')\r\n axes[2, 2].set_title('Petal length')\r\n axes[3, 3].set_title('Petal width')\r\n\r\n plt.legend(('Setosa', 'Virginica', 'Versicolor')) # add 
legend\r\n\r\n # add Main title\r\n fig.suptitle('Iris Plots, measurements in cm', size=20)\r\n plt.show()", "def drawSparseMatrix(ax, mat, **kwargs):\n row = kwargs.pop('rowOffset', 0)\n col = kwargs.pop('colOffset', 0)\n color = kwargs.pop('color', None)\n\n mat = pg.utils.sparseMatrix2coo(mat)\n mat.row += row\n mat.col += col\n gci = ax.spy(mat, color=color, **kwargs)\n\n ax.autoscale(enable=True, axis='both', tight=True)\n return gci", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot_main_matrix(coords):\r\n print('\\nGenerating Initial Matrix')\r\n\r\n xs = list()\r\n ys = list()\r\n\r\n for coord in coords:\r\n xs.append(coord[0])\r\n ys.append(coord[1])\r\n\r\n plt.plot(xs, ys, 'bd')\r\n plt.savefig('original_matrix.png')\r\n print('Saving file original_matrix.png')", "def plot(self):\n\t\tself.plotOfSpect()", "def plot_stability_matrix(self, file_name=None):\n size = len(self.seq) / 2.5\n plt.figure(figsize=(size, 2.5))\n plt.imshow(self.matrix,\n interpolation='none',\n cmap=plt.get_cmap('YlOrRd'))\n plt.yticks(range(4), ['A', 'C', 'G', 'U'], fontsize=12)\n plt.xticks(range(len(self.seq)), fontsize=12)\n if file_name is None:\n plt.show()\n else:\n plt.savefig(file_name,\n bbox_inches='tight',\n transparent=True,\n pad_inches=0)\n plt.close()", "def plot_matrix(self, matrix: np.ndarray):\n sns.heatmap(matrix, annot=True)\n plt.show()", "def cov_plot(self, matrix, station=\"\", hour = \"\", date=\"\" , averaged = \"\" ):\n var = self.var_dics[self.var]['name'] \n fig,ax = plt.subplots()\n date = self.date_prettyfier(date)\n hour = str(hour).replace('0','00:00').replace('1','12:00')\n if not averaged:\n title = \"Stat: \" + station + ', H: ' + hour + ', Date: ' + date + ', ' + var\n filename = 'Cov_' + station + '_hour_' + hour.replace(':','') + '_date_' + str(date).replace('/','') + '_' +var\n \n elif averaged :\n title = var.replace('temp','Temp.') + \" , Stat: \" + station + ', H: ' + str(hour) + ', Date: ' + str(date)\n filename ='Cov_' + station + '_hour_' + str(hour).replace(':','') + '_averaged_' + str(date).replace('/','') + '_' + var \n\n plt.title(title.replace('_', ' ' ), y=1.03, fontsize = self.font-2)\n\n num = len(matrix[0,:])\n Num = range(num)\n\n vmin, vmax = -3, 3\n if self.var == 'direction': \n vmin, vmax = -10, 10\n color_map= plt.imshow(matrix, interpolation= 'nearest', cmap = 'RdYlBu', vmin = vmin, vmax = vmax ) # nearest serves for discreete grid # cmaps blue, seismic \n plt.ylim(-0.5, 15.5)\n plt.xlim(-0.5, 15.5)\n plt.xticks(Num, Num)\n plt.xlabel('Pressure level an_dep [hPa]', fontsize = self.font-2)\n plt.yticks(Num, Num)\n plt.ylabel('Pressure level fg_dep [hPa]', fontsize = self.font-2)\n ax.set_xticklabels(labels = self.pretty_pressure, fontsize = self.font-4, rotation=45)\n ax.set_yticklabels(labels = self.pretty_pressure, fontsize = self.font-4)\n\n bar = plt.colorbar()\n bar.ax.set_ylabel(\"Covariance\", fontsize = self.font)\n \n for i in Num: # creating text labels\n for j in Num:\n value = '{0:.2f}'.format(matrix[i,j])\n text = ax.text( j,i, value , ha = 'center' , va = 'center', color = 'black', fontsize = 5)\n\n if not os.path.isdir('plots/covariances/'+station): os.mkdir('plots/covariances/'+station)\n plt.savefig('plots/covariances/' + station + '/' + filename + '.png', bbox_inches='tight', dpi = 200)\n plt.close()", "def drawRegularSurface(matrix, nx, ny, xinterp, yinterp):\n dislin.surmat(matrix, nx, ny, xinterp, yinterp)", "def plot_and_compare(xs,ys, m,b):\n\n plt.scatter(xs, ys, color = 'blue')\n\n plt.plot(xs, (xs * m) + b, 
color = 'red')", "def plot_real_matrix(M, name='', outpath=None, fig='auto', climv=None, cmap=\"coolwarm\", show=False, close=True,\n fontsize=None):\n import lepm.plotting.plotting as leplt\n return leplt.plot_real_matrix(M, name='', outpath=None, fig='auto', climv=None, cmap=\"coolwarm\", show=False, close=True,\n fontsize=None)", "def plotScatterMatrix(scatterMatrix, scatterTypeLabel=\"\", fName=None):\n from matplotlib import pyplot\n\n pyplot.imshow(scatterMatrix.todense(), interpolation=\"nearest\")\n pyplot.grid(color=\"0.70\")\n pyplot.xlabel(\"From group\")\n pyplot.ylabel(\"To group\")\n pyplot.title(\"{0} scattering XS\".format(scatterTypeLabel))\n pyplot.colorbar()\n if fName:\n pyplot.savefig(fName)\n else:\n pyplot.show()\n\n pyplot.close()", "def plot_interaction_map(model, name, matrix, output_name, first_variable, second_variable, x_coord, y_coord, output_path): \n import matplotlib\n import matplotlib.cm as cm\n import matplotlib.pyplot as plt\n\n font = {'size' : 14}\n\n matplotlib.rc('font', **font)\n fig = plt.figure(figsize=(5,5))\n ax = plt.subplot()\n\n maxValue = np.max(np.abs(matrix))\n img = ax.imshow((matrix), cmap = cm.bwr, origin='lower', vmin = -min(maxValue, 6), vmax = min(maxValue, 6), interpolation='spline16')\n\n first_variable = '{}'.format(first_variable)\n second_variable = '{}'.format(second_variable)\n ax.set_ylabel(r'$x_i$ = ' + first_variable)\n ax.set_xlabel(r'$y_i$ = ' + second_variable)\n ax.axes.set_xticks([0, 50, 99])\n ax.axes.set_yticks([0, 50, 99])\n xticks = np.linspace(np.array(model.feature_limits[first_variable]).min(), np.array(model.feature_limits[first_variable]).max(), 3)\n yticks = np.linspace(np.array(model.feature_limits[second_variable]).min(), np.array(model.feature_limits[second_variable]).max(), 3)\n ax.scatter([x_coord], [y_coord], marker='o', color='white', s = 250, edgecolors='black', linewidth=3)\n\n ax.set_yticklabels([xticks[tind] for tind in range(3)])\n ax.set_xticklabels([yticks[tind] for tind in range(3)])\n ax.axis([0, (100) - 1, 0, (100) - 1])\n\n # ax.scatter([x_coord_linear], [y_coord_linear], marker='o', color='blue', s = 250, edgecolors='black', linewidth=3)\n t = ax.set_title(r'$\\mathregular{\\frac{\\delta ^2 F(\\bar{x})}{\\delta x_i \\delta x_j}}$')\n # t = ax.set_title('{} and {} - '.format(first_variable, second_variable) + r'$\\mathregular{\\frac{\\delta ^2 F(\\bar{x})}{\\delta x_i \\delta x_j}}$')\n t.set_position([.5, 1.025])\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(img, cax=cax)\n cb.set_label(\"Nomralized mixed derivative\", rotation=90)\n plt.savefig('{}/{}_{}_{}_{}_nonlinear_map.pdf'.format(output_path, name, output_name, first_variable, second_variable), transparent=True, bbox_inches='tight', format='pdf', dpi=600)\n # plt.close('all')", "def test_lightcurve_seismology_plot():\n KeplerLightCurveFile(TABBY_Q8).PDCSAP_FLUX.periodogram().plot()", "def test_plot():\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n tn0, tn1, tn2 = get_spiral()\n ax.plot(tn0, tn1, tn2)", "def visualize_svd():", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def _plot_scatter_matrix(self, ax=None, alpha=0.5, figsize=None,\n diagonal='kde', pretty=True):\n response_variable = self._response_variable\n response_variable_transform, raw_response_variable = find_raw_variable(response_variable)\n explanatory_df = self._model_dataset\n # format response\n transform_func = 
TRANSFORM_FUNCTIONS[response_variable_transform]\n # transform response variable\n response_df = transform_func(explanatory_df[[raw_response_variable]])\n # and rename the columns to reflect the transformation\n response_df.rename(columns={raw_response_variable:response_variable}, inplace=True)\n exogenous_df = self._get_exogenous_matrix(explanatory_df)\n # join exogenous variables and response variable\n variable_df = response_df[[response_variable]].join(exogenous_df)\n # omit the constant column from the plot\n variable_df.drop(labels='const', axis=1, inplace=True)\n\n # make the scatter plot matrix\n sm = scatter_matrix(variable_df, alpha=alpha, figsize=figsize, ax=ax,\n diagonal=diagonal)\n if pretty:\n # rotate the labels\n [s.xaxis.label.set_rotation(45) for s in sm.reshape(-1)]\n [s.yaxis.label.set_rotation(45) for s in sm.reshape(-1)]\n # offset y label\n [s.get_yaxis().set_label_coords(-.5, 0.5) for s in sm.reshape(-1)]", "def test_make_sparam_plot_2plots_labels(self):\n print(sys._getframe().f_code.co_name)\n try:\n s11_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s22_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s21_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s12_1 = np.array([np.arange(0,6),np.arange(0,6)])\n s11_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n s22_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n s21_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n s12_2 = np.array([np.arange(0,6),np.arange(0,6)])*2\n freq = np.arange(0,6)\n s11 = [s11_1,s11_2]\n s22 = [s22_1,s22_2]\n s21 = [s21_1,s21_2]\n s12 = [s12_1,s12_2]\n pp.make_sparam_plot(freq,s11,s22,s21,s12,label=['test1','test2'])\n except Exception as e:\n raise\n plt.close('all')", "def plot():\n pass", "def plot(self):\n\t\tself.plotOfXray().plot()", "def _prepare_plot_matrix(self):\n tx = self.i_border[0]+self.o_wh[1]*self.i_origin[0]/self.i_axis[0]*self._scaling[0]\n ty = -self.i_border[1]-(self.o_wh[1]-self.o_wh[1]*self.i_origin[1]/self.i_axis[1])*self._scaling[1]\n\n sx = (self.o_wh[0])*self._scaling[0]/self.i_axis[0]\n sy = (self.o_wh[1])*self._scaling[1]/self.i_axis[1]\n\n self._mat_plot = numpy.array([\n sx, 0, 0, 0,\n 0, sy, 0, 0,\n 0, 0, 1, 0,\n tx, ty, 0, 1\n ], dtype=numpy.float32)", "def plot_matrix(A, O, word_dict, normalize=True):\n Osize = O.shape\n Onew = O\n\n if normalize:\n Onew = np.zeros(Osize)\n Anew = np.zeros(A.shape)\n for row in range(Osize[0]):\n Onew[row, :] = O[row, :]/max(O[row, :])\n Anew[row, :] = A[row, :]/max(A[row, :])\n\n plt.imshow(Onew, aspect='auto', cmap='magma', interpolation='nearest')\n plt.colorbar(orientation='horizontal', aspect=100)\n plt.clim(vmin=0, vmax=1)\n plt.tight_layout()\n plt.show()\n\n fig, ax1 = plt.subplots(1, 1)\n ax1.imshow(Onew[:, :100], aspect='auto', cmap='magma', interpolation='nearest', vmin=0.0, vmax=1.0)\n ax1.set_xticks(range(100))\n ax1.set_xticklabels(word_dict[:100], rotation=90)\n plt.show() # display\n\n plt.matshow(A, aspect='auto', cmap='magma')\n plt.colorbar()\n plt.show()", "def exercise11():\n x = np.arange(0, 10, 0.01);\n y = np.sin(x)\n\n plt.figure()\n plt.xlabel(\"x values\")\n plt.ylabel(\"sin(x)\")\n plt.title(\"Sine Function for x from 0.0 to 10.0\")\n plt.plot(x, y)\n plt.show()", "def make_plot(x,y):", "def plot_matrix(matrix, yaxis=None, xaxis=None, **kwargs):\n\n # Make new matplotlib figure.\n fig = pyplot.figure()\n ax = fig.add_subplot(1, 1, 1)\n fig.subplots_adjust(top=0.85)\n cax = ax.matshow(matrix, interpolation=kwargs.get('interpolation', 'bilinear'))\n cb = fig.colorbar(cax)\n 
cb.set_label(kwargs.get('cblabel', ''))\n\n # Set figure and axis titles\n fig.suptitle(kwargs.get('title', ''))\n ax.set_title(kwargs.get('subtitle', ''), fontsize=8)\n ax.set_ylabel(kwargs.get('ylabel', ''), fontsize=10)\n ax.set_xlabel(kwargs.get('xlabel', ''), fontsize=10)\n\n # Set the ticks and tick labels. Reverse y axis to align x/y origin\n yaxis_locs = range(0, len(yaxis), int(len(yaxis) / 10))\n ax.yaxis.set_ticks_position('left')\n ax.yaxis.set_major_locator(mticker.FixedLocator(yaxis_locs))\n ax.yaxis.set_major_formatter(mticker.FixedFormatter(['%1.2f' % yaxis[x] for x in yaxis_locs]))\n ax.invert_yaxis()\n\n xaxis_locs = range(0, len(xaxis), int(len(xaxis) / 10))\n ax.xaxis.set_ticks_position('bottom')\n ax.xaxis.set_major_locator(mticker.FixedLocator(xaxis_locs))\n ax.xaxis.set_major_formatter(mticker.FixedFormatter(['%1.2f' % xaxis[x] for x in xaxis_locs]))\n ax.grid(None)\n\n return fig" ]
[ "0.6077472", "0.5964652", "0.58163995", "0.58161247", "0.5802232", "0.5707331", "0.5702342", "0.5663303", "0.56258637", "0.5592297", "0.5566225", "0.5565246", "0.55405426", "0.5539117", "0.5531702", "0.5489525", "0.5462974", "0.5456028", "0.5421599", "0.5407301", "0.53862625", "0.5341854", "0.5331975", "0.53278035", "0.5327715", "0.5323519", "0.5322625", "0.53183585", "0.5303969", "0.52991104" ]
0.81376123
0
Check stability of solution. Finds primary eigenvalue of system. Asserts if more than 1.
def check_k_matrix_stability(self): K = self.make_k_matrix() vals, vects = scipy_sparse_eigens(K) principal_val = vals.max() print("ht3_solver:\t'Stiffness' matrix principal eigenvalue was " + str(principal_val)) if principal_val > 1: print("##########################################################") print("ht3_solver:\tWARNING") print("ht3_solver:\tPrincipal eigenvalue is more than one.") print("ht3_solver:\tThe analysis will be unstable.") print("ht3_solver:\tIf this is OK, just go and modify the code " + "or something.") print("##########################################################") raise(AssertionError)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(mat, otp):\n prd = mat*otp\n eigval = prd[0]/otp[0]\n print 'computed eigenvalue :' , eigval\n [eigs, vecs] = np.linalg.eig(mat)\n abseigs = list(abs(eigs))\n ind = abseigs.index(max(abseigs))\n print ' largest eigenvalue :', eigs[ind]", "def test_solvers():\n # With P1 elements we have an error E-15 with Krylov solver\n # tolerances of 1E-12, but with P2 elements the error is E-6.\n # P3 elements drive the tolerance down to E-3.\n # For higher mesh resolution we also need reduced tolerances.\n # The tol dict maps degree to expected tolerance for the coarse\n # meshes in the test.\n tol = {'direct': {1: 1E-11, 2: 1E-11, 3: 1E-11},\n 'Krylov': {1: 1E-14, 2: 1E-05, 3: 1E-03}}\n u_D = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')\n kappa = Expression('x[0] + x[1]')\n f = Expression('-8*x[0] - 10*x[1]')\n for Nx, Ny in [(3,3), (3,5), (5,3)]:\n for degree in 1, 2, 3:\n for linear_solver in 'direct', 'Krylov':\n for solver_func in solver, solver_objects:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n # Important: Krylov solver error must be smaller\n # than tol!\n u = solver_func(\n kappa, f, u_D, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol[linear_solver][degree],\n rel_tol=0.1*tol[linear_solver][degree])\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_D_Function = interpolate(u_D, V) # exact solution\n # Check that dof arrays are equal\n u_D_array = u_D_Function.vector().array() # dof values\n max_error = (u_D_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol[linear_solver][degree], msg", "def run_exact(self):\n self.operator, var_form, opt = self.generate_VQE_args()\n\n exact_eigensolver = ExactEigensolver(self.operator, k=1)\n self.result = exact_eigensolver.run()\n\n solution = self.extract_solution(self.result, True)\n return solution", "def test_eigenvalues_of_too_few_points_results_in_0():\n a = np.array([5])\n pc = create_point_cloud(a, a, a)\n\n compute_features(pc, [[0]], pc, [\"eigenv_1\", \"eigenv_2\", \"eigenv_3\"], InfiniteCylinder(5))\n\n eigen_val_123 = np.array([pc[keys.point]['eigenv_{}'.format(i)]['data'] for i in [1, 2, 3]])\n assert not np.any(np.isnan(eigen_val_123))\n assert not np.any(np.isinf(eigen_val_123))", "def test_exact_numerical_solution():\n a = 0.2\n b = 3\n\n def f(t, u):\n return a # + (u - u_exact(t))**5\n\n def u_exact(t):\n \"\"\"Exact u(t) corresponding to f above.\"\"\"\n return a * t + b\n\n u0 = u_exact(0)\n T = 8\n N = 10\n tol = 1E-15\n #t_points = np.linspace(0, T, N)\n t_span = (0, T)\n for solver_class in registered_solver_classes:\n solver = solver_class(f)\n solver.set_initial_condition(u0)\n t, u = solver.solve(t_span, N)\n u_e = u_exact(t)\n max_error = (u_e - u).max()\n msg = f'{solver.__class__.__name__} failed with max_error={max_error}'\n assert max_error < tol, msg", "def test_failure_and_non_convergence(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n # Choose a bad initial position.\n initial_values = np.zeros(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = 
newton_root_finder(objective_and_gradient, initial_values)\n _, converged, failed = self.evaluate(roots)\n\n # Reference values - we should not have converged and should have failed.\n converged_bench = np.array([False, False, False])\n failed_bench = np.array([True, True, True])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)", "def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def _check_init_solution(self):\r\n B = np.array([self._A[:, j] for j in self._basic_vars]).transpose()\r\n self._B_inv = np.linalg.inv(B)\r\n x_B = self._B_inv @ self._b\r\n for x in x_B:\r\n if x < 0:\r\n raise AssertionError(\"Initial solution is not feasible!\")", "def test_compare_solver():\n # initialize all solver\n solver_1 = EulerSolver()\n solver_2 = ScipyOdeSolver('dopri5')\n solver_3 = ScipySolveIvpSolver(method='Radau')\n solver_4 = ScipyOdeIntSolver()\n solver = [solver_1, solver_2, solver_3, solver_4]\n # define the integration steps\n time_step = [g_initial_time + step for step in g_time_steps]\n u = 0.0 # initial input\n # set the system equation and jacobian\n system_equation = lambda t, state, u: system(t, state, u)\n system_jacobian = lambda t, state, u: jacobian(t, state, u)\n [solve.set_system_equation(system_equation, system_jacobian) for solve in solver]\n # set initial values of the solver\n [solve.set_initial_value(g_initial_value, g_initial_time) for solve in solver]\n # integrate for given steps\n for steps in time_step:\n u += 0.8 # increase the input in each step\n [solve.set_f_params(u) for solve in solver]\n [solve.integrate(steps) for solve in solver]\n # test if all solver integrated for the same time\n assert solver_1.t == solver_2.t == solver_3.t == solver_4.t\n # test if the relative difference between the states is small\n abs_values = [sum(abs(solver_1.y)), sum(abs(solver_2.y)), sum(abs(solver_3.y)), sum(abs(solver_4.y))]\n assert max(abs_values) / min(abs_values) - 1 < 1E-4, \"Time step at the error: \" + str(steps)", "def FindEigenstates(**args):\n\tprop = SetupProblem(**args)\n\n\t#use custom initial residual if provided\n\tinitialResidual = args.get(\"initialResidual\")\n\tif initialResidual != None:\n\t\tprop.psi.GetData()[:] = initialResidual.GetData()\n\n\t#find eigenstates\n\t#solver = pyprop.ArpackSolver(prop)\n\tsolver = pyprop.PiramSolver(prop)\n\tsolver.Solve()\n\treturn solver", "def mbed_solve (A, budgets, S, verbose=True):\n # print(S)\n start_time = time.time()\n x_v, C = initialize(A, S)\n if (verbose):\n print(\"Initialized\")\n print(\"V1: \", np.sum(x_v == 1), \" ,V2: \", np.sum(x_v == -1))\n 
results_info, S_new, Ad, edges_removed = random_choose_candidate_solve (x_v, C, A, S, budgets, start_time, verbose=verbose)\n return results_info, S_new, Ad, edges_removed", "def stability(jacobian):\n \n eigv = np.linalg.eigvals(jacobian)\n \n \n if all(np.real(eigv)==0) and all(np.imag(eigv)!=0):\n nature = \"Center\" \n elif np.real(eigv)[0]*np.real(eigv)[1]<0:\n nature = \"Saddle\"\n else: \n stability = 'Unstable' if all(np.real(eigv)>0) else 'Stable'\n nature = stability + (' focus' if all(np.imag(eigv)!=0) else ' node')\n return nature", "def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None", "def test_get_solution(self):\n pass", "def CheckCouplings(params, verbose=False):\n l1 = params[0]\n l2 = params[1]\n l3 = params[2]\n gx = params[3]\n m = model_2f(l1, l2, l3, y_t_interpol(np.log(v/mz)), gx)\n minima, success = m.findMinimum() #the boolean success is added because we cannot trust the minima if numpy.optimize.minimize has failed\n if not verbose:\n tolvevh = 2.0\n tolmh = 2.0\n condition0 = abs(minima-v) < tolvevh\n if condition0.any() and success:\n ddVtot = nd.Hessian(m.Vtot_0T)\n hess = ddVtot(minima)\n masses = np.linalg.eigvalsh(hess) #computes masses...\n positive_condition = masses > 0\n if(positive_condition.all()): #we will only check them IF they are positive\n masses = np.sqrt(np.abs(masses))\n condition1 = abs(masses-mh) < tolmh\n if condition1.any():\n stability = m.CheckStability() #we check the stability of the model\n f = open(file_name, 'a')\n line0 = str(l1)+' '+str(l2)+' '+str(l3)+' '+str(gx)+' '+str(minima[0])+' '+str(minima[1])+' '+str(masses[0])+' '+str(masses[1]) #we print everything\n line0 = line0 + ' '+str(stability)\n f.write(line0+'\\n')\n f.write('-'*90+'\\n')\n f.close()\n else:\n \"\"\"\n Just checks the minima of the model m, the masses of the particles and whether it is stable or not\n Output: prints the information\n \"\"\"\n print \"Minimum at T = 0.0: \", minima, success\n print \"Masses: \"\n ddVtot = nd.Hessian(m.Vtot_0T)\n hess = ddVtot(minima)\n print np.sqrt(np.linalg.eigvalsh(hess))\n print 'Stable: ', m.CheckStability()==1", "def test_solver():\n # Choice of nonlinear coefficient\n m = 2\n\n def q(u):\n return (1+u)**m\n\n def Dq(u):\n return m*(1+u)**(m-1)\n\n u_exact = Expression(\n 'pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)\n linear_solver = 'direct'\n errors = []\n for method in 'alg_Newton', 'pde_Newton':\n for J_comp in 'manual', 'automatic':\n for degree in 1, 2, 3:\n error_prev = -1\n for divisions in [(10, 10), (20, 20), (40, 40)]:\n u = solver(\n q, Dq, f, divisions, degree,\n method, J_comp,\n linear_solver,\n abs_tol_Krylov=1E-10,\n rel_tol_Krylov=1E-10,\n abs_tol_Newton=1E-10,\n rel_tol_Newton=1E-10)\n\n # Find max error\n u_e = interpolate(u_exact, u.function_space())\n import numpy as np\n error = np.abs(u_e.vector().array() -\n u.vector().array()).max()\n # Expect convergence as h**(degree+1)\n if error_prev > 0:\n frac = abs(error - error_prev/2**(degree+1))\n errors.append(frac)\n error_prev = error\n tol = 4E-5\n for 
error_reduction in errors:\n assert error_reduction < tol, error_reduction", "def converged(M,L,S, verbose = True, tol=10e-6):\n error = frobeniusNorm(M - L - S) / frobeniusNorm(M)\n if verbose:\n print (\"error =\", error)\n return error <= tol", "def test1(debug_solve=False):\n from numpy import sqrt\n for x0 in [1., 2., 100.]:\n print \" \" # blank line\n x,iters = solve(fvals_sqrt, x0, debug=debug_solve)\n print \"solve returns x = %22.15e after %i iterations \" % (x,iters)\n fx,fpx = fvals_sqrt(x)\n print \"the value of f(x) is %22.15e\" % fx\n assert abs(x-2.) < 1e-14, \"*** Unexpected result: x = %22.15e\" % x", "def _check_stability(model, K):\n\n # Initialize the `unstable` flag to `False`\n unstable = False\n\n # Step through each diagonal term in the stiffness matrix\n for i in range(K.shape[0]):\n \n # Determine which node this term belongs to\n node = [node for node in model.Nodes.values() if node.ID == int(i/6)][0]\n\n # Determine which degree of freedom this term belongs to\n dof = i%6\n\n # Check to see if this degree of freedom is supported\n if dof == 0:\n supported = node.support_DX\n elif dof == 1:\n supported = node.support_DY\n elif dof == 2:\n supported = node.support_DZ\n elif dof == 3:\n supported = node.support_RX\n elif dof == 4:\n supported = node.support_RY\n elif dof == 5:\n supported = node.support_RZ\n\n # Check if the degree of freedom on this diagonal is unstable\n if isclose(K[i, i], 0) and not supported:\n\n # Flag the model as unstable\n unstable = True\n\n # Identify which direction this instability effects\n if i%6 == 0: direction = 'for translation in the global X direction.'\n if i%6 == 1: direction = 'for translation in the global Y direction.'\n if i%6 == 2: direction = 'for translation in the global Z direction.'\n if i%6 == 3: direction = 'for rotation about the global X axis.'\n if i%6 == 4: direction = 'for rotation about the global Y axis.'\n if i%6 == 5: direction = 'for rotation about the global Z axis.'\n\n # Print a message to the console\n print('* Nodal instability detected: node ' + node.name + ' is unstable ' + direction)\n\n if unstable:\n raise Exception('Unstable node(s). See console output for details.')\n\n return", "def test_j0_root():\n import time\n t1 = time.time()\n\n # Our version uses tabulated values up to 40, so a useful test of the extrapolation\n # requires this to have more than 40 items.\n vals1 = [ galsim.bessel.j0_root(s) for s in range(1,51) ]\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = scipy.special.jn_zeros(0,50)\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j0_root disagrees with scipy.special.jn_zeros\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of j0_root.'\n\n # These values are what scipy returns. 
Check against these, so not require scipy.\n vals2 = [ 2.40482556, 5.52007811, 8.65372791, 11.79153444,\n 14.93091771, 18.07106397, 21.21163663, 24.35247153,\n 27.49347913, 30.63460647, 33.77582021, 36.91709835,\n 40.05842576, 43.19979171, 46.34118837, 49.4826099 ,\n 52.62405184, 55.76551076, 58.90698393, 62.04846919,\n 65.1899648 , 68.33146933, 71.4729816 , 74.61450064,\n 77.75602563, 80.89755587, 84.03909078, 87.18062984,\n 90.32217264, 93.46371878, 96.60526795, 99.74681986,\n 102.88837425, 106.02993092, 109.17148965, 112.31305028,\n 115.45461265, 118.59617663, 121.73774209, 124.87930891,\n 128.02087701, 131.16244628, 134.30401664, 137.44558802,\n 140.58716035, 143.72873357, 146.87030763, 150.01188246,\n 153.15345802, 156.29503427\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j0_root disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "def test_uniformly_distributed_load_with_analytical_solution(engine, n_attempts):\n file_name = 'uniform_load_analytical_model.json'\n load_file_name = 'uniform_load_analytical_model_loads.json'\n\n root_dir = os.path.dirname(os.path.abspath(__file__))\n json_path = os.path.join(root_dir, '..', 'test_data', file_name)\n load_json_path = os.path.join(root_dir, '..', 'test_data', load_file_name)\n\n sc = StiffnessChecker.from_json(json_file_path=json_path, checker_engine=engine)\n lc = LoadCase.from_json(load_json_path)\n point_load, uniform_element_load = lc.point_loads, lc.uniform_element_loads\n sc.set_loads(LoadCase(point_loads=point_load, uniform_element_loads=uniform_element_load, gravity_load=None))\n sc.set_nodal_displacement_tol(trans_tol=0.024, rot_tol=0.006)\n\n print('element tag:', sc._sc_ins.get_elem_ind_from_elem_tag())\n print(sc.get_element_material(0))\n print(sc.get_element_material(1))\n print(sc.get_element_crosssec(0))\n print(sc.get_element_crosssec(1))\n\n def compare_analytical_sol(pass_criteria, nD, fR, eR, nodal_loads, check_decimal=1):\n # vertical force equilibrium\n assert_almost_equal(fR[0][2] + fR[2][2], 5 + 3 * 5)\n\n assert_equal(nodal_loads[1][2], -12.5)\n assert_equal(nodal_loads[1][3], 0.0)\n assert_equal(nodal_loads[1][4], -6.250)\n\n assert pass_criteria\n assert_equal(nD[0], [0] * 6)\n assert_almost_equal(nD[1], [0, 0, -0.02237, 4.195*1e-3, 5.931*1e-3, 0], decimal=check_decimal)\n assert_equal(nD[2], [0] * 6)\n assert_almost_equal(fR[0], [0, 0, 14.74, -6.45*1e-3, -36.21, 0], decimal=check_decimal)\n assert_almost_equal(fR[2], [0, 0, 5.25, -41.94, -17.11*1e-3, 0], decimal=check_decimal)\n\n cprint('compare analytical res: w/o reinit', 'yellow')\n for _ in range(n_attempts):\n sc.solve()\n pass_criteria, nD, fR, eR = sc.get_solved_results()\n nodal_loads = sc.get_nodal_loads() \n compare_analytical_sol(pass_criteria, nD, fR, eR, nodal_loads)\n\n cprint('compare analytical res: w reinit', 'yellow')\n for _ in range(n_attempts):\n re_sc = StiffnessChecker.from_json(json_file_path=json_path, checker_engine=engine)\n re_sc.set_loads(LoadCase(point_loads=point_load, uniform_element_loads=uniform_element_load, gravity_load=None))\n re_sc.set_nodal_displacement_tol(trans_tol=0.024, rot_tol=0.006)\n\n re_sc.solve()\n pass_criteria, nD, fR, eR = re_sc.get_solved_results()\n nodal_loads = re_sc.get_nodal_loads() \n compare_analytical_sol(pass_criteria, nD, fR, eR, nodal_loads)", "def FindBoundstatesExample():\n\t#Load config \n\tconf = pyprop.Load(\"eigenvalues.ini\")\n\n\t#Compute eigenvalues\n\tsolver, siSOlver, eigenVals = 
FindEigenvaluesInverseIterationsPiram(conf)\n\n\t#Print eigenvalues\n\tpyprop.PrintOut(\"\\nFound eigenvalues:\")\n\tpyprop.PrintOut(\" \".join([\"%f\" % E for E in eigenVals.real]))", "def test_analytic_simplifed(self, tolerance=0.001):\n\n # solve for the different cases\n nv1, nz1 = self.simplified_chelyabinsk(solver='ode', rtol=1e-5)\n av = self.analytic_chelyabinsk(nz1, [19.2e3, 95e3])\n\n diff = abs(nv1 - av)\n assert np.all(diff < tolerance * av), \\\n 'Odeint does not agree with the analytic case'", "def has_solution(self) -> bool:\n pass", "def test_check_validity(game):\n\n game.solve()\n assert game.check_validity()", "def test_secant_system(testFunctions, tol, printFlag): \n pass", "def test_solve_ex_2_11(self):\n\n def f_a(x):\n return x - x ** 3 - 4 * x ** 2 + 10\n\n def f_b(x):\n inner = 10 / x - 4 * x\n logger.info(\"Performing sqrt({})\".format(inner))\n return math.sqrt(inner)\n\n logger.info('-' * 40)\n # f_a(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as it diverges and oscillates.\n iterate.solve(f_a, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)\n\n with self.assertRaises(ValueError):\n # f_b(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as the 3rd iteration attempts to root a -ve number.\n iterate.solve(f_b, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)", "def test_solve_quadratic(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = Identity()\n iden1.x.fixed = False\n iden2.x.fixed = False\n iden3.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n self.assertAlmostEqual(prob.objective, 0)\n self.assertAlmostEqual(iden1.x.val, 1)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 3)", "def test_schwefel(self):\n fun = get_problem('schwefel', self.dimension)\n self.assertAlmostEqual(fun(self.array3), 0.0, places=3)", "def one_step(self):\r\n assert (self.uv_vol is not None)\r\n assert (self.guv_vol is not None)\r\n assert (self.uv_bound is not None)\r\n assert (self.vf_vect_bound is not None)\r\n assert (self.vF_vect_vol is not None)\r\n # Shape checks\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vF_vect_vol.shape == self.vf_vect_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.uv_vol.shape[1])\r\n assert (self.uv_vol.shape == self.guv_vol.shape)\r\n assert (self.uv_vol.shape == self.uv_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.vF_vect_vol.shape[0])\r\n \r\n if self.step == 0:\r\n self.check_k_matrix_stability()\r\n # print(\"Epsilon is :\"+str(self.Epsilon))\r\n # print(\"Beta is :\"+str(self.Beta))\r\n\r\n # Form \"Stiffness\" matrix:\r\n K = self.make_k_matrix()\r\n # Form \"Force\" vector: \r\n f = self.vF_vect_vol + (self.Epsilon / self.Beta) * self.vf_vect_bound\r\n\r\n # print(\"FORCE VECTOR:\")\r\n # print(f)\r\n # print(\"STIFFNESS MATRIX\")\r\n # print(K)\r\n # print(\"UV_VOL\")\r\n # print(self.uv_vol)\r\n # print(\"EPSILON * GUV_VOL\")\r\n # print(self.Epsilon * self.guv_vol)\r\n # print(\"UV_BOUND * COEFF\")\r\n # print((self.Epsilon / self.Beta) * self.uv_bound)\r\n sol = scipy_sparse_linsolve(K, f)\r\n # print(\"SOLUTION\")\r\n # print(sol)\r\n return sol" ]
[ "0.64764154", "0.64108884", "0.62699133", "0.62134963", "0.6154984", "0.61546236", "0.61504465", "0.61401653", "0.6127186", "0.6122965", "0.6071393", "0.6069818", "0.601245", "0.5990802", "0.59771967", "0.5934318", "0.59312296", "0.5915107", "0.59050804", "0.58882725", "0.5884579", "0.5865576", "0.58459866", "0.5842296", "0.5804693", "0.5783414", "0.57816154", "0.5773971", "0.57715523", "0.5770476" ]
0.68899626
0
Calculate L1, L2 and Linf norms and print to file. File is given by self.norm_path If an expected solution is given, expected L1, L2 and abs erros will also be computed. Expected solution is f(x, t) where x is global coordinate and t is time.
def norm_reporting(self): if self.norm_saving_rule is not None: norm_rule = self.norm_saving_rule(self.step, self.d_T) else: norm_rule = True if self.norm_path is not None and norm_rule: f = open(self.norm_path, 'a', newline="") csvf = csv.writer(f) if self.step == 0: out_row = ["Step", "Time (s)", "Matrix condition", "L1 u", "L2 u", "Linf u"] if self.expected_solution is not None: out_row.append("L1 Expected") out_row.append("L2 Expected") out_row.append("L1 Error") out_row.append("L2 Error") out_row.append("L1 Abs Error") out_row.append("L2 Abs Error") csvf.writerow(out_row) condition_number = np.linalg.cond((self.uv_vol + self.Epsilon * self.guv_vol + \ (self.Epsilon / self.Beta) * self.uv_bound).todense()) out_row = [self.step, self.current_T, condition_number] # Calculate the l2 norm or l2 error norm: def current_u(elem, eta): T = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0] return T current_u2 = lambda elem, eta: current_u(elem, eta) ** 2 cu_i = 0 cu2_i = 0 cuinf = 0 if self.expected_solution is not None: def expct(elem, eta): glob_x = elem.local_to_global(eta) true_sol = self.expected_solution(glob_x, self.current_T) return true_sol # A bunch of expressons that we can integrate over. expct2 = lambda elem, eta: expct(elem, eta) ** 2 l1_err = lambda elem, eta: current_u(elem, eta) \ - expct(elem, eta) l2_err = lambda elem, eta: l1_err(elem, eta) ** 2 # Initialise variables for reduction to zero. expct_i = 0 expct2_i = 0 l1_err_i = 0 l2_err_i = 0 l1_abs_i = 0 l2_abs_i = 0 for elem in self.mesh.elems.values(): cu_i += et.integrate_elem(elem, current_u) cu2_i += et.integrate_elem(elem, current_u2) for loc in elem.node_locals(): tmp_u = current_u(elem, loc) if tmp_u > cuinf: cuinf = tmp_u cu2_i = np.sqrt(cu2_i) out_row.append(cu_i) out_row.append(cu2_i) out_row.append(cuinf) if self.expected_solution is not None: for elem in self.mesh.elems.values(): expct_i += et.integrate_elem(elem, expct, gauss_mult=2) expct2_i += et.integrate_elem(elem, expct2, gauss_mult=2) l1_err_i += et.integrate_elem(elem, l1_err, gauss_mult=2) l2_err_i += et.integrate_elem(elem, l2_err, gauss_mult=2) expct2_i = np.sqrt(expct2_i) l2_err_i = np.sqrt(l2_err_i) l1_abs_i = abs(l1_err_i) / abs(expct_i) l2_abs_i = abs(l2_err_i) / abs(expct2_i) out_row.append(expct_i) out_row.append(expct2_i) out_row.append(l1_err_i) out_row.append(l2_err_i) out_row.append(l1_abs_i) out_row.append(l2_abs_i) csvf.writerow(out_row) print("Norm reporting: Wrote norms to " + self.norm_path + ".") f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_norm(self):\n # loop through all the filename\n for f5 in self.train_database:\n\n # get the precalculated data\n fdata = os.path.splitext(f5)[0] + '_norm.pckl'\n\n # if the file doesn't exist we create it\n if not os.path.isfile(fdata):\n logger.info(f\" Computing norm for {f5}\")\n norm = NormalizeData(f5, shape=self._grid_shape)\n norm.get()\n\n # read the data\n data = pickle.load(open(fdata, 'rb'))\n\n # handle the features\n for feat_type, feat_names in self.select_feature.items():\n for name in feat_names:\n mean = data['features'][feat_type][name].mean\n var = data['features'][feat_type][name].var\n if var == 0:\n logger.info(\n ' : STD is null for %s in %s' % (name, f5))\n self.param_norm['features'][feat_type][name].add(\n mean, var)\n\n # handle the target\n minv = data['targets'][self.select_target].min\n maxv = data['targets'][self.select_target].max\n self.param_norm['targets'][self.select_target].update(\n minv)\n self.param_norm['targets'][self.select_target].update(\n maxv)\n\n # process the std\n nfile = len(self.train_database)\n for feat_types, feat_dict in self.param_norm['features'].items():\n for feat in feat_dict:\n self.param_norm['features'][feat_types][feat].process(\n nfile)\n if self.param_norm['features'][feat_types][feat].std == 0:\n logger.info(\n ' Final STD Null for %s/%s. Changed it to 1' %\n (feat_types, feat))\n self.param_norm['features'][feat_types][feat].std = 1", "def flusi_error_vs_flusi(fname_flusi1, fname_flusi2, norm=2, dim=2):\n import numpy as np\n import insect_tools\n\n # read in flusi's reference solution\n time_ref, box_ref, origin_ref, data_ref = insect_tools.read_flusi_HDF5( fname_flusi1 )\n\n time, box, origin, data_dense = insect_tools.read_flusi_HDF5( fname_flusi2 )\n\n if len(data_ref) is not len(data_dense):\n raise ValueError(\"ERROR! 
Both fields are not a the same resolutionn\")\n\n err = np.ndarray.flatten(data_dense-data_ref)\n exc = np.ndarray.flatten(data_ref)\n\n err = np.linalg.norm(err, ord=norm) / np.linalg.norm(exc, ord=norm)\n\n print( \"error was e=%e\" % (err) )\n\n return err", "def compute_norms(err, vector_norms=[\"l2\", \"linf\"],\n function_norms=[\"L2\", \"H1\"], show=True,\n tablefmt=\"simple\", save=False):\n info_split(\"Vector norms:\", \", \".join(vector_norms))\n info_split(\"Function norms:\", \", \".join(function_norms))\n\n headers = [\"Fields\"] + vector_norms + function_norms\n\n table = []\n for field in err.keys():\n row = [field]\n for norm_type in vector_norms:\n row.append(df.norm(err[field].vector(), norm_type=norm_type))\n for norm_type in function_norms:\n row.append(df.norm(err[field], norm_type=norm_type))\n table.append(row)\n\n from tabulate import tabulate\n tab_string = tabulate(table, headers, tablefmt=tablefmt, floatfmt=\"e\")\n if show:\n info(\"\\n\" + tab_string + \"\\n\")\n\n if save and rank == 0:\n info_split(\"Saving to file:\", save)\n with open(save, \"w\") as outfile:\n outfile.write(tab_string)", "def generate_test_norms(testname):\n logname = testname + \".log\"\n norm_name = testname + \".norm\"\n cmdline = \"\"\"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \"\"\"%(\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if (args.save_norm_file != None):\n copyfile(norm_name, args.save_norm_file)\n\n return load_norm_file(norm_name)", "def full_solver(output_folder, prior_filename, data_filename, resume=True, test_plot=False):\n\n\n def log_prior(cube, ndim, nparams):\n cube[0] = cube[0]*(L_lim[1] - L_lim[0]) + L_lim[0]\n cube[1] = cube[1]*(d_lim[1] - d_lim[0]) + d_lim[0]\n cube[2] = cube[2]*(F_lim[1] - F_lim[0]) + F_lim[0]\n cube[3] = cube[3]*(A_lim[1] - A_lim[0]) + A_lim[0]\n cube[4] = cube[4]*(Arel_lim[1] - Arel_lim[0]) + Arel_lim[0]\n cube[5] = cube[5]*(Ti_lim[1] - Ti_lim[0]) + Ti_lim[0]\n #cube[6] = cube[6]*(off_lim[1] - off_lim[0]) + off_lim[0]\n cube[6] = cube[6]*(Brel_lim[1] - Brel_lim[0]) + Brel_lim[0]\n #cube[7] = cube[7]*(Brel_lim[1] - Brel_lim[0]) + Brel_lim[0]\n\n\n def log_likelihood(cube, ndim, nparams):\n #vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]], [Ti_Th, cube[5]], [0.0, 0.0], nlambda=2000)\n #vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3], cube[3]*cube[7]], [Ti_Th, cube[5], Ti_Th],\n # [0.0, 0.0, 0.0], nlambda=2000)\n vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3], cube[3]*cube[6]], [Ti_Th, cube[5], Ti_Th],\n [0.0, 0.0, 0.0], nlambda=2000)\n #vals = offset_forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]], [Ti_Th, cube[5]],\n # [0.0, 0.0], sm_ang=False, nlambda=2000, coeff=0.5)\n #vals += cube[6]\n chisq = np.sum((vals - sig)**2 / error**2)\n return -chisq / 2.0\n\n data = io.h5_2_dict(data_filename)\n\n ix = data['fit_ix']['0']#[0:-1:2]\n r = data['r'][ix]\n sig = data['sig'][ix]\n error = data['sig_sd'][ix]\n\n Ti_Th = 0.025*1000.0 / 300.0\n\n px_size = 0.004# * 3 \n L_lim = [147.0, 153.0]\n L_lim = [x / px_size for x in L_lim]\n\n d_lim = [0.7, 1.0]\n\n F_lim = [17.0, 26.0]\n\n Amax = np.max(sig)\n A_lim = [0.75*Amax, 2.0*Amax]\n\n Arel_lim = [0.005, 0.6]\n Brel_lim = [0.001, 0.2]\n\n Ti_lim = [0.025, 1.0]\n #min_val = np.abs(np.min(data['sig'][ix]))\n min_val = 50.0\n off_lim = [0.0, min_val]\n #n_params = 6\n n_params = 7\n #n_params = 8\n folder = 
abspath(output_folder)\n\n if test_plot:\n pass\n # npts = 100\n # test_sig = np.zeros((npts, len(r)))\n # for i in xrange(npts):\n # cube = [random.random() for _ in xrange(n_params)] \n # log_prior(cube, None, None)\n # test_sig[i, :] = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]],\n # [Ti_Th, cube[5]], [0.0, 0.0], nlambda=2000)\n\n # fig, ax = plt.subplots()\n # for i in xrange(npts):\n # ax.plot(r, test_sig[i, :], 'C0')\n # ax.errorbar(r, sig, yerr=error, fmt='', ecolor='C2', color='C1')\n # plt.show()\n\n else:\n pymultinest.run(log_likelihood, log_prior, n_params, importance_nested_sampling=False,\n resume=resume, verbose=True, sampling_efficiency='model', n_live_points=1000,\n outputfiles_basename=join(folder, 'full_'))", "def test_l1norm () :\n n = 10\n rfs = RewardFnSpace(list(range(n)))\n for i in range(10): \n b = rfs.bs[i]\n rfs.lp += b == 0\n rfs.lp.solve()\n rfs._setCoeffs()\n coeffs = np.array(rfs.coeffs)\n assert(np.linalg.norm(coeffs - np.ones(n)) < 1e-4)", "def write_lammps_potential_file(self):\n raise NotImplementedError", "def final_fmllr_est_func(\n log_path: str,\n dictionaries: List[str],\n feature_strings: Dict[str, str],\n model_path: str,\n fmllr_options: ConfigDict,\n trans_paths: Dict[str, str],\n spk2utt_paths: Dict[str, str],\n tmp_lat_paths: Dict[str, str],\n) -> None:\n with open(log_path, \"w\", encoding=\"utf8\") as log_file:\n for dict_name in dictionaries:\n feature_string = feature_strings[dict_name]\n trans_path = trans_paths[dict_name]\n temp_trans_path = trans_path + \".temp\"\n spk2utt_path = spk2utt_paths[dict_name]\n tmp_lat_path = tmp_lat_paths[dict_name]\n determinize_proc = subprocess.Popen(\n [\n thirdparty_binary(\"lattice-determinize-pruned\"),\n f\"--acoustic-scale={fmllr_options['acoustic_scale']}\",\n \"--beam=4.0\",\n f\"ark:{tmp_lat_path}\",\n \"ark:-\",\n ],\n stderr=log_file,\n stdout=subprocess.PIPE,\n env=os.environ,\n )\n\n latt_post_proc = subprocess.Popen(\n [\n thirdparty_binary(\"lattice-to-post\"),\n f\"--acoustic-scale={fmllr_options['acoustic_scale']}\",\n \"ark:-\",\n \"ark:-\",\n ],\n stdin=determinize_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n weight_silence_proc = subprocess.Popen(\n [\n thirdparty_binary(\"weight-silence-post\"),\n f\"{fmllr_options['silence_weight']}\",\n fmllr_options[\"sil_phones\"],\n model_path,\n \"ark:-\",\n \"ark:-\",\n ],\n stdin=latt_post_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n fmllr_proc = subprocess.Popen(\n [\n thirdparty_binary(\"gmm-est-fmllr\"),\n f\"--fmllr-update-type={fmllr_options['fmllr_update_type']}\",\n f\"--spk2utt=ark:{spk2utt_path}\",\n model_path,\n feature_string,\n \"ark,s,cs:-\",\n \"ark:-\",\n ],\n stdin=weight_silence_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n\n compose_proc = subprocess.Popen(\n [\n thirdparty_binary(\"compose-transforms\"),\n \"--b-is-affine=true\",\n \"ark:-\",\n f\"ark:{trans_path}\",\n f\"ark:{temp_trans_path}\",\n ],\n stderr=log_file,\n stdin=fmllr_proc.stdout,\n env=os.environ,\n )\n compose_proc.communicate()\n os.remove(trans_path)\n os.rename(temp_trans_path, trans_path)", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def writeNormScore(self,fin,fout):\n\n for line in fin:\n [sv, en, score] = re.split(r'\\t|,',line)\n self.count[sv][en] += float(score)\n 
self.en_sum[en] += float(score)\n self.sv_sum[sv] += float(score)\n\n for sv, ens in self.count.iteritems():\n for en in ens.keys():\n fout.write(sv + \",\" + en + \"\\t\" + str(self.count[sv][en] / self.sv_sum[sv] * self.en_sum[en]) + \"\\n\")", "def normalizeL2(f):\r\n \r\n f=np.array(f)\r\n fsum=np.sum(np.abs(f))\r\n if fsum==0:\r\n fnorm=f\r\n else:\r\n fnorm=f/np.sqrt(np.sum(np.abs(f)**2))\r\n \r\n return fnorm", "def compute_norm(self):\n\n # logger.info(\" Normalization factor:\")\n\n # loop over all the complexes in the database\n first = True\n for comp in tqdm(self.index_complexes):\n fname, molname = comp[0], comp[1]\n\n # get the feature/target\n if self.mapfly:\n feature, target = self.map_one_molecule(\n fname, mol=molname)\n else:\n feature, target = self.load_one_molecule(\n fname, mol=molname)\n\n # create the norm isntances at the first passage\n if first:\n self.param_norm = {'features': [], 'targets': None}\n for ifeat in range(feature.shape[0]):\n self.param_norm['features'].append(NormParam())\n self.param_norm['targets'] = MinMaxParam()\n first = False\n\n # update the norm instances\n for ifeat, mat in enumerate(feature):\n self.param_norm['features'][ifeat].add(\n np.mean(mat), np.var(mat))\n self.param_norm['targets'].update(target)\n\n # process the std of the features and make array for fast access\n nfeat, ncomplex = len(\n self.param_norm['features']), len(self.index_complexes)\n self.feature_mean, self.feature_std = [], []\n for ifeat in range(nfeat):\n\n # process the std and check\n self.param_norm['features'][ifeat].process(ncomplex)\n if self.param_norm['features'][ifeat].std == 0:\n logger.info(' Final STD Null. Changed it to 1')\n self.param_norm['features'][ifeat].std = 1\n\n # store as array for fast access\n self.feature_mean.append(\n self.param_norm['features'][ifeat].mean)\n self.feature_std.append(\n self.param_norm['features'][ifeat].std)\n\n self.target_min = self.param_norm['targets'].min[0]\n self.target_max = self.param_norm['targets'].max[0]\n\n logger.info(f'{self.target_min}, {self.target_max}')", "def automatic_results(self):\n self.find_coefficients()\n self.calculate_numerical_solution()\n self.plot_solution()\n self.L2_norm()", "def write_renormalization(self):\n create_directory(self.ren_dat)\n\n with open(self.ren_dat, \"w\") as f:\n\n if self.zero_point_renormalization is not None:\n\n f.write(\"Total zero point renormalization (eV) for \"\n \"{} Q points\\n\".format(self.nqpt))\n\n for ikpt, kpt in enumerate(self.kpts):\n f.write('Kpt: {0[0]} {0[1]} {0[2]}\\n'.format(kpt))\n\n for line in formatted_array_lines(\n self.zero_point_renormalization[ikpt,:].real*Ha2eV):\n\n f.write(line)\n\n if self.temperature_dependent_renormalization is not None:\n f.write(\"Temperature dependence at Gamma (eV)\\n\")\n\n for iband in range(self.nband):\n f.write('Band: {}\\n'.format(iband))\n\n for tt, T in enumerate(self.temperatures):\n\n ren = (\n self.temperature_dependent_renormalization[0,iband,tt]\n .real * Ha2eV)\n f.write(\"{:>8.1f} {:>12.8f}\\n\".format(T, ren))\n\n return", "def _log_convergence(self) -> None:\n assert (\n self._coords is not None\n ), \"Must have coordinates to log convergence\"\n log_string = f\"{self.iteration}\\t\"\n\n if len(self._history) > 1:\n assert self._coords.e and self._history.penultimate.e, \"Need ∆E\"\n de: PotentialEnergy = self._coords.e - self._history.penultimate.e\n log_string += f'{de.to(\"kcal mol-1\"):.3f}\\t{self._g_norm:.5f}'\n\n logger.info(log_string)\n return None", "def lfq_parser():\n # from 
tools import file_importer, file_outporter\n from random import randint\n import os.path\n # from math import log10\n \n print(\"this is lfq parser\")\n \n \"\"\"\n relPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n outPath = \"bob/processed/OST-24-05-2017_combined_no0_1000.csv\"\n inpF = file_importer(relPath)\n outF = file_outporter(outPath) \n \"\"\"\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"r\")\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1_no0.csv\"),\"w\")\n \n headerFlag = True\n rowCount = 0\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n lfqColCount = 0\n inpList = inpLine.split(\"\\t\")\n for headerI in inpList:\n if \"LFQ intensity\" in headerI:\n break\n else: lfqColCount += 1\n outF.write(\"ID,Protein ID, Gene name,\") # write header into output file\n if len(inpList[lfqColCount].split(\"|\")) > 1:\n for headI in inpList[lfqColCount].split(\"|\"):\n outF.write(headI + \",\")\n for headI in inpList[lfqColCount + 1].split(\"|\")[:-1]:\n outF.write(headI + \",\")\n outF.write(inpList[lfqColCount + 1].split(\"|\")[-1] + \"\\n\")\n else:\n outF.write(inpList[lfqColCount] + \",\")\n outF.write(inpList[lfqColCount + 1] + \",\")\n outF.write(inpList[lfqColCount + 2] + \"\\n\")\n rowCount += 1\n continue\n \n outF.write(str(rowCount) + \",\")\n \n inpLine = inpLine.strip()\n inpItems = inpLine.split(\"\\t\")\n inpName = max(inpItems[0].split(\"|\"), key=len) # get unique protein ID\n inpGeneName = max(inpItems[6].split(\"|\"), key=len) # and gene name\n outF.write(inpName + \",\" + inpGeneName + \",\")\n \n inpLFQ = inpItems[lfqColCount].split(\"|\") + inpItems[lfqColCount + 1].split(\"|\") + inpItems[lfqColCount + 2].split(\"|\")# get lfq intensity scores\n # print inpLFQ\n for lfqI in inpLFQ[:-1]: # write lfq values\n if lfqI == \"_\" or lfqI == \"0\":\n outF.write(str(randint(1,1000)) + \",\") ################## try with log10 values this time\n else:\n try:\n outF.write(str(int(lfqI)) + \",\")\n except ValueError:\n print(inpItems)\n raise\n \n if inpLFQ[-1] == \"_\" or inpLFQ[-1] == \"0\": outF.write(str(randint(1,1000)) + \"\\n\")\n else: outF.write(str(inpLFQ[-1]) + \"\\n\")\n \n \n \n rowCount += 1\n\n \n \n \"\"\"\n \n for inpI in inpItems[0:4]: # copy over gene name and such to new file\n outF.write(inpI)\n outF.write(\",\")\n \n commaCount = 0\n for inpJ in inpItems[4:]: # copy over lfq values while replacing 0-s with random values\n commaCount += 1\n if int(inpJ) == 0:\n randNum = randint(1,100)\n outF.write(str(randNum))\n else:\n outF.write(inpJ)\n if commaCount < 6:\n outF.write(\",\")\n outF.write(\"\\n\")\n inpF.close()\n outF.close()\n \n \"\"\"\n \n print(\"lfq parser finished successfully\")", "def write_file(self):\n\n running_time = str(self.running_time_end - self.running_time_start)\n rounded_running_time = '{:.10}'.format(running_time)\n output = 'path_to_goal: ' + str(self.path_to_goal) + '\\n'\n output += 'cost_of_path: ' + str(self.cost_of_path) + '\\n'\n output += 'nodes_expanded: ' + str(self.nodes_expanded) + '\\n'\n output += 'fringe_size: ' + str(self.fringe_size) + '\\n'\n output += 'max_fringe_size: ' + str(self.max_fringe_size) + '\\n'\n output += 'search_depth: ' + str(self.search_depth) + '\\n'\n output += 'max_search_depth: ' + str(self.max_search_depth) + '\\n'\n output += 'running_time: ' + rounded_running_time + '\\n'\n\n system_name = system()\n if system_name == 
'Windows':\n output += 'max_ram_usage: (Not available on Windows OS)'\n elif system_name == 'Linux':\n output += 'max_ram_usage: ' + \\\n str(getrusage(RUSAGE_SELF).ru_maxrss / 1024) + '\\n'\n\n file = open('output.txt', 'w+')\n file.write(output)\n print(output)", "def check_all_files_and_dirs(self):\n err = 0\n err_m = ''\n warning = 0\n warning_m = ''\n # Check the pdb file for refinement\n if self.refine_pdb_in == None:\n err = 1\n err_m += '\\nPdb file should be supplied'\n else:\n if self.check_single_file(self.refine_pdb_in):\n self.refine_pdb_in = os.path.abspath(self.refine_pdb_in)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(self.refine_pdb_in)\n\n # Check the pdb file for distance analysis\n if self.check_single_file(self.X8_pdb_in):\n self.X8_pdb_in = os.path.abspath(self.X8_pdb_in)\n else:\n self.X8_pdb_in != None\n warning = 1\n warning_m += '\\nXtrapol8 pdb_in not found. No additional analysis will be applied'\n\n # Check additional files and append them to a string\n additional = \"\"\n for fle in self.additional:\n if len(fle)>0:\n if self.check_single_file(fle):\n new_add = os.path.abspath(fle)\n additional = additional + \"%s \" % (new_add)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(fle)\n self.additional = additional\n\n #Check the output directory\n if os.path.isdir(self.outdir):\n self.outdir = os.path.abspath(self.outdir)\n else:\n err = 1\n err_m += \"\\nXtrapol8 output directory cannot be found.\" \\\n \"Please run this from the same directory from which you ran Xtrapol8.\"\n\n #Check the phil file for reciprocal space refinement\n if self.check_single_file(self.reciprocal_space_phil):\n self.reciprocal_space_phil = os.path.abspath(self.reciprocal_space_phil)\n else:\n self.reciprocal_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for reciprocal space refinement not found. Refinement will use default parameters.'\n\n\n #Check the phil file for real space refinement\n if self.check_single_file(self.real_space_phil):\n self.real_space_phil = os.path.abspath(self.real_space_phil)\n else:\n self.real_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for real space refinement not found. Refinement will use default parameters.'\n\n #Check the residue list for distance analysis\n if self.check_single_file(self.residue_list):\n self.residue_list = os.path.abspath(self.residue_list)\n else:\n self.residue_list = None\n warning = 1\n warning_m += '\\nResidue list not found. Distance analysis (if required) will be performed without residue list.'\n\n return err, err_m, warning, warning_m", "def normalize_l2norm(data,tol=0):\n data_sqrt=np.sqrt(np.square(data).sum(axis=1))\n data_sqrt.shape=(data_sqrt.shape[0],1)\n #tol=0#1e-8\n data=data/(data_sqrt+tol)\n return data", "def write_lammps_files(self): \n lammps_file = self.file_name\n with open( lammps_file, 'w' ) as f:\n f.write(self.input_string())", "def experiment_linear_linf(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. 
That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='linf',\n dual_norm_type='l1',\n baseline_norm_types=['l2'],\n attack_step_dir='sign_grad')", "def sqf_norm(self, f):\n domain = self.domain\n\n if not domain.is_AlgebraicField:\n raise DomainError(f'ground domain must be algebraic, got {domain}')\n\n new_ring = self.to_ground().inject(*domain.symbols, front=True)\n g = domain.mod.set_ring(new_ring)\n s = 0\n\n while True:\n h = f.inject(front=True)\n r = g.resultant(h)\n\n if r.is_squarefree:\n return s, f, r\n f = f.compose({x: x - domain.unit for x in self.gens})\n s += 1", "def runLM(self):\n\n self.resetOutputData()\n if os.path.isfile(self.LMOutputFName):\n os.remove(self.LMOutputFName)\n if os.path.isfile(self.LMLogFName):\n os.remove(self.LMLogFName)\n\n LMLogFle = open(self.LMLogFName, 'w')\n subprocess.call([self.packagePrefix + self.LMPath + self.LMExec, self.LMInputFName], \\\n stdout=LMLogFle, stderr=LMLogFle)\n\n try:\n self.LMOutputFile = open(self.LMOutputFName, 'r')\n self.LMOutputFile.close()\n except:\n print('No Output file created by Lmeasure. Check \\'tmp/LMLog.txt\\'')\n exit(1)\n\n LMLogFle.close()", "def wabbit_error_vs_flusi(fname_wabbit, fname_flusi, norm=2, dim=2):\n import numpy as np\n import insect_tools\n import matplotlib.pyplot as plt\n\n if dim==3:\n print('I think due to fft2usapmle, this routine works only in 2D')\n raise ValueError\n\n # read in flusi's reference solution\n time_ref, box_ref, origin_ref, data_ref = insect_tools.read_flusi_HDF5( fname_flusi )\n print(data_ref.shape)\n ny = data_ref.shape[1]\n\n # wabbit field to be analyzed: note has to be full already\n time, x0, dx, box, data, treecode = read_wabbit_hdf5( fname_wabbit )\n Bs = data.shape[1]\n Jflusi = (np.log2(ny/(Bs-1)))\n print(\"Flusi resolution: %i %i %i so desired level is Jmax=%f\" % (data_ref.shape[0], data_ref.shape[2], data_ref.shape[2], Jflusi) )\n\n if dim==2:\n # squeeze 3D flusi field (where dim0 == 1) to true 2d data\n data_ref = data_ref[0,:,:].copy().transpose()\n box_ref = box_ref[1:2].copy()\n\n # convert wabbit to dense field\n data_dense, box_dense = dense_matrix( x0, dx, data, treecode, dim )\n \n if data_dense.shape[0] < data_ref.shape[0]:\n # both datasets have different size\n s = int( data_ref.shape[0] / data_dense.shape[0] )\n data_ref = data_ref[::s, ::s].copy()\n raise ValueError(\"ERROR! Both fields are not a the same resolutionn\")\n\n if data_dense.shape[0] > data_ref.shape[0]:\n warn(\"WARNING! The reference solution is not fine enough for the comparison! 
UPSAMPLING!\")\n import fourier_tools\n print(data_ref.shape)\n data_ref = fourier_tools.fft2_resample( data_ref, data_dense.shape[1] )\n\n err = np.ndarray.flatten(data_ref-data_dense)\n exc = np.ndarray.flatten(data_ref)\n\n err = np.linalg.norm(err, ord=norm) / np.linalg.norm(exc, ord=norm)\n print( \"error was e=%e\" % (err) )\n\n return err", "def lfq_parser_2x():\n from tools import file_importer, file_outporter\n # from random import random\n from math import log10\n \n print(\"this is lfq parser_2x\")\n \n relPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n outPath = \"bob/processed/OST-24-05-2017_combined_no0_2.csv\"\n inpF = file_importer(relPath)\n outF = file_outporter(outPath) \n headerFlag = True\n rowCount = 0\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n lfqColCount = 0\n inpList = inpLine.split(\"\\t\")\n for headerI in inpList:\n if \"LFQ intensity\" in headerI:\n break\n else: lfqColCount += 1\n outF.write(\"ID,Protein ID, Gene name,\") # write header into output file\n for headI in inpList[lfqColCount].split(\"|\"):\n outF.write(headI + \",\")\n for headI in inpList[lfqColCount + 1].split(\"|\")[:-1]:\n outF.write(headI + \",\")\n outF.write(inpList[lfqColCount + 1].split(\"|\")[-1] + \"\\n\")\n rowCount += 1\n continue\n \n inpLine = inpLine.strip()\n inpItems = inpLine.split(\"\\t\") \n inpLFQ = inpItems[lfqColCount].split(\"|\") + inpItems[lfqColCount + 1].split(\"|\") # get lfq intensity scores\n \n procLFQ = []\n for lfqi in inpLFQ:\n if lfqi == \"_\": procLFQ.append(0)\n else: procLFQ.append(int(lfqi))\n if sum(procLFQ[:3])<=sum(procLFQ[3:]): continue # get rid of contaminants in control sample\n \n sumOne = inpLFQ[1] + inpLFQ[2]\n sumTwo = inpLFQ[1] + inpLFQ[3]\n sumThree = inpLFQ[2] + inpLFQ[3]\n \n if sumOne == \"__\" or sumTwo == \"__\" or sumThree == \"__\": continue # test if protein is being detected in at least 2 OST samples\n \n outF.write(str(rowCount) + \",\")\n \n\n inpName = max(inpItems[0].split(\"|\"), key=len) # get unique protein ID\n inpGeneName = max(inpItems[6].split(\"|\"), key=len) # and gene name\n outF.write(inpName + \",\" + inpGeneName + \",\")\n \n inpLFQ = inpItems[lfqColCount].split(\"|\") + inpItems[lfqColCount + 1].split(\"|\") \n # print inpLFQ\n for lfqI in inpLFQ[:-1]: # write lfq values\n if lfqI == \"_\" or lfqI == \"0\":\n outF.write(str(0) + \",\") ################## try with log2 values this time\n else:\n try:\n outF.write(str(round(log10(int(lfqI)))) + \",\")\n except ValueError:\n print(inpItems)\n raise\n \n if inpLFQ[-1] == \"_\" or inpLFQ[-1] == \"0\": outF.write(str(0) + \"\\n\")\n else: outF.write(str(round(log10(int(inpLFQ[-1])))) + \"\\n\")\n \n \n rowCount += 1\n\n print(\"lfq parser 2x finished successfully\")", "def check_normalize(self):\n # generate array for easier handling\n values = np.swapaxes(self.psf.psf_value, 0, 2)\n\n # init fail count\n fail_count = 0\n\n # loop over energies\n for i, arr in enumerate(values):\n energy_hi = self.psf.energy_hi[i]\n energy_lo = self.psf.energy_lo[i]\n\n # check if energy is outside of safe energy threshold\n if self.psf.energy_thresh_lo > energy_hi:\n continue\n if self.psf.energy_thresh_hi < energy_lo:\n continue\n\n # loop over offsets\n for arr2 in arr:\n\n # init integral\n sum = 0\n\n # loop over deltas\n for j, v in enumerate(arr2):\n # calculate contribution to integral\n width = self.psf.rad_hi[j].rad - self.psf.rad_lo[j].rad\n rad = 0.5 * (self.psf.rad_hi[j].rad + self.psf.rad_lo[j].rad)\n sum += v.value * width * rad * 2 * np.pi\n\n # 
check if integral is close enough to 1\n if np.abs(sum - 1.0) > self.config[\"d_norm\"]:\n # add to fail counter\n fail_count += 1\n\n # write results to dict\n results = {}\n if fail_count == 0:\n results[\"status\"] = \"ok\"\n else:\n results[\"status\"] = \"failed\"\n results[\"n_failed_bins\"] = fail_count\n self.results[\"normalize\"] = results", "def _log_epoch(self, logger: H5Logger, epoch_results: Dict[str, float]):\n for data_kind in \"train\", \"test\":\n if data_kind + \"_F\" not in epoch_results:\n continue\n\n # log_kind is one of \"train\", \"valid\" or \"test\"\n # (while data_kind is one of \"train\" or \"test\")\n log_kind = \"valid\" if data_kind == \"test\" and self.train_data is not None else data_kind\n\n # log F and subs to stdout and file\n F, subs = get(epoch_results, f\"{data_kind}_F\", f\"{data_kind}_subs\")\n assert not (math.isnan(F) or math.isinf(F)), f\"{log_kind} free energy is invalid!\"\n F_and_subs_dict = {f\"{log_kind}_F\": to.tensor(F), f\"{log_kind}_subs\": to.tensor(subs)}\n logger.append(**F_and_subs_dict)\n\n # log latest states and lpj to file\n states = getattr(self, f\"{data_kind}_states\")\n if f\"{log_kind}_states\" not in self._conf.log_blacklist:\n K = gather_from_processes(states.K)\n logger.set(**{f\"{log_kind}_states\": K})\n else:\n K = None\n if f\"{log_kind}_lpj\" not in self._conf.log_blacklist:\n logger.set(**{f\"{log_kind}_lpj\": gather_from_processes(states.lpj)})\n\n if self._conf.keep_best_states:\n best_F_name = f\"best_{log_kind}_F\"\n best_F = getattr(self, f\"_{best_F_name}\", None)\n if best_F is None or F > best_F:\n rank = dist.get_rank() if dist.is_initialized() else 0\n if K is None:\n K = gather_from_processes(states.K)\n if rank == 0:\n assert isinstance(K, to.Tensor) # to make mypy happy\n best_states_dict = {\n best_F_name: to.tensor(F),\n f\"best_{log_kind}_states\": K.cpu().clone(),\n }\n logger.set(**best_states_dict)\n setattr(self, f\"_{best_F_name}\", F)\n\n # log data reconstructions\n reco_dict = {}\n if (\n f\"{log_kind}_reconstruction\" not in self._conf.log_blacklist\n and f\"{data_kind}_rec\" in epoch_results\n ):\n reco_dict[f\"{log_kind}_reconstruction\"] = gather_from_processes(\n epoch_results[f\"{data_kind}_rec\"]\n )\n logger.set(**reco_dict)\n\n log_theta_fn = logger.set if self._conf.log_only_latest_theta else logger.append\n log_theta_fn(theta=self.model.theta)\n logger.write()", "def main():\n args = get_args()\n debugging = args.debug\n f1, f2 = args.files\n\n logging.basicConfig(\n filename='.log',\n filemode='w',\n level=logging.DEBUG if args.debug else logging.CRITICAL\n )\n\n if f1 is None or not os.path.isfile(f1):\n print(f'\\\"{f1}\\\" is not a file')\n exit(1)\n if f2 is None or not os.path.isfile(f2):\n print(f'\\\"{f2}\\\" is not a file')\n exit(1)\n\n logging.debug('file1 = {}, file1 = {}'.format(f1,f2))\n\n in1 = open(f1, 'r').read().split()\n in2 = open(f2, 'r').read().split()\n\n dist_sum = 0\n for i in range(len(in1)):\n dist_sum += dist(in1[i], in2[i])\n print(dist_sum)", "def l1_norm(f):\n return dmp_l1_norm(f.rep, f.lev, f.dom)", "def experiment_linear_conv_ls(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n # log_dir = 'runs_linear_conv_ls_%s' % adv_norm_type\n log_dir = 'runs_linear_conv_ls_normfix_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # 
Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # Model hyper-parameters\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n\n params = []\n\n # GD line search implicit bias\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 100000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+conv_linear_params+gd_ls)]\n\n return params, log_dir, module_name, exclude" ]
[ "0.55692875", "0.54376626", "0.50235313", "0.4963999", "0.4872999", "0.48711163", "0.48684964", "0.48494893", "0.4843204", "0.48130316", "0.48026887", "0.47972482", "0.4758948", "0.47379422", "0.47259918", "0.46990478", "0.46983588", "0.46347976", "0.46319932", "0.46254238", "0.46174806", "0.46104428", "0.45992187", "0.45778367", "0.4566062", "0.45608166", "0.4557898", "0.45478457", "0.4527692", "0.45224902" ]
0.71914333
0
Compare a solution to a FEM reference solution
def compare_solutions(self, FEM_ref_sol, time, series='Temperature', path=None, mesh_mapping=None, save_as_vtu=True): if path == None: path = self.save_path+"_comp_sols.csv" print("ht3_solver:\tComparing solutions as t = " \ + str(time) + " for series " + str(series) \ + " and writing to " + path, flush=True) step_no_this = int(np.floor(time / self.d_T)) step_no_FEM = int(np.floor(time / FEM_ref_sol.d_T)) # First, map between FEM_ref solution nodes and this solution. if mesh_mapping == None: mapping = self.mesh.project_points(FEM_ref_sol.mesh.nodes, failure_rule='closest') else: mapping = mesh_mapping # Project xfem solution onto these points. this_sol = self.saved_data[series][step_no_this].data xfem_mapped_sol = np.zeros(FEM_ref_sol.node_map.num()) for nid, info in mapping.items(): elem, loc_coord = info p_val = elem.eval_elem(self.node_map, \ this_sol, \ (loc_coord,))[0] idx = FEM_ref_sol.node_map.tag_to_idx((nid, 0)) xfem_mapped_sol[idx] = p_val fem_sol = FEM_ref_sol.saved_data[series][step_no_FEM].data # Now we have two solutions, with all value valid at nodes: # fem_sol and xfem_mapped_sol f = open(path, 'a', newline="") def write_pair(a,b): f.write(a + ", "+ str(b)+", ") write_pair("Series", series) write_pair("Time", time) # L2 errors Err = fem_sol - xfem_mapped_sol Ex = fem_sol #Save into vtu... if save_as_vtu == True: data_err = {} for nid in FEM_ref_sol.mesh.nodes.keys(): idx = FEM_ref_sol.node_map.tag_to_idx((nid, 0)) data_err[nid] = Err[idx] data_abs = {} for nid in FEM_ref_sol.mesh.nodes.keys(): idx = FEM_ref_sol.node_map.tag_to_idx((nid, 0)) data_abs[nid] = Ex[idx] data_calc = {} for nid in FEM_ref_sol.mesh.nodes.keys(): idx = FEM_ref_sol.node_map.tag_to_idx((nid, 0)) data_calc[nid] = xfem_mapped_sol[idx] expt_data = {"Error": data_err, "Reference": data_abs, "Calculated": data_calc} FEM_ref_sol.mesh.export_to_vtk(self.save_path + str(step_no_this)+"comp", expt_data) L2Ex = 0.0 L2Abs = 0.0 def ev_elemSqErr(elem, eta): return np.square(elem.eval_elem(FEM_ref_sol.node_map, Err, [eta])[0]) def ev_elemSqEx(elem, eta): return np.square(elem.eval_elem(FEM_ref_sol.node_map, Ex, [eta])[0]) for elem in FEM_ref_sol.mesh.elems.values(): L2Ex += et.integrate_elem(elem, ev_elemSqEx, gauss_mult=2) L2Abs += et.integrate_elem(elem, ev_elemSqErr, gauss_mult=2) L2Ex = np.sqrt(L2Ex) L2Abs = np.sqrt(L2Abs) write_pair("L2 Err", L2Abs) write_pair("L2 Abs Err", L2Abs / L2Ex) f.write("\n") f.close() return mapping
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_solutions(solution, filename):\n with open(filename) as f:\n content = ast.literal_eval(f.read()) # parse string repr of the output to a list\n return solution == content", "def test_get_solution(self):\n pass", "def solution(self, solution: Path, bundle: Bundle):\n pass", "def fileCmp (working, ref, compare_content=0, verbose=0):\n\tif verbose and working and ref:\n\t\tprint \"fileCmp\\n\\t working: %s\\n\\tref: %s\" % (\n\t\t\tworking.path or \"no working path\", \n\t\t\tref.path or \"no reference path\")\n\t\t\n\tflag = \"UNASSIGNED\"\n\tdebugging = 0\n\t\n\tif ref and not working:\n\t\tflag = \"missing\"\n\t\n\telif not ref: # or not os.path.exists(ref.path):\n\t\tflag = \"new\"\n\t\t\n\telif isinstance (working, JloFile):\n\t\t# print \"ref: %s\" % ref.__class__.__name__\n\t\tif debugging:\n\t\t\tif not working.equals (ref):\n\t\t\t\tprint \"working file is different\"\n\t\t\t\t\n\t\t\tif not working.newerthan (ref):\n\t\t\t\tprint \"working file has same date as ref\"\n\t\t\n\t\t\tif working.modtime == ref.modtime:\n\t\t\t\tprint \"mods dates match\"\n\t\t\telse:\n\t\t\t\t# print \"wrk: %d ref: %d\" % (working.modtime,ref.modtime)\n\t\t\t\tprint \"wrk: %s ref: %s\" % \\\n\t\t\t\t\t(working.ppDate (working.modtime),\n\t\t\t\t\t working.ppDate (ref.modtime))\n\t\t\n\t\tif compare_content:\n\t\t\tif working.equals (ref):\n\t\t\t\tflag = \"\"\n\t\t\telse:\n\t\t\t\tflag = \"modified\"\n\t\t\t\t\n\t\telse:\n\t\t\tflag = \"\"\n\n\t\t\t\n\t\t\t\n\t\t\t# elif not working.newerthan (ref):\n\t\t\t# flag = \"obsolete-check\"\n\t\t# elif working.newerthan (ref) and not working.equals (ref):\n\t\t\t# flag = \"modified\"\n\t\t# elif not working.equals (ref):\n\t\t\t# print \"not modified\"\n\t\t\t# flag = \"different\"\n\t\t# elif working.newerthan (ref):\n\t\t\t# flag = \"modified\"\n\tif verbose and working:\n\t\tprint \"%s --> %s\" % (working.name, flag)\n\treturn flag", "def test_compare_definition_to_actual(self):\n compare_definition_to_actual(self.definition, self.actual)", "def test_initialization_of_homework_result_solution():\n assert result_1.solution == \"I have done this hw\"", "def test_fabsim():\n assert(\"plugins\" in get_plugin_path(\"FabDummy\"))\n assert(\"FabDummy\" in get_plugin_path(\"FabDummy\"))\n assert(len(get_fabsim_git_hash()) > 0)", "def test_part_1(arguments, output):\n # assert part_1.solution(arguments) == output\n assert part_1.solution(arguments) == output", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.h\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.h\"] = False\n\n EKFSLAM.EKFSLAM.h(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.h\"], \"The function uses the solution\"", "def compare_with_ref(\n self, response, response_checker=default_checker.default_journey_checker\n ):\n\n def ref_resp2files(output_file, output_json):\n \"\"\"\n Create a file for the filtered response and for the filtered reference\n \"\"\"\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)\n\n def print_diff(ref_file, resp_file):\n \"\"\"\n Print differences between reference and response in console\n \"\"\"\n # open reference\n with open(ref_file) as reference_text:\n reference = reference_text.readlines()\n # open response\n with open(resp_file) as response_text:\n response = response_text.readlines()\n\n # Print failed test name\n print_color(\"\\n\\n\" + str(file_name) + \" failed :\" + \"\\n\\n\", Colors.PINK)\n\n symbol2color = {\"+\": Colors.GREEN, \"-\": Colors.RED}\n 
for line in difflib.unified_diff(reference, response):\n print_color(line, symbol2color.get(line[0], Colors.DEFAULT))\n\n # Filtering the answer. (We compare to a reference also filtered with the same filter)\n filtered_response = response_checker.filter(response)\n\n # Get the reference\n\n # Create the file name\n filename = self.get_file_name()\n filepath = os.path.join(config[\"REFERENCE_FILE_PATH\"], filename)\n\n assert os.path.isfile(filepath), \"{} is not a file\".format(filepath)\n\n with open(filepath, \"r\") as f:\n raw_reference = f.read()\n\n # Transform the string into a dictionary\n dict_ref = json.loads(raw_reference)\n\n # Get only the full_response part from the ref\n ref_full_response = dict_ref[\"full_response\"]\n\n # Filtering the reference\n filtered_reference = response_checker.filter(ref_full_response)\n\n # Compare response and reference\n try:\n response_checker.compare(filtered_response, filtered_reference)\n except AssertionError as e:\n # print the assertion error message\n logging.error(\"Assertion Error: %s\" % str(e))\n # find name of test\n file_name = filename.split(\"/\")[-1]\n file_name = file_name[:-5]\n\n # create a folder\n dir_path = config[\"RESPONSE_FILE_PATH\"]\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # create path to ref and resp\n full_file_name_ref = dir_path + \"/reference_\" + file_name + \".txt\"\n full_file_name_resp = dir_path + \"/response_\" + file_name + \".txt\"\n\n json_filtered_reference = json.dumps(filtered_reference, indent=4)\n json_filtered_response = json.dumps(filtered_response, indent=4)\n\n # Save resp and ref as txt files in folder named outputs\n ref_resp2files(full_file_name_ref, json_filtered_reference)\n ref_resp2files(full_file_name_resp, json_filtered_response)\n\n # Print difference in console\n print_diff(full_file_name_ref, full_file_name_resp)\n\n raise", "def test_compare_local_version_is_same(self):\n\n given = \"1.0.0.dev (Hello, World)\"\n expected = None\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)", "def test_part_2(arguments, distance, output):\n assert part_2.solution(arguments, distance) == output", "def run_and_compare_fems(\n bdf_model: str,\n out_model: str,\n debug: bool=False,\n xref: bool=True,\n check: bool=True,\n punch: bool=False,\n mesh_form: str='combined',\n print_stats: bool=False,\n encoding=None,\n sum_load: bool=True,\n size: int=8,\n is_double: bool=False,\n save_file_structure: bool=False,\n stop: bool=False,\n nastran: str='',\n post: int=-1,\n hdf5: bool=False,\n dynamic_vars=None,\n quiet: bool=False,\n dumplines: bool=False,\n dictsort: bool=False,\n nerrors: int=0,\n dev: bool=False,\n crash_cards=None,\n version: Optional[str]=None,\n limit_mesh_opt: bool=False,\n safe_xref: bool=True,\n run_extract_bodies: bool=False,\n run_skin_solids: bool=True,\n pickle_obj: bool=False,\n validate_case_control: bool=True,\n stop_on_failure: bool=True,\n log: Optional[SimpleLogger]=None,\n name: str=''):\n assert os.path.exists(bdf_model), f'{bdf_model!r} doesnt exist\\n%s' % print_bad_path(bdf_model)\n fem1 = BDF(debug=debug, log=log)\n if version:\n map_version(fem1, version)\n fem1.dumplines = dumplines\n\n fem1.set_error_storage(nparse_errors=nerrors, stop_on_parsing_error=True,\n nxref_errors=nerrors, stop_on_xref_error=True)\n if dynamic_vars:\n fem1.set_dynamic_syntax(dynamic_vars)\n\n if not quiet:\n fem1.log.info('starting fem1 (read/write)')\n sys.stdout.flush()\n fem2 = None\n diff_cards = []\n\n #nastran_cmd = 'nastran scr=yes 
bat=no old=no news=no '\n nastran_cmd = ''\n try:\n #try:\n fem1.log.info('running fem1 (read/write)')\n fem1 = run_fem1(fem1, bdf_model, out_model, mesh_form, xref, punch, sum_load,\n size, is_double,\n run_extract_bodies=run_extract_bodies,\n run_skin_solids=run_skin_solids,\n save_file_structure=save_file_structure,\n hdf5=hdf5,\n encoding=encoding, crash_cards=crash_cards, safe_xref=safe_xref,\n limit_mesh_opt=limit_mesh_opt,\n pickle_obj=pickle_obj, stop=stop, name=name)\n\n if stop:\n if not quiet:\n print('card_count:')\n print('-----------')\n for card_name, card_count in sorted(fem1.card_count.items()):\n print('key=%-8s value=%s' % (card_name, card_count))\n return fem1, None, None\n\n ierror = 0\n fem1.log.info('running fem2')\n fem2 = run_fem2(bdf_model, out_model, xref, punch, sum_load, size, is_double, mesh_form,\n safe_xref=safe_xref,\n encoding=encoding, debug=debug, quiet=quiet,\n ierror=ierror, nerrors=nerrors,\n stop_on_failure=stop_on_failure,\n validate_case_control=validate_case_control, log=log)\n\n diff_cards = compare(fem1, fem2, xref=xref, check=check,\n print_stats=print_stats, quiet=quiet)\n test_get_cards_by_card_types(fem2)\n\n fem2.update_model_by_desvars(xref)\n #except Exception:\n #return 1, 2, 3\n\n run_nastran(bdf_model, nastran_cmd, post, size, is_double)\n\n except KeyboardInterrupt:\n sys.exit('KeyboardInterrupt...sys.exit()')\n except IOError: # only temporarily uncomment this when running lots of tests\n if not dev:\n raise\n except CardParseSyntaxError: # only temporarily uncomment this when running lots of tests\n if not dev:\n raise\n print('failed test because CardParseSyntaxError...ignoring')\n except UnsupportedCard:\n if not dev:\n raise\n print('failed test because UnsupportedCard /...ignoring')\n except MissingDeckSections:\n if not dev:\n raise\n print('failed test because MissingDeckSections...ignoring')\n except DuplicateIDsError as e:\n # only temporarily uncomment this when running lots of tests\n if not dev:\n raise\n #elif is_mesh_opt:\n #print('failed test because mesh adaption (GRIDG,CGEN,SPCG)...ignoring')\n #print(e)\n else:\n print('failed test because DuplicateIDsError...ignoring')\n except MeshOptimizationError:\n print('failed test because mesh adaption (GRIDG,CGEN,SPCG)...ignoring')\n except DisabledCardError as e:\n if not dev:\n raise\n except EnvironmentVariableError:\n if not dev:\n raise\n #except RuntimeError as e:\n # only temporarily uncomment this when running lots of tests\n #if not dev:\n #raise\n #elif is_mesh_opt:\n #print('failed test because mesh adaption (GRIDG,CGEN,SPCG)...ignoring')\n #print(e)\n #else:\n #raise\n #except AttributeError: # only temporarily uncomment this when running lots of tests\n #pass\n #except SyntaxError as e:\n # only temporarily uncomment this when running lots of tests\n #if not dev:\n #raise\n #elif is_mesh_opt:\n #print('failed test because mesh adaption (GRIDG,CGEN,SPCG)...ignoring')\n #print(e)\n #else:\n #raise\n #except KeyError as e: # only temporarily uncomment this when running lots of tests\n #if not dev:\n #raise\n #elif is_mesh_opt:\n #print('failed test because mesh adaption (GRIDG,CGEN,SPCG)...ignoring')\n #print(e)\n #else:\n #raise\n #except AssertionError: # only temporarily uncomment this when running lots of tests\n #pass\n except SystemExit:\n sys.exit('sys.exit...')\n except Exception:\n #exc_type, exc_value, exc_traceback = sys.exc_info()\n #print \"\\n\"\n traceback.print_exc(file=sys.stdout)\n #print msg\n print(\"-\" * 80)\n raise\n\n if not quiet:\n 
print(\"-\" * 80)\n return (fem1, fem2, diff_cards)", "def exercise():\n pi_good = get_pdb_inputs(pdb_str=pdb_str_answer, restraints=False)\n map_data = get_map(xrs=pi_good.xrs)\n xrs_good = pi_good.xrs.deep_copy_scatterers()\n pi_good.ph.write_pdb_file(file_name=\"answer.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())\n #\n pi_poor = get_pdb_inputs(pdb_str=pdb_str_poor, restraints=True)\n pi_poor.ph.write_pdb_file(file_name=\"poor.pdb\")\n xrs_poor = pi_poor.xrs.deep_copy_scatterers()\n #\n d = xrs_good.distances(other=xrs_poor)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)>2\n assert flex.mean(d)>0.7\n #\n xrs_refined = xrs_poor\n for i in range(3):\n ero = individual_sites.easy(\n map_data = map_data,\n xray_structure = xrs_refined,\n pdb_hierarchy = pi_poor.ph,\n geometry_restraints_manager = pi_poor.grm)\n xrs_refined = ero.xray_structure\n # comapre\n d = xrs_good.distances(other=xrs_refined)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)<0.15\n assert flex.mean(d)<0.03\n ero.pdb_hierarchy.write_pdb_file(file_name=\"refined.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())", "def test_commonfs_truecase():\n f1 = tempfile.mkstemp()\n f2 = tempfile.mkstemp()\n rc1 = r.RemovalCandidate(f1[1])\n rc2 = r.RemovalCandidate(f2[1])\n assert r.commonfs([rc1,rc2])", "def compare(fem1, fem2, xref=True, check=True, print_stats=True, quiet=False):\n diff_cards = compare_card_count(fem1, fem2, print_stats=print_stats, quiet=quiet)\n if xref and check:\n get_element_stats(fem1, fem2, quiet=quiet)\n get_matrix_stats(fem1, fem2)\n compare_card_content(fem1, fem2)\n #compare_params(fem1, fem2)\n #print_points(fem1, fem2)\n return diff_cards", "def test_Q4_supported_square_modal_solution(self):\n # Simply supported\n REF_W = np.array([2.377, 5.942, 5.942, 9.507, 11.884, 11.884, 15.449, 15.449])\n\n mesh = FEMOL.mesh.rectangle_Q4(10, 10, 15, 15)\n\n problem = FEMOL.FEM_Problem(mesh=mesh, physics='modal', model='plate')\n\n material = FEMOL.materials.IsotropicMaterial(200e9, 0.3, 8000)\n problem.define_materials(material)\n problem.define_tensors(0.05)\n\n problem.add_fixed_domain(FEMOL.domains.inside_box([[0, 10]], [[0, 10]]), ddls=[0, 1, 5])\n problem.add_fixed_domain(FEMOL.domains.inside_box([0, 10], [[0, 10]]), ddls=[2, 4])\n problem.add_fixed_domain(FEMOL.domains.inside_box([[0, 10]], [0, 10]), ddls=[2, 3])\n\n w, v = problem.solve(verbose=False, filtre=1)\n\n diff = 100 * np.abs(REF_W - w[:8]) / w[:8]\n\n self.assertTrue((diff < 5).all())", "def compareTwoReco(reference, new, histos, debug=1):\n\n # Tracks with index False are the ones that have been matched to the reference track collection\n new_valid = [True for i in new]\n\n # Tracks with index False are the ones that have been matched to the comparison track collection\n original_valid = [True for i in reference]\n print \" \".join(\"%10s\" % k for k in variables)\n debug_verbose = checkDebug(debug, 'Verbose')\n debug_ordinary = checkDebug(debug, 'Ordinary')\n debug_recovery = checkDebug(debug, 'Recovery')\n debug_lost = checkDebug(debug, 'Lost')\n debug_fake = checkDebug(debug, 'Fake')\n\n for original_index, original in enumerate(reference):\n # Fill in cumulative plots for the reference sample first\n histos['reference_hits_vs_algo'].Fill(original.algo, original.hits)\n histos['reference_hits_vs_orialgo'].Fill(original.orialgo, original.hits)\n histos['reference_hits_vs_pt'].Fill(original.pt, original.hits)\n histos['den'].Fill(original.pt)\n histos['den_eta'].Fill(original.eta)\n 
histos['den_phi'].Fill(original.phi)\n histos['den_hits'].Fill(original.hits)\n histos['den_algo'].Fill(original.algo)\n histos['den_orialgo'].Fill(original.orialgo)\n\n # Now start to look for a matching track in the comparison track collection\n window_depth = 400 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n print original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \" \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, dr_squared, dPt_over_pt\n if debug_verbose:\n print \" \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT:\n # These are the tracks in the reference track collection\n # that have been matched to a track in the comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n assert original.run == new[iBest].run, \"run mismatch\"\n assert original.ls == new[iBest].ls, \"ls mismatch\"\n assert original.event == new[iBest].event, \"event mismatch\"\n if debug_ordinary:\n print original\n print new[iBest]\n print iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch, '\\n'\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_phi'].Fill(new[iBest].phi)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n # Let's try a recovery loop with somewhat lesser stringent cuts\n for original_index, original in enumerate(reference):\n if original_valid[original_index]:\n # Now start to look for a matching track in the comparison track collection\n window_depth = 300 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n print \"Recovery \", original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \"Recovery \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT*6:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, dr_squared, dPt_over_pt\n if debug_verbose:\n print \"Recovery \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT*10: # inflate cut on DeltaR to recover some good-medium matching\n # These are the tracks in the reference track collection\n # that have been matched to a track in the 
comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n if debug_recovery:\n print \"Recovery \", original\n print \"Recovery \", new[iBest]\n print \"Recovery \", iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n\n # These are the tracks in the reference track collection\n # that have *not* been associated to any track in the\n # comparison collection == > LOST TRACKS\n reference_not_assigned = [j for i,j in enumerate(reference) if original_valid[i]]\n reference_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_lost:\n print \"**** Lost tracks **** %d\" % len(reference_not_assigned)\n for j in reference_not_assigned:\n histos['lost_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['lost_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['lost_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['lost_eta'].Fill(j.eta)\n if debug:\n print j\n if debug_lost:\n print \"**** End of Lost tracks ****\"\n\n # Fake Tracks\n for i, j in enumerate(new):\n # Fill in the cumulative plots related to tracks in the comparison track collection\n histos['comparison_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['comparison_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['comparison_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['fake_den'].Fill(j.pt)\n histos['fake_den_eta'].Fill(j.eta)\n histos['fake_den_phi'].Fill(j.phi)\n histos['fake_den_hits'].Fill(j.hits)\n histos['fake_den_algo'].Fill(j.algo)\n histos['fake_den_orialgo'].Fill(j.orialgo)\n\n # These are the tracks in the comparison track collection\n # that have *not* been associated to any track in the\n # reference collection ==> FAKE TRACKS\n new_not_assigned = [j for i,j in enumerate(new) if new_valid[i]]\n new_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_fake:\n print \"**** Fake tracks **** %d\" % len(new_not_assigned)\n for j in new_not_assigned:\n histos['fake_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['fake_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['fake_hits_vs_pt'].Fill(j.pt, j.hits)\n if debug:\n print j\n if debug_fake:\n print \"**** End of Fake tracks ****\"", "def test_component_resolution_same_file():\n\n assert snippet_eval(ComponentSnippet(modulea.ComponentResolutionSameFile())) == \"hi\\n\"", "def all_equals(solution1, solution2, tol=0.001):\n tokens1 = solution1.split()\n tokens2 = solution2.split()\n\n for token1, token2 in zip(tokens1, tokens2):\n if not equals(token1, token2, tol=tol):\n print(token1, token2)\n return False\n\n return True", "def test_same_models_are_equal(dbdiskrepo):\n fit1 = fit_model()\n fit2 = fit_model()\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id\n assert hash(fit1) == hash(fit2)", "def verify(problem):\n filename = get_filename(problem)\n\n if not 
os.path.isfile(filename):\n click.secho('Error: \"{0}\" not found.'.format(filename), fg='red')\n sys.exit(1)\n\n # get_solution() will exit here if the solution does not exist\n solution = get_solution(problem)\n\n click.echo('Checking \"{0}\" against solution: '.format(filename), nl=False)\n\n cmd = [sys.executable or 'python', filename]\n start = clock()\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n end = clock()\n time_info = format_time(start, end)\n\n # Python 3 returns bytes; use a valid encoding like ASCII as the output\n # will fall in that range\n if isinstance(stdout, bytes):\n output = stdout.decode('ascii')\n\n # Return value of anything other than 0 indicates an error\n if proc.poll() != 0:\n click.secho('Error calling \"{0}\".'.format(filename), fg='red')\n click.secho(time_info, fg='cyan')\n sys.exit(1)\n\n # Split output lines into array; make empty output more readable\n output_lines = output.splitlines() if output else ['[no output]']\n\n # If output is multi-lined, print the first line of the output on a\n # separate line from the \"checking against solution\" message, and\n # skip the solution check (multi-line solution won't be correct)\n if len(output_lines) > 1:\n is_correct = False\n click.echo('') # force output to start on next line\n for line in output_lines:\n click.secho(line, bold=True, fg='red')\n else:\n is_correct = output_lines[0] == solution\n fg_colour = 'green' if is_correct else 'red'\n click.secho(output_lines[0], bold=True, fg=fg_colour)\n\n click.secho(time_info, fg='cyan')\n\n # Exit here if answer was incorrect\n if is_correct:\n return True\n else:\n sys.exit(1)", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.update\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.update\"] = False\n\n EKFSLAM.EKFSLAM.update(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.update\"], \"The function uses the solution\"", "def compareDesigns(self):\n if len(self.Designs) < 2:\n print(\"compareDesigns() ERROR: self.Designs < 2 - aborting !\")\n return\n # Finished parsing the designs, let's do some initial comparison:\n print(self.Designs)\n self.DesignsetIntersection = self.Designs[0].Designset & self.Designs[1].Designset # set.intersection(other_set)\n self.DesignsetSymmetricDifference = self.Designs[0].Designset ^ self.Designs[1].Designset # set.symmetric_difference(other_set)\n\n # find common module names...\n self.CommonModuleNames = set(self.Designs[0].Modulesets.keys()) & set(self.Designs[1].Modulesets.keys())\n self.NewModuleNames = set(self.Designs[0].Modulesets.keys()) ^ set(self.Designs[1].Modulesets.keys())\n self.UnchangedModules = set()\n\n self.Designs[0].UniqueModules = set(self.Designs[0].Modulesets.keys()) - set(self.Designs[1].Modulesets.keys())\n self.Designs[1].UniqueModules = set(self.Designs[1].Modulesets.keys()) - set(self.Designs[0].Modulesets.keys())\n\n for i, design in enumerate(self.Designs):\n # Changes in modules (module names)\n design.UnchangedModules = set()\n design.ChangedModules = set()\n other_design = self.Designs[1] if i == 0 else self.Designs[0]\n print(\"\\n----\" + '-'*len(\"Report for \" + design.Filename) + \"-\"*6)\n print(\"--- Report for \" + design.Filename + \" \" + \"-\"*5)\n print(\"----\" + '-'*len(\"Report for \" + design.Filename) + \"-\"*6)\n print(\"Compared against: \" + other_design.Filename)\n print(\"Modules in this design: \" + \", \".join(sorted(list(design.Modulesets.keys()))))\n 
print(\"COMMON/SHARED MODULES (modules found in both designs): {}\".format(\n \", \".join(sorted(list(self.CommonModuleNames))) if self.CommonModuleNames else '<none>'))\n print(\"ADDED MODULES (modules unique to this design, not found in the other design): {}\".format(\n sorted(design.UniqueModules) if design.UniqueModules else '<none>'))\n print(\"REMOVED MODULES (modules in other design not found in this design): {}\".format(\n sorted(other_design.UniqueModules) if other_design.UniqueModules else '<none>'))\n\n print(\"\\nPer oligo sequence changes for common/shared modules:\")\n for module in self.CommonModuleNames: # (set(design.Modulesets.keys()) - design.UniqueModules())\n if len(design.Modulesets[module] ^ other_design.Modulesets[module]) == 0:\n # Edit: Now using symmetric_difference (^) instead of difference (-)\n design.UnchangedModules.add(module)\n else:\n design.ChangedModules.add(module)\n if (not design.UniqueModules) and (not design.ChangedModules):\n print(\"( And all modules in the two designs include identical sets of oligos. Bye! )\")\n return\n print(\"Common/shared modules that are completely unchanged: {}\".format(\n \", \".join(sorted(list(design.UnchangedModules))) if design.UnchangedModules else \"<none>\"))\n print(\"Common/shared modules with changed oligos in this design: \" + \", \".join(sorted(list(design.ChangedModules))))\n #if design.UniqueModules:\n # print \"New (unique) module names/classes in this design: \" + \", \".join(design.UniqueModules)\n # print \"(Removed module classes in this design: \" + \", \".join(other_design.UniqueModules) + \")\"\n for module in design.ChangedModules:\n print(\"+New oligos in module\\t\" + module + \"\\t\\t (Start pos noted afterwards)\")\n print(\"++ \" + \"\\n++ \".join([oligo +\n (\"\\t\\t\" + \"-\".join([design.OligoInfo[oligo]['Start'] if 'Start' in design.OligoInfo[oligo] else '',\n design.OligoInfo[oligo]['End'] if 'End' in design.OligoInfo[oligo] else '(no-end-info)']))\n for oligo in (design.Modulesets[module] - other_design.Modulesets[module])]))\n print(\"-Oligos removed from module: \" + module)\n print(\"-- \" + \"\\n-- \".join([oligo +\n (\"\\t\\t\" + \"-\".join([other_design.OligoInfo[oligo]['Start'] if 'Start' in other_design.OligoInfo[oligo] else '',\n other_design.OligoInfo[oligo]['End'] if 'End' in other_design.OligoInfo[oligo] else '(no-end-info)']))\n for oligo in (other_design.Modulesets[module] - design.Modulesets[module])]))\n print(\"\")\n if design.UniqueModules:\n print(\"Further details for the new/unique module-classes in this design:\")\n #print \"Per oligo sequence changes (unique module classes):\"\n for module in design.UniqueModules:\n if module is None:\n print(\"Error: module is None. 
design.UniqueModules is: {}\".format(list(design.UniqueModules.keys())))\n continue\n similar_module = None\n n_best = 0\n for other_module in other_design.UniqueModules:\n n_common_oligos = len(design.Modulesets[module] & other_design.Modulesets[other_module])\n if n_common_oligos > n_best:\n n_best = n_common_oligos\n similar_module = other_module\n if n_best == len(design.Modulesets[module]) and len(design.Modulesets[module]) == len(other_design.Modulesets[other_module]):\n print(\"\".join([\"Module \", module, \" is IDENTICAL TO \", similar_module, \" (\", str(n_best), \" of \", str(len(design.Modulesets[module])), \" in common)\"]))\n continue\n if similar_module:\n print(\"Module %s is similar to %s (%s of %s in common)\" % (module, similar_module, n_best, len(design.Modulesets[module]) ))\n #print \"\".join([\"Module \", module, \" is similar to \", similar_module, \" (\", str(n_best), \" of \", str(len(design.Modulesets[module])), \" in common)\"])\n print(\"+New oligos (%s minus %s):\" % (module, similar_module))\n #print \"+New oligos: (\" + module + \" minus \" + similar_module + \")\"\n print(\"++\" + \"\\n++\".join(design.Modulesets[module] - other_design.Modulesets[similar_module]))\n print(\"-Removed oligos: ({} minus {})\\n--{}\\n\".format(similar_module,\n module, \"\\n--\".join(other_design.Modulesets[similar_module] - design.Modulesets[module])))\n #print \"-Removed oligos: (\" + similar_module + \" minus \" + module + \")\"\n #print \"--\" +\n #print \"\"", "def test_equal(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n qs2 = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n self.assertEqual(\n qs[0],\n qs2[0],\n )", "def verify_implementations(I, f, c, bc, Lx, Ly, nx, ny, tstop):\n\n class Action:\n def __init__(self):\n self.solutions = {}\n def init(self, version):\n \"\"\"Init list for solutions.\"\"\"\n self._version = version\n self.solutions[self._version] = []\n def __call__(self, u, x, y, t):\n # takes time...\n self.solutions[self._version].append(u.copy())\n\n action = Action()\n versions = ('weave', 'scalar', 'vec', 'f77')\n implementation = {}\n for version in versions:\n for key in 'ic', 'inner', 'bc':\n implementation[key] = version\n action.init(version)\n print version\n solver(I, f, c, bc, Lx, Ly, nx, ny, 0, tstop,\n action, implementation)\n # compare solutions:\n ref = action.solutions[versions[0]]\n for version in versions[1:]:\n sol = action.solutions[version]\n # compare sol with ref:\n for timestep in range(len(sol)):\n error = ref[timestep] - sol[timestep]\n error_measure = sqrt(dot(error.flat, error.flat))\n if error_measure < 1.0E-14:\n error_measure = 0.0\n print \"'%s' compared with '%s' at timestep \"\\\n \"%02d: difference=%g\" % \\\n (versions[0], version, timestep, error_measure)", "def compare(self):\n self.success = True\n\n # evaluate if comparison should be made\n if not self.make_comparison:\n return\n\n msgall = \"\"\n msg = sfmt.format(\"Comparison test\", self.name)\n print(msg)\n\n if self.action is not None:\n cpth = os.path.join(self.simpath, self.action)\n files_cmp = None\n if self.action.lower() == \"compare\":\n files_cmp = []\n files = os.listdir(cpth)\n for file in files:\n files_cmp.append(file)\n elif \"mf6\" in self.action:\n fpth = os.path.join(cpth, \"mfsim.nam\")\n cinp, self.coutp = get_mf6_files(fpth)\n\n head_extensions = (\n \"hds\",\n \"hed\",\n \"bhd\",\n \"ahd\",\n \"bin\",\n )\n if \"mf6_regression\" in self.action:\n success, msgall = self._compare_heads(\n msgall,\n 
extensions=head_extensions,\n )\n if not success:\n self.success = False\n # non-regression runs - for new features\n else:\n files1 = []\n files2 = []\n exfiles = []\n ipos = 0\n for file1 in self.outp:\n ext = os.path.splitext(file1)[1][1:]\n\n if ext.lower() in head_extensions:\n\n # simulation file\n pth = os.path.join(self.simpath, file1)\n files1.append(pth)\n\n # look for an exclusion file\n pth = os.path.join(self.simpath, file1 + \".ex\")\n if os.path.isfile(pth):\n exfiles.append(pth)\n else:\n exfiles.append(None)\n\n # Check to see if there is a corresponding compare file\n if files_cmp is not None:\n\n if file1 + \".cmp\" in files_cmp:\n # compare file\n idx = files_cmp.index(file1 + \".cmp\")\n pth = os.path.join(cpth, files_cmp[idx])\n files2.append(pth)\n txt = sfmt.format(\n f\"Comparison file {ipos + 1}\",\n os.path.basename(pth),\n )\n print(txt)\n else:\n if self.coutp is not None:\n for file2 in self.coutp:\n ext = os.path.splitext(file2)[1][1:]\n\n if ext.lower() in head_extensions:\n # simulation file\n pth = os.path.join(cpth, file2)\n files2.append(pth)\n\n else:\n files2.append(None)\n\n if self.nam_cmp is None:\n pth = None\n else:\n pth = os.path.join(cpth, self.nam_cmp)\n\n for ipos in range(len(files1)):\n file1 = files1[ipos]\n ext = os.path.splitext(file1)[1][1:].lower()\n outfile = os.path.splitext(os.path.basename(file1))[0]\n outfile = os.path.join(\n self.simpath, outfile + \".\" + ext + \".cmp.out\"\n )\n if files2 is None:\n file2 = None\n else:\n file2 = files2[ipos]\n\n # set exfile\n exfile = None\n if file2 is None:\n if len(exfiles) > 0:\n exfile = exfiles[ipos]\n if exfile is not None:\n txt = sfmt.format(\n f\"Exclusion file {ipos + 1}\",\n os.path.basename(exfile),\n )\n print(txt)\n\n # make comparison\n success_tst = compare_heads(\n None,\n pth,\n precision=\"double\",\n text=extdict[ext],\n outfile=outfile,\n files1=file1,\n files2=file2,\n htol=self.htol,\n difftol=True,\n # Change to true to have list of all nodes exceeding htol\n verbose=self.cmp_verbose,\n exfile=exfile,\n )\n msg = sfmt.format(\n f\"{extdict[ext]} comparison {ipos + 1}\",\n self.name,\n )\n print(msg)\n\n if not success_tst:\n self.success = False\n msgall += msg + \" ... FAILED\\n\"\n\n # compare concentrations\n if \"mf6_regression\" in self.action:\n success, msgall = self._compare_concentrations(msgall)\n if not success:\n self.success = False\n\n # compare cbc files\n if \"mf6_regression\" in self.action:\n cbc_extensions = (\n \"cbc\",\n \"bud\",\n )\n success, msgall = self._compare_budgets(\n msgall, extensions=cbc_extensions\n )\n if not success:\n self.success = False\n\n assert self.success, msgall\n return", "def target_found( self ):\n print( \"Solution: \" + self.path );", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.f\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.f\"] = False\n\n EKFSLAM.EKFSLAM.f(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.f\"], \"The function uses the solution\"", "def _compare_structure(sample, reference):\n paths = MappingValidator._find_all_paths(reference)\n result = True\n for path in paths:\n result = result and MappingValidator._validate_key(sample, path)\n if not result:\n break\n return result" ]
[ "0.6210738", "0.57919294", "0.5745451", "0.56757915", "0.565961", "0.56125337", "0.55110097", "0.54660773", "0.542077", "0.54162514", "0.5359694", "0.53146446", "0.5287936", "0.52809936", "0.5276253", "0.5276163", "0.5265088", "0.5264816", "0.52463746", "0.5229991", "0.5228973", "0.52195543", "0.5185835", "0.51757777", "0.51748264", "0.5170062", "0.5161271", "0.5153174", "0.5146476", "0.51461136" ]
0.605295
1
Rendering text in a window and placing data entry fields.
def main_window_text(self) -> None: tk.Label(text='Название книги:').grid(row=0, column=0, padx=10, pady=10) tk.Label(text='Автор:').grid(row=1, column=0, padx=10) tk.Label(text='Жанр:').grid(row=2, column=0, padx=10, pady=10) entry_title = tk.Entry(width=45) entry_title.grid(row=0, column=1, sticky=tk.W) entry_author = tk.Entry(width=45) entry_author.grid(row=1, column=1, sticky=tk.W) entry_genre = tk.Entry(width=45) entry_genre.grid(row=2, column=1, sticky=tk.W)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def displayText(self):\n if self.entryWidget.get().strip() == \"\":\n tkMessageBox.showerror(\"Tkinter Entry Widget\", \"Enter a text value\")\n else:\n self.file_com.write(self.entryWidget.get().strip()+'\\n')", "def textentry(self, parent, variable, label):\n # pack a label and entry horizontally in a frame:\n f = Frame(parent)\n f.pack(side='top', padx=2, pady=2)\n l = Label(f, text=label)\n l.pack(side='left')\n widget = Entry(f, textvariable=variable, width=8)\n widget.pack(side='left', anchor='w')\n return widget", "def Add_Text( self, th ):\r\n self.text_handle = th", "def chat_window(window, chat_lines, write_box):\n for i in xrange(25):\n chat_lines[i] = Entry(Point(130,245-(i*9)),80)\n chat_lines[i].draw(window)\n chat_lines[i].setFill(\"white\")\n write_box.draw(window) # draw it to the window\n help(chat_lines)", "def on_text_box(self, event):\n text_box_value = self.text_box.GetValue()\n text = \"\".join([_(u\"New text box value: \"), text_box_value])\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()", "def text_box(self, grid: object, name: str, xposition: int, yposition: int, synchronize: bool = False, xspan: int = 1,\n yspan: int = 1) -> QtWidgets.QPlainTextEdit:\n label = QtWidgets.QLabel()\n label.setText(TR().tr(name) + ':')\n grid.addWidget(label, yposition, xposition, 1, 1)\n input = QtWidgets.QPlainTextEdit()\n input.setObjectName(name)\n grid.addWidget(input, yposition, xposition + 1, yspan, xspan)\n input.textChanged.connect(self.data_changed)\n if synchronize:\n self.synchronize(input)\n\n return input", "def display_text(self, text, size=None, colr=None,\r\n x = None, y = None,\r\n new_line = None):\r\n if size is None:\r\n size = self.dt_size\r\n self.size = size\r\n if colr is None:\r\n colr = self.text_color\r\n self.text_color = colr\r\n if new_line is not None:\r\n if x is not None or y is not None:\r\n raise Exeception(\"Must not have new_line and x,y\")\r\n else:\r\n if x is not None or y is not None:\r\n new_line = False\r\n else:\r\n new_line = True\r\n if new_line:\r\n x = self.dt_x = self.disp_left\r\n self.dt_y -= size*self.font_size_to_ch\r\n y = self.dt_y\r\n #print(f\"new_line: y:{y} dt_y:{self.dt_y}\")\r\n else:\r\n if x is None:\r\n x = dt_x\r\n self.dt_x = x\r\n if y is None:\r\n y = self.dt_y\r\n self.dt_y = y\r\n #print(f\"display_text: text:{text} x:{x}, y:{y}\")\r\n tu.penup()\r\n if y < self.disp_bottom + self.disp_boarder:\r\n continue_msg = \"Press ENTER to continue\"\r\n inp = input(continue_msg)\r\n self.clear_text() # Only option \r\n \r\n tu.goto(x,y)\r\n tu.pendown()\r\n \r\n tu.color(colr)\r\n font = (\"Arial\", size, \"normal\")\r\n #print(f\"colr:{colr} text:{text} font:{font}\")\r\n #print(f\"xcor():{tu.xcor()} ycor():{tu.ycor()}\")\r\n tu.write(text, align=\"left\", font=font)", "def _create_entry_popup(self, idx):\n entry = self._items[idx]\n\n frame = style.Frame(self._entry_popup, style=self._style)\n\n # Make only the Text widget (which holds the entry's value) resizeable.\n frame.grid_columnconfigure(1, weight=1)\n frame.grid_rowconfigure(1, weight=1)\n\n style.Label(frame, style=self._header_style, text=\"TYPE:\").grid(row=0, column=0)\n style.Label(frame, style=self._label_style, text=ntutils.type_constant_to_str(entry.getType()))\\\n .grid(row=0, column=1)\n\n style.Label(frame, style=self._header_style, text=\"DATA:\").grid(row=1, column=0)\n data = style.Text(frame, style=self._info_text_style, width=self.INFO_DATA_WIDTH, height=self.INFO_DATA_HEIGHT)\n data.insert(tk.END, 
str(entry.value))\n # Disable the text widget so it cannot be edited.\n data.configure(state=tk.DISABLED)\n data.grid(row=1, column=1, sticky=tk.NSEW)\n\n # Forcefully close an already-open popup window, should one exists. PopupManager prevents having more than one\n # popup open at once and queues any subsequent popups, which would be weird in this case.\n if self._entry_popup.popup_open:\n self._entry_popup.close_current_popup()\n\n # Create the popup window with the entry's name as the title of the window.\n self._entry_popup.create(frame, entry.getName())", "def create_text_box(self, box_pos, text_font):\n self.textBox = tk.Text(self.top, height=1, width=17,\n font=('Helvetica', text_font))\n self.textBox.grid(row=box_pos[0], column=box_pos[1],\n columnspan=box_pos[2], rowspan=box_pos[3])", "def draw(self, screen):\n lines = self.text.strip().split('\\n')\n y = self.y\n for line in lines:\n self.ui.show_text(line, (self.x, y), 30)\n y += 32", "def initWidgets(self):\n self.lambdtext.setText(str(self.lambd))\n self.ptext.setText(str(self.p))", "def createFrame(self,message):\n \n f = self.frame\n \n label = Tk.Label(f,text=message)\n label.pack(pady=10)\n \n self.id_entry = text = Tk.Entry(f,width=20)\n text.pack()", "def add_text_box(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '', password = False):\n\n id = 'Widget{}'.format(len(self.widgets.keys()))\n new_text_box = widgets.TextBox(id, title, self.grid, row, column, row_span, column_span, padx, pady, initial_text, password)\n self.widgets[id] = new_text_box\n if self.selected_widget is None:\n self.set_selected_widget(id)\n return new_text_box", "def generate_output(self, title):\n try:\n # First create the window inside the application used to hold it.\n # Then proceed to show the newly created window and fill it with\n # the data we want to display.\n self.title = title\n\n crea = self.create()\n\n if not crea:\n self.ClearLines()\n #self.add_line(\"hola manola\")\n self.refresh()\n else:\n #print \"1 OK...\"\n pass\n\n self.show()\n self.colorize() # This must be implemented in the\n # derived class.\n #self.refresh()\n\n except Exception, err:\n if self.debug:\n print format_exc()\n self.close()\n raise TextOutputMediaException(\n \"Error creating viewer called \\\"%s\\\"\" % title)", "def populate_contents(self):\n\n data_table = self.data_table\n world = self.world\n\n self.add_text_row('World Name', data_table.world_name_label.text())\n self.add_text_row('Coordinates', data_table.world_coords_label.text())\n self.add_text_row('World Type', data_table.world_type_label.text())\n if data_table.world_extra_label.text() != '':\n self.add_text_row('Extra Info', data_table.world_extra_label.text())\n self.add_text_row('Filename', world.base_filename)\n self.add_text_row('Size', '{}x{}'.format(*world.info.size))\n\n if len(world.info.dungeons) > 0:\n dungeons = self.add_text_row('Dungeons', '<br/>'.join(sorted(world.info.dungeons)))\n else:\n self.add_text_row('Dungeons', '-')\n\n if len(world.info.biomes) > 0:\n biomes = self.add_text_row('Biomes', '<br/>'.join(sorted(world.info.biomes)))\n else:\n self.add_text_row('Biomes', '-')", "def draw(self, win):\n self.rect.draw(win)\n self.text.draw(win)", "def create_ui(self):\n main_sizer = wx.BoxSizer(wx.VERTICAL)\n\n self.text_ctrl = wx.TextCtrl(self, style=wx.TE_MULTILINE)\n self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text)\n main_sizer.Add(self.text_ctrl, 1, wx.ALL | wx.EXPAND, 5)\n\n self.SetSizer(main_sizer)", "def 
__inputBox(gwin,height,word): \r\n prompt=Text(Point(155,height),word)\r\n prompt.setSize(10)\r\n prompt.draw(gwin)\r\n inputBox=Entry(Point(450,height),2)\r\n inputBox.setSize(60)\r\n inputBox.setFill(\"white\")\r\n inputBox.draw(gwin)\r\n return inputBox", "def text_draw(self, x, y, text, style={}):", "def SetContent(self, window):\n window.SetName(\"content\")\n window.SetBackgroundColour(wx.GetApp().settings.bg_color)\n window.SetForegroundColour(wx.GetApp().settings.fg_color)\n window.SetFont(wx.GetApp().settings.text_font)", "def create_widgets(self):\n self.instruction = Label(self, text=\"Enter the password \")\n self.instruction.grid(row=0, column=0, columnspan=2, sticky=W)\n\n self.password_entry = Entry(self)\n self.password_entry.grid(row=0, column=1, sticky=W)\n\n self.submit_button = Button(self, text=\"Submit\",\n command=self.reveal, width=10)\n self.submit_button.grid(row=2, column=0, sticky=W)\n\n self.exit_button = Button(self, text=\"Exit\",\n command=self.quit, width=10)\n self.exit_button.grid(row=2, column=1, sticky=W)\n\n #self.close_button = Button(self, text = \"Close\", command = self.quit)\n #self.close_button.grid(row = 2, column = 0, sticky = E)\n\n self.text = Text(self, width=35, height=5, wrap=WORD)\n self.text.grid(row=3, column=0, columnspan=2, sticky=W)", "def createFrame (self,message):\n \n f = self.frame\n \n lab = Tk.Label(f,text=message)\n lab.pack(pady=10,side=\"left\")\n \n self.number_entry = t = Tk.Entry(f,width=20)\n t.pack(side=\"left\")", "def text(self) -> None:\n label_space = tk.Label(self)\n label_space.grid(row=0)\n label_book_number = tk.Label(self, text=f'Номер книги:')\n label_book_number.grid(row=1, column=0, ipady=5)\n label_title = tk.Label(self, text='Название книги:')\n label_title.grid(row=2, column=0, padx=5)\n label_author = tk.Label(self, text='Автор:')\n label_author.grid(row=3, column=0, pady=5)\n label_genre = tk.Label(self, text='Жанр:')\n label_genre.grid(row=4, column=0)", "def __init__(self, parent, title, fileName, data=None, xsize = 625, ysize = 500, font = None):\n Toplevel.__init__(self, parent)\n self.configure(borderwidth=5)\n self.geometry(\"=%dx%d+%d+%d\" % (xsize, ysize,\n parent.winfo_rootx() + 10,\n parent.winfo_rooty() + 10))\n #elguavas - config placeholders til config stuff completed\n self.bg = '#ffffff'\n self.fg = '#000000'\n\n self.font = font\n self.CreateWidgets()\n self.title(title)\n self.transient(parent)\n self.grab_set()\n self.protocol(\"WM_DELETE_WINDOW\", self.Ok)\n self.parent = parent\n self.textView.focus_set()\n #key bindings for this dialog\n self.bind('<Return>',self.Ok) #dismiss dialog\n self.bind('<Escape>',self.Ok) #dismiss dialog\n if data:\n self.textView.insert(0.0, data)\n else:\n self.LoadTextFile(fileName)\n self.textView.config(state=DISABLED)\n self.wait_window()", "def update_text(self: object, widget: Text, new_text: str) -> None:\n widget.delete(\"1.0\", END) #Clear the text window so we can write.\n widget.insert(END,new_text)", "def createFrame (self):\n \n frame = self.frame\n theCopyright = self.copyright ; email = self.email\n url = self.url ; version = self.version\n \n # Calculate the approximate height & width. 
(There are bugs in Tk here.)\n lines = string.split(theCopyright,'\\n')\n height = len(lines) + 8 # Add lines for version,url,email,spacing.\n width = 0\n for line in lines:\n width = max(width,len(line))\n width = max(width,len(url))\n width += 10 # 9/9/02\n \n frame.pack(padx=6,pady=4)\n \n self.text = text = Tk.Text(frame,height=height,width=width,bd=0,bg=frame.cget(\"background\"))\n text.pack(pady=10)\n \n try:\n bitmap_name = g.os_path_join(g.app.loadDir,\"..\",\"Icons\",\"Leoapp.GIF\") # 5/12/03\n image = Tk.PhotoImage(file=bitmap_name)\n text.image_create(\"1.0\",image=image,padx=10)\n except:\n g.es(\"exception getting icon\")\n g.es_exception()\n \n text.insert(\"end\",version,\"version\")\n text.insert(\"end\",theCopyright,\"copyright\")\n text.insert(\"end\",'\\n')\n text.insert(\"end\",url,\"url\") # Add \"url\" tag.\n text.insert(\"end\",'\\n')\n text.insert(\"end\",email,\"email\") # Add \"email\" tag.\n \n text.tag_config(\"version\",justify=\"center\")\n text.tag_config(\"copyright\",justify=\"center\",spacing1=\"3\")\n \n text.tag_config(\"url\",underline=1,justify=\"center\",spacing1=\"10\")\n text.tag_bind(\"url\",\"<Button-1>\",self.onAboutLeoUrl)\n text.tag_bind(\"url\",\"<Enter>\",self.setArrowCursor)\n text.tag_bind(\"url\",\"<Leave>\",self.setDefaultCursor)\n \n text.tag_config(\"email\",underline=1,justify=\"center\",spacing1=\"10\")\n text.tag_bind(\"email\",\"<Button-1>\",self.onAboutLeoEmail)\n text.tag_bind(\"email\",\"<Enter>\",self.setArrowCursor)\n text.tag_bind(\"email\",\"<Leave>\",self.setDefaultCursor)\n \n text.configure(state=\"disabled\")", "def add_text(self, text, color, pos, font):\n text = font.render(text, True, color)\n text_rec = text.get_rect(center=pos)\n self.window.blit(text, text_rec)", "def add_text_data(self, text):\n data = QtWidgets.QLabel(text)\n data.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)\n self.grid.addWidget(data, self.cur_row, 1, QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)\n return data", "def display_text(text, x, y, size):\r\n font = pygame.font.Font('freesansbold.ttf', size)\r\n text_surf, text_rect = text_objects(text, font)\r\n text_rect.center = (x, y)\r\n display.blit(text_surf, text_rect)", "def displayTextToScreen(wof_settings,screen,title_image,text,text_font,text_font_size):\n \n # Position the title image\n title_rect = pygame.image.load(title_image).get_rect()\n top_coord = 50\n title_rect.top = top_coord\n title_rect.centerx = wof_settings.width/2\n top_coord += title_rect.height\n \n # Start with drawing a blank color to the entire window:\n screen.fill(wof_settings.titleScreenBgColor)\n \n # Title image\n screen.blit(pygame.image.load(title_image), title_rect)\n \n # Position and draw the text\n for i in range(len(text)):\n title_font = pygame.font.Font(text_font, text_font_size)\n text_surf = title_font.render(text[i], 1, wof_settings.titleTextColor)\n text_rect = text_surf.get_rect()\n top_coord += 10\n text_rect.top = top_coord\n text_rect.centerx = wof_settings.width/2\n top_coord += text_rect.height\n screen.blit(text_surf, text_rect)\n \n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n terminate()\n return # user has pressed a key, so return.\n \n # Display the contents to the actual screen.\n pygame.display.flip()" ]
[ "0.6733427", "0.6384407", "0.63377374", "0.63295066", "0.6320589", "0.62267315", "0.6192366", "0.61501503", "0.61278635", "0.61269283", "0.6098317", "0.6081952", "0.60348564", "0.59971297", "0.5979407", "0.5946554", "0.5944347", "0.59384257", "0.5934913", "0.59174144", "0.5904648", "0.5883109", "0.58823997", "0.58782065", "0.5876396", "0.5864052", "0.5852077", "0.58343446", "0.58110946", "0.5808503" ]
0.6892847
0
Clears the search bar.
def _clear_search_bar(self): self.search_bar.setText("") self.search_bar.setFocus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __searchClearMarkers(self):\n self.activeWindow().clearSearchIndicators()", "def resetSearch(entry,searchCommand):\n\taddDataToWidget(entry,\"\")\n\trunCommand(searchCommand)", "def _(event):\n search_buffer = event.cli.buffers[SEARCH_BUFFER]\n\n search_buffer.reset()\n event.cli.pop_focus()", "def actionClear(self):\n self.setText(\"\")", "def clear(self):\n self.results.clear()", "def clear(self):\n for inp in self.inputlst:\n inp.setText(\"\")", "def clear(self) -> None:\n self._used = set()\n self.search_pos = 1", "def clear_dialog(self) -> None:\n self._client.results = {}\n self._client.clear_elements()", "def clear(self):\n self.call('clear')", "def on_pre_enter(self, *args):\n self.ids['search'].text = ''\n self.filter()", "def Clear_input(self):\r\n self.root.ids.place_name.text = '' # Clear input\r\n self.root.ids.place_country.text = ''\r\n self.root.ids.place_priority.text = ''", "def clear(self):\n ...", "def clear():\n\n os.system(\"clear\")", "def Clear(self) -> None:", "def clear_result(self):\n\n self.ui.plainTextEdit.clear()", "def clear(self) -> None:\n ...", "def clear():\n click.clear()", "def clear():\n clear_output()", "def new_search(self, widget, data=None):\n self.artist_name.set_text(\"\")\n self.song_name.set_text(\"\")\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n self.window.resize(self.width, self.height)", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def reset_search(self):\n self._state = IdleTracker._INITIAL_STATE\n self._scan_count = 0\n self._candidate_list = None", "def reset_widgets(self):\n\n widgets = [\n self.test_input,\n self.results_input\n ]\n\n for widget in widgets:\n clear_text(widget)", "def onSpaceBarClear(self):\n if self.validateSelection():\n self.resetCamera()\n self.clearMouseSelection()\n self.zoomOutCamera()\n self.setShortcuts()\n self.enableScrollWheelZoom = 1", "def clear(self) -> None:\n pass" ]
[ "0.7494229", "0.72530705", "0.68444264", "0.663171", "0.6616456", "0.6569816", "0.6538454", "0.65167445", "0.64899653", "0.64673346", "0.64512825", "0.64437383", "0.64305097", "0.64063007", "0.64059985", "0.640341", "0.6392905", "0.6386323", "0.637754", "0.636638", "0.636638", "0.636638", "0.636638", "0.636638", "0.636638", "0.636638", "0.6348106", "0.6343008", "0.6342694", "0.6338552" ]
0.9045618
0
r""" Initialize the stream with the ast that will produce this stream of objects. The user will almost never use this initializer.
def __init__(self, the_ast): self._ast = the_ast
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__ (self, stream):\n self.classes = [ ]\n self.functions = [ ]\n self.classDocs = [ ]\n\n if stream is not None:\n self.read(stream)", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self):\n\n self.loops = []\n self.ast_util = ast_util.ASTUtil()", "def __init__(self, input_file_name, ast_root_node):\n\n self.input_file_name = input_file_name\n self.ast_root_node = ast_root_node\n self.object_file = ObjectFile(self.input_file_name)\n self.assembler = X64Assembler()", "def __init__(self, stream):\n self.stream = stream", "def __init__(self, stream):\n self.stream = stream", "def __init__(self):\n self._header = self._Node(None, None, None)\n self._trailer = self._Node(None, None, None)\n self._header._next = self._trailer\n self._trailer._prev = self._header\n self._size = 0", "def __init__(self):\n self._header = self._Node(None, None, None)\n self._trailer = self._Node(None, None, None)\n self._header._next = self._trailer # trailer is after header\n self._trailer._previous = self._header # header is before trailer\n self._size = 0 # set size to 0", "def __init__(self):\n this = _libsbml.new_RDFAnnotationParser()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.entries = []\n self.source_path = None\n self.source_file = None\n self.source_file_mtime = None\n self.parser = None", "def __init__(self):\n\n self._header = self._Node(None, None, None)\n self._trailer = self._Node(None, None, None)\n self._header._next = self._trailer # trailer is the next node after header\n self._trailer._prev = self._header # header is the node before trailer\n self._size = 0 # keep track of the number of elements", "def __init__(self, init=None):\n # TODO enable passing of starting node semantics\n super().__init__(init)\n self._internal_trie = Trie(FactNode)\n # parser : PyParsing.ParserElement\n self._main_parser = None\n self._query_parser = None\n\n if init is not None:\n self.add(init)", "def __init__( self, stream=None ):\n if stream is not None:\n self.stream = stream\n else:\n self.stream = StringIO()", "def __init__(self, *args):\n this = _libsbml.new_XMLOutputStream(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, stream, node=None):\r\n super(_StreamFork, self).__init__()\r\n self.stream = stream\r\n self.node = node", "def __init__(self, *args):\n this = _libsbml.new_XMLOwningOutputFileStream(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,stream=None,tabwidth=4):\n\n if stream is None:\n from sys import stdout\n stream = stdout\n self.stream = stream\n self.tabwidth = tabwidth\n self.stack = [0]", "def __init__(self, nodes=None, connections=None):\r\n super(Stream, self).__init__(nodes, connections)\r\n self.logger = get_logger()\r\n\r\n self.exceptions = []", "def __init__(self, stream, lookahead=1):\n # type: (Iterator[T], int) -> None\n self.stream = stream\n self.buffer = deque() # type: deque\n self.lookahead = lookahead\n self._buffer_to(lookahead)", "def __init__(self, indent=1, width=80, depth=None, stream=None):\n\n PrettyPrinter.__init__(self,indent=1, width=80, depth=None, stream=None)", "def __init__(self, *args):\n this = _libsbml.new_InitialAssignment(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, token_iterable, nesting_level=None, filename=None,\n compat_mode=False):\n if compat_mode:\n token_elements = []\n for 
count, t in enumerate(token_iterable):\n token_elements.append(t)\n if count == 2:\n break\n token_elements += [None]*3\n else:\n token_elements = [t for t in token_iterable]\n self.type = token_elements[0]\n self.string = token_elements[1]\n self.start = token_elements[2]\n self.end = token_elements[3]\n self.line = token_elements[4]\n self.type_name = tok_name[self.type]\n self.nesting_level = nesting_level\n self.filename = filename\n self.compat_mode = compat_mode", "def __init__(self):\n self.__rtags = []\n self.__nodedata = []\n self.__ltags = []", "def __init__(self):\n self.data = None\n self.compiled = None", "def __init__(self):\n self.root = [None, dict(), False] # val, sons, end-able", "def init(self) -> None:", "def __init__(self, ast, id=None):\n if ast:\n self.id = self._find_first(ast, 'Id') if ast is not None else None\n else:\n # Support synthesized IDLNode created w/o an AST (e.g., setlike support).\n self.id = id", "def __init__(self, *args):\n this = _libsbml.new_SBMLInitialAssignmentConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\r\n\r\n super(Node, self).__init__()\r\n self.inputs = []\r\n self.outputs = []\r\n self._active_outputs = []\r\n self.description = None\r\n\r\n # Experimental: dictionary to be used to retype output fields\r\n # Currently used only in CSV source node.\r\n self._retype_dictionary = {}", "def __init__(self):\n this = _libsbml.new_SBMLReader()\n try: self.this.append(this)\n except: self.this = this" ]
[ "0.68969566", "0.6610674", "0.6556938", "0.6385011", "0.6377803", "0.6377803", "0.63255113", "0.6307727", "0.62828296", "0.6246137", "0.6227066", "0.6206207", "0.6201002", "0.6200042", "0.6173093", "0.6107011", "0.6083679", "0.6070142", "0.602574", "0.6022117", "0.6009696", "0.6008491", "0.599958", "0.59739745", "0.59699863", "0.5961601", "0.5959732", "0.595902", "0.5958963", "0.5958035" ]
0.74345356
0
r""" Return the sequence of items as a ROOT TTree. Each item in the ObjectStream will get one entry in the file. The items must be of types that the infrastructure
def AsROOTTTree(self, filename, treename, columns=[]): return ObjectStream(ResultTTree(self._ast, columns, treename, filename))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_file(filename, items, folders):\n filename_abs = os.path.join(ROOT_DATA, filename)\n if os.path.isfile(filename_abs):\n root = ROOT.TFile.Open(filename_abs, \"READ\")\n if not root:\n print(\"File '%s' is not a root file\" % filename_abs)\n return None\n json_items = {}\n for item in items:\n json_item = process_item(root, item)\n if json_item:\n json_items[item] = json_item\n json_folders = {}\n for folder in folders:\n if folder != \"\":\n json_folder = process_folder(root, folder)\n if json_folder:\n json_folders[folder] = json_folder\n return {\"root\": filename, \"items\": json_items, \"trees\": json_folders}\n else:\n print(\"File '%s' does not exists\" % filename_abs)\n return None", "def get_items(self):\n export_file = self.cmdline_args.file # see setup_parser\n for a in get_articles(export_file):\n yield node(\n heading=dt_heading(\n a.added,\n # 'pocket' permalink is pretty convenient to jump straight into Pocket app\n link(title='pocket', url=a.pocket_link) + ' · ' + link(title=a.title, url=a.url),\n ),\n children=[node( # comments are displayed as org-mode child entries\n heading=dt_heading(hl.created, hl.text)\n ) for hl in a.highlights]\n )", "def nxroot():\n with h5py.File('dummy.nxs', mode='w', driver=\"core\", backing_store=False) as f:\n root = make_group(f)\n root.create_class('entry', NXentry)\n yield root", "def getAllItems(self):\r\n\r\n control = []\r\n\r\n for folder in self.get_items():\r\n control.append(folder.id)\r\n\r\n while len(control) > 0:\r\n try:\r\n for item in self.get_items(parent=control.pop()):\r\n print(\"Getting content for: \" + item.name)\r\n if item.type == \"folder\":\r\n control.insert(0,item.id)\r\n else:\r\n self.contents[item.id] = {\r\n 'name': item.name, 'parentID': item.parent_id,\r\n 'size': item.size, 'uri': item.download_url\r\n }\r\n except putio.PutioError as err:\r\n print(\"Can't get content for Directory\")\r\n pass\r\n return self.contents", "def run(self, flow):\n from ROOT import TFile\n from lena.context import get_recursively, update_recursively\n from lena.flow import get_data_context\n from copy import deepcopy\n for val in flow:\n data, context = get_data_context(val)\n\n # skip not ROOT files\n if sys.version_info.major == 2:\n str_type = basestring\n else:\n str_type = str\n if not isinstance(data, str_type) or not \\\n get_recursively(context, \"input.read_root_file\",\n True):\n yield val\n continue\n\n root_file = TFile(data, \"read\")\n # context of separate keys shall be updated\n # when they are transformed to other types\n # in other elements\n update_recursively(\n context, {\"input\": {\"root_file_path\": data}}\n )\n\n def get_key_names(fil):\n return [key.GetName() for key in fil.GetListOfKeys()]\n key_names = get_key_names(root_file)\n\n for key_name in key_names:\n # result of TFile.Get is not a TKey, but a proper type\n obj = root_file.Get(key_name)\n if self._selector:\n if not self._selector(obj):\n continue\n yield (obj, deepcopy(context))\n\n # will be closed after\n # following elements used its data\n root_file.Close()", "def top_level_items(self, stix_package):\n \n stix_header = stix_package.stix_header\n \"\"\":type : STIXHeader\"\"\"\n if stix_header.title or stix_header.description or stix_header.short_description:\n \"\"\"We have a stix package!\"\"\"\n yield (\"STIXHeader\", stix_package, )\n\n for indicator in stix_package.indicators or []:\n yield (\"Indicator\", indicator, )\n\n for observable in stix_package.observables or []:\n yield (\"Observable\", observable, )\n\n for 
ttp in stix_package.ttps or []:\n yield (\"TTP\", ttp, )\n\n for incident in stix_package.incidents or []:\n yield (\"Incident\", incident,)\n\n for campaign in stix_package.campaigns or []:\n yield (\"Campaign\", campaign, )\n\n for course_of_action in stix_package.courses_of_action or []:\n yield (\"CourseOfAction\", course_of_action, )\n\n for exploit_target in stix_package.exploit_targets or []:\n yield (\"ExploitTarget\", exploit_target, )\n\n for related_package in stix_package.related_packages or []:\n yield (\"RelatedPackage\", related_package, )\n\n for threat_actor in stix_package.threat_actors or []:\n yield (\"ThreatActor\", threat_actor, )", "def filetree(self) -> P:\n ...", "def __parse(self):\n # raw/objects: detect name, type, use major tag for type as parent node\n # raw/graphics: as object raw, but add TILE_PAGE\n # init: usually flat file, except\n # embark_profiles.txt: [PROFILE] is parent\n # interface.txt: [BIND] is parent (legacy will be flat)\n # world_gen.txt: [WORLD_GEN] is parent\n # Non-raw files (unsupported): init/arena.txt, subdirs of raw/objects\n parse_raw(self, self.read(self.filename))", "def makeTree(self):\n return makeTree(self.events,self.outTree)", "def tar_file_and_group(data):\n for sample in data:\n assert \"stream\" in sample\n stream = tarfile.open(fileobj=sample[\"stream\"], mode=\"r|*\")\n prev_prefix = None\n example = {}\n valid = True\n for tarinfo in stream:\n name = tarinfo.name\n pos = name.rfind(\".\")\n assert pos > 0\n prefix, postfix = name[:pos], name[pos + 1 :]\n if prev_prefix is not None and prefix != prev_prefix:\n example[\"key\"] = prev_prefix\n if valid:\n yield example\n example = {}\n valid = True\n with stream.extractfile(tarinfo) as file_obj:\n try:\n if postfix == \"txt\":\n example[\"txt\"] = file_obj.read().decode(\"utf8\").strip()\n elif postfix in AUDIO_FORMAT_SETS:\n waveform, sample_rate = torchaudio.load(file_obj)\n example[\"wav\"] = waveform\n example[\"sample_rate\"] = sample_rate\n else:\n example[postfix] = file_obj.read()\n except Exception as ex:\n valid = False\n logging.warning(\"error to parse {}\".format(name))\n prev_prefix = prefix\n if prev_prefix is not None:\n example[\"key\"] = prev_prefix\n yield example\n stream.close()\n if \"process\" in sample:\n sample[\"process\"].communicate()\n sample[\"stream\"].close()", "def iterate_item(tree_item):\n if not tree_item.is_construct():\n return\n for index, arg in enumerate(tree_item.construct.args):\n if isinstance(arg, syntax.Construct):\n yield from iterate_item(TreeItem(arg, tree_item, index))\n elif isinstance(arg, list):\n for i, litem in enumerate(arg):\n yield from iterate_item(TreeItem(litem, tree_item, index, i))\n yield tree_item", "def generate(self):\n tree = self._generator.build_tree()\n\n for entry in tree:\n print(entry)", "def blobs(self):\n blobs = pipeline(\n ftype_selector([FILE]),\n fmap(first),\n fmap(self.reverser),\n )(self.root.entries())\n return blobs", "def read_all_trees(self, **kwargs):\n return self.read_single_tree(-1, halonum=None, **kwargs)", "def iter_children_or_self(self) -> Iterator[Optional[ItemType]]:\n if self.axis is not None:\n yield self.item\n elif isinstance(self.item, (ElementNode, DocumentNode)):\n _status = self.item, self.axis\n self.axis = 'child'\n\n for self.item in self.item:\n yield self.item\n\n self.item, self.axis = _status\n\n elif self.item is None:\n self.axis = 'child'\n\n if isinstance(self.root, DocumentNode):\n for self.item in self.root:\n yield self.item\n else:\n # document position 
without a document node -> yield root ElementNode\n yield self.root\n\n self.item = self.axis = None", "def items(self):\n current = self.first\n output = []\n\n while current is not None:\n output.append(current.item)\n current = current.next_node\n\n return output", "def _buildindex( self ):\n try:\n import ROOT as rt\n except:\n print \"Could not load ROOT\"\n sys.exit(-1)\n \n # sigh. this is a mess\n self.producers = [] # all producer names found in ROOT files\n self.datatypes = [] # all data types\n self.flavors = [] # flavor = hash of string listing set of trees found in a given file\n self.flavor_def = {} # map from flavor to list of tree names\n self.rawdigits_entrymap = {} # only used if file type is raw digits. maps rse to (position,wfms) in data tree\n self.rawdigits_tpcindex = {}\n flavor_eventset = {}\n eventsets = []\n events_to_files = {}\n events_to_flavors = {}\n\n # this loop is going into each file in our list and\n # - taking the list of trees in the file and making a has out of their names\n # - this hash is used to define the 'flavor' of the file\n # - we also make a list of events in the tree, labeling each entry with (run,subrun,event) ID\n # - we keep track of such list of entries and group files (and flavors) with the same event list\n # - determine filetype: LArCV or LArLite\n self.filetype = None\n for f in self.larlitefilelist:\n r = rt.TFile(f)\n nfkeys = r.GetListOfKeys().GetEntries()\n\n # now here we parse the type of objects in the ROOT file\n # we are looking to determine three file types supported by pylard\n # (1) larlite (2) larcv (3) rawdigitreader\n trees = []\n for i in range(nfkeys):\n keyname = r.GetListOfKeys().At(i).GetName()\n if keyname==\"larlite_id_tree\":\n found_id_tree = True\n elif \"_tree\" in keyname:\n producer = keyname.split(\"_\")[1]\n dtype = keyname.split(\"_\")[0]\n if producer not in self.producers:\n self.producers.append( producer )\n if dtype not in self.datatypes:\n self.datatypes.append( dtype )\n elif \"rawdigitwriter\" in keyname:\n trees.append( \"rawdigitwriter/RawDigits\" )\n trees.append( \"rawdigitwriter/OpDetWaveforms\" )\n trees.append( \"rawdigitwriter/IndexRawDigits\" )\n trees.append( \"rawdigitwriter/IndexOpDetWfms\" )\n if keyname not in trees:\n trees.append(keyname)\n hashstr = \"\"\n trees.sort()\n for keyname in trees:\n hashstr += keyname +\";\"\n\n # determine filetype from type of keys we see\n is_supported_rootfile = False\n idtreename = None\n if \"larlite_id_tree\" in trees:\n thisfiletype = \"LARLITE\"\n is_supported_rootfile = True\n if \"image2d\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"partroi\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"rawdigitwriter/OpDetWaveforms\" in trees:\n thisfiletype = \"RAWDIGITS\"\n is_supported_rootfile = True\n if not is_supported_rootfile:\n continue\n\n if self.filetype is not None and self.filetype!=thisfiletype:\n print \"Error in parsing filelist: Cannot mix filetypes (LArCV/LArLite/RawDigitTree)\"\n return\n elif self.filetype is None:\n self.filetype = thisfiletype\n \n # now we determine the idtree to use\n if self.filetype==\"LARLITE\":\n idtreename = \"larlite_id_tree\"\n elif self.filetype==\"LARCV\":\n if self.loaded_larcv == False:\n s = time.time()\n import larcv as larcv\n print \"LOADING LARCV: \",time.time()-s,\"secs\"\n self.loaded_larcv = True\n for treename in trees:\n if \"image2d\" in treename:\n if idtreename is None:\n idtreename = treename\n else:\n pass # 
we only use this if we have to\n if \"partroi\" in treename:\n idtreename = treename # we prefer to use this tree for speed\n break\n elif self.filetype==\"RAWDIGITS\":\n idtreename = \"rawdigitwriter/IndexOpDetWfms\"\n\n if idtreename is None:\n print \"Error: Could not setup a proper ID tree for this file\"\n continue\n\n # now we parse the tree contents. define a flavor for it based on all the trees\n # we also get the (run,subrun,event) id for the event\n m = hashlib.md5()\n m.update(hashstr)\n flavor = m.digest()\n if flavor not in self.flavors:\n self.flavors.append( flavor )\n flavor_eventset[flavor] = []\n self.flavor_def[flavor] = hashstr\n if self.filetype==\"LARLITE\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"LARCV\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"RAWDIGITS\":\n idtree = r.Get(idtreename)\n \n eventset = [] # list of events\n for n in range(idtree.GetEntries()):\n idtree.GetEntry(n)\n if self.filetype==\"LARLITE\":\n rse = ( idtree._run_id, idtree._subrun_id, idtree._event_id )\n elif self.filetype==\"LARCV\":\n idbranchname = idtreename.replace(\"_tree\",\"_branch\")\n idbranch = None\n exec(\"idbranch=idtree.%s\"%(idbranchname))\n rse = ( idbranch.run(), idbranch.subrun(), idbranch.event() )\n elif self.filetype==\"RAWDIGITS\":\n rse = ( idtree.idx_run, idtree.idx_subrun, idtree.idx_event )\n self.rawdigits_entrymap[rse] = (idtree.entrystart, idtree.nentries )\n eventset.append(rse)\n if rse not in flavor_eventset[flavor]:\n flavor_eventset[flavor].append( rse )\n else:\n raise ValueError( \"found a repeated run/subrun/event index (%s). what?\"%( str(rse) ) )\n if self.filetype==\"RAWDIGITS\":\n # rawdigits has another tree index for the TPC\n tpcindex = r.Get(\"rawdigitwriter/IndexRawDigits\")\n for n in range(tpcindex.GetEntries()):\n tpcindex.GetEntry(n)\n rse = ( tpcindex.idx_run, tpcindex.idx_subrun, tpcindex.idx_event )\n self.rawdigits_tpcindex[rse] = (tpcindex.entrystart, tpcindex.nentries)\n \n eventset = tuple(eventset)\n if eventset not in events_to_files:\n events_to_files[eventset] = {}\n events_to_flavors[eventset] = []\n eventsets.append( eventset )\n events_to_files[eventset][flavor] = f\n events_to_flavors[eventset].append( flavor )\n del idtree\n r.Close()\n self.parsed = True\n\n # now we take our collection of event lists and\n # - sort the event lists\n # - make lists of files with the same set of events in the order of the sorted event list\n # - for each list we also make a dictionary between (run,subrun,event) index to the entry number\n # - we pick the list with the biggest number of events as the \"official\" file list\n eventsets.sort()\n flavorfiles = {}\n flavorsets = []\n\n flavorset_rse_dict = {}\n flavorset_entry_dict = {}\n for eventset in eventsets:\n events_to_flavors[eventset].sort() # sort the flavors with this event-set\n flavorset = tuple( events_to_flavors[eventset] )\n if flavorset not in flavorfiles:\n flavorfiles[flavorset] = []\n flavorsets.append(flavorset)\n flavorset_rse_dict[flavorset] = {}\n flavorset_entry_dict[flavorset] = {}\n for flavor in flavorset:\n flavorfiles[flavorset].append( events_to_files[eventset][flavor] )\n for rse in eventset:\n ientry = len( flavorset_rse_dict[flavorset] )\n flavorset_rse_dict[flavorset][rse] = ientry\n flavorset_entry_dict[flavorset][ientry] = rse\n\n # look for largest fileset\n maxset = None\n nfiles = 0\n for fset in flavorsets:\n n = len(flavorfiles[fset])\n if n>nfiles:\n nfiles = n\n maxset = fset\n # these are the final file list and event dictionary 
we want\n self.sorted_filelist = flavorfiles[maxset]\n self.rse_dict = flavorset_rse_dict[maxset]\n self.entry_dict = flavorset_entry_dict[maxset]\n\n # for rawdigits, we also build the entry to data map\n if self.filetype==\"RAWDIGITS\":\n treepos = 0\n treepos_tpc = 0\n for entry in range(len(self.entry_dict)):\n rse = self.entry_dict[entry] \n # update OPDET tree\n pos_entries = self.rawdigits_entrymap[rse] # pos is from start of file, nentries is for the event block\n merged_pos_entries = ( treepos, pos_entries[1] )\n treepos += pos_entries[1]\n self.rawdigits_entrymap[rse] = merged_pos_entries # update \n # update TPC tree\n pos_entries = self.rawdigits_tpcindex[rse]\n merged_pos_entries = ( treepos_tpc, pos_entries[1] )\n treepos_tpc += pos_entries[1]\n self.rawdigits_tpcindex[rse] = merged_pos_entries # update", "def getObjs(infile, verbose=False):\n\n # Try to Open File\n fin = ROOT.TFile(infile, \"READ\")\n file_open = fin.IsOpen()\n err_mess = \"\\n***Can not open ROOT file %s***\\n\" %(infile)\n assert (file_open),err_mess\n\n # List object for directories\n obj_list = []\n\n # Loop through keys and get directories\n for key in fin.GetListOfKeys():\n kname = key.GetName()\n if key.IsFolder():\n continue\n if ( kname != ''):\n obj_list.append(kname)\n\n fin.Close()\n\n err_mess = \"\\n*** There are no objects in this %s ***\\n\" %(infile)\n assert (obj_list),err_mess\n\n return obj_list", "def items(self):\n return self.root.items()", "def __iter__(self):\n # Wait to open file until needed. Automatically close file when done.\n with open(self.obo_file) as fstream:\n rec_curr = None # Stores current GO Term\n typedef_curr = None # Stores current typedef\n for line in fstream:\n # obo lines start with any of: [Term], [Typedef], /^\\S+:/, or /^\\s*/\n if self.data_version is None:\n self._init_obo_version(line)\n if rec_curr is None and line[0:6].lower() == \"[term]\":\n rec_curr = GOTerm()\n if self.optobj:\n self.optobj.init_datamembers(rec_curr)\n elif typedef_curr is None and line[0:9].lower() == \"[typedef]\":\n typedef_curr = TypeDef()\n elif rec_curr is not None or typedef_curr is not None:\n line = line.rstrip() # chomp\n if line:\n self._add_to_obj(rec_curr, typedef_curr, line)\n else:\n if rec_curr is not None:\n yield rec_curr\n rec_curr = None\n elif typedef_curr is not None:\n # Save typedef.\n self.typedefs[typedef_curr.id] = typedef_curr\n typedef_curr = None\n # Return last record, if necessary\n if rec_curr is not None:\n yield rec_curr", "def flatten(items):\n for x in items:\n if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):\n for sub_x in flatten(x):\n yield sub_x\n else:\n yield x", "def get_object(directory):\r\n path = join(self.base_path, directory)\r\n for f in listdir(path):\r\n if isfile(join(path, f)):\r\n yield \"file\", f\r\n else:\r\n yield \"subdir\", join(directory, f)", "def files_and_folders(self):\n yield from self._root.files_and_folders(0)", "def items(self, _prec=\"\"):\n if self.isLeaf:\n yield (_prec + self.ch, self.value)\n\n for chld in self.children.values():\n yield from chld.items(_prec + self.ch)", "def __init__(self, items=None):\n self.root = None\n self.size = 0\n if items is not None:\n for item in items:\n self.insert(item)", "def serialize(self, root):\n res=[]\n q=Queue()\n q.put(root)\n i=1\n length=self.__findlast(root)\n res=[None]*lenth", "def iterate_item_bfs(tree_item):\n if not tree_item.is_construct():\n return\n yield tree_item\n for index, arg in enumerate(tree_item.construct.args):\n if isinstance(arg, 
syntax.Construct):\n yield from iterate_item_bfs(TreeItem(arg, tree_item, index))\n elif isinstance(arg, list):\n for i, litem in enumerate(arg):\n yield from iterate_item_bfs(TreeItem(litem, tree_item, index, i))", "def __init__(self, items=None):\n self.root = None\n if items is not None:\n for item in items:\n self.insert(item)", "def __init__(self, container=[]):\n # Initialize empty tree.\n self.root = None\n # Insert every item from container.\n for item in container:\n self.insert(item)", "def items(self):\n items = []\n for item in self.contents:\n items.append((item, self.contents[item]))\n return items" ]
[ "0.5508605", "0.5405508", "0.53563607", "0.5312198", "0.5302163", "0.5286656", "0.52832603", "0.5263575", "0.5256124", "0.5242858", "0.5220302", "0.5193929", "0.5183157", "0.5164565", "0.5151628", "0.5146107", "0.5141552", "0.51360893", "0.512639", "0.509513", "0.5089428", "0.50693935", "0.5056433", "0.5028591", "0.50212747", "0.5010693", "0.50060534", "0.4993082", "0.499211", "0.4981698" ]
0.6185486
0
We get the notification that the file has been uploaded.
def notify_upload(): file_type = request.json.get("file_type") file_name = request.json.get("file_name") file_path = request.json.get("file_path") file_size = request.json.get("file_size") print("File was uploaded:", file_path, file_name, file_type) # Marks the upload as confirmed. d = datetime.datetime.utcnow() db.upload.update_or_insert( ((db.upload.owner == get_user_email()) & (db.upload.file_path == file_path)), owner=get_user_email(), file_path=file_path, file_name=file_name, file_type=file_type, file_date=d, file_size=file_size, confirmed=True, ) # Returns the file information. return dict( download_url=gcs_url(GCS_KEYS, file_path, verb='GET'), file_date=d, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_file_upload_status(\n self, file_name: str, status: FileManagementStatus\n ) -> None:\n message = self.message_factory.make_from_file_management_status(\n status, file_name\n )\n if not self.connectivity_service.publish(message):\n self.message_queue.put(message)\n if (\n status.status == FileManagementStatusType.FILE_READY\n and self.file_management\n ):\n file_list = self.file_management.get_file_list()\n message = self.message_factory.make_from_file_list(file_list)\n if not self.connectivity_service.publish(message):\n self.message_queue.put(message)", "def uploaded(self, filename):\n return send_from_directory(self.upload_path, filename)", "def completed_file(self, context):", "def notify_file_transfer_completed(self):\n self.presentation.load() if len(self.presentation.presentation_elements) == 0 else self.presentation.reload()", "def upload_finish(self, cloud_file):", "def upload_progress(self, cloud_file, size, uploaded):", "def file_upload(self, req, folder_path):\n\t\tresult, filename = self.handle_upload(req, folder_path)\n\t\tfile_url = self.selected_root['url_callback'](req, folder_path, filename)\n\t\t\n\t\tself.content_type = 'text/html'\n\t\tself.content = [str(tags.script(type=\"text/javascript\")[\n\t\t\t\t\t\t\"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename)\n\t\t\t\t\t\t])]", "def upload(self, filename, file_path):\n return", "def upload_file(self):\n \n try:\n with open(self.full_path_of_file, 'rb') as f:\n r = requests.post(self.upload_url, files={'file': f})\n\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n if r.status_code == requests.codes.ok:\n self.__set_full_path_of_file(None)\n messagebox.showinfo(\"Information\", \"File uploaded successfully!\")\n else:\n messagebox.showerror(\"Error\", \"Could not upload file\")\n except AttributeError:\n # this exceptions is raised when the 'Upload file' button was pressed but\n # no file was previously selected\n pass\n except TypeError:\n # this exceptions is raised when the 'Upload file' button was pressed \n # after the user already uploaded a file. 
Now a new file shoud be selected\n # and uploaded or just go Back to the main menu\n pass", "def complete_upload(self):\r\n xml = self.to_xml()\r\n return self.bucket.complete_multipart_upload(self.key_name,\r\n self.id, xml)", "def test_successfulUpload(self):\n def _cb(ign):\n self.assertEquals(\n self.backendStore.events,\n [('storeObject',\n self.backendStore,\n 'somecontent',\n u'application/octet-stream',\n {},\n self.testObject.created,\n self.testObject.objectId)])\n self.assertRaises(ItemNotFound,\n self.store.findUnique,\n _PendingUpload)\n return self.pendingUpload.attemptUpload().addCallback(_cb)", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)", "def take_action(self, parsed_args):\n if parsed_args.file:\n for file in parsed_args.file:\n if not os.path.exists(file):\n self.logger.error('Specified file does not exist: {}'.format(file))\n continue\n self.logger.info('File uploading is started: {}'.format(file))\n file_id = self.app.metagen.upload_files(file)\n if not file_id:\n return False\n self.logger.info('File {} has been sent to analysis.'.format(file))\n self.logger.info('Use File ID to get Analysis Result: {}'.format(file_id))\n self.logger.info('Task Done')", "async def _file_upload_complete(\n self,\n turn_context: TurnContext,\n file_consent_card_response: FileConsentCardResponse\n ):\n\n name = file_consent_card_response.upload_info.name\n\n download_card = FileInfoCard(\n unique_id=file_consent_card_response.upload_info.unique_id,\n file_type=file_consent_card_response.upload_info.file_type\n )\n\n as_attachment = Attachment(\n content=download_card.serialize(),\n content_type=ContentType.FILE_INFO_CARD,\n name=name,\n content_url=file_consent_card_response.upload_info.content_url\n )\n\n reply = self._create_reply(\n turn_context.activity,\n f\"<b>Report uploaded to your OneDrive.</b> Your report <b>{name}</b> is ready to download\",\n \"xml\"\n )\n reply.attachments = [as_attachment]\n\n await turn_context.send_activity(reply)\n reply = self._create_reply(\n turn_context.activity, f\"Would you like to update report parameters or the options for this report? Then Please update the template and upload...\", \"xml\"\n )\n await turn_context.send_activity(reply)", "def upload(self):\n\n # Try to retrieve the file from the request\n file = None\n try:\n file = request.files[\"file\"]\n except KeyError as e:\n logging.warning(f\"File was not found in request: {e}.\")\n flash(\"No file given.\", \"error\")\n return redirect(request.url)\n except AttributeError as e:\n logging.warning(f\"Error: Request did not contain any files: {e}.\")\n flash(\"No file given.\", \"error\")\n return redirect(request.url)\n\n # Check if file was correctly uploaded\n if not file or len(file.filename) == 0:\n flash(\"No file selected for upload.\", \"message\")\n return redirect(request.url)\n\n \"\"\" Check if file has correct extension. Allowed extensions depend on\n the connector. 
To make the code more readable, group connectors\n with the same allowed file extensions together like this:\n if connector in ['someconnector', 'someotherconnector']:\n extensions = [...] \"\"\"\n\n if self.connector in [\"overtime\"]:\n allowed_extensions = OVERTIME_SUPPORTED_EXTENSIONS\n else:\n allowed_extensions = []\n\n if not allowed_file(file, allowed_extensions=allowed_extensions):\n flash(\"File extension not allowed.\", \"warning\")\n return redirect(request.url)\n\n \"\"\" File seems uploaded correctly and has correct extension.\n Generate a new record ID to keep track of the uploaded file.\n \"\"\"\n rec_id = generate_temp_record_id()\n\n # Save file to disk\n path = store_temp_file(file, record_id=rec_id)\n\n if not path:\n flash(\"Error saving file!\", \"error\")\n return redirect(request.url)\n\n \"\"\" If everything ended successfully, send the user to the\n confirmation page so he can review his changes \"\"\"\n\n return redirect(url_for(f\"{self.endpoint}.upload_confirm\", rec_id=rec_id))", "def MarkUploadComplete(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def files_sent(self, yes):\n self.files_have_been_sent = yes", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def upload_file(self):\n request = copy.deepcopy(self.request_template)\n data = json.dumps(request)\n curr_file = {\n 'request': data,\n 'file': open(self.file_path, 'rb')\n }\n print(\"Sending Upload request of av for file {}\".format(self.file_name))\n try:\n response = requests.post(url=self.url + \"upload\", files=curr_file, verify=False)\n except Exception as E:\n print(\"Upload file failed. 
file: {} , failure: {}\".format(self.file_name, E))\n raise\n response_j = response.json()\n print(\"av Upload response status for file {} : {}\".format(self.file_name,\n response_j[\"response\"][0][\"status\"][\"label\"]))\n return response_j", "def upload_confirm(self, rec_id: str): # pylint: disable=unused-variable\n # Process the uploaded file\n if self.connector == \"overtime\":\n importer = action_process_overtime_data(\n get_record_path(rec_id),\n output=print,\n show_status=False,\n importer_class=OTImporterAdminPage,\n )\n else:\n return \"Unknown upload file type :(\"\n\n # Build string of status messages\n status = \"\\n\".join(importer.status(silent=True))\n\n # Show template with status and ask for confirmation\n return self.render(\n \"cadmin/upload_confirm.html\",\n confirm_url=url_for(f\"{self.endpoint}.upload_process\", rec_id=rec_id),\n status=status,\n )", "def onPreparationSucceeded(self, eventId, fileType, fileData_1, fileData_2, fileData_3):\n fileData = (fileData_1, fileData_2, fileData_3)\n self.statusLabel.setText('Sending result')\n failed = False\n self.dataSent = 0.0\n self.ilCon.uploadData(eventId, fileType, fileData)", "def upload(self) :\n\t\ttry :\n\t\t\treturn self._upload\n\t\texcept Exception as e:\n\t\t\traise e", "def post(self, request: HttpRequest) -> HttpResponse:\n for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n # Count initial view\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info(\"Uploaded %s\", new_upload.filename)\n return HttpResponse(status=204)", "def post(self):\n data = self.post_parser.parse_args()\n\n try:\n LOGGER.debug('Trying to upload file to storage')\n self.storage.upload(data.file)\n LOGGER.debug('The file was uploaded with success')\n return {\n 'filename': data.file.filename,\n 'message': 'The file was uploaded with success'\n }\n except BaseException:\n abort(500, message='The file was not uploaded')\n LOGGER.error('A generic exception has occurred.', exc_info=True)", "def handle_potential_upload(self):\n try:\n self.fileobj = FileGenerator(\n self.current_upload, event_handler=self.handle_upload_event)\n return self.maybe_upload()\n\n except exceptions.UploaderMissingFile as err:\n self.log(\"LOCAL_FILE_MISSING\", level=WARNING)\n if not self.current_upload.get(\"id\"):\n Backend.fail_unsigned(\n self.current_upload, location=self.location)\n else:\n Backend.fail(\n self.current_upload,\n bytes_downloaded=0,\n location=self.location)\n\n except exceptions.UploaderFileModified as err:\n self.log(\"LOCAL_FILE_CHANGED msg=%s\" % err, level=WARNING)\n Backend.fail(\n self.current_upload,\n bytes_downloaded=0,\n location=self.location)", "def on_file_changed(self, path):\n\t\tpass", "def webhook_upload(user, application, complete_path, init_es, tool, scan_name, user_host, to_name,hook_log):\n hook_log = WebhookLog.objects.get(id=hook_log)\n hook_log.file_upload_event = True\n hook_log.file_upload_datetime = timezone.now()\n hook_log.save()\n process_files(user, application, complete_path, init_es, tool, scan_name, user_host, to_name,hook_log=hook_log)\n info_debug_log(event='Webhook upload',status='success')", "def tell(self):\n return self._upload_position", "def upload():\n\n # Get CAS user roles from session\n cas_attributes = session.get('CAS_ATTRIBUTES', None)\n\n # login_required should mean we're authenticated, but check this\n if not cas_attributes:\n abort(403)\n\n roles = 
cas_attributes['cas:authority'].split(',')\n\n APP.logger.debug(\n f'User has CAS roles: {roles} ')\n\n # Stop unauthorized users\n if CONFIG.UPLOAD_ROLE not in roles:\n abort(403)\n\n form = UploadForm()\n\n if request.method != 'POST':\n return render_template('upload.html', form=form, upload_error=False)\n\n # User has tried to submit file (POST)\n # Check if it passes (simple) validation in forms.py\n if not form.validate_on_submit():\n return render_template('upload.html', form=form)\n\n # Get filename\n f = form.file.data\n filename = secure_filename(f.filename)\n # Add user email & time\n email = cas_attributes['cas:email']\n upload_time = dt.now().strftime(\"%y%m%d-%H%M%S\")\n ext_filename = email + '_' + upload_time + '_' + filename\n\n # Save file\n try:\n f.save(os.path.join(CONFIG.UPLOAD_PATH, ext_filename))\n except Exception as ex:\n APP.logger.error(\n f'File {ext_filename} could not be saved due to {ex}')\n else:\n APP.logger.info(f'Successfully uploaded file {ext_filename}')\n\n # Notify admin\n msg = Message('New file upload',\n sender=APP.mail.username,\n recipients=CONFIG.UPLOAD_EMAIL)\n msg.body = f\"\"\"\n Hello SBDI-MOL colleagues,\n\n A new file has been uploaded to the ASV portal:\n\n Provider email: {email}\n Upload time: {upload_time}\n Original filename: {filename}\n Saved as: {ext_filename}\n\n Have a nice day!\n\n / Swedish ASV portal\n \"\"\"\n try:\n APP.mail.send(msg)\n except Exception as ex:\n APP.logger.error(f\"Could not send upload notification due to {ex}\")\n else:\n APP.logger.info('Successfully sent upload notification.')\n # Display 'success page' only if upload AND notification worked\n return render_template('uploaded.html', filename=filename)\n\n # Display error msg if EITHER upload OR email failed, so that data\n # providers get a chance to tell us about uploaded files\n return render_template('upload.html', form=form, upload_error=True)" ]
[ "0.72332937", "0.6826099", "0.67252356", "0.669396", "0.6651103", "0.64940315", "0.64732903", "0.6469971", "0.6466714", "0.6447727", "0.64468664", "0.63912904", "0.63696146", "0.63658977", "0.6341158", "0.6338254", "0.6318352", "0.6298421", "0.6280171", "0.62607", "0.61797124", "0.61621064", "0.6157702", "0.6149268", "0.6139376", "0.609094", "0.60897464", "0.607966", "0.60637236", "0.60372037" ]
0.78712845
0
Calculate the probability of achieving all successes Given a certain number of dice, their probability of a success, and the allowed number of rolls/rerolls, calculates the probability of achieving all successes. This implements a recursion I solved when thinking about this problem. f(p,1,n) = p^n, the probability of rolling all successes in one roll f(p,r,0) = 1, if we need to achieve 0 successful rolls, it is always achieved f(p,r,n) = SUM_{x=0}^{n} p^x (1-p)^{n-x} f(p,r-1,n-x) For the final equation, we add up the probabilities of achieving each number of successes in the current roll, and then achieving all successes with the remaining rolls by applying recursion. We could use dynamic programming if we were dealing with larger numbers and wanted to make this program more efficient
def probability_of_all_successes(p: float, r: int, n: int) -> float: if r == 1: return pow(p, n) elif n == 0: return 1 else: result = 0 for x in range(0, n+1): result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prob_of_roll(dice: Tuple[int, ...], total: Optional[int] = None) -> pd.DataFrame:\n dice = {f'{i}_d{d}': range(1, d + 1) for i, d in enumerate(dice)}\n combinations = list(itertools.product(*dice.values()))\n df = (pd.DataFrame(combinations, columns=dice.keys())\n .assign(total=lambda x: x.sum(1))\n .assign(dist=lambda x: x.groupby('total').transform('count').iloc[:, 0])\n .assign(prob=lambda x: x['dist'] / x.shape[0])\n .drop(dice.keys(), 1)\n .drop_duplicates()\n .set_index('total')\n .sort_index()\n )\n if total:\n return df.loc[total]\n return df", "def average_simulate_rerolling(probability: float, num_objects: int, trials: int):\n\n results = []\n for _ in range(trials):\n results.append(simulate_rerolling(probability, num_objects))\n print(f\"Given {num_objects} dice with a probability of success of {probability}, after {trials} trials\")\n print(f\"Number of expected rolls to have all successes is {sum(results)/len(results)}\")", "def roll_the_dices(num_of_iterations: int) -> None:\n # initial variables\n player_wins: int = 0\n theoretical_win_chance: float = round(15/36, 4)\n\n # main loop\n for _ in range(num_of_iterations):\n croupier_roll = random.randint(1, 6)\n player_roll = random.randint(1, 6)\n if player_roll < croupier_roll:\n player_wins += 1\n\n experimental_win_chance = round(player_wins / num_of_iterations, 4)\n print(f\"Results: \\n\"\n f\"Theoretical probability of winning a single game: {theoretical_win_chance:.2%}\\n\"\n f\"Experimental probability of winning a single game: {experimental_win_chance:.2%}\")", "def biased_rolls(prob_list, s, n):\n random.seed(s)\n divisions = []\n div = 0\n # Creating a list 'Divisions' with the upper and lower limits\n # (boundaries) for the specific roll outputs.\n for num in prob_list:\n div = div + float(num)\n divisions.append(div)\n\n m = len(prob_list)\n # Generating and storing the output for each n rolls in a list\n\n rolls = []\n # Generating each random number one by one\n for j in range(n):\n r_num = random.random()\n # Resetting the lower limit every time the code generates a\n # new random number between 0 and 1.\n lower = 0\n # Deciding the output of the roll from the generated random\n # number and then storing it in a list\n for i in range(m):\n if r_num >= lower and r_num < float(divisions[i]):\n lower = divisions[i]\n rolls.append(i+1)\n # return the resulting rolls\n return rolls", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n \"*** YOUR CODE HERE ***\"\n count, return_sum = 0, 0\n while count < num_rolls:\n roll = dice()\n if roll == 1:\n count += 1\n while count < num_rolls:\n dice()\n count += 1\n return 1\n return_sum += roll\n count += 1\n return return_sum\n # END PROBLEM 1", "def game1(n):\r\n\twin=0\r\n\tfor i in range(n):\r\n\t\tif game(1)==1:\r\n\t\t\twin+=1\r\n\tprob1=win/n\r\n\treturn prob1", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n num_roll = 0\n sum = 0\n pig_out = False # Pig Out rule\n while num_roll < num_rolls:\n roll = dice()\n if roll == 1:\n pig_out = True\n sum += roll\n num_roll += 1\n if pig_out: return 1\n else: return sum\n # END PROBLEM 1", "def chance(dice):\n 
return sum(dice)", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number< p:\n n_success += 1\n\n return n_success", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n roll_sum = 0 # sums values of rolled dice\n ones_total = 0 # counts number of times the value 1 is rolled\n while num_rolls>0:\n current_roll = dice()\n if current_roll==1:\n ones_total += 1\n roll_sum += current_roll\n num_rolls -= 1\n if ones_total > 0:\n return ones_total\n else:\n return roll_sum\n # END PROBLEM 1", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success", "def fn(n, x, r):\n if n == 0: return 1\n ans = 0\n for xx in range(6): \n if xx != x: ans += fn(n-1, xx, 1)\n elif xx == x and r < rollMax[x]: ans += fn(n-1, x, r+1)\n return ans", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success", "def roll_die(number_of_rolls, number_of_sides):\n\n roll = random.randint(1, number_of_sides) # Used recursion for this\n if number_of_rolls == 0:\n return 0 # Base case is 0. 
If it's 1, then I can roll a 7 with 6 sides\n else:\n return roll + roll_die(number_of_rolls - 1, number_of_sides) # Subtract 1 roll and keep calling function", "def roll_dice(num_rolls, dice=six_sided_dice, who='Boss Hogg'):\r\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\r\n assert num_rolls > 0, 'Must roll at least once.'\r\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\r\n total, is_one = 0, False\r\n while num_rolls >= 1:\r\n x = dice()\r\n if commentary:\r\n announce(x, who)\r\n if x==1:\r\n is_one = True\r\n total = total + x\r\n num_rolls = num_rolls - 1\r\n if is_one: return 1\r\n else: return total", "def test_probability_of_all_successes():\n\n assert(probability_of_all_successes(1/2,1,2) == 0.25)\n assert(are_close(probability_of_all_successes(1/6,1,2), 1/36, 0.001))\n assert(are_close(probability_of_all_successes(1/2,2,2), 7/16, 0.001))", "def roll_1d10() -> int:\n ten_percent = Die(10)\n ten_percent.roll_die()\n chance = ten_percent.get_value()\n return chance", "def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total", "def chance_points(dice_list):\n return sum(dice_list)", "def yatzy_rule(n):\n def ones(dice):\n \"\"\" Count ones in list. \"\"\"\n return sum([x for x in dice if x == 1])\n\n def twos(dice):\n \"\"\" Count twos in list. \"\"\"\n return sum([x for x in dice if x == 2])\n\n def threes(dice):\n \"\"\" Count threes in list. \"\"\"\n return sum([x for x in dice if x == 3])\n\n def fours(dice):\n \"\"\" Count fours in list. \"\"\"\n return sum([x for x in dice if x == 4])\n\n def fives(dice):\n \"\"\" Count fives in list. \"\"\"\n return sum([x for x in dice if x == 5])\n\n def sixes(dice):\n \"\"\" Count sixes in list. \"\"\"\n return sum([x for x in dice if x == 6])\n\n def pair(dice):\n \"\"\" Return sum of highest pair in list. \"\"\"\n\n def max_or_zero(list):\n \"\"\" Returns maximum value of a list; 0 if list is empty. \"\"\"\n try:\n return max(list)\n except ValueError:\n return 0\n\n return 2 * max_or_zero([i for i, j in combinations(dice, 2) if i == j])\n \n def double_pair(dice):\n \"\"\" TODO! \"\"\"\n\n # Sentinel value.\n return 1\n\n def threes(dice):\n \"\"\" Find a set of three equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k in combinations(dice, 3):\n if i == j == k:\n return 3 * i\n\n return 0\n\n def fours(dice):\n \"\"\" Find a set of four equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k, l in combinations(dice, 4):\n if i == j == k == l:\n return 4 * i\n\n return 0\n\n def small_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [1, 2, 3, 4, 5] (the small straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [1, 2, 3, 4, 5]:\n return sum(dice)\n return 0\n\n def big_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [2, 3, 4, 5, 6] (the large straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0\n\n def house(dice):\n \"\"\" Try to find a house in the list of cards\n i.e. [2, 2, 2, 3, 3] or [5, 5, 4, 4, 4] and\n return its sum. Returns 0 if nothing found.\"\"\"\n s = sorted(dice)\n if ((s[0] == s[1] == s[2] and s[3] == s[4]) or\n (s[0] == s[1] and s[2] == s[3] == s[4])):\n return sum(dice)\n return 0\n\n def chance(dice):\n \"\"\" Returns the sum of dice. 
\"\"\"\n return sum(dice)\n\n def yatzy(dice):\n \"\"\" If every value in list dice is equal, return its sum.\n Else, return 0. \"\"\"\n if (dice[0] == dice[1] == dice[2] == dice[3] == dice[4]):\n return 50\n return 0\n\n return [ones, twos, threes, fours, fives, sixes, pair, double_pair,\n threes, fours, small_straight, big_straight, house, chance, yatzy][n]", "def roll(n, val):\n\tcount = 0\n\tfor i in range(n):\n\t\troll = randint(1, 6) + randint(1, 6)\n\t\tif roll == val: count += 1\n\treturn count", "def easy_solve(n=30):\n # Use p1 and p2 to store expected payoffs per player\n p_1 = []\n p_2 = []\n # Get each possible dice roll in order\n arr = list(range(1, n + 1))\n x = (1/2)*(np.sqrt(2*n**2 + 2*n - 3) - 1)\n for i in range(n):\n # Let player 1 choose ith possible integer\n player_1 = arr[i]\n # Use aforementioned strategy\n if arr[i] <= int(x):\n player_2 = arr[i] + 1\n # Append expected value per player according to which integer is larger\n p_1.append((1/n)*sum(arr[:player_1]))\n p_2.append((1/n)*sum(arr[player_2-1:]))\n else:\n player_2 = arr[i] - 1\n p_2.append((1/n)*sum(arr[:player_2]))\n p_1.append((1/n)*sum(arr[player_1-1:]))\n return p_1, p_2", "def rollAndTallyOutcomes(outcomeCounts):\n## ADD CODE HERE\n for k in range(NUMBER_OF_ROLLS):\n t = randint(1,6) + randint(1,6)\n outcomeCounts[t] = outcomeCounts[t] +1", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n", "def pretty_print_poas_result(probability: float, rerolls: int, num_objects: int):\n\n result = probability_of_all_successes(probability, rerolls, num_objects)\n print(f\"Given {num_objects} dice with a probability of success of {probability} and {rerolls} rolls\")\n print(f\"The probability of achieving all successes is {result}\")", "def simple_roll(dice):\n return roll(dice).total", "def get_success_probabilities_from_results(results: Sequence[Sequence[Sequence[int]]]) \\\n -> Sequence[float]:\n num_shots = len(results[0])\n n_bits = len(results[0][0]) - 1\n\n probabilities = []\n # loop over all binary strings of length n_bits\n for result, bits in zip(results, all_bitstrings(2 * n_bits)):\n # Input nums are written from (MSB .... LSB) = (a_n, ..., a_1, a_0)\n num_a = bit_array_to_int(bits[:n_bits])\n num_b = bit_array_to_int(bits[n_bits:])\n\n # add the numbers\n ans = num_a + num_b\n ans_bits = int_to_bit_array(ans, n_bits + 1)\n\n # a success occurs if a shot matches the expected ans bit for bit\n probability = 0\n for shot in result:\n if np.array_equal(ans_bits, shot):\n probability += 1. / num_shots\n probabilities.append(probability)\n\n return probabilities", "def prob(throw, n, d=6, type='classical'):\n count = 0\n table = throw_table(n, d, type)\n for t in table:\n if sum(t) == throw:\n count += 1\n \n return float(count)/len(table)" ]
[ "0.67905885", "0.6688306", "0.6612852", "0.63708943", "0.63187337", "0.6314003", "0.6271191", "0.6218371", "0.6189899", "0.6186982", "0.61848557", "0.6174344", "0.6172727", "0.61723155", "0.6090661", "0.60187304", "0.59495205", "0.5918823", "0.5900799", "0.58694553", "0.5866056", "0.58373135", "0.58368826", "0.5777342", "0.57685", "0.5752437", "0.5722686", "0.57208973", "0.5682927", "0.56511724" ]
0.7424197
0
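The query above notes that dynamic programming could make the recursion more efficient for larger inputs. As an illustrative aside (not part of any dataset record), a minimal memoized sketch of the same recursion could look like the following; the wrapper name probability_of_all_successes_dp and the use of functools.lru_cache are assumptions of this sketch, only the recursion itself comes from the record:

from functools import lru_cache

def probability_of_all_successes_dp(p: float, r: int, n: int) -> float:
    # Memoized sketch of the recursion described in the query above.
    # p is fixed for the whole call, so (rolls, needed) alone identifies a subproblem.
    @lru_cache(maxsize=None)
    def f(rolls: int, needed: int) -> float:
        if rolls == 1:
            return p ** needed      # all remaining successes must land in this single roll
        if needed == 0:
            return 1.0              # nothing left to achieve
        total = 0.0
        for x in range(needed + 1): # x successes in the current roll
            total += (p ** x) * ((1 - p) ** (needed - x)) * f(rolls - 1, needed - x)
        return total
    return f(r, n)

# For the record's test values, probability_of_all_successes_dp(0.5, 2, 2) evaluates to 0.4375 (7/16),
# matching probability_of_all_successes(1/2, 2, 2) in the original document.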
Pretty print the results of probability_of_all_successes
def pretty_print_poas_result(probability: float, rerolls: int, num_objects: int): result = probability_of_all_successes(probability, rerolls, num_objects) print(f"Given {num_objects} dice with a probability of success of {probability} and {rerolls} rolls") print(f"The probability of achieving all successes is {result}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_results(self):\n self.accuracy = round(accuracy_score(self.y_val, self.y_pred, 'weighted'), 4)\n self.f1 = round(f1_score(self.y_val, self.y_pred, average='weighted'), 4)\n self.precision = round(precision_score(self.y_val, self.y_pred, average='weighted'), 4)\n\n print(f'Results for {self.title}:')\n print(f'{self.title} accuracy: {self.accuracy}')\n print(f'{self.title} f-score: {self.f1}')\n print(f'{self.title} precision: {self.precision}')", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def print_results():\n now_time = time.time()\n diff_time_in_sec = now_time - start_time\n generated_per_second = total / diff_time_in_sec\n generated_per_hour = 3600 * generated_per_second\n saved_per_second = success / diff_time_in_sec\n saved_per_hour = 3600 * saved_per_second\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(f\"{'Generated:' : <16}{total : <12}\")\n print(f\"{'New graphs:' : <16}{success : <12}\")\n print(f\"{'Success rate:' : <16}{round((success / total) * 100, 3) : <7} %\")\n print(f\"{'Speed:' : <16}{round(generated_per_hour) : <7} graphs/h\")\n print(f\"{'Save speed:' : <16}{round(saved_per_hour) : <7} graphs/h\")", "def test_probability_of_all_successes():\n\n assert(probability_of_all_successes(1/2,1,2) == 0.25)\n assert(are_close(probability_of_all_successes(1/6,1,2), 1/36, 0.001))\n assert(are_close(probability_of_all_successes(1/2,2,2), 7/16, 0.001))", "def print_results(precision, recall, f1):\n print \"Precision:\", precision\n print \"Recall:\", recall\n print \"F1 score:\", f1", "def pretty_print(results: List[Tuple[str, torch.Tensor]]):\n for item in results:\n print(\"...[%.2f] - %s\" % (item[1], item[0]))", "def generateStats(self):\n\t\tn = float(self.n)\n\t\tm = float(self.m)\n\t\tk = float(self.k)\n\t\tp_fp = math.pow(1.0 - math.exp(-(k*n)/m), k)\n\t\tprint \"Probability of false positives: \", p_fp\n\t\tprint \"Predicted false positive rate: \", p_fp * 100.0\n\t\tprint \"Number of elements entered in filter: \", n\n\t\tprint \"Number of bits in filter: \", m\n\t\tprint \"Number of hashes in filter: \", k", "def printing_p_matrix(new_all_results):\n\tprint(\"________________________________PROBABILITY MATRIX__________________________________ \")\n\tfor i in range(len(new_all_results)):\n\t\tprint(\"Row Number: \", i+1)\n\t\tprint(\"Vector: \", all_states_explored[i])\n\t\tprint(\"Number of columns: \", len(new_all_results[i]))\n\t\tprint(\"Result: \", new_all_results[i])\n\t\tprint(\"-------------------------------------------------------------------------------------\")\n\tprint(\"____________________________________________________________________________________\")", "def print_results(results, random_counterpart=None, random_concepts=None, num_random_exp=100,\n min_p_val=0.05):\n\n # helper function, returns if this is a random concept\n def is_random_concept(concept):\n if random_counterpart:\n return random_counterpart == concept\n\n elif random_concepts:\n return concept in random_concepts\n\n else:\n return 'random500_' in concept\n\n # print class, it will be the same for all\n print(\"Class =\", results[0]['target_class'])\n\n # prepare data\n # dict with keys of concepts containing dict with bottlenecks\n result_summary = {}\n\n # random\n random_i_ups = {}\n\n for result in results:\n if result['cav_concept'] 
not in result_summary:\n result_summary[result['cav_concept']] = {}\n\n if result['bottleneck'] not in result_summary[result['cav_concept']]:\n result_summary[result['cav_concept']][result['bottleneck']] = []\n\n result_summary[result['cav_concept']][result['bottleneck']].append(result)\n\n # store random\n if is_random_concept(result['cav_concept']):\n if result['bottleneck'] not in random_i_ups:\n random_i_ups[result['bottleneck']] = []\n\n random_i_ups[result['bottleneck']].append(result['i_up'])\n\n # print concepts and classes with indentation\n for concept in result_summary:\n\n # if not random\n if not is_random_concept(concept):\n print(\" \", \"Concept =\", concept)\n\n for bottleneck in result_summary[concept]:\n i_ups = [item['i_up'] for item in result_summary[concept][bottleneck]]\n\n # Calculate statistical significance\n _, p_val = ttest_ind(random_i_ups[bottleneck], i_ups)\n\n print(3 * \" \", \"Bottleneck =\", (\"%s. TCAV Score = %.2f (+- %.2f), \"\n \"random was %.2f (+- %.2f). p-val = %.3f (%s)\") % (\n bottleneck, np.mean(i_ups), np.std(i_ups),\n np.mean(random_i_ups[bottleneck]),\n np.std(random_i_ups[bottleneck]), p_val,\n \"not significant\" if p_val > min_p_val else \"significant\"))", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def print_predict(classes, probs):\n predictions = list(zip(classes, probs))\n for i in range(len(predictions)):\n print('{} : {:.3%}'.format(predictions[i][0], predictions[i][1]))\n pass", "def ProbCorrectTable():\n efficacies = [3, 1.5, 0, -1.5, -3]\n difficulties = [-1.85, -0.05, 1.75]\n\n for eff in efficacies:\n print('%0.2f & ' % eff, end=' ') \n for diff in difficulties:\n p = ProbCorrect(eff, diff)\n print('%0.2f & ' % p, end=' ') \n print(r'\\\\')", "def display(self):\n for value, prob in self.items():\n print(value, prob)", "def print_table(self) -> None:\n if (self.probability_links == None):\n print(\"+--------+\")\n print(f\"| P({self.key:1s}) |\")\n print(\"+--------+\")\n print(f\"| {self.probability_values[0]:0.04f} |\")\n print(\"+--------+\")\n else:\n arg_len = 2 + len(' '.join(self.probability_links.keys()))\n param_len = 2 + \\\n max(6, len(\"P(A|)\" + \",\".join(self.probability_links.keys())))\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n print(\n f\"| {' '.join(self.probability_links.keys())} | P({self.key}|{','.join(self.probability_links.keys())}) |\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n for i in range(2**len(self.probability_links.keys())):\n # Gives us a string binary value to make truth table off of\n bool_key = f\"{i:0{len(self.probability_links.keys())}b}\"\n print(\n f\"| {' '.join(['T' if bool_key[j] == '0' else 'F' for j in range(len(self.probability_links.keys()))])} | {f'{self.probability_values[i]:0.04f}':<{param_len-1}s}|\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")", "def output(results):\n\n text_file = open(\"problem_1_B_output.txt\", \"w\")\n\n out = \"\"\n\n for i, line in 
enumerate(results):\n\n string = \"Sample {}: {}, posterior probability of {:.4f}\".format(i + 1,\n line[0],\n line[1])\n\n out += (string + \"\\n\")\n\n text_file.write(out)\n\n text_file.close()", "def test_print_results(self):\n calculated = super().predict_and_print()\n self.assertEqual(calculated, EXP_PRINT_OUTPUT_BASE.format(.18, .1, 0.186, self.test_model.model.train_time) +\n \"Max tree max_depth: 1\\n\"\n \"Number of n_estimators: 1\\n\"\n \"Impurity method: entropy\\n\")", "def show_results(results, n=10, print_results=True):\n # Print headline\n s = \"\"\n if len(results) == 0:\n s += \"-- No results --\"\n else:\n s += \"{0:18s} {1:7s}\\n\".format(\"Class\", \"Prob\")\n s += \"#\"*50 + \"\\n\"\n for entry in results:\n if n == 0:\n break\n else:\n n -= 1\n s += \"{0:18s} {1:>7.4f}%\\n\".format(entry['semantics'],\n entry['probability']*100)\n s += \"#\"*50\n if print_results:\n print(s)\n return s", "def print_scores(result_collector):\n # print(\"\\n# Metric: Cohen's kappa\")\n # result_collector.set_metric(['k_cohen', 'k'])\n # result_collector.print_all_results()\n print(\"\\n# Metric: Macro avg. F1\")\n result_collector.set_metric([\"macro_avg\", \"fscore\"])\n # result_collector.print_all_results()\n result_collector.print_result_for_level(\"cc\")\n result_collector.print_result_for_level(\"ro\", print_header=False)\n result_collector.print_result_for_level(\"fu\", print_header=False)\n result_collector.print_result_for_level(\"at\", print_header=False)\n\n # print(\"\\nMetric: Positive attachment F1\")\n # result_collector.set_metric(['classwise', '1', 'fscore'])\n # result_collector.print_result_for_level('at')\n print(\"\\n# Metric: Labelled attachment score\")\n result_collector.set_metric([\"accuracy\"])\n result_collector.print_result_for_level(\"lat\")", "def result(self):\n return (\"Precision@\" + str(self.length) + \": \"), (self.hit / self.test)", "def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")", "def print_metrics(result):\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, ' KEY METRICS: ')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* pages_count: %d',\n get_counter_metric(result, 'pages_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_count: %d',\n get_counter_metric(result, 'revisions_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* very_long_page_histories_count: %d',\n get_counter_metric(result, 'very_long_page_histories_count'))\n revisions_per_page_distr = get_distributions_metric(\n result, 'revisions_per_page_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.mean: %d',\n revisions_per_page_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.sum: %d',\n revisions_per_page_distr.sum)\n cumulative_page_rev_size_distr = get_distributions_metric(\n result, 'cumulative_page_rev_size_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '* cumulative_page_rev_size_distr.mean: %d',\n cumulative_page_rev_size_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* cumulative_page_rev_size_distr.sum: %d',\n cumulative_page_rev_size_distr.sum)", "def print_stats(session, feature_batch, label_batch, cost, accuracy, x, y, keep_prob, 
valid_features, valid_labels):\n result_cost = session.run(cost, feed_dict={x:feature_batch, y:label_batch, keep_prob: 1.0})\n result_accuracy = session.run(accuracy, feed_dict={x:valid_features, y:valid_labels, keep_prob: 1.0})\n print(\"Cost: {0:.2f} Accuracy: {1:.2f}% \" .format(result_cost,result_accuracy*100), end='')\n return result_cost, result_accuracy", "def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0", "def print_score(classifier,X_test,y_test):\n print(\"Test results:\\n\")\n print('Accuracy Score: {0:.4f}\\n'.format(accuracy_score(y_test,classifier.predict(X_test))))\n print('Classification Report:\\n{}\\n'.format(classification_report(y_test,classifier.predict(X_test))))\n print('Confusion Matrix:\\n{}\\n'.format(confusion_matrix(y_test,classifier.predict(X_test))))", "def __printResults(files, expected, actual, similarity):\n if (showIndividualResults):\n for i in range(len(files)):\n print \"\\nExpected = %s\\nActual = %s \\nSimilarity = %f\" % (expected[i], actual[i], similarity[i])\n print \"\\nMean Similarity = %f\" % np.mean(similarity)", "def print_run_perfs(verbose_dico):\n for item_strat, v1 in verbose_dico.items():\n print(item_strat)\n list_of_perfs = []\n list_of_prints = []\n for skill_strat, v2 in v1.items():\n list_of_params = []\n acpl_perfs = []\n acpr_perfs = []\n for param_value, v3 in v2.items():\n list_of_params.append(param_value)\n for period, v4 in v3.items():\n strat_perf = np.mean(verbose_dico[item_strat][skill_strat][param_value][period])\n if period == \"learning\":\n acpl_perfs.append(strat_perf)\n if period == \"retention\":\n acpr_perfs.append(strat_perf)\n # Find best parameters\n best_param_index = np.argmax(acpr_perfs)\n list_of_perfs.append(np.around(acpr_perfs[best_param_index],3))\n list_of_prints.append(\"\\t {0:>26} | Best param : {1:>3} | ACPL = {2:>6} | ACPR = {3:>6}\".format(skill_strat,\n np.around(list_of_params[best_param_index],2),\n np.around(acpl_perfs[best_param_index],3),\n np.around(acpr_perfs[best_param_index],3)))\n for strat_index in np.argsort(list_of_perfs)[::-1]:\n print(list_of_prints[strat_index])", "def print_results(self):\n pass", "def print_results(self, f1_score, precision_score, recall_score):\n print(\"Algorithm: %s\" % self.name)\n self.print_score(f1_score, \"F1 Score\")\n self.print_score(precision_score, \"Precision Score\")\n self.print_score(recall_score, \"Recall Score\")", "def print_file_distribution(num_imgs_total, num_imgs_train, num_imgs_val, num_imgs_test):\n print('Total images: ', num_imgs_total)\n print('Training: ', num_imgs_train)\n print('Validation: ', num_imgs_val)\n print('Testing: ', num_imgs_test)", "def printResults(self):\n for key in self.mDict.keys():\n print ('for {:d}, entries = {:d} and exits = {:d}'.format (key, self.mDict.get(key).get ('entries'), self.mDict.get(key).get ('exits')))" ]
[ "0.6881748", "0.67664963", "0.6735734", "0.67205274", "0.6649825", "0.661655", "0.65694964", "0.6528802", "0.65250605", "0.65111965", "0.6422836", "0.6419955", "0.63964987", "0.63843995", "0.6353819", "0.63460255", "0.63327503", "0.63321286", "0.6326144", "0.63111883", "0.629962", "0.62852323", "0.6284611", "0.62576556", "0.6244387", "0.62121606", "0.6196799", "0.6148179", "0.61387277", "0.61318445" ]
0.7405338
0
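A brief usage sketch for the helper above, using the values from the test record later in this dump (the printed lines simply follow the f-strings in the document):

pretty_print_poas_result(probability=0.5, rerolls=2, num_objects=2)
# Prints, given that probability_of_all_successes(0.5, 2, 2) == 0.4375:
# Given 2 dice with a probability of success of 0.5 and 2 rolls
# The probability of achieving all successes is 0.4375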
Run simulate_rerolling for a given number of trials and print the result For a given number of trials, run the simulate_rerolling simulation and average all of the results. This gives the expected number of rolls/rerolls we must perform before we have all successes
def average_simulate_rerolling(probability: float, num_objects: int, trials: int): results = [] for _ in range(trials): results.append(simulate_rerolling(probability, num_objects)) print(f"Given {num_objects} dice with a probability of success of {probability}, after {trials} trials") print(f"Number of expected rolls to have all successes is {sum(results)/len(results)}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials", "def simulate(self, num_games):\r\n # self.runs = num_games #Initializes a tracker for the number of runs\r\n for _ in range(num_games):\r\n self.results.append(self._simulate_once())\r\n return self.results", "def test(self, n_test_runs: int = 10) -> None:\n steps: np.ndarray = np.zeros(n_test_runs)\n rewards: np.ndarray = np.zeros(n_test_runs)\n for t in range(n_test_runs):\n steps[t], rewards[t] = self.step(collect=False)\n\n self.get_logger().warn('---------- TEST RUN RESULTS ----------')\n self.get_logger().warn(f'Average: {steps.mean()}')\n self.get_logger().warn(f'STD: {steps.std()}')\n self.get_logger().warn(f'Median: {np.median(steps)}')\n self.get_logger().warn(f'Average Reward: {rewards.mean()}')", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. 
success rate per {} trials:\".format(trials), std_success_rate[i]", "def run():\n\n Number_repetitions = 1\n Rate = np.zeros((Number_repetitions,1))\n Rate20 = np.zeros((Number_repetitions,1))\n Penalty20 = np.zeros((Number_repetitions, 1))\n\n # Loop to average\n for idx in np.arange(0,Number_repetitions,1):\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n # I've edited the enviroment variable to do the plot creating an completions array\n completions = np.array(e.completions)\n rate = float(completions.sum())/float((len(completions)))\n rate20 = float(completions[-20:].sum())/20\n\n Rate[idx] = rate\n Rate20[idx] = rate20\n\n Wrong = np.array(a.wrong_moves_per_run[-20:]).mean()\n Penalty20[idx] = Wrong\n\n plt.scatter(np.arange(0,len(completions)),completions)\n plt.plot(Wrong)\n plt.xlabel('Trial')\n plt.ylabel('1 = Get in the destination, 0 = did not get')\n plt.title('Reiforcement learning progress')\n plt.legend(['Rate of completion: ' + str(rate) + '. Rate last 20: ' + str(rate20) + '.Mean penalty last 20: ' + str(Wrong)])\n plt.show()\n\n #print 'Accuracy: ' + str(Rate) + '. Mean: ' + str(np.mean(Rate))\n #print 'Mean 20: ' + str(np.mean(Rate20))#'Accuracy 20: ' + str(Rate20) + '. Mean 20: ' + str(np.mean(Rate20))\n #print 'Mean_penalty: ' + str(np.mean(Penalty20))\n\n # Print state table with actions\n #t = 0\n #for state in a.states:\n #print 'State ' + str(state) + '. 
Best action: ' + str((str(np.argmax(a.QTable[t][:]))))\n #t += 1", "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def simulationDelayedTreatment(numTrials):\n \n \n results = []\n gutresults = []\n for a in range(300):\n results.append([])\n gutresults.append([])\n for b in range(numTrials):\n viruses = []\n for c in range(10000):\n resistances = {'guttagonol': False}\n vir = ResistantVirus(.1, .05, resistances, .005)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 300):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n FinalResults = results[299]\n print len(FinalResults)\n \n \n \n pylab.figure(5)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('Simulation with Drugs - Frequency')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.legend()\n pylab.show()", "def simulationTwoDrugsDelayedTreatment(numTrials):\n #Initialization\n delayList = [300, 150, 75, 0]\n #delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': False, 'grimpex' : False }\n #mutProb = 0.005\n mutProb = 0.010\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n print \"Running trials for delay %(delay)d\" % {'delay' : n}\n for i in range(numTrials):\n #print \"Trial: \" + str(i)\n pop = runTrialTwoDrugs(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop < 50:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def simulate(self, ntrs):\n self.trtimes = list(np.arange(ntrs)*self.expectedtr)", "def simulate(player,environment,n_trials=1000,verbose=False):\n environment.player = player\n rewards = []\n \n for i in range(1,n_trials+1):\n \n if i % (n_trials/5) == 0:\n if verbose:\n print (\"Loading game {}\".format(i))\n try:\n result = environment.play_game()\n rewards.append(result)\n except Exception:\n tb.print_exc(file=sys.stdout)\n \n return rewards", "def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n trialsRecord = []\n for trail in range(num_trials):\n #VISUALIZING ROBOTS - refer course pdf note 'Optional_Visualizing Robots Problem Set 2.pdf'\n #anim = ps2_visualize.RobotVisualization(num_robots, width, height)\n #create room\n room = RectangularRoom(width, height)\n #create robots & store in array\n robots = []\n count = 0\n for i in range(num_robots):\n robots.append(robot_type(room, speed))\n #NB: how does robot_type(room, speed) create a robot object???? 
what magic is this???\n #while calcualted coverage is < min_coverage, update positions & repeat\n while float(room.getNumCleanedTiles()) / room.getNumTiles() < min_coverage:\n #anim.update(room, robots)\n #do more cleaning - update robot positions\n for robot in robots:\n robot.updatePositionAndClean()\n count += 1\n trialsRecord.append(count)#record number of steps to achieve min_coverage in this trial.\n #after loop, close animation\n #anim.done()\n #calculate average number of steps over trials.\n return sum(trialsRecord)/float(len(trialsRecord))\n #raise NotImplementedError", "def simulationTwoDrugsDelayedTreatment(numTrials):\n results = []\n gutresults = []\n \n for a in range(375):\n results.append([])\n gutresults.append([])\n \n for b in range(numTrials):\n viruses = []\n for c in range(100):\n resistances = {'guttagonol': False, 'grimpex': False}\n vir = ResistantVirus(.1, .05, resistances, .02)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 225):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n Mark.addPrescription('grimpex')\n \n for f in range(225, 375):\n newpop = Mark.update()\n results[f].append(newpop)\n \n \n FinalResults = results[374]\n print len(FinalResults)\n \n \n pylab.figure(6)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('300 day delay')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.show()", "def run():\n\n for simulation in range(0, N_SIMULATIONS):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n # TODO: Change later enforce_deadline=True\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=N_TRIALS) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n if simulation == N_SIMULATIONS - 1:\n\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['alpha', 'gamma', 'epsilon', 'success_rate', 'last_failure']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for index in range(0,len(simulation_rates)):\n writer.writerow({\n 'alpha': get_simulation_params(0)[0],\n 'gamma': get_simulation_params(0)[1],\n 'epsilon': get_simulation_params(0)[2],\n 'success_rate': simulation_rates[index],\n 'last_failure': last_errors[index]})\n\n\n if N_SIMULATIONS > 1: #multiple simulation AND last simulation\n\n plt.figure(1)\n\n plt.subplot(211)\n plt.plot(simulation_rates)\n plt.title('Success Rate/Simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Success Rate')\n\n plt.subplot(212)\n plt.plot(last_errors)\n plt.title('Last failed trial per simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Last failed trial')\n\n plt.show()", "def simulationDelayedTreatment(numTrials):\n \n #Initialization\n #delayList = [300, 150, 75, 0]\n delayList = [150]\n #Patient 
init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': True }\n mutProb = 0.005\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n for i in range(numTrials):\n pop = runTrial(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop == 0:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def simulator_roulette(cnt_simulate, win_prob): \n \n winnings_all = []\n cnt = 0\n \n while cnt < cnt_simulate: \n winnings = 0\n winnings_trk = [winnings]\n \n while winnings < 80:\n tfwin = False\n bet = 1\n while not tfwin:\n # Roll the roulette\n tfwin = get_spin_result(win_prob)\n if tfwin:\n winnings += bet \n else:\n winnings -= bet\n bet *= 2\n \n # keep track of winnings in each simulate\n winnings_trk.append(winnings)\n \n # keep track of all simulation\n winnings_all.append(winnings_trk)\n cnt += 1\n\n return winnings_all", "def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type, visualize):\n #initialization of variables\n list_of_results = []\n \n #trial loop\n for i in range(num_trials):\n list_of_results.append(singleSimulation(num_robots, speed, width, height, min_coverage, robot_type, visualize))\n return list_of_results", "def run_test(test_name, outdir, output_file_name, num_rollouts,\n rllib_config, checkpoint, env_modifier, render, adv_num=None):\n # First compute a baseline score to compare against\n print(\n \"**********************************************************\\n\"\n \"**********************************************************\\n\"\n \"**********************************************************\\n\"\n \"Running the {} score!\\n\"\n \"**********************************************************\\n\"\n \"**********************************************************\\n\"\n \"**********************************************************\".format(test_name)\n )\n\n env, agent, multiagent, use_lstm, policy_agent_mapping, state_init, action_init = instantiate_rollout(rllib_config, checkpoint)\n if adv_num:\n reset_env(env, 1)\n # high = np.array([1.0, 90.0, env.max_cart_vel, env.max_pole_vel])\n # env.observation_space = spaces.Box(low=-1 * high, high=high, dtype=env.observation_space.dtype)\n if callable(env_modifier):\n env_modifier(env)\n elif type(env) is MultiarmBandit:\n env.transfer = env_modifier\n elif len(env_modifier) > 0:\n setattr(env, env_modifier[0], env_modifier[1])\n rewards, step_num = run_rollout(env, agent, multiagent, use_lstm, policy_agent_mapping,\n state_init, action_init, num_rollouts, render, adv_num)\n\n with open('{}/{}_{}_rew.txt'.format(outdir, output_file_name, test_name),\n 'wb') as file:\n np.savetxt(file, rewards, delimiter=', ')\n\n print('The average reward for task {} is {}'.format(test_name, np.mean(rewards)))\n print('The average step length for task {} is {}'.format(test_name, np.mean(step_num)))\n\n return np.mean(rewards), np.std(rewards), np.mean(step_num), np.std(step_num)", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to 
allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)", "def run_experiments():\n if True: # Change to False when done finding max_scoring_num_rolls\n six_sided_max = max_scoring_num_rolls(six_sided)\n print('Max scoring num rolls for six-sided dice:', six_sided_max)\n rerolled_max = max_scoring_num_rolls(reroll(six_sided))\n print('Max scoring num rolls for re-rolled dice:', rerolled_max)\n\n if False: # Change to True to test always_roll(8)\n print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n if False: # Change to True to test bacon_strategy\n print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n if False: # Change to True to test swap_strategy\n print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n \"*** You may add additional experiments as you wish ***\"", "def run_experiments():\n if True: # Change to False when done finding max_scoring_num_rolls\n six_sided_max = max_scoring_num_rolls(six_sided)\n print('Max scoring num rolls for six-sided dice:', six_sided_max)\n four_sided_max = max_scoring_num_rolls(four_sided)\n print('Max scoring num rolls for four-sided dice:', four_sided_max)\n\n if False: # Change to True to test always_roll(8)\n print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n if True: # Change to True to test bacon_strategy\n print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n if True: # Change to True to test swap_strategy\n print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n if True:\n print('final_strategy win rate:', average_win_rate(final_strategy))\n\n \"*** You may add additional experiments as you wish ***\"", "def simulationDelayedTreatment(numTrials):\n\n delays = [300,150,75,0]\n results = [[],[],[],[]]\n for place in range(0, 4):\n for trial in range(numTrials):\n viruses = []\n for num in range(100):\n viruses.append(ResistantVirus(0.1,0.05, {'guttagonol': False}, 0.005))\n patient = TreatedPatient(viruses, 1000)\n for delay in range(delays[place]):\n patient.update()\n patient.addPrescription(\"guttagonol\") \n for l in range(150):\n patient.update()\n results[place].append(patient.getTotalPop())\n pylab.hist(results[0])\n pylab.hist(results[1])\n pylab.hist(results[2])\n pylab.hist(results[3])\n pylab.show()\n for x in range(0, 10):", "def simulationTwoDrugsDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n\n first_drug = 150\n second_drug = 300\n steps = first_drug + second_drug\n total_vs = [0 for i in range(steps)]\n resis_vs = list(total_vs)\n results = list(total_vs)\n\n for trial in range(numTrials):\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n patient = TreatedPatient(viruses, maxPop)\n\n for step in range(steps):\n if step == first_drug:\n patient.addPrescription('guttagonol')\n elif step == second_drug:\n patient.addPrescription('grimpex')\n patient.update()\n total_vs[step] += patient.getTotalPop()\n resis_vs[step] += 
patient.getResistPop(['guttagonol'])\n resis_vs[step] += patient.getResistPop(['grimpex'])\n\n results.append(patient.getTotalPop())\n\n pylab.hist(results, 9)\n pylab.show()", "def run():\n step = 0\n while traci.simulation.getMinExpectedNumber() > 0:\n traci.simulationStep()\n step+=1\n traci.close()\n sys.stdout.flush()", "def simulationTwoDrugsDelayedTreatment(numTrials):\n # TODO", "def main():\n\tresults = []\n\n\tconfig = configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ '_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()", "def run(self, r, niters=10000):\n validator.validate_type(r, rng, param_name='r')\n validator.validate_positive(niters, param_name='niters')\n for _ in xrange(niters):\n # This goes against every object-oriented bone in my body, but the interface must be satisfied\n # And actually Python won't even let me do this because I'm accessing a method in a C++ class...\n # I'd have to write this whole thing in Cython or change the state interface to expose all these\n 
# functions separately...which might actually be worth doing.\n self._latent._thisptr.get()[0].sample_aux()\n self._latent._thisptr.get()[0].sample_state()\n self._latent._thisptr.get()[0].clear_empty_states()\n self._latent._thisptr.get()[0].sample_hypers(20)\n self._latent._thisptr.get()[0].sample_pi()\n self._latent._thisptr.get()[0].sample_phi()", "def experiment(agent, steps, runs, initialize=None):\n result = 0\n for r in range(runs):\n result += simulate(agent, steps, initialize)\n return result / runs", "def run_experiments():\n if False: # Change to False when done finding max_scoring_num_rolls\n six_sided_max = max_scoring_num_rolls(six_sided)\n print('Max scoring num rolls for six-sided dice:', six_sided_max)\n\n if False: # Change to True to test always_roll(8)\n print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n if False: # Change to True to test bacon_strategy\n print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n if False: # Change to True to test swap_strategy\n print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n if True: # Change to True to test final_strategy\n print('final_strategy win rate:', average_win_rate(final_strategy))\n\n \"*** You may add additional experiments as you wish ***\"" ]
[ "0.65833175", "0.6366467", "0.6313566", "0.61828446", "0.61517274", "0.61462194", "0.6137604", "0.61247057", "0.6109722", "0.60761017", "0.60535055", "0.6050241", "0.6050134", "0.602638", "0.60069436", "0.59209996", "0.59156096", "0.5860882", "0.5851478", "0.58401924", "0.58390415", "0.5827515", "0.58208793", "0.57845104", "0.5768721", "0.5757052", "0.57544047", "0.5749319", "0.57275766", "0.5725325" ]
0.7893863
0
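The record above only shows average_simulate_rerolling; the simulate_rerolling helper it calls is not included. A hedged sketch of what such a helper could look like is given below; the body is entirely an assumption, only the name and the behaviour described in the query (count the rolls needed, rerolling failures, until every die has succeeded) come from the record:

import random

def simulate_rerolling(probability: float, num_objects: int) -> int:
    # Hypothetical implementation: reroll only the dice that have not yet succeeded
    # and return how many rolls it took until every die has succeeded.
    remaining = num_objects
    rolls = 0
    while remaining > 0:
        rolls += 1
        # each remaining die succeeds independently with the given probability
        remaining = sum(1 for _ in range(remaining) if random.random() >= probability)
    return rolls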
Check if two numbers are within a certain error of each other
def are_close(num1: float, num2: float, error: float) -> bool: if abs(num1-num2) < error: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def consistancy_test(a, b, aErr, bErr = 0):#TODO: fully test this aproach\n return int(np.ceil(np.abs(b - a) / np.sqrt(bErr**2 + aErr**2)))", "def within_value(v1, v2):\n percentage = 0.1\n error_allowed = percentage * v1\n high = v1 + error_allowed\n low = v1 - error_allowed\n\n return low <= v2 <= high", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False", "def error(a, b, sqrt=False, both_signs=True):\n e = _sign_error(a, b, -1, sqrt)\n if both_signs:\n plus = _sign_error(a, b, 1, sqrt)\n e = min(e, plus)\n return e", "def divergence(a, b):\n return np.sign(a) != np.sign(b) and a != 0 and b != 0", "def nearlyEqual(self, x, y):\n return abs(x-y) < self.absoluteerrorrange", "def isGE(self, a : float, b : float) -> bool:\n return (a >= b - self.tol * max(abs(a), abs(b), 1.0)) #and (a >= b - 0.1)", "def expected(A, B):\n return 1 / (1 + 10 ** ((B - A) / 150))", "def equalWithinTolerance(a, b, tol):\n return abs(a - b) <= tol", "def is_error(ranking, references):\n return 1 if average_precision(ranking, references) < 1 else 0", "def close(a,b):\n return abs(a-b) < epsilon", "def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0", "def is_consistant(a, b, aErr, bErr, sigma):\n return consistancy_test(a, b, aErr, bErr) <= sigma", "def equals(a, b, tol=1e-10):\n return np.abs(a-b) <= tol", "def equalEnough(numA, numB, tol=0.000001):\r\n return math.fabs(numA - numB) <= tol", "def checarPs(self,p1,p2):\n return abs(p1-p2) < 0.00001", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def eq(a, b):\n return abs(a - b) < .05", "def abs_rel_err(a, b, eps=1.0e-10):\r\n return abs(a - b) / (abs(a) + abs(b) + eps)", "def abs_rel_err(a, b, eps=1.0e-10):\r\n return abs(a - b) / (abs(a) + abs(b) + eps)", "def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0", "def perform_wilcoxon_validation(series1, series2):\n differences, sorted_diffs = ExperimentUtil._calculate_differences(series1, series2)\n sorted_diffs.sort()\n position_diffs = ExperimentUtil._calculate_position_differences(differences, sorted_diffs)\n\n for index, score in enumerate(differences):\n if score < 0:\n position_diffs[index] = position_diffs[index] * -1\n\n sum_positive, sum_negative = ExperimentUtil._calculate_positive_negative_sum(position_diffs)\n T = min(sum_positive, sum_negative)\n # TODO: Se o tamanho de n for maior que 30, seria preciso usar a tabela T-Student\n if len(position_diffs) <= 30:\n # TODO: Com o valor de T, precisamos ver qual o valor critico e elaborar melhor a resposta no relatorio\n return T < ExperimentUtil.wilcox_table[len(position_diffs)]", "def almost_equal(x, y):\n return abs(x-y) < FP_PREC", "def nearly_equal(a, b, sig_fig=5):\n return a == b or int(a*10**sig_fig) == int(b*10**sig_fig)", "def abs_error(x, y):\n return np.sum(np.abs(x - y))", "def error(Y, X):\n return (Y - X) ** 2", "def almost_equal(a, b, tol=0.0001):\n \n try:\n return a==b or (max(a, b)/min(a, b)) - 1 < tol\n except ZeroDivisionError:\n return False", "def almostlte(a, b):\n return np.all(np.logical_or(a < b, almosteq(a, b)))", "def check_valid(x2,x1):\r\n if x2 < x1 or x2==x1:\r\n return True" ]
[ "0.72634363", "0.70664", "0.67628", "0.67364", "0.66185075", "0.6564737", "0.6547066", "0.65314746", "0.6495302", "0.6487915", "0.64297456", "0.6420305", "0.64202654", "0.6413313", "0.63957506", "0.63926774", "0.63817656", "0.6378069", "0.6351713", "0.63172364", "0.63172364", "0.63051784", "0.6296244", "0.6238013", "0.6232678", "0.62267995", "0.62141794", "0.6199312", "0.61918926", "0.61874616" ]
0.76745415
0
Some test cases for probability_of_all_successes
def test_probability_of_all_successes():
    assert(probability_of_all_successes(1/2, 1, 2) == 0.25)
    assert(are_close(probability_of_all_successes(1/6, 1, 2), 1/36, 0.001))
    assert(are_close(probability_of_all_successes(1/2, 2, 2), 7/16, 0.001))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number< p:\n n_success += 1\n\n return n_success", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success", "def test_11(self):\n for _ in range(1000):\n num_types = np.random.randint(1, 10)\n edu_start = np.random.randint(10, 100)\n type_shares = np.random.normal(0, 1, size=num_types * 2)\n\n args = [type_shares, np.array([edu_start])]\n\n py = get_conditional_probabilities(*args)\n fort = fort_debug.wrapper_get_conditional_probabilities(*args + [num_types])\n\n assert_almost_equal(np.sum(py), 1.0)\n assert_almost_equal(py, fort)", "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success", "def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials", "def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._imequalize(results)\n return results", "def test_strategy(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C])\n self.responses_test([C, C, C, C, D], [C, C, C, D, C], [D])\n self.responses_test([C] * 11, [C] * 10 + [D], [C])", "def is_successful(self):\n\t\treturn randint(1, 100) <= self.get_success_probability()", "def compute_expected_return_(domain, N, policy, p_init, s_init):\r\n pos = p_init\r\n speed = s_init\r\n expected_return = 0\r\n\r\n for i in range(N):\r\n if domain.terminalState(pos, speed):\r\n break\r\n action = policy((pos, speed))\r\n if isinstance(action,str):\r\n action = domain.getAction(action)\r\n new_pos, new_speed = 
domain.getNextState(pos, speed, action)\r\n r = domain.getReward(pos, speed, action, new_pos, new_speed)\r\n expected_return += ((domain.discount_factor)**i)*r\r\n pos = new_pos\r\n speed = new_speed\r\n \r\n return expected_return", "def successes(predictions,truth):\n\ttotal = len(predictions)\n\tcorrect = 0.0\n\tfor p in predictions:\n\t\tif p == truth:\n\t\t\tcorrect += 1\n\t\telse:\n\t\t\tprint truth,\"\\t\",p\n\treturn correct", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def run_tests(test_count=1000, buyer_count=10):\n\n found_error = False\n\n for i in range(test_count):\n\n bp, sp, bw = get_preferences(buyer_count)\n matches = get_matches(bp, sp, bw)\n\n if not check_stability(bp, sp, matches):\n print('ERROR!!!')\n found_error = True\n\n if not found_error:\n print('Executed {} tests without errors'.format(test_count))", "def check_overall_probability(self, x, expected_class):\n class_probability = self.calculate_class_apriori_probability(expected_class)\n view1_probability = self.check_probability(x[0], self.best_neighbours[0], expected_class, self.mfeat_fac_classifier, 0)\n view2_probability = self.check_probability(x[1], self.best_neighbours[1], expected_class, self.mfeat_fou_classifier, 1)\n view3_probability = self.check_probability(x[2], self.best_neighbours[2], expected_class, self.mfeat_kar_classifier, 2)\n return ((1 - 3) * class_probability) + view1_probability + view2_probability + view3_probability", "def test_affect_of_strategy(self):\n self.responses_test([C, C, C], [C, C, C], [C, C, C])\n # Make sure that the retaliations are increasing\n # Retaliate once and forgive\n self.responses_test([C], [D], [D])\n self.responses_test([C, D], [D, C], [C])\n self.responses_test([C, D, C], [D, C, C], [C])\n # Retaliate twice and forgive\n self.responses_test([C, D, C], [D, C, D], [D, D])\n self.responses_test([C, D, C, D, D], [D, C, D, C, C], [C])\n # Opponent defection during retaliation doesn't increase retaliation period\n self.responses_test([C, D, C, D, D], [D, C, D, D, C], [C])\n # Retaliate thrice and forgive\n self.responses_test([C, D, C, D, D, C], [D, C, D, C, C, D], [D, D, D])\n history_1 = [C, D, C, D, D, C, D, D, D]\n history_2 = [D, C, D, C, C, D, C, C, C]\n self.responses_test(history_1, history_2, [C])", "def test_probability(self, dataset = None):\n\n\t\tdataset = self.vectorize(dataset) if (dataset != None) else self.testing_set_vector;\n\n\t\tprediction = self.classifier.decision_function(dataset)\n\n\t\treturn list(map(lambda p: (1 / (1 + math.exp(-p))), prediction))", "def test_bp_example():\n signal = np.array([4, 7, 9, 10, 6, 11, 3])\n\n pe = permutation_entropy(signal, 2)\n\n assert 0.91 < pe < 0.92 # Should be approx 0.918.\n\n pe = permutation_entropy(signal, 3)\n\n assert 1.52 < pe < 1.53 # Should be approx 1.522.", "def test_handcrafted_examples(self):\n for i in range(1000):\n self.assertEqual(perfectd(0), True)\n self.assertEqual(prime(0), False)\n self.assertEqual(prime(2), True)\n self.assertEqual(prime(7), True)\n self.assertEqual(prime(15), False)\n self.assertEqual(perfectd(6), True)\n self.assertEqual(perfectd(15), False)", "def test_performance(env, policy, nb_episodes=500, max_steps=500):\n sum_returns = 0\n for i in range(nb_episodes):\n state = env.reset()\n done = False\n for j in range(max_steps):\n action = policy[state]\n state, reward, done, info = env.step(action)\n if done:\n sum_returns += reward\n break\n\n print(\"The success rate of the policy across {} episodes was {:.2f} 
percent.\".format(nb_episodes,sum_returns/nb_episodes*100))\n \n return sum_returns/nb_episodes*100", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def get_success_probabilities_from_results(results: Sequence[Sequence[Sequence[int]]]) \\\n -> Sequence[float]:\n num_shots = len(results[0])\n n_bits = len(results[0][0]) - 1\n\n probabilities = []\n # loop over all binary strings of length n_bits\n for result, bits in zip(results, all_bitstrings(2 * n_bits)):\n # Input nums are written from (MSB .... LSB) = (a_n, ..., a_1, a_0)\n num_a = bit_array_to_int(bits[:n_bits])\n num_b = bit_array_to_int(bits[n_bits:])\n\n # add the numbers\n ans = num_a + num_b\n ans_bits = int_to_bit_array(ans, n_bits + 1)\n\n # a success occurs if a shot matches the expected ans bit for bit\n probability = 0\n for shot in result:\n if np.array_equal(ans_bits, shot):\n probability += 1. / num_shots\n probabilities.append(probability)\n\n return probabilities", "def test_all_pairs_t_test_few_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a one-sided (low) Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean < Group 2 mean\r\n# The nonparametric p-values were calculated using 5 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. 
no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00354023978206\t0.0106207193462\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nfoo\tbaz\t-9.79795897113\t0.000304092472232\t0.000912277416695\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nbar\tbaz\t-3.0\t0.0288344428112\t0.0865033284337\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=5, tail_type='low')\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def entropy_permutation_test(ordered_pitch_types, single_pitch_pdf, conditional_joint_probabilities, total_transitions,\n n=1000):\n pitch_types, pitch_probabilities = zip(*single_pitch_pdf.items())\n permutation_entropies = []\n progress = progressbar.ProgressBar()\n\n for test_number in progress(xrange(n)):\n # create the new matrix\n permutation_counts = {}\n for first_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type] = {}\n for second_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type][second_pitch_type] = 0\n\n pitch_permutation = numpy.random.choice(pitch_types, total_transitions, p=pitch_probabilities)\n current_pitch = numpy.random.choice(pitch_types, p=pitch_probabilities)\n for next_pitch in pitch_permutation:\n permutation_counts[current_pitch][next_pitch] += 1\n current_pitch = next_pitch\n\n joint_probabilities, _, _ = joint_probabilities_from_transitions(ordered_pitch_types, permutation_counts)\n permutation_entropies.append(entropy_from_probability_matrix(joint_probabilities))\n\n joint_entropy = entropy_from_probability_matrix(conditional_joint_probabilities)\n # print 'Mean', numpy.mean(permutation_entropies)\n # print 'Standard deviation', numpy.std(permutation_entropies)\n # tdof, tloc, tscale = stats.t.fit(permutation_entropies)\n # print 'DF', tdof, 'Loc (mean)', tloc, 'Scale (SD)', tscale\n # t_score = (joint_entropy - tloc) / tscale\n # print stats.t.cdf(joint_entropy, df=tdof, loc=tloc, scale=tscale)\n\n mean, stddev = stats.norm.fit(permutation_entropies)\n print 'Mean = {mean}\\t StdDev = {stddev}'.format(mean=mean, stddev=stddev)\n z_score = (joint_entropy - mean) / stddev\n p_value = stats.norm.cdf(joint_entropy, mean, stddev)\n print 'The joint entropy has a Z-score of {z_score} which gives a P-value of {p_value}'.format(z_score=z_score,\n p_value=p_value)\n return z_score, p_value", "def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n", "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result=penalty.compute([1]*self.np, objective)\n self.assertEqual(tuple, type(result))\n # more tests go here", "def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count", "def probability(self, samples):\n pass", "def test(self):\n return self._test(result_count=1, failure_amount=1)" ]
[ "0.6936601", "0.6664292", "0.66631645", "0.6586792", "0.6584908", "0.6570972", "0.6527964", "0.64336675", "0.62824607", "0.6241327", "0.6203977", "0.6199201", "0.61862606", "0.615248", "0.60994565", "0.60717577", "0.6062275", "0.6058721", "0.60521495", "0.59509856", "0.5940808", "0.59283894", "0.59265584", "0.59183437", "0.5910044", "0.5902426", "0.5882822", "0.5880274", "0.58747643", "0.5865586" ]
0.84885967
0
Allows Users to edit their account info, such as their email and password. Implementation of the AccountSettingsView.
def account_settings(request):
    assert isinstance(request, HttpRequest)
    if request.method == "POST":
        form = UserChangeForm(request.POST, instance=request.user)
        if form.is_valid():
            user = form.save(commit=False)
            if len(form.cleaned_data["password"]) > 0:
                user.set_password(form.cleaned_data["password"])
            user.save()
            login(request, user)
            messages.info(request, "Changes saved.")
    else:
        user = request.user
        form = UserChangeForm(initial={'email': user.email})
    context = {
        'title': 'User Profile',
        'message': 'Edit Account Settings',
        'year': datetime.now().year,
        'user': request.user,
        'form': form,
    }
    return render(
        request,
        'app/accountSettings.html',
        context,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account_settings(request):\n # Include email and password changing in settings handler so form\n # validation errors appear on settings page.\n old_email = request.user.email\n if request.method == 'POST' and 'change_email' in request.POST:\n email_form = ChangeEmailForm(request.POST, old_email=old_email)\n if email_form.is_valid():\n request.user.email = email_form.cleaned_data['email']\n request.user.save()\n else:\n email_form = ChangeEmailForm({'email': old_email}, old_email=old_email)\n\n context = RequestContext(request, {\n 'email_form': email_form})\n # Python Social Auth sets a `backends` context variable, which includes\n # which social backends are and are not associated with the current user.\n return render(request, 'account-settings.html', context)", "def edit_account(request, id=None):\n account = id and get_object_or_404(Account, pk=id, user=request.user)\n if request.method == 'POST':\n form = EditAccountForm(instance=account, data=request.POST)\n if form.is_valid():\n account = form.save(commit=False)\n account.user = request.user\n account.save()\n return redirect(account)\n else:\n form = EditAccountForm(instance=account)\n return render(request, 'pages/form.html', {\n 'title': \"{} Account\".format(\"Edit\" if account else \"New\"),\n 'breadcrumbs': [account] if account else [],\n 'form': form,\n })", "def edit_account_view(request):\n if request.user.is_authenticated:\n print(request.user.organization.phone)\n form = EditAccountForm(initial={\n #User model\n 'username': request.user,\n # Organization model\n 'name': request.user.organization.name,\n 'phone': request.user.organization.phone,\n 'address_line_1': request.user.organization.address_line_1,\n 'address_line_2': request.user.organization.address_line_2,\n 'zip_code' : request.user.organization.zip_code,\n 'city': request.user.organization.city,\n 'country': request.user.organization.country,\n },\n user=request.user\n )\n\n #If we receive POST data\n context = {\n 'form': form,\n 'submit_button_text': _('Update account details')\n }\n if request.method == 'POST':\n # Create a form instance and populate it with data from the request (binding):\n form = EditAccountForm(request.POST, user=request.user)\n context.update({'form': form})\n # Check if the form is valid:\n if form.is_valid():\n request.user.username = form.cleaned_data['username']\n request.user.email = form.cleaned_data['username']\n request.user.organization.name = form.cleaned_data['name']\n request.user.organization.phone = form.cleaned_data['phone']\n request.user.organization.address_line_1 = form.cleaned_data['address_line_1']\n request.user.organization.address_line_2 = form.cleaned_data['address_line_2']\n request.user.organization.zip_code = form.cleaned_data['zip_code']\n request.user.organization.city = form.cleaned_data['city']\n request.user.organization.country = form.cleaned_data['country']\n\n request.user.save()\n request.user.organization.save()\n messages.success(request, _('Your profile details was updated.'), extra_tags='alert alert-success')\n\n return render(request, 'edit_account_form.html', context)\n #if user not authenticated\n else:\n #this should never occcur\n logger.warning(\"%s %s: %s tried to edit someone else's account\"%(datetime.datetime.now().strftime('[%d/%m/%Y %H:%M:%S]'), 'WARNING: ', request.user))\n messages.error(request, _(\"Can't edit profile when you are not logged in.\"), extra_tags='alert alert-danger')\n return HttpResponseRedirect(reverse('loginc'))", "def edit_account_info(self, 
short_name=None, author_name=None,\n author_url=None):\n return self._telegraph.method('editAccountInfo', values={\n 'short_name': short_name,\n 'author_name': author_name,\n 'author_url': author_url\n })", "def view_user_edit(self):\n\n logged_in = authenticated_userid(self.request)\n message = ''\n form = Form(self.request, schema=UserEditSchema,\n state=State(request=self.request))\n if form.validate():\n password = self.request.params['password']\n if self.context.validate_password(password):\n if self.request.params['new_password']:\n password = self.request.params['new_password']\n message = 'Successfully saved'\n email = self.request.params['email']\n self.context.edit(password, email)\n else:\n message = msg['password_invalid']\n return {\n 'message': message,\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n 'form': FormRenderer(form),\n 'email': self.context.email\n }", "def cmd_account_change_settings(client, args):\n fields = data_fields(args, client.allowed_account_fields)\n account_settings = client.change_account_settings(args.user, fields)\n generate_output({'account_settings': account_settings})", "def account():\n \n form = UpdateAccountForm()\n \n # perform actions when the form is submitted\n if form.validate_on_submit():\n # checking if the form contains a picture file\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n current_user.image_file = picture_file\n # changing the current user details with the form data\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('Your account has been updated!', 'success')\n return redirect(url_for('account'))\n # performs action if the form method is get\n elif request.method == 'GET':\n # setting the form data with the user data from the database\n form.username.data = current_user.username\n form.email.data = current_user.email\n image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n return render_template('account.html', title='Account',\n image_file=image_file, form=form)", "def change_account_details(main_page):\n\n header = \"What do you want to change?\"\n change_choices =(\n ('Name',change_name),\n ('Surname',change_surname),\n ('Password',change_password),\n ('To exit',log.exit)\n )\n\n change_account = Screen( header, change_choices, main_page.login,\n main_page.password)\n\n change_account.activate()", "def gameaccount_edit(request, account_id=None):\n\n gameaccount = None\n if account_id is not None:\n gameaccount = GameAccount.query.get(account_id)\n if gameaccount is None:\n raise NotFound()\n form = EditGameAccountForm(request.user, gameaccount)\n\n if request.method == 'POST':\n if 'cancel' in request.form:\n return form.redirect('account/gameaccounts')\n elif request.form.get('delete') and gameaccount:\n return redirect_to('account/gameaccounts/delete', account_id=account_id)\n elif form.validate(request.form):\n if gameaccount is None:\n gameaccount = form.make_gameaccount()\n msg = _('The game account %s was registered successfully.')\n icon = 'add'\n else:\n form.save_changes()\n msg = _('The game account %s was updated successfully.')\n icon = 'info'\n account_flash(msg % (escape(gameaccount.account)), icon)\n\n db.commit()\n if 'save_and_continue' in request.form:\n return redirect_to('account/gameaccounts/edit', account_id=gameaccount.id)\n return redirect_to('account/gameaccounts')\n return render_account_response('account/gameaccount_edit.html', 'gameaccounts',\n 
form=form.as_widget())", "def edit_basic_info(request):\n if request.POST:\n request.user.first_name = request.POST['first_name']\n request.user.last_name = request.POST['last_name']\n request.user.email = request.POST['email']\n request.user.save()\n request.user.userprofile.phone_number = request.POST['phone']\n request.user.userprofile.save()\n messages.add_message(request, messages.SUCCESS, 'Your changes have been saved.')\n return redirect('base_dashboard')\n\n return render(request, 'edit_basic_info.html', {'the_user': request.user})", "def edit_email(request):\n form = EmailForm(instance=request.user.profile)\n if request.method == \"POST\":\n form = EmailForm(data=request.POST, instance=request.user.profile)\n if form.is_valid():\n form.save()\n return redirect('profile')\n return render(request, 'accounts/forms.html', {'form': form})", "def edit_profile(self, name, username, email):\n return self.app.post('/_editProfile', data = dict(\n name = name,\n username = username,\n email = email\n ), follow_redirects = True)", "def user_edit(request):\n DEBUG = False\n\n if not has_permission('editUser', request.context, request):\n #print \"NOT has_permission !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n request.message = \"You do not have permissions to edit this user!\"\n raise HTTPForbidden\n\n # if no user_id in URL and not logged in, tell user to login\n\n try:\n user_id = request.matchdict['user_id']\n except KeyError, ke:\n #print ke\n return HTTPFound(location=request.route_url('not_found'))\n\n user = User.get_by_user_id(user_id)\n\n if user is None:\n msg = \"User was not founf in database.\"\n return HTTPFound(location=request.route_url('not_found'))\n\n form = Form(request, schema=UserSettingsSchema, obj=user)\n\n if 'form.submitted' in request.POST and not form.validate():\n # form didn't validate\n request.session.flash('Please check the form below for errors!')\n if DEBUG: # pragma: no cover\n print \"submitted but not validated!\"\n\n if 'form.submitted' in request.POST and form.validate():\n # ready for changing database entries!\n request.session.flash('form validated!')\n if DEBUG: # pragma: no cover\n print \"the form was submitted and validated.\"\n\n if form.data['surname'] != user.surname:\n if DEBUG: # pragma: no cover\n request.session.flash('surname was not same --> changing')\n print \"changing surname\"\n user.surname = form.data['surname']\n if form.data['lastname'] != user.lastname:\n if DEBUG: # pragma: no cover\n request.session.flash('lastname was not same --> changing')\n print \"changing lastname\"\n user.lastname = form.data['lastname']\n if form.data['email'] != user.email:\n request.session.flash('email was not same --> changing')\n user.email = form.data['email']\n if form.data['phone'] != user.phone:\n request.session.flash('phone was not same --> changing')\n user.phone = form.data['phone']\n if form.data['fax'] != user.fax:\n request.session.flash('fax was not same --> changing')\n user.fax = form.data['fax']\n if form.data['street'] != user.street:\n request.session.flash('street was not same --> changing')\n user.street = form.data['street']\n if form.data['number'] != user.number:\n request.session.flash('number was not same --> changing')\n user.number = form.data['number']\n if form.data['city'] != user.city:\n request.session.flash('city was not same --> changing')\n user.city = form.data['city']\n if form.data['postcode'] != user.postcode:\n request.session.flash('postcode was not same --> changing')\n user.postcode = form.data['postcode']\n 
if form.data['country'] != user.country:\n request.session.flash('country was not same --> changing')\n user.country = form.data['country']\n\n if DEBUG: # pragma: no cover\n print \"returning the form\"\n return {\n 'the_user_id': user_id,\n 'the_username': user.username,\n 'form': FormRenderer(form),\n }", "def edit_profile(request):\n if request.method == 'POST':\n form = EditProfileForm(request.POST, instance=request.user)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'Profile updated successfully.')\n return redirect('profile')\n\n else:\n messages.error(request, 'Invalid entry, please try again.')\n return redirect('edit_profile')\n else:\n form = EditProfileForm(instance=request.user)\n return render(request, 'accounts/edit_profile.html', {'form': form})", "def settings():\n # TODO: How should this be handled? Should a speaker's bio be stored\n # as a snapshot from event to event? It could be stored as part of a\n # talks.models.Presentation.\n from pygotham.forms import ProfileForm\n\n form = ProfileForm(request.form, obj=current_user)\n if form.validate_on_submit():\n form.populate_obj(current_user)\n db.session.commit()\n\n flash('Your profile has been updated.', 'success')\n\n return redirect(url_for('profile.settings'))\n\n return render_template('profile/settings.html', form=form)", "def user_settings(request):\n return redirect('edit_profile')", "def manage():\n if current_user.is_agency:\n form = ManageAgencyUserAccountForm(user=current_user)\n else:\n form = ManageUserAccountForm(user=current_user)\n\n if request.method == \"POST\":\n if form.validate_on_submit():\n update_openrecords_user(form)\n redirect(url_for(\"auth.manage\"))\n else:\n flash(\"Account cannot be updated.\", category=\"danger\")\n return render_template(\"auth/manage_account.html\", form=form)\n else:\n form.autofill()\n\n return render_template(\n \"auth/manage_account.html\", form=form, is_agency=current_user.is_agency\n )", "def account_update(request):\r\n params = request.params\r\n json_body = request.json_body\r\n user_acct = request.user\r\n\r\n if 'name' in params and params['name'] is not None:\r\n name = params.get('name')\r\n user_acct.name = name\r\n\r\n if 'name' in json_body and json_body['name'] is not None:\r\n name = json_body.get('name')\r\n user_acct.name = name\r\n\r\n if 'email' in params and params['email'] is not None:\r\n email = params.get('email')\r\n user_acct.email = email.lower()\r\n\r\n if 'email' in json_body and json_body['email'] is not None:\r\n email = json_body.get('email')\r\n user_acct.email = email.lower()\r\n\r\n return _api_response(request, user_acct.safe_data())", "def profile_edit():\n form = ProfileForm(obj=current_user)\n\n if form.validate_on_submit():\n form.populate_obj(current_user)\n\n try:\n correct = True\n db.session.commit()\n\n flash(_('Profile updated correctly'), 'success')\n\n return render_template('admin/profile/edit.html', form=form)\n\n except IntegrityError:\n # Email already exists\n correct = False\n form.errors.email.append(_('Email is already registered'))\n\n return render_template('admin/profile/edit.html', form=form)\n\n except Exception:\n # Catch anything unknown\n correct = False\n\n flash(_('Failed to update profile, contact an administrator'), 'error')\n\n return render_template('admin/profile/edit.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/edit.html', form=form)", "def edit_user(request, userid):\n if request.method == 'POST':\n form = 
UserDetailsForm(request.POST)\n if form.is_valid():\n # we're going to update the first_name, last_name, and email fields of this object\n user = request.user\n user.first_name = form.cleaned_data['first_name']\n user.last_name = form.cleaned_data['last_name']\n user.email = form.cleaned_data['email']\n user.save()\n return HttpResponseRedirect(reverse('view-profile', args=[userid]))\n else:\n form = UserDetailsForm(instance=request.user)\n\n return render(request, 'woofer/show_form.html', {\n 'form' : form,\n 'message' : None,\n 'form_action' : reverse('edit-user', args=[userid]),\n 'title' : \"Edit Account\"\n })", "def account():\n form = UpdateAccountForm()\n new_project_form = ProjectForm()\n\n if form.validate_on_submit():\n\n if form.picture.data: # if statement responsible for change of default picture\n picture_file = save_image(form.picture.data)\n current_user.img_file = picture_file\n\n current_user.user_name = form.user_name.data\n current_user.email = form.email.data\n db.session.commit()\n flash(\"Changes saved\", \"success\")\n\n return redirect(url_for('users.account'))\n\n elif request.method == \"GET\":\n form.user_name.data = current_user.user_name\n form.email.data = current_user.email\n\n img_file = url_for('static', filename='images/' + current_user.img_file)\n\n return render_template('account.html',\n title=\"Account\",\n form=form,\n img_file=img_file,\n new_project_form=new_project_form)", "def manage_edit_save(self, REQUEST):\n self._config.update(ldap_config.read_form(REQUEST.form, edit=True))\n REQUEST.RESPONSE.redirect(self.absolute_url() + '/manage_edit')", "def edit_profile(request):\n profile_to_edit = get_object_or_404(UserProfile, user=request.user)\n if request.method == \"POST\":\n form = UserProfileForm(request.POST, instance=profile_to_edit)\n if form.is_valid:\n form.save()\n messages.success(request, \"Profile updated succesfully\")\n return redirect('profile')\n else:\n messages.error(request, \"Updated failed. 
\\\n Please ensure the form is valid\")\n else:\n profile_form = UserProfileForm(instance=profile_to_edit)\n template = 'profiles/edit_profile.html'\n context = {\n 'form': profile_form,\n }\n return render(request, template, context)", "def update_user(self):\n self.client.force_authenticate(user=self.user)\n self.response = self.client.patch(\n reverse(\n 'edit_account',kwargs={ 'pk': self.user.id}),\n self.updated_data, format='json'\n )\n self.user = CustomUser.objects.get(username=self.user.username)", "def profile(request, info=\"\", error_msg=\"\", messages=\"\"):\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n except LoggedInButFailedGetGeniUserError:\r\n return _show_failed_get_geniuser_page(request)\r\n\r\n email_form = forms.gen_edit_user_form(instance=user)\r\n affiliation_form = forms.gen_edit_user_form(instance=user)\r\n password_form = forms.EditUserPasswordForm()\r\n\r\n if request.method == 'POST':\r\n if 'affiliation' in request.POST:\r\n affiliation_form = forms.gen_edit_user_form(('affiliation',), request.POST, instance=user)\r\n if affiliation_form.is_valid():\r\n new_affiliation = affiliation_form.cleaned_data['affiliation']\r\n interface.change_user_affiliation(user, new_affiliation)\r\n info =\"Affiliation has been successfully changed to %s.\" % (user.affiliation)\r\n elif 'email' in request.POST:\r\n email_form = forms.gen_edit_user_form(('email',), request.POST, instance=user)\r\n if email_form.is_valid():\r\n new_email = email_form.cleaned_data['email']\r\n interface.change_user_email(user, new_email)\r\n info =\"Email has been successfully changed to %s.\" % (user.email)\r\n elif 'password1' in request.POST:\r\n password_form = forms.EditUserPasswordForm( request.POST, instance=user)\r\n if password_form.is_valid():\r\n new_password = password_form.cleaned_data['password1']\r\n interface.change_user_password(user, new_password)\r\n info =\"Password has been successfully changed\"\r\n\r\n username = user.username\r\n affiliation = user.affiliation\r\n email = user.email\r\n port = user.usable_vessel_port\r\n has_privkey = user.user_privkey != None\r\n #currently not used, needed if editing user port is allowed\r\n #port_range = interface.get_useable_ports()\r\n #port_range_min = port_range[0]\r\n #port_range_max = port_range[-1]\r\n\r\n return render_to_response('control/profile.html',\r\n {'email_form' : email_form,\r\n 'affiliation_form' : affiliation_form,\r\n 'password_form' : password_form,\r\n 'username' : username,\r\n 'affiliation' : affiliation,\r\n 'email' : email,\r\n 'port' : port,\r\n 'api_key' : user.api_key,\r\n 'has_privkey' : has_privkey,\r\n #'port_range_min' : port_range_min,\r\n #'port_range_max' : port_range_max,\r\n 'info' : info,\r\n 'error_msg' : error_msg,\r\n 'messages' : messages},\r\n context_instance=RequestContext(request))", "def setting_profile(request):\n page = \"profile\"\n if request.method == \"POST\":\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user) # Important!\n messages.success(request, '密碼更改成功!')\n return redirect('ProfileSetting')\n else:\n messages.error(request, '密碼更改失敗,請填寫正確資料。')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'restset/profileSet.html', {'page': page, 'form': form})", "def cmd_account_settings(client, args):\n account_settings = client.get_account_settings(args.username)\n data = account_settings.__dict__\n generate_output({'account_settings': data})", "def 
edit_profile():\n form = EditProfileForm()\n if request.method == 'GET':\n form.first_name.data = current_user.first_name\n form.first_name.data = current_user.first_name\n form.last_name.data = current_user.last_name\n form.email.data = current_user.email\n form.address_1.data = current_user.address_1\n form.address_2.data = current_user.address_2\n form.city.data = current_user.city\n form.state.data = current_user.state\n form.zipcode.data = current_user.zipcode\n form.telephone.data = current_user.telephone\n if form.validate_on_submit():\n form.last_name.data = form.last_name.data\n current_user.first_name = form.first_name.data\n current_user.last_name = form.last_name.data\n current_user.email = form.email.data\n current_user.address_1 = form.address_1.data\n current_user.address_2 = form.address_2.data\n current_user.city = form.city.data\n current_user.state = form.state.data\n current_user.zipcode = form.zipcode.data\n current_user.telephone = form.telephone.data\n db.session.commit()\n flash(('Your changes have been saved.'))\n\n return redirect(url_for('edit_profile'))\n\n return render_template('edit_profile.html', title=('Edit Profile'),\n form=form)", "def user_account_settings(self) -> pulumi.Input['UserAccountSettingsArgs']:\n return pulumi.get(self, \"user_account_settings\")", "def update(self, account):\n model = models.load('Account', account)\n return self.client.update_account(model=model)" ]
[ "0.7434128", "0.70122176", "0.65583986", "0.636276", "0.6248716", "0.61849844", "0.6114714", "0.6070023", "0.60032195", "0.6001212", "0.58814716", "0.58777857", "0.5845628", "0.5818513", "0.5755409", "0.5748084", "0.5716239", "0.5675303", "0.5671848", "0.5655881", "0.5651442", "0.56513137", "0.5649804", "0.5649049", "0.5633479", "0.5633389", "0.56041497", "0.5593269", "0.5585969", "0.5575906" ]
0.76723003
0
renders the trending page
def trending(request):
    assert isinstance(request, HttpRequest)
    try:
        stocks = StockList.objects.all()
        hold = []
        count = 0
    except StockList.DoesNotExist:
        return print("No Stocks Available")
    while len(hold) < 8:
        for stock in stocks:
            stock.trend = stock.positiveSentimentCount + stock.negativeSentimentCount
            if stock.trend >= count:
                hold.append(stock)
                count = stock.trend
    context = {
        'title': 'Trending',
        'year': datetime.now().year,
        'user': request.user,
        'stocks': stocks,
        'hold': hold,
    }
    return render(
        request,
        'app/trending.html',
        context,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trending(request):\n\titems = Item.objects.all()\n\ttrending = []\n\n\tfor item in Item.objects.order_by('-dailyVisits'):\n\t\t#Include items that have been uploaded within the past day and havent been sold\n\t\tif (date.today() - item.datePosted).days <= 0 and item.sold_to == None:\n\t\t\tif (len(trending) <= 5):\n\t\t\t\ttrending.append(item)\n\t\telse:\n\t\t\titem.dailyVisits = 0\n\t\t\titem.save()\n\n\t#If there are not enough items in the trending list, add older items to the list\n\tif len(trending) <= 5:\n\t\tfor item in Item.objects.order_by('-dailyVisits'):\n\t\t\tif ((len(trending) <= 5) and (item.sold_to == None) and (item not in trending)):\n\t\t\t\ttrending.append(item)\n\n\tcontext_dict = {\"trendingItems\": trending[0:3], \"search_bar\" :Search_bar()}\n\treturn render(request, 'tailored/index.html', context_dict)", "def page_dashboard(state):\n\n st.title(\":chart_with_upwards_trend: Prediction Results Dashboard\")\n\n st.markdown(\"# Select Stocks to View Results:\")\n if state.finalized_data:\n for stock_data in state.finalized_data:\n st.write(\"---\")\n st.markdown(\"## \" + stock_data[\"stock\"])\n if st.checkbox(\"View Results for \" + stock_data[\"stock\"]):\n\n ############################################\n\n st.markdown(\"### Historical Predictions:\")\n\n df2 = pd.DataFrame.from_dict(stock_data[\"prev_predictions\"])\n\n select_lbl = (\n \"Enter the names of models for \" + stock_data[\"stock\"] + \":\"\n )\n models_selections = st.multiselect(\n label=select_lbl,\n options=df2.columns,\n ) # allow users to display specific model results on dataframe graph\n\n if not models_selections: # if nothing is selected show all models!\n st.line_chart(df2)\n else:\n st.line_chart(df2[models_selections])\n\n st.markdown(\n \"*Note:* 'Prices' are the actual prices for those days. 
The rest are model predictions for those days.\\nPrices (in USD) are on the y-axis, the day number in the data is on the x-axis.\"\n )\n\n ############################################\n\n st.markdown(\"### Future (Next-Day) Predictions:\")\n\n df = pd.DataFrame()\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"swing_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"next_day_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame([stock_data[\"prediction_results\"][\"model_scores\"]])\n )\n\n df.index = [\n \"Swing Predicton\",\n \"Price Prediction ($)\",\n \"Model Fit Score\",\n ]\n df = df.transpose()\n df # display chart\n\n st.markdown(\n \"- The current price of the stock is *$\"\n + str(\n round(stock_data[\"prediction_results\"][\"current_prev_close\"], 2)\n )\n + \"*.\"\n )\n\n if state.period == \"1mo\":\n st.markdown(\"- *Recommended Model (for 1mo):* SVR-RBF\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"6mo\":\n st.markdown(\n \"- *Recommended Model (for 6mo):* SVR-Poly (most recommended), LR, EN, or Lasso.\"\n )\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"1y\":\n st.markdown(\"- *Recommended Model (for 1yr):* SVR-Poly\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n else:\n st.markdown(\n \"- *Note:* View the home screen for information about the best models and training data size combinations.\"\n )\n\n ############################################\n st.markdown(\"### View Other Information:\")\n\n if st.checkbox(\n \"View \" + stock_data[\"stock\"] + \"'s Model Efficiency Timings\"\n ):\n st.markdown(\"#### Model Efficiencies:\")\n st.markdown(\n \"Shows the time in seconds it took models to complete specific tasks:\"\n )\n df3 = pd.DataFrame()\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"training_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"testing_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"new_predictions_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"prev_predictions_times\"]]\n )\n )\n df3.index = [\n \"Training\",\n \"Testing/Scoring\",\n \"Future Predictions\",\n \"Historical Predictions\",\n ]\n df3 = df3.transpose()\n df3\n\n ############################################\n\n if st.checkbox(\"View \" + stock_data[\"stock\"] + \"'s Information\"):\n st.markdown(\"#### Company Information:\")\n for key in stock_data[\"stock_info\"].keys():\n st.write(\"*\", key + \":\", stock_data[\"stock_info\"][key])\n else:\n st.markdown(\n \"## Generate data to populate and initialize this page by going to the 'Settings' page and running the tool!\"\n )", "def graphs():\n return render_template(\"graphs.html\")", "def draw_html():\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n # print(\"**********\")\n # print(station_name)\n # print(\"**********\")\n # print(\"**********\")\n # print(station_data.stationName)\n # print(\"**********\") \n if station_name:\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = 
station_data.loc[station_data.stationReference == station_reference]\n # print(station)\n # print(\"**********\")\n # print(station)\n # print(\"**********\")\n result_station = station.iloc[0]\n # print(\"**********\")\n # print(result_station)\n # print(\"**********\")\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n\n statistic = request.args.get(\"statistic\")\n if statistic is not None:\n statistic = statistic.split(',')\n\n if result_station is not None:\n tide_values = data.station_tides(result_station.stationName, time_from, time_to).reset_index()\n tide_values.rename(columns={result_station.stationName: 'tideValue'}, inplace=True)\n return tide_values.to_html(index=False)\n\n if statistic is not None:\n # print(\"**********\")\n # print(statistic)\n # print(\"**********\")\n frames = []\n for statistic_method in statistic:\n if statistic_method == 'max':\n frames.append(data.max_tides(time_from, time_to))\n elif statistic_method == 'min':\n frames.append(data.min_tides(time_from, time_to))\n else:\n frames.append(data.mean_tides(time_from, time_to))\n # print(\"**********\")\n # print(result)\n # print(\"**********\")\n\n result = pd.concat(frames, axis=1, keys=statistic).reset_index()\n return result.to_html(index=False)", "def performance_analysis(request):\n context = {}\n\n return render(request, 'classroom_main/performance_analysis.html', context)", "def index():\n return render_template(\"charts.html\")", "def graphing1():\n return render_template('graph1.html')", "def chart(request):\n assert isinstance(request, HttpRequest)\n filename = 'ppg_RawDataSheet13.mat'\n subtitle = 'VerityDB/' + filename\n return render(\n request,\n 'research/chart.html',\n {\n 'title':'Chart',\n 'message':'Highcharts Based',\n 'year':datetime.now().year,\n #'data': content['val'][0:11]\n 'temp': models.load_data(),\n 'test': models.load_data_filename(filename),\n 'subtitle_text': subtitle,\n }\n )", "def analyze(request, *args, **kwargs):\n\n mode = 'lines+markers'\n\n tickers = Stock.objects.distinct(\n 'ticker').values_list('ticker', flat=True)\n tickers_dict = {ticker: [] for ticker in tickers}\n tickers_count = tickers.count()\n\n actual_dates = Stock.objects.values('date').annotate(\n dcount=Count('date')).filter(dcount=tickers_count).values_list(\n 'date', flat=True).order_by('date')\n date_list = list(actual_dates)\n\n data = Stock.objects.filter(date__in=actual_dates).order_by('date')\n\n for item in data.values('ticker', 'close', 'oopen'):\n tickers_dict[item['ticker']].append(\n round((item['close']-item['oopen'])*100/item['oopen'], 2)\n )\n\n scatters = [Scatter(x=date_list, y=tickers_dict[obj], mode=mode, name=obj,\n opacity=0.8, visible='legendonly') for obj in tickers_dict]\n figure = {'data': scatters, 'layout': {\n 'title': {\n 'text': 'Open-Closed comparision', 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center','yanchor': 'top'},\n 'yaxis_title': \"Daily percent\",\n 'xaxis_title': \"Years\",\n }}\n\n return render(request, \"analyze.html\", context={\n 'plot_div': plot(figure, output_type='div')})", "def graphing2():\n return render_template('graph2.html')", "def home():\n return render_template(\"d3_graph.html\")", "def graphs_kelly():\n return render_template(\"graphs-Kelly.html\")", "def schedule(request):\n return render(request, 'vaxcharts/schedule.html')", "def chart1(request):\n\n full_url = HttpRequest.build_absolute_uri(request)\n relative = 
HttpRequest.get_full_path(request)\n\n base_url = full_url[:-len(relative)]\n\n request_amount = ['10', '100', '200', '500', '1000']\n\n json_urls = list()\n xml_urls = list()\n\n for x in request_amount:\n json_urls.append(reverse('objects:leads_json', args=[x]))\n xml_urls.append(reverse('objects:leads_xml', args=[x]))\n\n json_data = list()\n xml_data = list()\n\n for x in json_urls:\n json_average=0\n for i in range (0,5):\n start = time.perf_counter()\n req = requests.get(base_url + x)\n end = time.perf_counter()\n json_average += (end-start)\n json_data.append((json_average)/5)\n\n for x in xml_urls:\n xml_average=0\n for i in range(0,5):\n start = time.perf_counter()\n req = requests.get(base_url + x)\n end = time.perf_counter()\n xml_average+=(end-start)\n xml_data.append((xml_average)/5)\n\n final_data = {\n 'labels': request_amount,\n 'datasets': [\n {\n 'label': 'JSON',\n 'backgroundColor': 'rgba(255, 99, 132, 0.2)',\n 'borderColor': 'rgba(255,99,132,1)',\n 'data': json_data,\n 'borderWidth': 2,\n 'yAxisID': 'first-y-axis'\n },\n {\n 'label': 'XML',\n 'backgroundColor': 'rgba(54, 162, 235, 0.2)',\n 'borderColor': 'rgba(54, 162, 235, 1)',\n 'data': xml_data,\n 'borderWidth': 2,\n 'yAxisID': 'first-y-axis'\n }\n ]\n }\n\n return JsonResponse(final_data)", "def period_chart(self,req,period,start,end,prior):\n # fetch the raw data\n period,start,end,prior=self.get_chart_period(req)\n todate='' if prior else 'to date'\n year=int(str(period)[:4])\n now=int(DATE())\n if req.alltime and (period in (now//10000,now//100)):\n req.title=f\"{req.alltime} {req._pl_chartkind}\"\n elif period>9999:\n date=DATE(period*100+1)\n req.title=f\"{req.alltime} {req._pl_chartkind} for {date.datetime.strftime('%B')} {year} {todate}\"\n else:\n req.title=f\"{req.alltime} {req._pl_chartkind} for {year} {todate}\"\n raw=self.list(asObjects=False,sql=self.sql)\n # process the raw data, so it is ready for the template\n req.data=[]\n for i in raw:\n try:\n ob=self.get(i[\"page\"])\n ob.plays=i[\"sum(times)\"] # monthly score is stored temporarily as self.plays\n req.data.append(ob)\n # is this the currently playing/paused track?\n if self.player.list and (ob.uid == self.transport.uid):\n req._pl_index=ob.uid # the display will use this to hilite the track \n except: # we have a deleted item - ignore it\n pass\n# for i in req.data:\n# print(i.uid, i.name, i.times)\n # set more constants for the template to use\n req.period=period\n req._pl_prevperiod=self.prevperiod(period)\n if prior:\n req._pl_nextperiod=self.nextperiod(period) \n req._pl_len=len(req.data)\n req._pl_start=0\n # and return the template\n return self.charts(req)", "def main_page(self):\n return render_template(\"index.html\", traders_count=len(self.market.traders),\n current_iteration=self.market.current_iteration, traders=self.market.traders,\n buy_orders=self.market.buy_orders, sell_orders=self.market.sell_orders,\n current_stock_price=self.market.stock.price_history[-1])", "def index():\n \n currentDateTime = current_datetime()\n fromDateTime = calc_day(currentDateTime, -3)\n\n # Adjust if any graphs should be shown in index page\n # Temperatur=XML(render_graph(3, 5, fromDateTime, currentDateTime, show_dots=False))\n # Procent_smoke=XML(render_graph(3, 6, fromDateTime, currentDateTime, show_dots=False))\n # Kitchen_Stove=XML(render_graph(2, 3, fromDateTime, currentDateTime, show_dots=False))\n # Humid=XML(render_graph(3, 4, fromDateTime, currentDateTime, show_dots=False))\n # Brightness=XML(render_graph(3, 7, fromDateTime, currentDateTime, 
show_dots=False))\n # Hall_motions=XML(render_graph(1, 1, fromDateTime, currentDateTime, show_dots=False, hits=True))\n # Hall_door=XML(render_graph(1, 2, fromDateTime, currentDateTime, show_dots=False, on_off=['Open', 'Close']))\n\n # return dict(test=locals())\n # return dict(test=device_monitoring)\n return dict()", "def statistics():\n return render_template('statistics.html'), 200", "def index():\n # call the datetime function\n datetime_today = datetime.datetime.now(central)\n datestring_today = datetime_today.strftime(\"%m-%d-%Y\")\n return render_score_page(\"index.html\", datestring_today, \"bballfast.com\")", "def hchart(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'research/bar.html',\n {\n 'title':'Chart',\n 'message':'Highcharts Based',\n 'year':datetime.now().year,\n 'hchart_url':'bar',\n }\n )", "def stats(request):\n \n return render(request, 'stats.html')", "def index():\n graphs = [\n message_genre_bar_chart(df),\n category_bar_chart(df),\n top_words_bar_chart(df)\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)", "def index(request):\n return render(request, 'vaxcharts/home.html')", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n 
# bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def regression_page():\n return render_template(\"regr-matmortality.html\")", "def __render(self):\r\n scores = self.__score_repository.get_high_scores()\r\n self.__renderer.draw(\"high_scores\", scores=scores)", "def show_visualizations(self, number = -1):\n instance = self.instance\n if number > instance.view_num:\n print(\"In function show_visualizations: Error, input number greater than the view numbers.\")\n return Page()\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n G = myGraph(instance.view_num)\n for i in range(instance.view_num):\n view = instance.tables[instance.views[i].table_pos].views[instance.views[i].view_pos]\n G.addNode(view)\n G.getSim()\n result = G.getTopK(instance.view_num)\n if number != -1:\n begin = number - 1\n end = number\n else:\n begin = 0\n end = instance.view_num\n page = Page()\n for order in range(begin, end):\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n view = G.nodes[result[order]]\n else:\n view = instance.tables[instance.views[order].table_pos].views[instance.views[order].view_pos]\n data = {}\n data['order'] = order\n data['chartname'] = instance.table_name\n data['describe'] = view.table.describe\n data['x_name'] = view.fx.name\n data['y_name'] = view.fy.name\n data['chart'] = Chart.chart[view.chart]\n data['classify'] = [v[0] for v in view.table.classes]\n data['x_data'] = view.X\n data['y_data'] = view.Y\n data['title_top'] = 5\n \n # 以下代码与html_handle相似\n margin = str(data['title_top']) + '%'\n \n if data['chart'] == 'bar':\n chart = (Bar().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart'] == 'pie': \n chart = (Pie().set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin)))\n elif data['chart'] == 'line': \n chart = (Line().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart']== 'scatter': \n chart = (Scatter().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(type_='value', name=data['x_name'], splitline_opts=opts.SplitLineOpts(is_show=True)),\n yaxis_opts=opts.AxisOpts(type_='value', name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n else :\n print (\"not 
valid chart\")\n \n if not data[\"classify\"] :\n attr = data[\"x_data\"][0]\n val = data[\"y_data\"][0]\n if data['chart'] == 'bar': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n if isinstance(attr[0], str):\n attr = [x for x in attr if x != '']\n attr = list(map(float, attr))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = list(map(float, val))\n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n page.add(chart)\n else :\n attr = data[\"x_data\"][0]\n for i in range(len(data[\"classify\"])) :\n val = data[\"y_data\"][i]\n name = (data[\"classify\"][i][0] if type(data[\"classify\"][i]) == type(('a','b')) else data[\"classify\"][i])\n if i == 0:\n if data['chart'] != 'pie' and data['chart'] != 'scatter':\n chart.add_xaxis(attr)\n if data['chart'] == 'bar': \n chart.add_yaxis(name, val, stack=\"stack1\", label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n attr_scatter = data[\"x_data\"][i]\n if isinstance(attr_scatter[0], str):\n attr_scatter = [x for x in attr_scatter if x != '']\n attr_scatter = list(map(float, attr_scatter))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = list(map(float, val))\n chart.add_xaxis(attr_scatter).add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n page.add(chart)\n return page", "def get(self):\n\n # Retrieve keyword from the HTML form. If no keyword provided, use a random suggested keyword.\n keyword = self.request.get(\"keyword\")\n if not keyword:\n suggested_keywords = [\"alarm clocks\", \"the future\", \"miller lite\", \"taco bell\", \"yoga\", \"netflix\",\n \"life\", \"traffic\", \"elon musk\", \"beards\", \"world trade\", \"pepsi\", \"amazon\"]\n indices = np.arange(len(suggested_keywords))\n random.shuffle(indices)\n keyword = suggested_keywords[indices[0]]\n\n # Get recent tweets based on the keyword, up to 300 maximum tweets.\n tweets = get_tweets(keyword, max_tweets=300)\n\n # Compute the sentiment of each tweet.\n v = VaderSentimentModel()\n sentiment_scores = [v.classify_sentiment(tw) for tw in tweets] # shape (ntweets,)\n\n # Label sentiment categorically, e.g. \"negative\" or \"positive\"\n M_sent = np.mean(sentiment_scores)\n map = {1 : \"positive\", 0 : \"negative\"}\n valence = map[int(M_sent > 0)]\n\n \"\"\"\n Create plots. \n \"\"\"\n\n #############\n # Plot #1:\n ############\n # Plot the distribution of tweets and sentiment.\n # Resources is CSS code that goes in the header of the HTML. Shared across all bokeh plots.\n # Script1 is javascript for this plot.\n # Div1 is an HTML container for the plot. Goes where you want the plot to appear.\n resources, script1, div1 = plot_tweets(tweets=tweets, sentiment_scores=sentiment_scores)\n\n #############\n # Plot #2:\n ############\n # Plot the key words that lead us to this classification.\n # Script2 is javascript for this plot.\n # Div2 is an HTML container for this plot. 
Goes where you want the plot to appear.\n # Requires the HTML to include the shared resources, generated above, in the <HEAD>\n script2, div2 = plot_reason(tweets=tweets, sentiment_scores=sentiment_scores)\n\n \"\"\"\n Create HTML output. \n \"\"\"\n\n # Load HTML template.\n # This is a functioning webpage, with some placeholders for the keywords and plots we have created.\n html_p = os.path.join(\"html\", \"index.html\")\n html = open(html_p, \"r\").read()\n\n # Fill in placeholders in the HTML with varibles we have created.\n term_to_value = {\n \"[[!KEYWORD]]\" : keyword,\n \"[[!VALENCE]]\" : valence,\n \"[[!BOKEH_SCRIPT]]\" : script1,\n \"[[!BOKEH_SCRIPT2]]\": script2,\n \"[[!BOKEH_DIV]]\" : div1,\n \"[[!BOKEH_RESOURCES]]\" : resources,\n \"[[!BOKEH_DIV2]]\" : div2\n }\n for term, val in term_to_value.items():\n html = html.replace(term, val)\n\n \"\"\"\n Write a response.\n This essentially returns HTML to the google app engine.\n This will render a webpage visible to the user. \n \"\"\"\n self.response.headers[\"Content-Type\"] = \"text/html\"\n self.response.write(html)", "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'research/index.html',\n {\n 'title':'Health Infinity',\n 'info' :'Medical BigData Platform',\n 'year' : datetime.now().year,\n 'temp': models.load_data(),\n 'test': models.hchart_str(),\n }\n )", "def datapage(datatype,ticker):\n\n\tinterval = \"60min\"\t\t# this is currently hardcoded but should be a user choice\n\tdata_key = \"Time Series (\" + str(interval) + \")\"\n\tdataset = get_response(datatype, ticker, interval).json()\n\n\tif \"Error Message\" in dataset:\n\t\treturn redirect(url_for('daytrader'))\n\t\n\tkeys = dataset[data_key].keys()\n\topen_vals = []\n\tdate_times = []\n\tfor key in keys:\n\t\tdate_times.append(key)\n\t\topen_val = float(dataset[data_key][key]['1. open'])\n\t\topen_vals.append(open_val)\n\n\tfor item in dataset:\n\t\tjson_type = str(item)\n\n\t# create stock object\n\tstockObj = stockObject(ticker, dataset)\n\t\n\tdatas = zip(reversed(date_times), reversed(open_vals))\n\n\n\tif request.method == \"POST\":\n\t\tif \"goBack\" in request.form:\n\t\t\treturn redirect(url_for('daytrader'))\n\n\treturn render_template('datapage.html',dataset=dataset,datas=datas,ticker=ticker,json_type=json_type)" ]
[ "0.68559766", "0.6295515", "0.6277298", "0.6245203", "0.622359", "0.61615133", "0.61368996", "0.6067943", "0.60238075", "0.59976786", "0.59772927", "0.5917164", "0.5844733", "0.5836843", "0.5825049", "0.5824816", "0.58095306", "0.5795138", "0.5763063", "0.5749309", "0.5745847", "0.5741387", "0.5734679", "0.5667474", "0.5632897", "0.5619144", "0.5617716", "0.5614807", "0.55585", "0.55550224" ]
0.6601207
1
Displays all stocks. User is able to filter stocks, using buttons provided to the user, by Price, Sentiment, and StockName. All of these filters can be toggled to be in either ascending or descending order. Direct implementation of the FilterStockView.
def stocks(request):
    try:
        stocks = StockList.objects.all()
    except StockList.DoesNotExist:
        stocks = None

    context = {
        'title': 'Filter Stocks',
        'year': datetime.now().year,
        'user': request.user,
        'stocks': stocks,
    }

    return render(
        request,
        'app/stocksview.html',
        context,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list(self, request):\n\n stocks = Stock.objects.all()\n serializer = StockSerializer(stocks, many=True)\n return Response(serializer.data, status.HTTP_200_OK)", "def retrieveAllDistinctStocks(self):\n return self.db.select_all_distinct_stocks()", "def history():\n \"\"\"Show portfolio of stocks\"\"\"\n all_rows = []\n rows = db.execute(\"SELECT * FROM history WHERE id = :id\",\n id=session['user_id'])\n if rows==None or len(rows) < 1:\n return render_template(\"history.html\", all_rows=all_rows)\n else:\n for row in rows:\n share_row = []\n share_row.append(row[\"symbol\"])\n share_row.append(row[\"shares\"])\n share_row.append(usd(row[\"price\"]))\n share_row.append(row[\"transacted\"])\n all_rows.append(share_row)\n return render_template(\"history.html\", all_rows=all_rows)", "def stock(request, stock_id):\n stock= Stock.objects.get(id=stock_id)\n entries= stock.entry_set.order_by('-date_added')\n context= {'stock': stock, 'entries': entries}\n return render(request, 'stock_trackers/stock.html', context)", "def get_stock(self, name: str=\"all\") -> List[QTableWidgetItem]:\n try:\n if name != \"all\":\n log.debug(f\"Getting the stock for {name.upper()}.\")\n with DBCursor(self.host) as cursor:\n cursor.execute(\"SELECT rowid, name, units, last_buy, cost_price, sell_price FROM items WHERE name = ?\", (name.lower(), ))\n result = cursor.fetchone()\n if result:\n log.debug(\"There was a product named like soo, returning a StoredProduct for it.\")\n return StoredProduct(*result).to_table()\n else:\n raise ProductNotFound(\"There was no product named like so.\")\n else:\n log.debug(\"Getting the stock for all products.\")\n with DBCursor(self.host) as cursor:\n cursor.execute(\"SELECT rowid, name, units, last_buy, cost_price, sell_price FROM items\")\n results = cursor.fetchall()\n if not results:\n log.error(\"There were no products to show at all.\")\n raise ProductsNotFound(\"There are no products to show.\")\n product_list = []\n for product in results:\n product_list.append(StoredProduct(*product).to_table())\n except Exception:\n log.critical(\"An exception was raised.\")\n raise\n else:\n log.debug(\"A StoredProduct list was consumated.\")\n return product_list", "def filter_in_stock(self, queryset, name, value):\n if str2bool(value):\n return queryset.filter(StockItem.IN_STOCK_FILTER)\n else:\n return queryset.exclude(StockItem.IN_STOCK_FILTER)", "def list(self, request, *args, **kwargs):\n\n stock = request.GET.get('stock', None)\n if stock:\n buys = self.compute_total_value(constants.BUY, stock)\n else:\n buys = self.compute_total_value(constants.BUY)\n\n summary = {'total_value': buys}\n return Response(summary, status=status.HTTP_200_OK)", "def index():\n stocks = []\n username = session.get(\"username\")\n symbol_list = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n cash_balance = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n total_value = cash_balance\n\n for sym in symbol_list:\n symbol = sym[\"stock_symbol\"]\n new_stock = Stock(username, symbol)\n stocks.append(new_stock)\n total_value += new_stock.quantity * new_stock.price\n\n\n return render_template(\"portfolio.html\", stocks = stocks, cash_balance=usd(cash_balance), total_value=usd(total_value))", "def stock(request, s_id):\n assert isinstance(request, HttpRequest)\n\n try:\n #Get the requested stock with (stock_id)\n stocks = StockList.objects.filter(stock_id=s_id).get()\n stock_id = 
stocks.stock_id\n stock_name = stocks.symbol\n stock_value = stocks.value\n #do we have a sentiment object?\n\n except StockList.DoesNotExist:\n #If no stock is not found, returns to stock view to search for new one.\n return redirect('stock')\n \n context = {\n 'title': 'Stocks',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stock_id': stock_id,\n 'stock_name': symbol,\n 'stock_value': value,\n }\n\n return render(\n request,\n 'app/stock.html',\n context\n )", "def index():\n # Use a place holder ':curr_id' to call the session id which is the user's id\n rows = db.execute(\"SELECT stocks.symbol, stocks.name, portfolio.shares FROM portfolio JOIN users ON users.id = portfolio.user_id JOIN stocks ON portfolio.stock_id = stocks.id WHERE users.id==:curr_id\", curr_id=session[\"user_id\"])\n # Make a select query only on cash to be able to display it in portfolio's table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:curr_id\", curr_id=session[\"user_id\"])\n\n # gets the current price of each stock queried\n if rows:\n for r in rows:\n r_shares = r[\"shares\"]\n r_symbol = r[\"symbol\"]\n # run lookup function to get current price\n dict_2 = lookup(r_symbol)\n # Adds the key \"price\" and its value to the dictionary \"rows\"\n r[\"price\"] = dict_2[\"price\"]\n # Calculates the grand total (stocks’ total value plus cash)\n total = sum([r[\"price\"]*r[\"shares\"] for r in rows]) + row_cash[0][\"cash\"]\n return render_template(\"portfolio.html\", rows=rows, row_cash=row_cash, total=total)", "def get_queryset(self):\n\n if getattr(self, 'swagger_fake_view', False):\n return Order.objects.none()\n\n account = self.request.user.account\n return StockShare.objects.filter(account=account, total_value__gt=0)", "def index():\n\n rows = db.execute(\n 'SELECT symbol, SUM(CASE WHEN operation = \"SELL\" THEN -shares ELSE shares END) shares FROM transactions WHERE id = :id GROUP BY symbol;', id=session['user_id'])\n\n cash = db.execute('SELECT cash FROM users WHERE id = :id', id=session['user_id'])[0]['cash']\n\n grand_total = cash\n\n for row in rows:\n stock = lookup(row['symbol'])\n\n row['name'] = stock['name']\n row['price'] = stock['price']\n row['total'] = row['shares'] * stock['price']\n\n grand_total += row['shares'] * stock['price']\n\n rows.append({\n 'symbol': 'CASH',\n 'cash': cash,\n 'total': grand_total\n })\n\n return render_template('index.html', stocks=rows)", "def portfolio_view(request):\n\n try:\n query = request.dbsession.query(Stock)\n user_entries = query.filter(Stock.account_id == request.authenticated_userid)\n except DBAPIError:\n return DBAPIError(DB_ERR_MSG, content_type='text/plain', status=500)\n\n return {'stocks': all_entries}", "def index():\n user_name = db.execute(\"SELECT username FROM users WHERE id = ?\", session[\"user_id\"])\n check = db.execute(\"SELECT name FROM main.sqlite_master WHERE type='table'\")\n #print(check)\n #print('stocks' not in check[0]['name'])\n if not any(c['name'] == 'stocks' for c in check):\n return render_template(\"index.html\", user_name=user_name)\n\n stocks = db.execute(\"SELECT * FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n total_value = user_name[0][\"cash\"]\n sum_stocks = db.execute(\"SELECT symbol, ammount FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n\n for stock in sum_stocks:\n total_value += 
stock[\"ammount\"] * lookup(stock[\"symbol\"])['price']\n \n #print(stocks)\n return render_template(\"index.html\", stocks=stocks, user_name=user_name, cash=usd(cash[0]['cash']), total_value=usd(total_value))", "def filters():\n states = list(storage.all('State').values())\n states.sort(key=lambda state: state.name)\n cities = list(storage.all('City').values())\n cities.sort(key=lambda city: city.name)\n amenities = list(storage.all('Amenity').values())\n amenities.sort(key=lambda amenity: amenity.name)\n\n return render_template('10-hbnb_filters.html', states=states,\n cities=cities, amenities=amenities)", "def get_data_of_stocks(self):\n\n indexes_to_remove = []\n # Request data for each stock\n for index, stock in enumerate(self.stock_list):\n stock.get_soups()\n stock.find_data()\n stock.print_report()\n self.print_progress(index)", "def collect_all_stock_data(self):\n for stock in self.stocks:\n self.add_stock(stock)", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - sold_shares\n current_price = int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def stock(request, *args, **kwargs):\n\n mode = 'lines'\n xaxis_title = 'Years'\n date_list = []\n open_list = []\n close_list = []\n low_list = []\n high_list = []\n ticker = request.GET.get('ticker', '')\n year = request.GET.get('year', '')\n month = request.GET.get('month', '')\n\n if month.isdigit():\n month = int(month)\n\n data = Stock.objects.filter(ticker__iexact=ticker).order_by('date')\n if year and year.isdigit():\n if month and month in MONTHS:\n data = 
data.filter(Q(date__year=year,\n date__month=month))\n xaxis_title = f'{MONTHS[month]} {year}'\n else:\n data = data.filter(Q(date__year=year))\n xaxis_title = year\n\n if not ticker or not data.exists():\n return HttpResponseRedirect('/stocks')\n title = f'{ticker} ({year})' if year else f'{ticker}'\n if data.exists():\n xy_data = data.values('date', 'oopen', 'close', 'low', 'high')\n for item in xy_data:\n date_list.append(item['date'])\n open_list.append(item['oopen'])\n close_list.append(item['close'])\n low_list.append(item['low'])\n high_list.append(item['high'])\n\n figure = {'data': [\n Scatter(x=date_list, y=high_list, mode=mode, name='high',\n opacity=0.8, marker_color='green'),\n Scatter(x=date_list, y=low_list, mode=mode, name='low',\n opacity=0.8, marker_color='red', visible='legendonly'),\n Scatter(x=date_list, y=open_list, mode=mode, name='open',\n opacity=0.8, marker_color='blue', visible='legendonly'),\n Scatter(x=date_list, y=close_list, mode=mode, name='close',\n opacity=0.8, marker_color='orange', visible='legendonly'),\n ], 'layout': {'title': {'text': title, 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center', 'yanchor': 'top'},\n 'yaxis_title': \"Value\", 'xaxis_title': xaxis_title\n }}\n\n plot_div = plot(figure, output_type='div')\n return render(request, \"index.html\", context={'plot_div': plot_div})", "def filter_queryset(self, queryset):\n params = self.request.query_params\n\n queryset = super().filter_queryset(queryset)\n\n if common.settings.stock_expiry_enabled():\n\n # Filter by 'expiry date'\n expired_date_lte = params.get('expiry_date_lte', None)\n if expired_date_lte is not None:\n try:\n date_lte = datetime.fromisoformat(expired_date_lte)\n queryset = queryset.filter(expiry_date__lte=date_lte)\n except (ValueError, TypeError):\n pass\n\n expiry_date_gte = params.get('expiry_date_gte', None)\n if expiry_date_gte is not None:\n try:\n date_gte = datetime.fromisoformat(expiry_date_gte)\n queryset = queryset.filter(expiry_date__gte=date_gte)\n except (ValueError, TypeError):\n pass\n\n # Filter by 'stale' status\n stale = params.get('stale', None)\n\n if stale is not None:\n stale = str2bool(stale)\n\n # How many days to account for \"staleness\"?\n stale_days = common.models.InvenTreeSetting.get_setting('STOCK_STALE_DAYS')\n\n if stale_days > 0:\n stale_date = datetime.now().date() + timedelta(days=stale_days)\n\n stale_filter = StockItem.IN_STOCK_FILTER & ~Q(expiry_date=None) & Q(expiry_date__lt=stale_date)\n\n if stale:\n queryset = queryset.filter(stale_filter)\n else:\n queryset = queryset.exclude(stale_filter)\n\n # Exclude stock item tree\n exclude_tree = params.get('exclude_tree', None)\n\n if exclude_tree is not None:\n try:\n item = StockItem.objects.get(pk=exclude_tree)\n\n queryset = queryset.exclude(\n pk__in=[it.pk for it in item.get_descendants(include_self=True)]\n )\n\n except (ValueError, StockItem.DoesNotExist):\n pass\n\n # Filter by \"part tree\" - only allow parts within a given variant tree\n part_tree = params.get('part_tree', None)\n\n if part_tree is not None:\n try:\n part = Part.objects.get(pk=part_tree)\n\n if part.tree_id is not None:\n queryset = queryset.filter(part__tree_id=part.tree_id)\n except Exception:\n pass\n\n # Exclude StockItems which are already allocated to a particular SalesOrder\n exclude_so_allocation = params.get('exclude_so_allocation', None)\n\n if exclude_so_allocation is not None:\n\n try:\n order = SalesOrder.objects.get(pk=exclude_so_allocation)\n\n # Grab all the active SalesOrderAllocations for this order\n 
allocations = SalesOrderAllocation.objects.filter(\n line__pk__in=[\n line.pk for line in order.lines.all()\n ]\n )\n\n # Exclude any stock item which is already allocated to the sales order\n queryset = queryset.exclude(\n pk__in=[\n a.item.pk for a in allocations\n ]\n )\n\n except (ValueError, SalesOrder.DoesNotExist):\n pass\n\n # Does the client wish to filter by the Part ID?\n part_id = params.get('part', None)\n\n if part_id:\n try:\n part = Part.objects.get(pk=part_id)\n\n # Do we wish to filter *just* for this part, or also for parts *under* this one?\n include_variants = str2bool(params.get('include_variants', True))\n\n if include_variants:\n # Filter by any parts \"under\" the given part\n parts = part.get_descendants(include_self=True)\n\n queryset = queryset.filter(part__in=parts)\n\n else:\n queryset = queryset.filter(part=part)\n\n except (ValueError, Part.DoesNotExist):\n raise ValidationError({\"part\": \"Invalid Part ID specified\"})\n\n # Does the client wish to filter by stock location?\n loc_id = params.get('location', None)\n\n cascade = str2bool(params.get('cascade', True))\n\n if loc_id is not None:\n\n # Filter by 'null' location (i.e. top-level items)\n if isNull(loc_id):\n if not cascade:\n queryset = queryset.filter(location=None)\n else:\n try:\n # If '?cascade=true' then include items which exist in sub-locations\n if cascade:\n location = StockLocation.objects.get(pk=loc_id)\n queryset = queryset.filter(location__in=location.getUniqueChildren())\n else:\n queryset = queryset.filter(location=loc_id)\n\n except (ValueError, StockLocation.DoesNotExist):\n pass\n\n # Does the client wish to filter by part category?\n cat_id = params.get('category', None)\n\n if cat_id:\n try:\n category = PartCategory.objects.get(pk=cat_id)\n queryset = queryset.filter(part__category__in=category.getUniqueChildren())\n\n except (ValueError, PartCategory.DoesNotExist):\n raise ValidationError({\"category\": \"Invalid category id specified\"})\n\n # Does the client wish to filter by BomItem\n bom_item_id = params.get('bom_item', None)\n\n if bom_item_id is not None:\n try:\n bom_item = BomItem.objects.get(pk=bom_item_id)\n\n queryset = queryset.filter(bom_item.get_stock_filter())\n\n except (ValueError, BomItem.DoesNotExist):\n pass\n\n # Filter by company (either manufacturer or supplier)\n company = params.get('company', None)\n\n if company is not None:\n queryset = queryset.filter(Q(supplier_part__supplier=company) | Q(supplier_part__manufacturer_part__manufacturer=company))\n\n return queryset", "def filter_queryset(self, queryset):\n params = self.request.query_params\n\n queryset = super().filter_queryset(queryset)\n\n # Filter by 'build'\n build = params.get('build', None)\n\n if build is not None:\n\n try:\n build = Build.objects.get(pk=build)\n\n queryset = queryset.filter(stock_item__build=build)\n\n except (ValueError, Build.DoesNotExist):\n pass\n\n # Filter by stock item\n item = params.get('stock_item', None)\n\n if item is not None:\n try:\n item = StockItem.objects.get(pk=item)\n\n items = [item]\n\n # Do we wish to also include test results for 'installed' items?\n include_installed = str2bool(params.get('include_installed', False))\n\n if include_installed:\n # Include items which are installed \"underneath\" this item\n # Note that this function is recursive!\n installed_items = item.get_installed_items(cascade=True)\n\n items += list(installed_items)\n\n queryset = queryset.filter(stock_item__in=items)\n\n except (ValueError, StockItem.DoesNotExist):\n 
pass\n\n return queryset", "def get_stocks(request):\n if request.method == 'GET':\n\n data = requests.get(\n 'https://www.alphavantage.co/query?',\n params=request.query_params\n )\n\n return Response({'data': json.loads(data.content.decode('utf-8')),\n 'request': {'method': request.method,\n 'path': request.path,\n 'params': request.query_params,\n },\n })", "def history():\n # Select stock info for every single stock transaction for the respective user\n rows = db.execute(\"SELECT symbol, shares, price, transacted FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Return template with the list that has each stock transaction info\n return render_template(\"history.html\", rows=rows)", "def index():\n stocks = db.execute(\"SELECT Symbol, Company, SUM(NumberOfShares) AS Shares, UnitPrice, SUM(TotalPrice) AS TotalPrice FROM \"\n \"portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n\n symbol = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n\n balance = cash[0][\"cash\"]\n grandTotal = 0\n for stock in stocks:\n grandTotal = grandTotal + stock[\"TotalPrice\"]\n\n grandTotal = grandTotal + balance\n\n return render_template(\"index.html\", stockList=stocks, cash=balance, totalAssets=grandTotal, currentUser=session.get(\"user_id\"))", "def hbnb_filters():\n bu = storage.all(State).values()\n bee = storage.all(Amenity).values()\n return render_template('10-hbnb_filters.html', burger=bu, beer=bee)", "def viewUserStocks(self, userID):\n message = None\n status = 1\n list = []\n count = 1\n for (a, b, c, d) in self.db.select_all_user_stocks(userID):\n list.append('{}) <b>{}:{}</b>: {}'.format(count, c, b, d))\n count += 1\n message = \"{}, here's the list of stocks you saved:\\n\\n\".format(\n userID) + \"\\n\".join(list)\n if not list:\n message = \"It appears you do not have any stocks saved. You can save a stock by selecting <b>Add a stock</b>.\"\n status = 0\n return (message, status)", "async def stocks(self, ctx):\n\t\tpass", "def index():\n\n user_id = session.get('user_id')\n table_name = f'stocks_user{user_id}'\n db.execute(\"CREATE TABLE IF NOT EXISTS ? (stock_symbol TEXT NOT NULL, shares NUMBER NOT NULL, price NUMBER NOT NULL, time TEXT NOT NULL)\", table_name)\n money = db.execute(\"SELECT dinheiro FROM users WHERE id = ?\", user_id)[0]['dinheiro']\n total_value_in_stocks = 0\n\n rows = db.execute('SELECT DISTINCT stock_symbol FROM ? WHERE NOT stock_symbol=\"DINHEIRO\" GROUP BY stock_symbol HAVING SUM(shares) >= 1', table_name)\n for row in rows:\n row[\"company_name\"] = lookup(row[\"stock_symbol\"])['name']\n row[\"price_stock\"] = lookup(row[\"stock_symbol\"])['price']\n row[\"shares\"] = db.execute(\"SELECT SUM(shares) FROM ? 
WHERE stock_symbol = ?\", table_name, row[\"stock_symbol\"])[0][\"SUM(shares)\"]\n total_value_in_stocks += row[\"shares\"] * row[\"price_stock\"]\n\n portfolio_value = total_value_in_stocks + money\n\n return render_template('index.html', rows=rows, money=money, portfolio_value=portfolio_value)", "def index():\n #if request.method == \"GET\":\n #Выбрать знак акции,и кол-во акции которые пренадлежат id\n #stocks_shares = db.execute(\"SELECT symbol, shares FROM total WHERE id=:id ORDER BY symbol\",\n #id=session[\"user_id\"])\n #return render_template(\"index.html\")\n #return redirect(url_for(\"index.html\"))\n return apology(\"TODO\")", "def get_all_stocks(userId):\r\n print(\"<get_all_stocks()>\")\r\n print(\"userId: \", userId)\r\n stocks = Stock.objects(user_id=userId)\r\n return jsonify(json.loads(stocks.to_json()))" ]
[ "0.63406605", "0.5798307", "0.5752256", "0.5661666", "0.5644087", "0.56410515", "0.5636392", "0.5636154", "0.5573223", "0.5552212", "0.5549842", "0.5541733", "0.55342644", "0.55234843", "0.5513628", "0.5484633", "0.5403669", "0.53891045", "0.5384445", "0.5316756", "0.53150934", "0.5312744", "0.53011376", "0.52851635", "0.52560455", "0.52535695", "0.52212036", "0.52211237", "0.52193123", "0.52037394" ]
0.7166876
0
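The FilterStockView description above mentions toggling the list by Price, Sentiment, and StockName in ascending or descending order, but the paired view only fetches all stocks. A minimal sketch of how that ordering could be wired into a Django view follows; the StockList fields ('symbol', 'value', 'sentiment'), the model import path, and the 'sort'/'order' query parameters are assumptions for illustration and are not part of the dataset.

# Illustrative sketch only, not taken from the dataset rows above.
from datetime import datetime

from django.shortcuts import render

from .models import StockList  # assumed location of the StockList model


def filtered_stocks(request):
    # Map the user-facing filter names to assumed model fields.
    allowed = {'price': 'value', 'sentiment': 'sentiment', 'name': 'symbol'}
    sort_key = allowed.get(request.GET.get('sort', 'name'), 'symbol')
    descending = request.GET.get('order', 'asc') == 'desc'

    # Prefix with '-' to flip between ascending and descending order.
    queryset = StockList.objects.order_by(
        ('-' + sort_key) if descending else sort_key
    )

    context = {
        'title': 'Filter Stocks',
        'year': datetime.now().year,
        'user': request.user,
        'stocks': queryset,
    }
    return render(request, 'app/stocksview.html', context)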
Displays the selected stock, providing the name, sentiment, and the price of said stock. Direct implementation of the StockView.
def stock(request, s_id):
    assert isinstance(request, HttpRequest)

    try:
        # Get the requested stock with (stock_id)
        stocks = StockList.objects.filter(stock_id=s_id).get()
        stock_id = stocks.stock_id
        stock_name = stocks.symbol
        stock_value = stocks.value
        # do we have a sentiment object?
    except StockList.DoesNotExist:
        # If the stock is not found, return to the stock view to search for a new one.
        return redirect('stock')

    context = {
        'title': 'Stocks',
        'year': datetime.now().year,
        'user': request.user,
        'stock_id': stock_id,
        'stock_name': stock_name,
        'stock_value': stock_value,
    }

    return render(
        request,
        'app/stock.html',
        context
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stock_view(request):\n if request.method == 'GET':\n try:\n symbol = request.GET['symbol']\n except KeyError:\n return {}\n try:\n response = requests.get(API_URL + '/stock/{}/company'.format(symbol))\n data = response.json()\n return {'company': data}\n except ValueError:\n raise HTTPNotFound()\n if request.method == 'POST':\n try:\n symbol = request.POST['symbol']\n except KeyError:\n raise HTTPBadRequest()\n\n try:\n response = requests.get(API_URL + '/stock/{}/company'.format(symbol))\n data = response.json()\n except ValueError:\n raise HTTPNotFound()\n\n isntance = Stock(**data)\n\n try:\n request.dbsession.add(instance)\n except DBAPIError:\n return Response(DB_ERR_MSG, content_type='text/plain', status=500)\n \n return HTTPFound(location=request.route_url('portfolio'))", "def stockButtonClicked(self):\n # Clear text edit box and get the stock symbol from combobox.\n self.central.text3.clear()\n stocksymbol = self.central.combobox.currentText()\n\n URL = 'https://finance.yahoo.com/quote/{0}/profile?p={0}'.format(stocksymbol)\n\n # Safely get the web page using the above URL.\n try:\n r = requests.get(URL)\n except:\n logging.error(\"Failed to get the web page: \" + URL)\n self.central.text3.setText(\"Failed to get the web page: \" + URL)\n return\n\n # Safely turn the response from requests into soup.\n try:\n html = r.text.encode('utf-8')\n soup = bs4.BeautifulSoup(html, 'lxml')\n except:\n logging.error(\"Failed on the soup\")\n self.central.text3.setText(\"Failed on the soup\")\n return\n\n # Safely extract data from the table.\n try:\n table = soup.find_all(\"table\")\n rows = table[0].find_all('tr')\n data = []\n for row in rows:\n cols = row.find_all('td')\n cols = [str.text.strip() for str in cols]\n data.append([str for str in cols if str])\n\n textdisplay = ''\n\n for x in data:\n for y in x:\n print(y)\n textdisplay += y\n textdisplay += '\\n'\n if y.isdigit():\n textdisplay += '\\n'\n self.central.text3.setText(textdisplay)\n\n except:\n logging.error(\"Failed to extract data from the table\")\n self.central.text3.setText(\"Failed to extract data from the table\")\n return\n\n self.updateGraph(symbol=stocksymbol)", "def stock(request, stock_id):\n stock= Stock.objects.get(id=stock_id)\n entries= stock.entry_set.order_by('-date_added')\n context= {'stock': stock, 'entries': entries}\n return render(request, 'stock_trackers/stock.html', context)", "def stocks(request):\n\n try:\n stocks = StockList.objects.all()\n except StockList.DoesNotExist:\n stocks = None\n\n context = {\n 'title': 'Filter Stocks',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stocks': stocks,\n }\n\n return render(\n request,\n 'app/stocksview.html',\n context,\n )", "def stock(request, *args, **kwargs):\n\n mode = 'lines'\n xaxis_title = 'Years'\n date_list = []\n open_list = []\n close_list = []\n low_list = []\n high_list = []\n ticker = request.GET.get('ticker', '')\n year = request.GET.get('year', '')\n month = request.GET.get('month', '')\n\n if month.isdigit():\n month = int(month)\n\n data = Stock.objects.filter(ticker__iexact=ticker).order_by('date')\n if year and year.isdigit():\n if month and month in MONTHS:\n data = data.filter(Q(date__year=year,\n date__month=month))\n xaxis_title = f'{MONTHS[month]} {year}'\n else:\n data = data.filter(Q(date__year=year))\n xaxis_title = year\n\n if not ticker or not data.exists():\n return HttpResponseRedirect('/stocks')\n title = f'{ticker} ({year})' if year else f'{ticker}'\n if data.exists():\n xy_data = data.values('date', 
'oopen', 'close', 'low', 'high')\n for item in xy_data:\n date_list.append(item['date'])\n open_list.append(item['oopen'])\n close_list.append(item['close'])\n low_list.append(item['low'])\n high_list.append(item['high'])\n\n figure = {'data': [\n Scatter(x=date_list, y=high_list, mode=mode, name='high',\n opacity=0.8, marker_color='green'),\n Scatter(x=date_list, y=low_list, mode=mode, name='low',\n opacity=0.8, marker_color='red', visible='legendonly'),\n Scatter(x=date_list, y=open_list, mode=mode, name='open',\n opacity=0.8, marker_color='blue', visible='legendonly'),\n Scatter(x=date_list, y=close_list, mode=mode, name='close',\n opacity=0.8, marker_color='orange', visible='legendonly'),\n ], 'layout': {'title': {'text': title, 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center', 'yanchor': 'top'},\n 'yaxis_title': \"Value\", 'xaxis_title': xaxis_title\n }}\n\n plot_div = plot(figure, output_type='div')\n return render(request, \"index.html\", context={'plot_div': plot_div})", "def index():\n stocks = []\n username = session.get(\"username\")\n symbol_list = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n cash_balance = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n total_value = cash_balance\n\n for sym in symbol_list:\n symbol = sym[\"stock_symbol\"]\n new_stock = Stock(username, symbol)\n stocks.append(new_stock)\n total_value += new_stock.quantity * new_stock.price\n\n\n return render_template(\"portfolio.html\", stocks = stocks, cash_balance=usd(cash_balance), total_value=usd(total_value))", "def new_stock(request):\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form= StockForm()\n else:\n # POST data submitted; process data.\n form= StockForm(data=request.POST)\n if form.is_valid():\n new_stock= form.save(commit=False)\n new_stock.save()\n return redirect('stock_trackers:stocks')\n #Display a blank or invalid form.\n context= {'form': form}\n return render(request, 'stock_trackers/new_stock.html', context)", "async def stock(self, ctx, ticker: str):\n symbols = await self.bot.aiojson(\"https://api.robinhood.com/quotes/\"\\\n f\"?symbols={ticker.upper()}\")\n if not symbols:\n await ctx.send(\"Stock not found. 
This stock is probably not tradeable on robinhood.\")\n return\n symbols_result = symbols[\"results\"][0]\n instrument = await self.bot.aiojson(symbols_result[\"instrument\"])\n fundamentals = await self.bot.aiojson(\n f\"https://api.robinhood.com/fundamentals/{ticker.upper()}/\")\n\n current_price = (symbols_result[\"last_trade_price\"] if\n \"last_extended_hours_trade_price\" in symbols_result\n else symbols_result[\"last_extended_hours_trade_price\"])\n diff = Decimal(Decimal(current_price) -\n Decimal(symbols_result[\"previous_close\"]))\n percentage = str(100 * diff / Decimal(current_price))[:6]\n\n if not percentage.startswith(\"-\"):\n percentage = \"+\" + percentage\n\n current_price_string = self.format_currency(current_price)\n diff_string = self.format_currency(diff)\n bid_price_string = self.format_currency(Decimal(symbols_result[\"bid_price\"]))\n ask_price_string = self.format_currency(Decimal(symbols_result[\"ask_price\"]))\n tradeable_string = (\n \":white_check_mark:\" if instrument[\"tradeable\"] else \":x:\")\n\n update_timestamp = parser.parse(symbols_result[\"updated_at\"])\n\n symbol = symbols_result[\"symbol\"]\n change_color = await self.get_stock_change_color(symbol)\n\n embed = discord.Embed(title=f\"{symbol}'s stocks info\",\n color=change_color,\n timestamp=update_timestamp)\n\n embed.add_field(name=\"Name\", value=instrument[\"name\"])\n embed.add_field(name=\"Current Price\", value=current_price_string)\n embed.add_field(name=\"Change from yesterday\", value=f\"{diff_string} ({percentage}%)\")\n embed.add_field(name=\"Bid size\", value=f\"{symbols_result['bid_size']} ({bid_price_string})\")\n embed.add_field(name=\"Ask size\", value=f\"{symbols_result['ask_size']} ({ask_price_string})\")\n embed.add_field(name=\"Current Volume\", value=fundamentals[\"volume\"])\n embed.add_field(name=\"Average Volume\", value=fundamentals[\"average_volume\"])\n embed.add_field(name=\"Tradeable on Robinhood\", value=tradeable_string)\n embed.add_field(name=\"Country\", value=f\":flag_{instrument['country'].lower()}:\")\n\n await ctx.send(embed=embed)", "def T_stock(self, V_stock):\n return Stock.T_stock(self, V_stock, self.Q_stock()).to(u.hr)", "def index_view(request):\n\n\t# Create blank form instances.\n\tform = TickerForm()\n\tcrypto_form = CryptoTickerForm()\n\t\n\t# Check if the request method == POST\n\tif request.method == 'POST':\n\t\tpost_data = request.POST or None\n\t\t# Check that ther is data on the request.\n\t\tif post_data != None:\n\t\t\t# Check if the user enters data and the stock ticker form.\n\t\t\tif request.POST.get(\"form_type\") == 'stock_form':\n\t\t\t\tform = TickerForm(request.POST)\n\t\t\t\t# Check if form is valid.\n\t\t\t\tif form.is_valid():\n\t\t\t\t\t# Get the 'ticker' value from the form and store it the ticker variable.\n\t\t\t\t\tticker = form.cleaned_data.get('ticker')\n\t\t\t\t\t# If the variable ticker exists in the users portfolio send error message.\n\t\t\t\t\ttry: \n\t\t\t\t\t\tif request.user.stocks_set.get(ticker=ticker) != None:\n\t\t\t\t\t\t\tmessages.info(request, 'Stock ticker already exists in portfolio.')\n\t\t\t\t\t# Create the Stock Object in the database and link it to the current user.\n\t\t\t\t\texcept Stocks.DoesNotExist:\n\t\t\t\t\t\tStocks.objects.create(\n\t\t\t\t\t\t\tticker = ticker, \n\t\t\t\t\t\t\tuser=request.user)\n\t\t\t\t\t\t# Get the stock that was created from the database.\n\t\t\t\t\t\tcurrent_stock = Stocks.objects.get(ticker=ticker, user=request.user)\n\t\t\t\t\t\t# Get the meta and price 
data\n\t\t\t\t\t\tcurrent_stock_meta_dict = current_stock.get_meta_data()\n\t\t\t\t\t\tcurrent_stock_price_dict = current_stock.get_price_data()\n\t\t\t\t\t\t# Add the highest price for the stock to the meta data dict\n\t\t\t\t\t\tcurrent_stock_meta_dict['high'] = current_stock_price_dict.get('high')\n\t\t\t\t\t\t# Add a ticker variable to meta data incase user enters incorrect ticker and there is no data.\n\t\t\t\t\t\tcurrent_stock_meta_dict['ticker'] = current_stock.ticker\n\t\t\t\t\t\t# Add the meta and price data to the current session\n\t\t\t\t\t\trequest.session['meta_data'][current_stock.ticker] = current_stock_meta_dict\n\t\t\t\t\t\trequest.session['price_data'][current_stock.ticker] = current_stock_price_dict\n\t\t\t\t\t\t# Explicitly save the session\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t# Reset the form instance.\n\t\t\t\t\t\tform = TickerForm()\n\n\n\t\t\t# Check wether the user enters data on the crypto currency ticker form.\n\t\t\telif request.POST.get(\"form_type\") == 'crypto_form':\n\t\t\t\tcrypto_form = CryptoTickerForm(request.POST)\n\t\t\t\tif crypto_form.is_valid():\n\t\t\t\t\tcrypto_ticker = request.POST['crypto_ticker']\n\t\t\t\t\t# If the variable crypto_ticker exists in the users portfolio send error message.\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif request.user.crypto_set.get(crypto_ticker=crypto_ticker) != None:\n\t\t\t\t\t\t\tmessages.info(request, 'Crypto ticker already exists in portfolio.')\n\t\t\t\t\t# Else create the Crypto Object in the database and link it to the current user.\n\t\t\t\t\texcept Crypto.DoesNotExist:\n\t\t\t\t\t\tCrypto.objects.create(\n\t\t\t\t\t\t\tcrypto_ticker = crypto_ticker, \n\t\t\t\t\t\t\tuser=request.user)\n\t\t\t\t\t\t# Get the currently created cryptocurrency ticker\n\t\t\t\t\t\tcurrent_crypto = Crypto.objects.get(crypto_ticker = crypto_ticker, user = request.user)\n\t\t\t\t\t\t# Get the meta data and price data for the current cryptocurrency\n\t\t\t\t\t\tcurrent_crypto_meta_dict = current_crypto.get_crypto_meta_data()\n\t\t\t\t\t\tcurrent_crypto_price_dict = current_crypto.get_crypto_price_data()\n\t\t\t\t\t\t# Add a crypto_ticker variable to meta data incase user enters incorrect ticker and there is no data.\n\t\t\t\t\t\tcurrent_crypto_meta_dict['crypto_ticker'] = current_crypto.crypto_ticker\n\t\t\t\t\t\t# Handle Error for no data on creation of invalid cryptocurrency object\n\t\t\t\t\t\tif len(current_crypto_price_dict) == 0:\n\t\t\t\t\t\t\tcurrent_crypto_price_dict.append({'topOfBookData':[{'lastPrice':'No_Data'}]})\n\n\t\t\t\t\t\t# Add the meta data and price data to the current session\n\t\t\t\t\t\trequest.session['crypto_meta_data'][current_crypto.crypto_ticker] = current_crypto_meta_dict\n\t\t\t\t\t\trequest.session['crypto_price_data_dict'][current_crypto.crypto_ticker] = current_crypto_price_dict\n\t\t\t\t\t\t# Save the session\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t# Reset the crypto_form\n\t\t\t\t\t\tcrypto_form = CryptoTickerForm()\n\t\t\t\t\t\n\n\n\t#Call a list of the users stocks and store it to be passed into the context.\n\tstock_list = request.user.stocks_set.all()\n\tcrypto_list = request.user.crypto_set.all()\n\n\t# Initialse dictionaries to store meta data and price data.\n\tstock_metadata_dict = {}\n\tstock_price_data_dict = {}\n\n\tcrypto_metadata_dict = {}\n\tcrypto_price_data_dict = {}\n\n\t# Loop through users stock and crypto portfolios and add meta and price data to respective dictionaries. 
\n\n\t# Only do this the first time the user logs into the site.\n\tif request.session.get('meta_data') == None:\n\t\tfor stock in stock_list:\n\t\t\tstock_metadata_dict[stock.ticker] = stock.get_meta_data()\n\t\t\tstock_price_data_dict[stock.ticker] = stock.get_price_data()\n\t\t\t# Add stocks highest price data to meta data dict for use on index page.\n\t\t\tstock_metadata_dict[stock.ticker]['high'] = stock_price_data_dict[stock.ticker].get('high')\n\t\t\t# Add a ticker to metadata dict incase user enters incorrect ticker and there is no data returned.\n\t\t\tstock_metadata_dict[stock.ticker]['ticker'] = stock.ticker\n\n\t\tfor crypto in crypto_list:\n\t\t\tcrypto_metadata_dict[crypto.crypto_ticker] = crypto.get_crypto_meta_data()\n\t\t\tcrypto_price_data_dict[crypto.crypto_ticker] = crypto.get_crypto_price_data()\n\t\t\t# Add a crypto_ticker to metadata dict incase user enters incorrect ticker and there is no data returned.\n\t\t\tcrypto_metadata_dict[crypto.crypto_ticker]['crypto_ticker'] = crypto.crypto_ticker\n\t\t\t# Handle error when there is no data recieved for an incorrect ticker.\n\t\t\tif len(crypto_price_data_dict[crypto.crypto_ticker]) == 0:\n\t\t\t\tcrypto_price_data_dict[crypto.crypto_ticker] = [{'topOfBookData':[{'lastPrice':'No Data'}]}]\n\t\n\t\t# Set session variables for meta and price data to be used throughout site.\n\t\trequest.session['meta_data'] = stock_metadata_dict\n\t\trequest.session['price_data'] = stock_price_data_dict\n\n\t\trequest.session['crypto_meta_data'] = crypto_metadata_dict\n\t\trequest.session['crypto_price_data_dict'] = crypto_price_data_dict\n\t\n\tcontext = {\n\t\t'form' : form,\n\t\t'crypto_form' : crypto_form,\n\t}\n\n\treturn render(request, 'index.html', context)", "def stockholm(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'stockholm.html',\n context_instance=RequestContext(request, {})\n )", "def index(request):\n return render(request, 'stock_trackers/index.html')", "def history():\n \"\"\"Show portfolio of stocks\"\"\"\n all_rows = []\n rows = db.execute(\"SELECT * FROM history WHERE id = :id\",\n id=session['user_id'])\n if rows==None or len(rows) < 1:\n return render_template(\"history.html\", all_rows=all_rows)\n else:\n for row in rows:\n share_row = []\n share_row.append(row[\"symbol\"])\n share_row.append(row[\"shares\"])\n share_row.append(usd(row[\"price\"]))\n share_row.append(row[\"transacted\"])\n all_rows.append(share_row)\n return render_template(\"history.html\", all_rows=all_rows)", "def T_stock(self, V_stock):\n return Stock.T_stock(self, V_stock, self._Q_stock).to(u.hr)", "def render_investip():\n\tlinewidth = 2\n\n\tst.sidebar.markdown('# Dashboard')\n\tstock = st.sidebar.selectbox('Stock:', stocks)\n\n\tstartdd = datetime.datetime(2020, 3, 1)\n\tstartdd = st.sidebar.date_input('start-date', value=startdd)\n\n\tendd = datetime.datetime.now()\n\tendd = st.sidebar.date_input('end-date', value=endd)\n\n\tt0 = stock\n\tt0_ohlc = extract(ticker=t0, start_date=startdd, end_date=endd)\n\tt0_df = pd.DataFrame({f'{t0}-Close': t0_ohlc.Close})\n\n\t# st.write(t0_ohlc)\n\tmpf.plot(t0_ohlc, type='candle',volume=True,show_nontrading=False, title=t0, figscale=1.)\n\t# tdf = plot_ticker(t0, df=t0_df, start_date=startdd, end_date=endd)\n\tst.pyplot()\n\n\n\tst.sidebar.markdown('## Stock Correlation')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_corrs')\n\tif stock_returns:\n\t\tst.markdown('## Stock Correlation')\n\t\tstock_selection = 
st.sidebar.multiselect('Stocks', stocks, def_stocks)\n\t\tplot_stock_correlations(stock_selection, startdd, endd)\n\t\tst.pyplot()\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Returns')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_returns')\n\tif stock_returns:\n\t\tst.markdown('## Stock Returns')\n\t\tst.markdown('''### Daily Stock returns\n[EWMA](https://www.investopedia.com/articles/07/ewma.asp)''')\n\t\tspan = st.sidebar.slider('span', 2, 21, value=5)\n\t\tplot_historical(t0, t0_ohlc, span=span, linewidth=linewidth)\n\t\tst.pyplot()\n\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Volatility')\n\ttrading_context = st.sidebar.checkbox('Enable', value=False, key='cb_volatility')\n\tif trading_context:\n\t\tst.markdown('## Volatility & Risk')\n\t\tst.markdown('''### Daily differences between High & Low\nWe model these ranges with [Inverse Gamma PDF](https://en.wikipedia.org/wiki/Inverse-gamma_distribution).\nGreen lines denote +/- 1 stdev.\n''')\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} High-Low Daily')\n\t\tmmd = t0_ohlc.High - t0_ohlc.Low\n\t\t# mmd.dropna(inplace=True)\n\t\tmmd.plot(color='r', ax=ax[0], lw=linewidth)\n\n\t\tmu, sigma = mmd.dropna().mean(), mmd.dropna().std()\n\t\tzval = 1.#96\n\t\t# TODO: try one-tail limit to get outliers\n\t\t_=ax[0].axhline(y=mu, color='k', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu-zval*sigma, color='g', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu+zval*sigma, color='g', lw=linewidth)\n\n\t\tp95 = mmd.dropna().quantile(.95)\n\t\t_=ax[0].axhline(y=p95, color='b', lw=linewidth, label='p95')\n\t\t_=ax[1].axvline(p95, color='b', lw=linewidth, label='p95')\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t print(invgamma.fit(mmd))\n\t\t sns.distplot(mmd, fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(mmd.values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(mu, color='k', label='mean', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\t\tst.markdown('''### Daily Average True Range (ATR)\nImplementation follows [ATR](https://kodify.net/tradingview/indicators/average-true-range/).\nCheck [Investopedia](https://www.investopedia.com/terms/a/atr.asp) for more info.''')\n\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-High-Low': t0_ohlc.High - t0_ohlc.Low,\n\t\t\tf'{t0}-High-PrevCloseAbs': abs(t0_ohlc.High - t0_ohlc.Close.shift(1)),\n\t\t\tf'{t0}-Low-PrevCloseAbs': abs(t0_ohlc.Low - t0_ohlc.Close.shift(1)),\n\t\t}).max(axis=1)\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-true-range': atr_df,\n\t\t})\n\t\tatr_df[f'{t0}-ATR14'] = atr_df.iloc[:, 0].rolling(14).mean()\n\t\t# st.write(atr_df)\n\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} True Range & SMA14')\n\t\tatr_df.plot(ax=ax[0], lw=linewidth)\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t #print(invgamma.fit(f'{t0}-true-range'))\n\t\t sns.distplot(atr_df[f'{t0}-true-range'], fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(atr_df[f'{t0}-true-range'].values[-1], color='b', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(atr_df[f'{t0}-ATR14'].values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\n\n\t# do_strategy_analysis = True\n\tst.sidebar.markdown('## Trading Strategy')\n\tdo_strategy_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_stra')\n\tif 
do_strategy_analysis:\n\t\tst.markdown('## Trading Strategy')\n\t\tst.markdown('[investopedia](https://www.investopedia.com/articles/active-trading/052014/how-use-moving-average-buy-stocks.asp)')\n\t\tshort_window = st.sidebar.slider('short_window', 2, 21, 3)\n\t\tlong_window = st.sidebar.slider('long_window', 3, 50, 5)\n\t\tplot_strategy(t0, t0_df, short_window, long_window)\n\t\tst.pyplot()\n\n\t# do_corr_analysis = False\n\tst.sidebar.markdown('## Correlation analysis')\n\tdo_corr_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_corr')\n\tif do_corr_analysis:\n\t\tst.markdown('## Correlation analysis')\n\t\tt1= 'GC=F' # # SP500 'GC=F'\n\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\tt1 = st.sidebar.selectbox('REF1:', stocks, index=stocks.index(t1))\n\t\tt2 = st.sidebar.selectbox('REF2:', stocks, index=stocks.index(t2))\n\t\tif st.sidebar.button('Reset'):\n\t\t\tt1 = 'GC=F' # # SP500 'GC=F'\n\t\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\t\t# t1 = st.sidebar.selectbox('ref1:', stocks, index=stocks.index(t1))\n\t\t\t# t2 = st.sidebar.selectbox('ref2:', stocks, index=stocks.index(t2))\n\n\t\[email protected](persist=True, show_spinner=False)\n\t\tdef get_dataframes(t1, t2, startdd, endd):\n\t\t\tt1_ohlc = extract(ticker=t1, start_date=startdd, end_date=endd)\n\t\t\tt2_ohlc = extract(ticker=t2, start_date=startdd, end_date=endd)\n\t\t\treturn t1_ohlc, t2_ohlc\n\n\t\tt1_ohlc, t2_ohlc = get_dataframes(t1, t2, startdd, endd)\n\t\tt1_df = pd.DataFrame({f'{t1}-Close': t1_ohlc.Close})\n\t\tt2_df = pd.DataFrame({f'{t2}-Close': t2_ohlc.Close})\n\n\t\t#print(t0_ohlc.shape)\n\t\t#t0_ohlc.head()\n\t\t# print(t1_ohlc.shape)\n\t\t# ticker_ohlc.head()\n\t\t# ticker_ohlc.info()\n\n\t\ttdf = t0_df.join(t1_df).join(t2_df).interpolate().dropna()\n\t\t# tdf.head(10)\n\n\t\t# t0_ohlc.corr(t1_ohlc)\n\t\t#ax = t0_ohlc.Close.plot()\n\t\t#t1_ohlc.Close.plot(ax=ax)\n\n\t\timport numpy as np\n\t\tprint('glocal corrleation1: ', t0_ohlc.Close.corr(t1_ohlc.Close))\n\t\tprint('glocal corrleation2: ', t0_ohlc.Close.corr(t2_ohlc.Close))\n\n\t\tp_window_size = 5\n\t\tr_window_size = 5\n\t\tcentering = False\n\n\n\t\tmodf = lambda x: x\n\t\t#modf = np.log10\n\n\n\t\tmain_stat = f'[{t0}]-mean-roll{p_window_size}'\n\t\talt_stat_1 = f'[{t1}]-mean-roll{p_window_size}'\n\t\talt_stat_2 = f'[{t2}]-mean-roll{p_window_size}'\n\t\t# df_rc = pd.DataFrame({\n\t\t# main_stat : tdf.iloc[:, 0].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_1: tdf.iloc[:, 1].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_2: tdf.iloc[:, 2].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# })\n\t\tcom_val = 0.2\n\t\tdf_rc = pd.DataFrame({\n\t\t main_stat : tdf.iloc[:, 0].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_1: tdf.iloc[:, 1].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_2: tdf.iloc[:, 2].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t})\n\n\t\tdf_rc = df_rc.interpolate()\n\t\tdf_rc[f'[{t0}]-[{t1}]-corr-roll{r_window_size}'] = df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_1])\n\t\tdf_rc[f'[{t0}]-[{t2}]-corr-roll{r_window_size}'] = df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_2])\n\n\t\tf, ax = plt.subplots(3,1,figsize=(16,10),sharex=True)\n\t\t#df_rc.iloc[:,0].plot(ax=ax[0], legend=True)\n\t\tdf_rc.iloc[:,1].plot(ax=ax[0], legend=True, 
color='gold')\n\t\tdf_rc.iloc[:,2].plot(ax=ax[1], legend=True, color='darkred')\n\t\tdf_rc.iloc[:,3].plot(ax=ax[2], legend=True, color='gold')\n\t\tdf_rc.iloc[:,4].plot(ax=ax[2], legend=True, color='darkred')\n\t\tax[2].axhline(y=0, lw=1, color='black')\n\t\t#t0_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[0])\n\t\t#t1_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[1])\n\t\t# ax[0].set(xlabel='Frame',ylabel='Smiling Evidence')\n\t\t# ax[1].set(xlabel='Frame',ylabel='Pearson r')\n\t\t_=plt.suptitle(f\"{t0} Close rolling correlation to {t1}, {t2}\")\n\n\t\tst.pyplot()\n\n\n\t\tf,ax=plt.subplots(1, 2, figsize=(16,8),sharex=False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[1],\n\t\t y=df_rc.columns[2],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=None,\n\t\t ax=ax[0])\n\n\t\tprint(df_rc.columns)\n\t\tnewr_p = df_rc.iloc[-1, 0]\n\t\tt1_p = df_rc.iloc[-1, 1]\n\t\tt2_p = df_rc.iloc[-1, 2]\n\t\tt1_c = df_rc.dropna().iloc[-1, 3]\n\t\tt2_c = df_rc.dropna().iloc[-1, 4]\n\t\tprint('current_corr:', (t1_c, t2_c))\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, 1].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle = plt.Circle((t1_p, t2_p), xradius, color='r', fill=False)\n\t\tax[0].add_artist(circle)\n\t\t#ax[0].set_xlabel(f'GOLD Price {t1_p:.4f}')\n\t\t#ax[0].set_ylabel(f'OIL Price {t2_p:.4f}')\n\t\t# ax[0].legend().set_visible(False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[-2],\n\t\t y=df_rc.columns[-1],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=True,\n\t\t #linestyle=\n\t\t ax=ax[1])\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, -2].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle1 = plt.Circle((t1_c, t2_c), xradius, color='r', fill=False)\n\t\tax[1].add_artist(circle1)\n\t\t#ax[1].set_ylabel('OIL Correlation')\n\t\t#_= ax[1].set_xlabel('GOLD Correlation')\n\n\n\t\tst.pyplot()", "def index():\n # Use a place holder ':curr_id' to call the session id which is the user's id\n rows = db.execute(\"SELECT stocks.symbol, stocks.name, portfolio.shares FROM portfolio JOIN users ON users.id = portfolio.user_id JOIN stocks ON portfolio.stock_id = stocks.id WHERE users.id==:curr_id\", curr_id=session[\"user_id\"])\n # Make a select query only on cash to be able to display it in portfolio's table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:curr_id\", curr_id=session[\"user_id\"])\n\n # gets the current price of each stock queried\n if rows:\n for r in rows:\n r_shares = r[\"shares\"]\n r_symbol = r[\"symbol\"]\n # run lookup function to get current price\n dict_2 = lookup(r_symbol)\n # Adds the key \"price\" and its value to the dictionary \"rows\"\n r[\"price\"] = dict_2[\"price\"]\n # Calculates the grand total (stocks’ total value plus cash)\n total = sum([r[\"price\"]*r[\"shares\"] for r in rows]) + row_cash[0][\"cash\"]\n return render_template(\"portfolio.html\", rows=rows, row_cash=row_cash, total=total)", "def index(request):\n # Get biggest movers\n stock_mover = top_movers()\n\n # Get latest data\n stock_mover_quotes = {}\n for stock in stock_mover:\n all_of_quote = get_current_quote(stock.ticker)\n # Get jUut the fields you need from the result\n stock_mover_quotes[stock.ticker] = {\n k: all_of_quote.get(k, None) 
for k in ('Symbol', 'Name', 'Bid', 'Change', 'PercentChange')}\n\n # XXX messages should be a list of messages of the biggest movers\n messages = list(Message.objects.filter(source=\"twitter\"))[:33]\n messages += list(Message.objects.filter(source=\"stocktwits\"))[:33]\n messages += list(Message.objects.filter(source=\"reddit\"))[:33]\n random.shuffle(messages)\n\n return render(\n request,\n 'index.html',\n {\"streamer\": messages, \"stock_list\": stock_mover_quotes.values()}\n )", "def analyze(request, *args, **kwargs):\n\n mode = 'lines+markers'\n\n tickers = Stock.objects.distinct(\n 'ticker').values_list('ticker', flat=True)\n tickers_dict = {ticker: [] for ticker in tickers}\n tickers_count = tickers.count()\n\n actual_dates = Stock.objects.values('date').annotate(\n dcount=Count('date')).filter(dcount=tickers_count).values_list(\n 'date', flat=True).order_by('date')\n date_list = list(actual_dates)\n\n data = Stock.objects.filter(date__in=actual_dates).order_by('date')\n\n for item in data.values('ticker', 'close', 'oopen'):\n tickers_dict[item['ticker']].append(\n round((item['close']-item['oopen'])*100/item['oopen'], 2)\n )\n\n scatters = [Scatter(x=date_list, y=tickers_dict[obj], mode=mode, name=obj,\n opacity=0.8, visible='legendonly') for obj in tickers_dict]\n figure = {'data': scatters, 'layout': {\n 'title': {\n 'text': 'Open-Closed comparision', 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center','yanchor': 'top'},\n 'yaxis_title': \"Daily percent\",\n 'xaxis_title': \"Years\",\n }}\n\n return render(request, \"analyze.html\", context={\n 'plot_div': plot(figure, output_type='div')})", "def readStockFromOG(self):\n # TODO If not in stock, do not display item\n self.og_stock = pd.read_csv('data/stock.csv')\n self.og_stock.to_csv('data/menu.csv', index = False)", "def sell():\n rows = db.execute(\"SELECT stock_id, shares, stocks.symbol FROM portfolio JOIN stocks ON portfolio.stock_id = stocks.id WHERE user_id==:user_id\", user_id=session[\"user_id\"])\n if request.method==\"GET\":\n return render_template(\"sell.html\", rows=rows)\n else:\n symbol = request.form.get(\"symbol\")\n if symbol==\"None\":\n return apology(\"You must select a symbol\")\n # shares sold will be stored in history table with negative value\n shares = int(request.form.get(\"shares\"))*(-1)\n if abs(shares) > rows[0][\"shares\"]:\n return apology(\"You don't own enough shares\")\n # run lookup function\n dict_4 = lookup(symbol)\n price = dict_4[\"price\"]\n # Insert new transaction in 'history' table\n db.execute(\"INSERT INTO history(user_id, stock_id, price, shares, buy) VALUES(:user_id, :stock_id, :price, :shares, :buy)\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], price=price, shares=shares, buy=0)\n # UPDATE shares in 'portfolio' table\n new_shares = (rows[0][\"shares\"])+shares\n db.execute(\"UPDATE portfolio SET shares==:shares WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], shares=new_shares)\n # Update cash in 'users' table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n new_cash = row_cash[0][\"cash\"]-(price*shares)\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # message to be retrieved in portfolio.html when user sells stock\n flash('Sold!')\n return redirect(\"/\")", "def index():\n user_name = db.execute(\"SELECT username FROM users WHERE id = ?\", session[\"user_id\"])\n check = 
db.execute(\"SELECT name FROM main.sqlite_master WHERE type='table'\")\n #print(check)\n #print('stocks' not in check[0]['name'])\n if not any(c['name'] == 'stocks' for c in check):\n return render_template(\"index.html\", user_name=user_name)\n\n stocks = db.execute(\"SELECT * FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n total_value = user_name[0][\"cash\"]\n sum_stocks = db.execute(\"SELECT symbol, ammount FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n\n for stock in sum_stocks:\n total_value += stock[\"ammount\"] * lookup(stock[\"symbol\"])['price']\n \n #print(stocks)\n return render_template(\"index.html\", stocks=stocks, user_name=user_name, cash=usd(cash[0]['cash']), total_value=usd(total_value))", "def sell():\n username = session.get(\"username\")\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n req_quantity = request.form.get(\"shares\")\n if not req_quantity.isdigit() or int(req_quantity)<=0:\n return apology(\"Quantity must be positive integer\", 400)\n req_quantity = int(req_quantity)\n status = \"sold\"\n\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n owned_stock = db.execute(\"SELECT SUM(quantity) FROM history WHERE username=:username GROUP BY stock_symbol HAVING stock_symbol=:symbol\",\n username=username, symbol=symbol)\n if owned_stock:\n owned_quantity = owned_stock[0][\"SUM(quantity)\"]\n stock = lookup(symbol)\n price = stock[\"price\"]\n name = stock[\"name\"]\n else:\n owned_quantity = 0\n if owned_quantity>=req_quantity:\n total_value = req_quantity * price\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :symbol, :price, :time, :quantity, :name, :status)\",\n username=username, symbol=symbol, price=price, time=time, quantity=-req_quantity, name=name, status=status)\n db.execute(\"UPDATE users SET cash = cash+:total_value WHERE username=:username\",\n total_value=total_value, username=username)\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n message = f\"Recorded sold {req_quantity} share(s) of {name} total {usd(total_value)}, your new cash balance is {usd(cash)}\"\n return render_template(\"sell.html\", message = message)\n else:\n return apology(\"Insufficient shares\", 400)\n # if db.execute()\n else:\n stock_options = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n stock_options = [s[\"stock_symbol\"] for s in stock_options]\n\n # print(f\"Stock options: {stock_options}\")\n return render_template(\"sell.html\", options = stock_options)", "def index():\n\n rows = db.execute(\n 'SELECT symbol, SUM(CASE WHEN operation = \"SELL\" THEN -shares ELSE shares END) shares FROM transactions WHERE id = :id GROUP BY symbol;', id=session['user_id'])\n\n cash = db.execute('SELECT cash FROM users WHERE id = :id', id=session['user_id'])[0]['cash']\n\n grand_total = cash\n\n for row in rows:\n stock = lookup(row['symbol'])\n\n row['name'] = stock['name']\n row['price'] = stock['price']\n row['total'] = row['shares'] * stock['price']\n\n grand_total += row['shares'] * stock['price']\n\n rows.append({\n 'symbol': 'CASH',\n 'cash': cash,\n 'total': grand_total\n })\n\n return render_template('index.html', stocks=rows)", "def 
sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"Symbol\")\n if symbol is None:\n return apology(\"Enter a symbol\", 403)\n shares = request.form.get(\"Shares\")\n if int(shares) < 0:\n return apology(\"Please enter postive shares\", 403)\n\n stock = lookup(symbol)\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n for row in rows:\n if row[\"Symbol\"] == symbol:\n if int(shares) > row[\"totalShares\"]:\n return apology(\"Too many shares\")\n\n rows = db.execute(\"SELECT Cash FROM cash WHERE id=:id\", id=session[\"user_id\"])\n cash = rows[0][\"Cash\"]\n\n current_cash = cash + int(shares)*stock[\"price\"]\n db.execute(\"UPDATE cash SET Cash=:current_cash WHERE id=:id\", current_cash = current_cash, id=session[\"user_id\"])\n db.execute(\"INSERT INTO cash (id, Symbol, Name, Shares) VALUES (:id, :Symbol, :Name, :Shares)\", id=session[\"user_id\"], Symbol=stock[\"symbol\"], Name=stock[\"name\"], Shares=-1*int(shares))\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n rows = db.execute(\"SELECT Symbol FROM cash WHERE id=:id GROUP BY Symbol HAVING SUM(Shares) > 0\", id=session[\"user_id\"])\n # Shorthand for obtaining the symbol for every row in rows. So would output AAPL e.g.\n return render_template(\"sell.html\", symbols=[ row[\"Symbol\"] for row in rows ])", "def viewUserStocks(self, userID):\n message = None\n status = 1\n list = []\n count = 1\n for (a, b, c, d) in self.db.select_all_user_stocks(userID):\n list.append('{}) <b>{}:{}</b>: {}'.format(count, c, b, d))\n count += 1\n message = \"{}, here's the list of stocks you saved:\\n\\n\".format(\n userID) + \"\\n\".join(list)\n if not list:\n message = \"It appears you do not have any stocks saved. 
You can save a stock by selecting <b>Add a stock</b>.\"\n status = 0\n return (message, status)", "def index():\n #if request.method == \"GET\":\n #Выбрать знак акции,и кол-во акции которые пренадлежат id\n #stocks_shares = db.execute(\"SELECT symbol, shares FROM total WHERE id=:id ORDER BY symbol\",\n #id=session[\"user_id\"])\n #return render_template(\"index.html\")\n #return redirect(url_for(\"index.html\"))\n return apology(\"TODO\")", "def list(self, request):\n\n stocks = Stock.objects.all()\n serializer = StockSerializer(stocks, many=True)\n return Response(serializer.data, status.HTTP_200_OK)", "def sell():\n if request.method == \"POST\":\n current_user = session[\"user_id\"]\n\n\n if not request.form.get(\"sell_amount\"):\n return apology(\"Must provide a number to sell\", 403)\n\n stock_to_sell= request.form.get(\"stock_to_sell\")\n sell_amount= int(request.form.get(\"sell_amount\"))\n\n current_stocks = db.execute(\"SELECT volume FROM portfolio WHERE id = :id AND stock_symbol=:stock_symbol\", id=current_user, stock_symbol=stock_to_sell)\n # current_stocks=db.execute(\"SELECT volume FROM portfolio WHERE id= :id AND stock_symbol= :stock_symbol\", id=current_user, stock_symbol=stock_to_sell)\n\n\n\n if not current_stocks:\n return apology(\"You do not own any stocks, try refreshing the sell page\")\n\n current_volume = current_stocks[0][\"volume\"]\n current_volume = int(current_volume)\n\n if current_volume < int(request.form.get(\"sell_amount\")):\n return apology(\"Attempting to sell more shares than you own\", 403)\n\n lookedup=[]\n lookedup=lookup(request.form.get(\"stock_to_sell\"))\n if not lookedup:\n return apology(\"Unable to lookup stock info.\")\n\n stock_name = lookedup.get(\"name\")\n stock_price = lookedup.get(\"price\")\n stock_symbol = lookedup.get(\"symbol\")\n\n\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=current_user)\n # see if properly selecting cash amount\n if not user_cash:\n return apology(\"Didn't find user's current balance\", 000)\n # update user total cash\n current_cash = user_cash[0][\"cash\"]\n current_cash = int(current_cash)\n total_revenue = sell_amount * stock_price\n new_balance = current_cash + total_revenue\n db.execute(\"UPDATE users SET cash = :new_balance WHERE id = :id\", new_balance=new_balance, id=current_user)\n\n # update portfolio\n new_volume=0\n new_volume=current_volume-sell_amount\n db.execute(\"UPDATE portfolio SET volume = :new_volume WHERE id = :id AND stock_symbol = :stock_symbol\", new_volume=new_volume, id=current_user, stock_symbol=stock_symbol)\n\n # update sales database\n db.execute(\"INSERT INTO sales (id,stock_symbol,volume_sold,price,date_sold) VALUES(:id,:symbol,:amount,:price,datetime('now'))\", id=current_user, symbol=stock_symbol, amount=sell_amount, price=stock_price)\n\n\n return render_template(\"sold.html\",stock_name=stock_name, stock_price=stock_price, stock_symbol=stock_symbol,shares_to_sell=sell_amount, total_value=total_revenue)\n\n\n else:\n current_user = session[\"user_id\"]\n current_stocks=db.execute(\"SELECT stock_symbol, volume FROM portfolio WHERE id = :id\", id=current_user)\n if not current_stocks:\n return apology(\"You do not own any stocks\")\n return render_template(\"sell.html\",current_stocks=current_stocks)\n # return apology(\"i suck at selling?\")", "def index():\n holdings = db.execute(\"SELECT symbol, amount FROM stocks WHERE stocks.user_id = :userid AND amount != 0\", userid = session[\"user_id\"])\n total = 0\n\n for row in holdings:\n sDict = 
lookup(row['symbol'])\n row['name'] = sDict['name']\n row['share_total'] = sDict['price'] * row['amount']\n total += row['share_total']\n\n row['price'] = usd(sDict['price'])\n row['share_total'] = usd(row['share_total'])\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session[\"user_id\"])\n cash = cash[0][\"cash\"]\n total += cash\n\n return render_template(\"index.html\", holdings=holdings, cash=usd(cash), total=usd(total))", "def index():\n\n # Get user\n user = session[\"user_id\"]\n\n # Query infos from database\n rows = db.execute(\"SELECT * FROM stocks WHERE user_id = :user\", user=user)\n cash = db.execute(\"SELECT cash FROM users WHERE id = :user\", user=user)[0]['cash']\n total_cash = cash\n\n # Populate stocks list wit hstock data\n stocks = []\n for index, row in enumerate(rows):\n stock_data = lookup(row['symbol'])\n stock_data['amount'] = row['amount']\n stock_data['quantity'] = round(stock_data['price'] * stock_data['amount'], 2)\n\n # Generate index table data\n stocks.append(list((\n stock_data['symbol'],\n stock_data['name'],\n stock_data['amount'],\n stock_data['price'],\n stock_data['quantity']\n )))\n total_cash += stocks[index][4]\n\n return render_template(\"index.html\", stocks=stocks, cash=round(cash, 2), total=round(total_cash, 2))" ]
[ "0.62709934", "0.62612647", "0.62485635", "0.60026693", "0.592461", "0.59138197", "0.58526856", "0.57856214", "0.57244945", "0.5683895", "0.56825393", "0.56317514", "0.56277454", "0.5605878", "0.55981827", "0.5585482", "0.5584243", "0.55689496", "0.55596864", "0.55541444", "0.5500225", "0.549561", "0.547753", "0.5448342", "0.5445924", "0.54162544", "0.54143435", "0.5404552", "0.5398289", "0.539638" ]
0.7422457
0
Allows Users to create a new Watchlist
def create_watchlist(request): # Below loop seems very inefficient, since it querys the database # TODO: See if the below loop can be optimized, or another solution # is available. # Get the current highest watchList_id try: watchlist = WatchList.objects.latest() watchlist_id = watchlist.watchList_id + 1 except: watchlist_id = 0 if request.method == 'POST': form = CreateWatchListForm(request.POST) if form.is_valid(): new_watchlist = form.save(commit=False) new_watchlist.user = request.user new_watchlist.watchList_id = watchlist_id new_watchlist.save() form.save_m2m() return redirect('edit_watchlist', w_id=watchlist_id) else: # Create a blank form otherwise form = CreateWatchListForm() context = { 'form': form, 'year': datetime.now().year, } return render( request, 'app/create_watchlist.html', context, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_watchlist(name, user_id):\n watchlist = Watchlist(\n name = name, \n user_id = user_id\n )\n \n db.session.add(watchlist)\n db.session.commit()\n\n return watchlist", "def __init__(__self__, *,\n watchlist_id: pulumi.Input[str],\n watchlist_item_pair: pulumi.Input[Mapping[str, Any]],\n created_by: Optional[pulumi.Input['UserInfoArgs']] = None,\n created_time_utc: Optional[pulumi.Input[str]] = None,\n entity_mapping: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n last_updated_time_utc: Optional[pulumi.Input[str]] = None,\n tenant_id: Optional[pulumi.Input[str]] = None,\n time_to_live_utc: Optional[pulumi.Input[str]] = None,\n updated_by: Optional[pulumi.Input['UserInfoArgs']] = None,\n watchlist_item_name: Optional[pulumi.Input[str]] = None,\n watchlist_item_type: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"watchlist_id\", watchlist_id)\n pulumi.set(__self__, \"watchlist_item_pair\", watchlist_item_pair)\n if created_by is not None:\n pulumi.set(__self__, \"created_by\", created_by)\n if created_time_utc is not None:\n pulumi.set(__self__, \"created_time_utc\", created_time_utc)\n if entity_mapping is not None:\n pulumi.set(__self__, \"entity_mapping\", entity_mapping)\n if last_updated_time_utc is not None:\n pulumi.set(__self__, \"last_updated_time_utc\", last_updated_time_utc)\n if tenant_id is not None:\n pulumi.set(__self__, \"tenant_id\", tenant_id)\n if time_to_live_utc is not None:\n pulumi.set(__self__, \"time_to_live_utc\", time_to_live_utc)\n if updated_by is not None:\n pulumi.set(__self__, \"updated_by\", updated_by)\n if watchlist_item_name is not None:\n pulumi.set(__self__, \"watchlist_item_name\", watchlist_item_name)\n if watchlist_item_type is not None:\n pulumi.set(__self__, \"watchlist_item_type\", watchlist_item_type)", "def add_list(user_id):\n\n list_title = request.form[\"list_title\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n if to_do_list:\n flash(\"List name already exists. 
Please select a new name.\")\n return redirect(\"/dashboard\")\n\n new_list = ToDoList(list_title=list_title, user_id=user_id)\n \n db.session.add(new_list)\n db.session.commit()\n \n return redirect(\"/dashboard\")", "def wants_new(self, name):\n self.wants[name] = []\n self.db.wants_new(name)\n util.log(\"Want list '\" + name + \"' created\", util.LogLevel.Info)\n self.push_status(\"Created want list '\" + name + \"'\")", "def create(self):\n ...", "def create(self, *args, **kwargs):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create():\n pass", "def create(self):\n\n pass", "def create():", "def create():", "def create(self):", "def post(self, dnzo_user):\n from tasks_data.task_lists import add_task_list, get_task_list\n \n task_list_name = self.request.get('task_list_name', None)\n if not task_list_name:\n self.bad_request(\"Must provide task_list_name to create a new list\")\n return\n \n new_list = add_task_list(dnzo_user, task_list_name)\n if not new_list:\n self.bad_request(\"Could not add the new task list!\")\n return\n \n self.json_response(task_list=new_list.to_dict())", "def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')", "def create_work_item(self):", "def manage_watchlists(request):\n assert isinstance(request, HttpRequest)\n\n try:\n # Handling deleting Watchlist(s)\n if request.method == \"POST\":\n # Get list of selected checkmark boxes\n watchlists_to_delete = request.POST.getlist('delWatchListId')\n for watchlist_id in watchlists_to_delete:\n # Grab the specified watchlist from the database\n watchlist = WatchList.objects.filter(user=request.user, watchList_id=watchlist_id).get()\n watchlist.delete()\n # User just loads this page again, so grab their (newly updated) WatchLists\n watchlists = WatchList.objects.filter(user=request.user).all() \n except WatchList.DoesNotExist:\n watchlists = []\n\n context = {\n 'title': 'Manage Watchlists',\n 'year': datetime.now().year,\n 'user': request.user,\n 'watchlists': watchlists,\n }\n\n return render(\n request,\n 'app/manage_watchlists.html',\n context,\n )", "def create_toptenlist(self, user_ref, index):\n self.client.force_authenticate(user=getattr(self, user_ref))\n response = self.client.post(create_list_url, toptenlist_data_1, format='json')\n toptenlist_id = json.loads(response.content)['id']\n\n toptenlist_ref = 'toptenlist_' + str(index) # refer to toptenlist by self.toptenlist_1 etc\n\n # this allows us to reference the originial toptenlist from self\n # self.toptenlist_1 etc\n # this is not safe for properties like name, but is safe for getting toptenlist and toptenitem id because these do not change\n setattr(self, toptenlist_ref, TopTenList.objects.get(pk=toptenlist_id))\n\n # the request should succeed\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.client.logout()", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not 
None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "async def add(self, request: AddToWatchlistRequest) -> WatchlistResponse:\n return await self._modify_watchlist(request)", "def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')", "def createNewList(self, args): \n print(args)\n listID = args[\"thread_id\"]\n print(listID) \n for l in self.lists: \n print(\"Comparing with \" + l.name)\n if l.name == listID: \n return \">> This chat is already added!\"\n self.board.add_list(str(listID)) \n return \">> Now listening to this chat!\"", "def addToWatchlist(self, items):\n if not isinstance(items, list):\n items = [items]\n\n for item in items:\n if self.onWatchlist(item):\n raise BadRequest(f'\"{item.title}\" is already on the watchlist')\n ratingKey = item.guid.rsplit('/', 1)[-1]\n self.query(f'{self.METADATA}/actions/addToWatchlist?ratingKey={ratingKey}', method=self._session.put)\n return self", "def create_watch(kls, id, email, event_type, locale=''):\n\n if id != None and not kls.objects.filter(pk=id).exists():\n raise kls.DoesNotExist\n\n ct = ContentType.objects.get_for_model(kls)\n try:\n e = EventWatch(content_type=ct, watch_id=id, email=email,\n event_type=event_type, locale=locale)\n e.save()\n return True\n except IntegrityError:\n return False", "def create(self, **kwa):\n return []", "def create_new_entry(client, list_id, mail_addr, merge_fields, tags):\n try:\n response = client.lists.add_list_member(list_id,\n {\"email_address\": mail_addr, \"status\": \"subscribed\", \"tags\": tags,\n \"merge_fields\": merge_fields})\n print(response)\n except ApiClientError as error:\n print(\"Error on mail address {}: {}\".format(mail_addr, error.text))", "def add_list(self):\n the_list = models.List(user_id=1,\n list_name=self.test_list,\n description=self.test_list_desc)\n the_list.add()", "def create_item(self, user: User, **kwargs) -> None:", "def users_create():" ]
[ "0.7325427", "0.6267545", "0.6190541", "0.6150246", "0.59611124", "0.5956357", "0.594649", "0.594649", "0.594649", "0.59319955", "0.5924205", "0.5896398", "0.5896398", "0.58567035", "0.5854901", "0.58541435", "0.5847053", "0.58465815", "0.58413273", "0.5820835", "0.5816319", "0.5751812", "0.5724901", "0.572166", "0.57144904", "0.5696632", "0.56669235", "0.56470156", "0.5644917", "0.5638037" ]
0.723283
1
Deletes a User's Watchlist where watchList_id=id. Redirects to /watchlists/ after execution.
def delete_watchlist(request, w_id): try: watchlist = WatchList.objects.filter(user=request.user).get(watchList_id=w_id) except WatchList.DoesNotExist: return redirect('watchlists') watchlist.delete() return redirect('watchlists')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manage_watchlists(request):\n assert isinstance(request, HttpRequest)\n\n try:\n # Handling deleting Watchlist(s)\n if request.method == \"POST\":\n # Get list of selected checkmark boxes\n watchlists_to_delete = request.POST.getlist('delWatchListId')\n for watchlist_id in watchlists_to_delete:\n # Grab the specified watchlist from the database\n watchlist = WatchList.objects.filter(user=request.user, watchList_id=watchlist_id).get()\n watchlist.delete()\n # User just loads this page again, so grab their (newly updated) WatchLists\n watchlists = WatchList.objects.filter(user=request.user).all() \n except WatchList.DoesNotExist:\n watchlists = []\n\n context = {\n 'title': 'Manage Watchlists',\n 'year': datetime.now().year,\n 'user': request.user,\n 'watchlists': watchlists,\n }\n\n return render(\n request,\n 'app/manage_watchlists.html',\n context,\n )", "def remove(id):\n q = User.delete().where(User.id == id)\n try:\n q.execute()\n except Exception as e:\n return e\n return redirect(url_for('db'))", "def delete(self, dnzo_user, task_list):\n from tasks_data.task_lists import delete_task_list\n if dnzo_user.lists_count <= 1:\n self.bad_request(\"User only has one list; cannot delete the last list.\")\n return\n \n delete_task_list(dnzo_user, task_list)\n self.json_response(task_list=task_list.to_dict())", "def delete(request, shoppinglist_id):\n Shoppinglist.objects.filter(pk=shoppinglist_id,\n pantry__owner=request.user).delete()\n return redirect('blackem.users.views.home')", "async def remove(self, request: RemoveFromWatchlistRequest) -> WatchlistResponse:\n return await self._modify_watchlist(request)", "def delete_single_list(current_user, id):\n\n try:\n int(id)\n except ValueError:\n return response('failed', 'Please provide a valid ShoppingList Id', 400)\n else:\n shoplist = ShoppingList.query.filter_by(user_id=current_user.id, id=id).first()\n if shoplist is not None:\n db.session.delete(shoplist)\n db.session.commit()\n return response('success', 'Shopping list has been deleted', 200)\n return response('failed', 'Shopping list not found', 404)\n\n\n\n # decorator used to allow cross origin requests", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", "def destroy_wishlist(request):\n wishlist = Wishlist.objects.get(pk=request.POST.get('id'))\n title = wishlist.name\n if wishlist.customer != request.user:\n request.user.message_set.create(message=\"You can't delete wishlists that aren't yours!\")\n return HttpResponseRedirect(reverse(\"get_wishlist\"))\n\n if request.method == \"POST\" and request.POST[\"action\"] == \"delete\":\n wishlist.delete()\n request.user.message_set.create(message=_(\"Successfully deleted wishlist '%s'\") % title)\n return HttpResponseRedirect(reverse(\"get_wishlist\"))", "def remove_stock_from_watchlist(stock_id, watchlist_id):\n WatchlistStock.query.filter(WatchlistStock.stock_id == stock_id, WatchlistStock.watchlist_id == watchlist_id).delete()\n\n db.session.commit()\n\n \"\"\"\n DELETE FROM watchlist_stock\n WHERE watchlist_stock.stock_id = stock_id(python)\n AND watchlist_stock.watchlist_id = watchlist_id(python)\n \"\"\"\n\n # watchlist_stock = WatchlistStock.query.get(stock_id)\n # watchlist = WatchlistStock.query.get(watchlist_id)\n\n # db.session.delete(watchlist_stock)\n # db.session.delete(watchlist)\n # db.session.commit()", "def deleteUser(self, userList, index):\n\n if(self.adminAccess):\n ret = userList.pop(index)\n print(\"User has been deleted\")\n \n return userList", "def rem_list(action, user):\n 
\n try:\n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n l.delete()\n \n # Remove\n userprofile = user.get_profile()\n board = userprofile.get_board(action['boardId'])\n board.lists.remove(action['listId'])\n userprofile.save()\n except:\n # the list or the board doesn't exist.\n pass", "def delete_friend(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.friends.remove(friend)\n friend.profile.friends.remove(user)\n messages.success(\n request,\n 'User deleted from your friends list'\n )\n return redirect('profiles:profile')", "def remove_list(list_title):\n\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n db.session.delete(to_do_list)\n db.session.commit()\n \n return \"OK\"", "def delete_user(id):\n pass", "def delete(self, user, id):\n # Search for bucketlist\n print (id)\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n bucketlist.delete()\n\n return \"Successfully deleted bucketlist\", 200", "def delete(self, user_id):\r\n return delete_user(request, user_id)", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect('/')", "def delete_user(user_id):\n\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n self.object = self.get_object()\n success_url = self.get_success_url()\n success_message = _(f'Successfully deleted todo list: {self.object}')\n\n self.object.delete()\n\n messages.success(request=request, message=success_message)\n\n return redirect(success_url)", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def task_delete(request, tasklist_id):\n tasklist = get_object_or_404(Todo, pk=tasklist_id)\n tasklist.delete()\n print(tasklist)\n messages.success(request, \"Successfully deleted\")\n return redirect('lists:alllist')", "def delete_notification(request, noti_id):\n user = request.user\n Notification.objects.filter(id=noti_id, user=user).delete()\n return redirect('show_notifications')", "def delete_user(self, _id):\n return self.make_request(\"DELETE\", \"users/\"+_id, {})", "def delete_user_process(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n db.session.delete(db_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user():", "def delete_admin(request, username, d):\n a = AdminUser.objects.get(username__exact=d)\n a.delete()\n print('deleted')\n return redirect('list_users', username)", "def destroy_watch(kls, id, email, event_type=None, locale=''):\n\n ct = ContentType.objects.get_for_model(kls)\n\n kwargs = {'content_type': ct, 'watch_id': id, 'email': email,\n 'locale': locale}\n if event_type:\n kwargs['event_type'] = event_type\n w = EventWatch.objects.filter(**kwargs)\n count = w.count()\n w.delete()\n return count > 0" ]
[ "0.6410832", "0.62330365", "0.61755645", "0.61663187", "0.6013739", "0.592736", "0.58316094", "0.57497805", "0.57326704", "0.56883246", "0.564724", "0.56429976", "0.5639883", "0.56356984", "0.5621058", "0.56124485", "0.560047", "0.55856293", "0.5584695", "0.5572463", "0.55544394", "0.55385536", "0.5537941", "0.5524896", "0.5513125", "0.5513125", "0.5513125", "0.5496112", "0.54881096", "0.54855186" ]
0.8551365
0
Allows Users to create and delete Watchlists from their account. Direct implementation of the ManageWatchlistView.
def manage_watchlists(request): assert isinstance(request, HttpRequest) try: # Handling deleting Watchlist(s) if request.method == "POST": # Get list of selected checkmark boxes watchlists_to_delete = request.POST.getlist('delWatchListId') for watchlist_id in watchlists_to_delete: # Grab the specified watchlist from the database watchlist = WatchList.objects.filter(user=request.user, watchList_id=watchlist_id).get() watchlist.delete() # User just loads this page again, so grab their (newly updated) WatchLists watchlists = WatchList.objects.filter(user=request.user).all() except WatchList.DoesNotExist: watchlists = [] context = { 'title': 'Manage Watchlists', 'year': datetime.now().year, 'user': request.user, 'watchlists': watchlists, } return render( request, 'app/manage_watchlists.html', context, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_watchlist(request, w_id):\n try:\n watchlist = WatchList.objects.filter(user=request.user).get(watchList_id=w_id)\n except WatchList.DoesNotExist:\n return redirect('watchlists')\n watchlist.delete()\n return redirect('watchlists')", "def create_watchlist(request):\n\n # Below loop seems very inefficient, since it querys the database\n # TODO: See if the below loop can be optimized, or another solution\n # is available.\n\n # Get the current highest watchList_id\n try:\n watchlist = WatchList.objects.latest()\n watchlist_id = watchlist.watchList_id + 1\n\n except:\n watchlist_id = 0\n\n if request.method == 'POST':\n form = CreateWatchListForm(request.POST)\n if form.is_valid():\n new_watchlist = form.save(commit=False)\n new_watchlist.user = request.user\n new_watchlist.watchList_id = watchlist_id\n new_watchlist.save()\n form.save_m2m()\n return redirect('edit_watchlist', w_id=watchlist_id)\n\n else:\n # Create a blank form otherwise\n form = CreateWatchListForm()\n\n context = {\n 'form': form,\n 'year': datetime.now().year,\n }\n\n return render(\n request,\n 'app/create_watchlist.html',\n context,\n )", "def edit_watchlist(request, w_id):\n assert isinstance(request, HttpRequest)\n\n # Get the user's watchlist that matches the provided id\n try:\n watchlist = WatchList.objects.filter(user=request.user, watchList_id=w_id).get()\n watchlist_id = watchlist.watchList_id\n watchlist_name = watchlist.watchList_name\n\n stocks = []\n\n except WatchList.DoesNotExist:\n # Given an invalid watchlist id\n # For now, redirect to the watchlists page\n # TODO: Possibly change the behaviour of invalid ids (maybe \n # show a message on the watchlists page that a watchlist \n # with the given id doesn't exist?)\n return redirect('watchlists')\n\n if request.method == 'POST':\n form = EditWatchListForm(request.POST, instance=watchlist)\n if form.is_valid:\n updated_watchlist = form.save(commit=False)\n updated_watchlist.watchList_id = watchlist_id\n updated_watchlist.user = request.user\n updated_watchlist.name = watchlist_name\n updated_watchlist.save()\n form.save_m2m()\n else:\n\n form = EditWatchListForm(instance=watchlist)\n\n for stock in watchlist.stockResults.all():\n stocks.append(stock)\n\n print(f'Stocks: {stocks}')\n\n context = {\n 'title': 'Edit Watchlist',\n 'year': datetime.now().year,\n 'user': request.user,\n 'watchlist_id': watchlist_id,\n 'watchlist_name': watchlist_name,\n 'stocks': stocks,\n 'form': form,\n }\n\n return render(\n request,\n 'app/edit_watchlist.html',\n context\n )", "def watchlists(request):\n \"\"\"\n Alternative to @login_required decorator: manually test with:\n request.user.is_authenticated\n \"\"\"\n assert isinstance(request, HttpRequest)\n\n # Get all of the user's watchlists\n watchlists = WatchList.objects.filter(user=request.user).all()\n \n # Store the stocks in each watchlist in a dictionary\n # Each key is the watchList_name from the user's watchlists\n # Each value is a list of Stocks (as StockList model objects) \n # present in the watchlist\n stocks = []\n counter = 0\n\n for w in watchlists:\n stocks.append([])\n for stock in w.stockResults.all():\n # No need to check if key is in the dict, since \n # it is added above\n stocks[counter].append(stock)\n counter += 1\n\n print(f'Watchlists:{watchlists}\\tStocks:{stocks}')\n\n if watchlists.count() != 0 and len(stocks) != 0:\n watchlist_stocks = zip(watchlists, stocks)\n else:\n watchlist_stocks = None\n\n context = {\n 'title':'Watchlists',\n 'message':'Your Watchlist page.',\n 
'year':datetime.now().year,\n 'user': request.user,\n 'data': watchlist_stocks,\n }\n\n return render(\n request,\n 'app/watchlists.html',\n context\n )", "def create_watchlist(name, user_id):\n watchlist = Watchlist(\n name = name, \n user_id = user_id\n )\n \n db.session.add(watchlist)\n db.session.commit()\n\n return watchlist", "def watch_namespaced_user_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_user_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/users'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def access_list(list_id):\n\n to_do_list = ToDoList.query.get(list_id)\n\n shared_lists = SharedUsersLists.query.filter(SharedUsersLists.shared_list_id==to_do_list.list_id)\n num_users = len({shared_list.shared_user_id for shared_list in shared_lists})\n\n return render_template(\"items.html\", \n to_do_list=to_do_list,\n num_users=num_users)", "def add_list(user_id):\n\n list_title = request.form[\"list_title\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n if to_do_list:\n flash(\"List name already exists. Please select a new name.\")\n return redirect(\"/dashboard\")\n\n new_list = ToDoList(list_title=list_title, user_id=user_id)\n \n db.session.add(new_list)\n db.session.commit()\n \n return redirect(\"/dashboard\")", "def share_lists(list_id):\n\n email = request.form[\"email\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n shared_user = User.query.filter_by(email=email).first()\n\n if not shared_user:\n flash(\"No user found. 
Please enter a valid email.\")\n return redirect(f\"/lists/{list_id}\")\n\n shared_user.shared_lists.append(to_do_list)\n flash(f\"{shared_user.name} has now been added to the list!\")\n db.session.add(shared_user)\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def user_list_update(self):\n\t\tclient_log.debug(f'Запрос списка известных пользователей {self.username}')\n\t\treq = {\n\t\t\tACTION: USERS_REQUEST,\n\t\t\tTIME: time.time(),\n\t\t\tACCOUNT_NAME: self.username\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tself.database.add_users(ans[LIST_INFO])\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список известных пользователей.')", "async def _modify_watchlist(\n self, request: Union[AddToWatchlistRequest, RemoveFromWatchlistRequest]\n ) -> WatchlistResponse:\n product = await self._client.products.get(request.symbol)\n\n assert product\n payload = {\n \"instrumentID\": str(product.id),\n \"userID\": str(self._client.user.id),\n \"watching\": request.watching,\n }\n data = await self._client.post(Url.watchlist_modify, payload=payload)\n\n return WatchlistResponse(symbol=request.symbol, watching=data[\"watching\"])", "def view_list():\n # an HTML representation of the user shopping list\n printed_list = user[\"name\"]\n printed_list += \"<form>\"\n printed_list += '<br>'\n printed_list += 'New Item:<br>'\n printed_list += '<input type=\"text\" name=\"newitem\">'\n printed_list += '<br>'\n printed_list += '<input type=\"submit\" value=\"Submit\">'\n printed_list += \"</form>\"\n printed_list += list_to_html(user[\"shopping_list\"])\n\n return printed_list", "def GetListCtrl(self):\n\n return self", "def __init__(self):\n\t\t\n\t\t# Initialise default list.\n\t\t# TODO: Support multiple lists\n\t\tself.todolist = TodoList(json_folder + 'todo.json')\n\n\t\t# Start of multiple lists support. 
Currently unused\n\t\tfor infile in glob.glob( os.path.join(json_folder, '*.json') ):\n\t\t\tlistname = infile.replace(json_folder, '').replace('.json', '')\n\t\t\tself.lists[listname] = TodoList(infile)\n\t\t\tprint 'Found:', infile.replace(json_folder, '').replace('.json', '')\n\n\t\tself.window = gtk.Window(gtk.WINDOW_TOPLEVEL)\n\t\tself.window.set_title('Simple Python Todo')\n\t\n\t\tsend_button = gtk.Button(None, gtk.STOCK_ADD)\n\t\tsend_button.connect('clicked', self.send_button_clicked)\n\n\t\tvbox = gtk.VBox(False, 10)\n\t\tvbox.set_border_width(10)\n\n\t\thbox_note_area = gtk.HBox(False, 0)\n\t\thbox_send_area = gtk.HBox(False, 0)\n\n\t\tself.current_list = ListPanel(self.todolist)\n\t\tsw_display = self.current_list.sw\n\t\thbox_note_area.pack_start(sw_display)\n\n\t\t# Set up the text view for adding new notes\n\t\tsw_add = gtk.ScrolledWindow()\n\t\tsw_add.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\t\t\n\t\tself.textview_add = self.create_text_view(230, 80)\n\t\tself.textview_add.connect('key_press_event', self.textbox_key_pressed)\n\t\tsw_add.add(self.textview_add)\n\t\t\n\t\thbox_send_area.pack_start(sw_add)\n\t\thbox_send_area.pack_start(send_button)\n\n\n\t\t# Organise the boxes\n\t\tvbox.pack_start(hbox_note_area)\n\t\thbox_note_area.show()\n\t\tvbox.pack_start(hbox_send_area)\n\t\thbox_send_area.show()\n\t\tself.window.add(vbox)\n\n\t\tself.window.connect('delete_event', self.delete_event)\n\t\tself.window.show_all()", "def changelist_view(self, request, extra_context=None):\n if request.user.has_perm('deflect.list_all'):\n self.list_filter = self._list_filter + ('creator__username',)\n self.list_display = self._list_display + ('creator',)\n else:\n self.list_filter = self._list_filter\n self.list_display = self._list_display\n return super(ShortURLAdmin, self).changelist_view(request, extra_context=extra_context)", "def see_tasks(self, widget):\n my_task_list = tasklistwindow.TaskListWindow(self.task_list)", "def update_list_view(self):\n # Clear the list/tree view.\n self.list_view.clear()\n\n # Find all the selected things in Maya.\n selected = cmds.ls(selection=True)\n\n # For each of the selected things, create a widget item.\n for thing in selected:\n item = QtGui.QListWidgetItem(thing)\n item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)\n self.list_view.addItem(item)\n # Set the flags on the widget item so it is editable.", "def addToWatchlist(self, items):\n if not isinstance(items, list):\n items = [items]\n\n for item in items:\n if self.onWatchlist(item):\n raise BadRequest(f'\"{item.title}\" is already on the watchlist')\n ratingKey = item.guid.rsplit('/', 1)[-1]\n self.query(f'{self.METADATA}/actions/addToWatchlist?ratingKey={ratingKey}', method=self._session.put)\n return self", "def get_watchlists(user_id):\n # user = User.query.get(user_id)\n\n watchlists = Watchlist.query.filter(Watchlist.user_id == user_id).all()\n\n return watchlists", "def watch_template_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_template_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/templates'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = 
params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def add_list(request) -> HttpResponse:\n\n # Only staffers can add lists, regardless of TODO_STAFF_USER setting.\n if not request.user.is_staff:\n raise PermissionDenied\n\n if request.POST:\n form = AddTaskListForm(request.user, request.POST)\n if form.is_valid():\n try:\n newlist = form.save(commit=False)\n newlist.slug = slugify(newlist.name, allow_unicode=True)\n newlist.save()\n messages.success(request, \"A new list has been added.\")\n return redirect(\"todo:lists\")\n\n except IntegrityError:\n messages.warning(\n request,\n \"There was a problem saving the new list. \"\n \"Most likely a list with the same name in the same group already exists.\",\n )\n else:\n if request.user.groups.all().count() == 1:\n # FIXME: Assuming first of user's groups here; better to prompt for group\n form = AddTaskListForm(request.user, initial={\"group\": request.user.groups.all()[0]})\n else:\n form = AddTaskListForm(request.user)\n\n context = {\"form\": form}\n\n return render(request, \"todo/add_list.html\", context)", "def changelist_view(self, request, extra_context=None):\n from django.contrib.admin.views.main import ERROR_FLAG\n opts = self.model._meta\n app_label = opts.app_label\n if not self.has_change_permission(request, None):\n raise PermissionDenied\n\n list_display = self.get_list_display(request)\n list_display_links = self.get_list_display_links(request, list_display)\n list_filter = self.get_list_filter(request)\n search_fields = self.get_search_fields(request)\n list_select_related = self.get_list_select_related(request)\n\n # Check actions to see if any are available on this changelist\n actions = self.get_actions(request)\n if actions:\n # Add the action checkboxes if there are any actions available.\n list_display = ['action_checkbox'] + list(list_display)\n\n ChangeList = self.get_changelist(request)\n try:\n cl = ChangeList(\n request, self.model, list_display,\n list_display_links, list_filter, self.date_hierarchy,\n search_fields, list_select_related, self.list_per_page,\n self.list_max_show_all, self.list_editable, \n self, self.list_display_mobile, \n )\n except IncorrectLookupParameters:\n # Wacky lookup parameters were given, so redirect to the main\n # changelist page, without parameters, and pass an 'invalid=1'\n # parameter via the query string. 
If wacky parameters were given\n # and the 'invalid=1' parameter was already in the query string,\n # something is screwed up with the database, so display an error\n # page.\n if ERROR_FLAG in request.GET.keys():\n return SimpleTemplateResponse('admin/invalid_setup.html', {\n 'title': _('Database error'),\n })\n return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')\n\n # If the request was POSTed, this might be a bulk action or a bulk\n # edit. Try to look up an action or confirmation first, but if this\n # isn't an action the POST will fall through to the bulk edit check,\n # below.\n action_failed = False\n selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)\n\n # Actions with no confirmation\n if (actions and request.method == 'POST' and\n 'index' in request.POST and '_save' not in request.POST):\n if selected:\n response = self.response_action(request, queryset=cl.get_queryset(request))\n if response:\n return response\n else:\n action_failed = True\n else:\n msg = _(\"Items must be selected in order to perform \"\n \"actions on them. No items have been changed.\")\n self.message_user(request, msg, messages.WARNING)\n action_failed = True\n\n # Actions with confirmation\n if (actions and request.method == 'POST' and\n helpers.ACTION_CHECKBOX_NAME in request.POST and\n 'index' not in request.POST and '_save' not in request.POST):\n if selected:\n response = self.response_action(request, queryset=cl.get_queryset(request))\n if response:\n return response\n else:\n action_failed = True\n\n if action_failed:\n # Redirect back to the changelist page to avoid resubmitting the\n # form if the user refreshes the browser or uses the \"No, take\n # me back\" button on the action confirmation page.\n return HttpResponseRedirect(request.get_full_path())\n\n # If we're allowing changelist editing, we need to construct a formset\n # for the changelist given all the fields to be edited. 
Then we'll\n # use the formset to validate/process POSTed data.\n formset = cl.formset = None\n\n # Handle POSTed bulk-edit data.\n if request.method == 'POST' and cl.list_editable and '_save' in request.POST:\n FormSet = self.get_changelist_formset(request)\n formset = cl.formset = FormSet(request.POST, request.FILES, queryset=self.get_queryset(request))\n if formset.is_valid():\n changecount = 0\n for form in formset.forms:\n if form.has_changed():\n obj = self.save_form(request, form, change=True)\n self.save_model(request, obj, form, change=True)\n self.save_related(request, form, formsets=[], change=True)\n change_msg = self.construct_change_message(request, form, None)\n self.log_change(request, obj, change_msg)\n changecount += 1\n\n if changecount:\n msg = ngettext(\n \"%(count)s %(name)s was changed successfully.\",\n \"%(count)s %(name)s were changed successfully.\",\n changecount\n ) % {\n 'count': changecount,\n 'name': model_ngettext(opts, changecount),\n 'obj': force_text(obj),\n }\n self.message_user(request, msg, messages.SUCCESS)\n\n return HttpResponseRedirect(request.get_full_path())\n\n # Handle GET -- construct a formset for display.\n elif cl.list_editable:\n FormSet = self.get_changelist_formset(request)\n formset = cl.formset = FormSet(queryset=cl.result_list)\n\n # Build the list of media to be used by the formset.\n if formset:\n media = self.media + formset.media\n else:\n media = self.media\n\n # Build the action form and populate it with available actions.\n if actions:\n action_form = self.action_form(auto_id=None)\n action_form.fields['action'].choices = self.get_action_choices(request)\n media += action_form.media\n else:\n action_form = None\n\n selection_note_all = ngettext(\n '%(total_count)s selected',\n 'All %(total_count)s selected',\n cl.result_count\n )\n\n context = dict(\n self.admin_site.each_context(request),\n module_name=force_text(opts.verbose_name_plural),\n selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},\n selection_note_all=selection_note_all % {'total_count': cl.result_count},\n title=cl.title,\n is_popup=cl.is_popup,\n to_field=cl.to_field,\n cl=cl,\n media=media,\n has_add_permission=self.has_add_permission(request),\n opts=cl.opts,\n action_form=action_form,\n actions_on_top=self.actions_on_top,\n actions_on_bottom=self.actions_on_bottom,\n actions_selection_counter=self.actions_selection_counter,\n preserved_filters=self.get_preserved_filters(request),\n )\n context.update(extra_context or {})\n\n request.current_app = self.admin_site.name\n\n return TemplateResponse(request, self.change_list_template or [\n 'admin/%s/%s/change_list.html' % (app_label, opts.model_name),\n 'admin/%s/change_list.html' % app_label,\n 'admin/change_list.html'\n ], context)", "def update_listview(self, content=[]):\n old_model = self.ui.listItemList.model()\n model = ItemListModel(data=content, view=self)\n self.ui.listItemList.setModel(model)\n del old_model", "def do_list(self, arg):\n try:\n cprint (\"Here are your todo lists: \\n\", 'blue')\n app.ToDoApp.to_view_todo()\n\n except ValueError as e:\n cprint(e, 'red')", "def todo_list_view(request):\n\n context = {}\n queryset = Todo.objects.filter(user=request.user)\n context['lists'] = queryset\n return render(request,'todos/index.html', context)", "def KLP_Users_list(request):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions, to get user list\n\n KLP_user_Perm(request.user, 'Users', None)\n\n # get all active(1) users list 
other than staff and super user order by username\n\n user_list = User.objects.filter(is_staff=0,\n is_superuser=0).order_by('username')\n\n # render show users form with users list\n\n return render_to_response('viewtemplates/show_users_form.html',\n {\n 'user_list': user_list,\n 'user': user,\n 'title': 'KLP Users',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not logged in redirect to login page\n\n return HttpResponseRedirect('/login/')", "def add_list(self):\n the_list = models.List(user_id=1,\n list_name=self.test_list,\n description=self.test_list_desc)\n the_list.add()", "def watch_policy_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_policy_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/policies'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def user_admin_list_data():\n video = VideoFactory()\n collection = video.collection\n moira_list = factories.MoiraListFactory()\n collection.admin_lists.set([moira_list])\n return SimpleNamespace(video=video, moira_list=moira_list, collection=collection)", "async def add(self, request: AddToWatchlistRequest) -> WatchlistResponse:\n return await self._modify_watchlist(request)" ]
[ "0.6297587", "0.595493", "0.5712135", "0.56618613", "0.5633352", "0.5611895", "0.5321622", "0.53103226", "0.5265301", "0.5262247", "0.52606416", "0.5250518", "0.5248871", "0.5230241", "0.5209476", "0.5166371", "0.51378316", "0.5135525", "0.5120909", "0.5105004", "0.5089999", "0.5081767", "0.5065842", "0.50640106", "0.50365037", "0.5028675", "0.49802098", "0.49666125", "0.49596885", "0.49461442" ]
0.72588384
0
Page where Users can add/remove Stocks from a specific Watchlist. Direct implementation of the EditWatchlistView.
def edit_watchlist(request, w_id): assert isinstance(request, HttpRequest) # Get the user's watchlist that matches the provided id try: watchlist = WatchList.objects.filter(user=request.user, watchList_id=w_id).get() watchlist_id = watchlist.watchList_id watchlist_name = watchlist.watchList_name stocks = [] except WatchList.DoesNotExist: # Given an invalid watchlist id # For now, redirect to the watchlists page # TODO: Possibly change the behaviour of invalid ids (maybe # show a message on the watchlists page that a watchlist # with the given id doesn't exist?) return redirect('watchlists') if request.method == 'POST': form = EditWatchListForm(request.POST, instance=watchlist) if form.is_valid: updated_watchlist = form.save(commit=False) updated_watchlist.watchList_id = watchlist_id updated_watchlist.user = request.user updated_watchlist.name = watchlist_name updated_watchlist.save() form.save_m2m() else: form = EditWatchListForm(instance=watchlist) for stock in watchlist.stockResults.all(): stocks.append(stock) print(f'Stocks: {stocks}') context = { 'title': 'Edit Watchlist', 'year': datetime.now().year, 'user': request.user, 'watchlist_id': watchlist_id, 'watchlist_name': watchlist_name, 'stocks': stocks, 'form': form, } return render( request, 'app/edit_watchlist.html', context )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def watchlists(request):\n \"\"\"\n Alternative to @login_required decorator: manually test with:\n request.user.is_authenticated\n \"\"\"\n assert isinstance(request, HttpRequest)\n\n # Get all of the user's watchlists\n watchlists = WatchList.objects.filter(user=request.user).all()\n \n # Store the stocks in each watchlist in a dictionary\n # Each key is the watchList_name from the user's watchlists\n # Each value is a list of Stocks (as StockList model objects) \n # present in the watchlist\n stocks = []\n counter = 0\n\n for w in watchlists:\n stocks.append([])\n for stock in w.stockResults.all():\n # No need to check if key is in the dict, since \n # it is added above\n stocks[counter].append(stock)\n counter += 1\n\n print(f'Watchlists:{watchlists}\\tStocks:{stocks}')\n\n if watchlists.count() != 0 and len(stocks) != 0:\n watchlist_stocks = zip(watchlists, stocks)\n else:\n watchlist_stocks = None\n\n context = {\n 'title':'Watchlists',\n 'message':'Your Watchlist page.',\n 'year':datetime.now().year,\n 'user': request.user,\n 'data': watchlist_stocks,\n }\n\n return render(\n request,\n 'app/watchlists.html',\n context\n )", "def add_stock_to_watchlist(ticker, company_name, watchlist_id):\n stock_res = Stock.query.filter(Stock.ticker == ticker).all()\n # print(stock_res)\n\n if len(stock_res) == 0:\n stock = Stock(\n ticker = ticker,\n company_name = company_name,\n # stock_url = stock_url\n )\n\n db.session.add(stock)\n db.session.commit()\n else:\n stock = stock_res[0]\n\n # watchlist_stock_res = WatchlistStock.query.filter(WatchlistStock.watchlist_id == watchlist_id).all()\n\n watchlist_stock = WatchlistStock(\n watchlist_id = watchlist_id,\n stock_id = stock.id\n )\n\n db.session.add(watchlist_stock)\n db.session.commit()", "def manage_watchlists(request):\n assert isinstance(request, HttpRequest)\n\n try:\n # Handling deleting Watchlist(s)\n if request.method == \"POST\":\n # Get list of selected checkmark boxes\n watchlists_to_delete = request.POST.getlist('delWatchListId')\n for watchlist_id in watchlists_to_delete:\n # Grab the specified watchlist from the database\n watchlist = WatchList.objects.filter(user=request.user, watchList_id=watchlist_id).get()\n watchlist.delete()\n # User just loads this page again, so grab their (newly updated) WatchLists\n watchlists = WatchList.objects.filter(user=request.user).all() \n except WatchList.DoesNotExist:\n watchlists = []\n\n context = {\n 'title': 'Manage Watchlists',\n 'year': datetime.now().year,\n 'user': request.user,\n 'watchlists': watchlists,\n }\n\n return render(\n request,\n 'app/manage_watchlists.html',\n context,\n )", "async def _modify_watchlist(\n self, request: Union[AddToWatchlistRequest, RemoveFromWatchlistRequest]\n ) -> WatchlistResponse:\n product = await self._client.products.get(request.symbol)\n\n assert product\n payload = {\n \"instrumentID\": str(product.id),\n \"userID\": str(self._client.user.id),\n \"watching\": request.watching,\n }\n data = await self._client.post(Url.watchlist_modify, payload=payload)\n\n return WatchlistResponse(symbol=request.symbol, watching=data[\"watching\"])", "def index(request):\n\n # todo implement\n # create a watchlist for a user if Logged in and the watchlist doesn't yet exist\n if request.user.is_authenticated and \"watchlist\" not in request.session:\n request.session[\"watchlist\"] = []\n \n return render(request, \"auctions/index.html\", {\"listings\": Listing.objects.filter(isActive=True)})", "def update_list_view(self):\n # Clear the list/tree view.\n 
self.list_view.clear()\n\n # Find all the selected things in Maya.\n selected = cmds.ls(selection=True)\n\n # For each of the selected things, create a widget item.\n for thing in selected:\n item = QtGui.QListWidgetItem(thing)\n item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)\n self.list_view.addItem(item)\n # Set the flags on the widget item so it is editable.", "def put_on_wish_list():\n book = request.form\n flash(\"The Wish list feature is under construction! Please check back soon!\")\n return render_template('book_details.html', list_of_books=book)", "def remove_stock_from_watchlist(stock_id, watchlist_id):\n WatchlistStock.query.filter(WatchlistStock.stock_id == stock_id, WatchlistStock.watchlist_id == watchlist_id).delete()\n\n db.session.commit()\n\n \"\"\"\n DELETE FROM watchlist_stock\n WHERE watchlist_stock.stock_id = stock_id(python)\n AND watchlist_stock.watchlist_id = watchlist_id(python)\n \"\"\"\n\n # watchlist_stock = WatchlistStock.query.get(stock_id)\n # watchlist = WatchlistStock.query.get(watchlist_id)\n\n # db.session.delete(watchlist_stock)\n # db.session.delete(watchlist)\n # db.session.commit()", "def handle_list_items(self, object, name, old, new):\n raise NotImplementedError", "def add_new_item():\n\n lst = item_list()\n return render_template('index.html', sell_flag=1, items=lst)", "def stocks(request):\n\n try:\n stocks = StockList.objects.all()\n except StockList.DoesNotExist:\n stocks = None\n\n context = {\n 'title': 'Filter Stocks',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stocks': stocks,\n }\n\n return render(\n request,\n 'app/stocksview.html',\n context,\n )", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "def edit_tools(self, e):\n #GETTING SELECTION\n\n self.selected_item = self.user_inventory.selection()\n self.select_name = self.user_inventory.item([i for i in self.selected_item], \"values\")[0]\n self.select_entdate = self.user_inventory.item([i for i in self.selected_item], \"values\")[3]\n\n self.df_same_name = self.df_user.query(\"title == @self.select_name\")\n #this is the selected one for sure\n self.df_the_selected_item = self.df_same_name.loc[self.df_same_name[\"entry date\"] == self.select_entdate]\n\n #GETTING THE INDEX NUMBER OF THE SELECTION IN .CSV FILE\n self.index_select = self.df_the_selected_item.index\n self.index_select_number = self.index_select.tolist()\n\n #bottom buttons appear:\n self.changing_item_label.config(text=\"Now editing \"+self.select_name+\" that added on \"+self.select_entdate+\":\")\n\n self.delete_but = Button (self.bottom_frame, text=\"DELETE\", command=self.delete_button)\n self.delete_but.place(relx=0.1, rely=0.7, relwidth=0.28, anchor=\"w\")\n\n self.servings_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n self.serv_drop = Combobox(self.bottom_frame, value=self.servings_list, state=\"readonly\")\n self.serv_drop.place(relx=0.5, rely=0.7, relwidth=0.2, anchor=CENTER)\n\n \n self.serv_but = Button(self.bottom_frame, text=\"CHANGE AMOUNT\", command=self.change_amount_button, state=\"disabled\")\n self.serv_but.place(relx=0.9, rely=0.7, relwidth=0.28, anchor=\"e\")\n\n self.serv_drop.bind(\"<<ComboboxSelected>>\", self.activate_button)", "def 
delete_watchlist(request, w_id):\n try:\n watchlist = WatchList.objects.filter(user=request.user).get(watchList_id=w_id)\n except WatchList.DoesNotExist:\n return redirect('watchlists')\n watchlist.delete()\n return redirect('watchlists')", "def getEditForm( self ):\n return \"listc_edit\"", "def send_button_clicked(self, widget, event, Data=None):\n\t\tself.add_item_to_list(self.current_list)\n\t\treturn True", "def new_playlist_command(self):\n self.parent.song_object_list.clear()\n self.display_data(self.parent.song_object_list)\n self.playlist_select.set(\"Working Playlist\")", "def edit_the_selected_value_wishlist():\n\n curItem = wishlist_treeview.focus().strip('#')\n\n with open(\"Other_title_categories.json\", \"r\") as other_categories_fo:\n other_categories_foData = json.load(other_categories_fo)\n completeDict = other_categories_foData[\"wish_list\"]\n selectetItemData_complete = completeDict[curItem]\n\n select_values = selectetItemData_complete\n\n def raging_fire():\n \"\"\"call back function for edit button in the edit window\"\"\"\n\n if editspin1.get() != '0': # season\n select_values[0] = int(editspin1.get()) # modify the change\n select_values[3] = \"{}\".format(datetime.datetime.now().date()) # update the modify date\n with open('Other_title_categories.json', 'w') as f:\n completeDict[curItem] = select_values\n json.dump(other_categories_foData, f, indent=2)\n print(\"done writing changes\")\n\n if editspin2.get() != '0': # episode\n select_values[1] = int(editspin2.get())\n select_values[3] = \"{}\".format(datetime.datetime.now().date()) # update the modify date\n with open('Other_title_categories.json', 'w') as f:\n completeDict[curItem] = select_values\n json.dump(other_categories_foData, f, indent=2)\n print(\"done writing changes\")\n\n if editentvar.get() != curItem: # name\n removed_values = completeDict.pop(curItem) # remove the previous title and write the new one\n completeDict[editentvar.get()] = removed_values\n select_values[3] = \"{}\".format(datetime.datetime.now().date())\n with open('Other_title_categories.json', 'w') as f:\n completeDict[editentvar.get()] = select_values # set the values of the edited title\n json.dump(other_categories_foData, f, indent=2)\n print(\"done writing changes\")\n\n edittop.destroy()\n\n if curItem != \"\": # test if an item is highlighted first\n \"\"\"the actual edit window widgets\"\"\"\n edittop = Toplevel()\n\n editlab1 = Label(edittop, text=\"Current Tv-Series title : \")\n editlab1.grid(row=1, column=1, sticky=W, pady=4)\n\n editent = Entry(edittop, textvariable=editentvar, width=30)\n editentvar.set(curItem)\n editent.grid(row=1, column=2, sticky=W, pady=4)\n\n editlab2 = Label(edittop, text=\"Current Season {}, chance to : \".format(select_values[0]))\n editlab2.grid(row=2, column=1, sticky=W, pady=4)\n\n editspin1 = Spinbox(edittop, from_=0, to=1000, width=5)\n editspin1.grid(row=2, column=2, sticky=W, pady=4)\n\n editlab3 = Label(edittop, text=\"Current Episode {}, change to : \".format(select_values[1]))\n editlab3.grid(row=3, column=1, sticky=W, pady=4)\n\n editspin2 = Spinbox(edittop, from_=0, to=1000, width=5)\n editspin2.grid(row=3, column=2, sticky=W, pady=4)\n\n editbut = Button(edittop, text='Edit', command=raging_fire)\n editbut.grid(row=5, column=1, sticky=W, pady=4, padx=20)\n\n edittop.geometry(\"400x200+200+300\")\n edittop.title(\"Edit properties of {} \".format(curItem).upper())", "def view_list():\n # an HTML representation of the user shopping list\n printed_list = user[\"name\"]\n printed_list += 
\"<form>\"\n printed_list += '<br>'\n printed_list += 'New Item:<br>'\n printed_list += '<input type=\"text\" name=\"newitem\">'\n printed_list += '<br>'\n printed_list += '<input type=\"submit\" value=\"Submit\">'\n printed_list += \"</form>\"\n printed_list += list_to_html(user[\"shopping_list\"])\n\n return printed_list", "def update_stock_info(self, entry, item_name, item_url, item_stock, item_cost):\n self.items_list.delete(entry)\n self.items_list.insert(\n \"\", \"end\", values=(item_name, item_url, item_stock, item_cost)\n )", "def GetListCtrl(self):\n\n return self", "def edit_station(self):\n mac = request.params.get('mac', g.DEFAULT_MAC)\n log.debug('edit_station(%s)' % mac)\n\n # collect desired request params into dictionary\n # XXX need to do form validation here\n items = request.params\n\n stations = model.Session.query(model.Station)\n station = stations.filter(model.Station.mac == mac).first()\n if not station:\n station = model.Station(mac)\n model.Session.save(station)\n station.update(items)\n model.Session.update(station)\n model.Session.commit()\n redirect_to('/admin/dashboard')", "def update_listview(self, content=[]):\n old_model = self.ui.listItemList.model()\n model = ItemListModel(data=content, view=self)\n self.ui.listItemList.setModel(model)\n del old_model", "async def list(self) -> List[WatchlistProduct]:\n watchlist = await self._client.get(\n Url.watchlist.format(userId=self._client.user.id) # type: ignore\n )\n return [\n WatchlistProduct(**watched) for watched in watchlist[\"instrumentsWatchList\"]\n ]", "def handle_list(self, object, name, old, new):\n raise NotImplementedError", "def stock(request, s_id):\n assert isinstance(request, HttpRequest)\n\n try:\n #Get the requested stock with (stock_id)\n stocks = StockList.objects.filter(stock_id=s_id).get()\n stock_id = stocks.stock_id\n stock_name = stocks.symbol\n stock_value = stocks.value\n #do we have a sentiment object?\n\n except StockList.DoesNotExist:\n #If no stock is not found, returns to stock view to search for new one.\n return redirect('stock')\n \n context = {\n 'title': 'Stocks',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stock_id': stock_id,\n 'stock_name': symbol,\n 'stock_value': value,\n }\n\n return render(\n request,\n 'app/stock.html',\n context\n )", "def __init__(self, parent=None):\n super(MusicListWin, self).__init__(parent)\n \n #Connects the ui elements to self\n uic.loadUi(uiFile, self)\n \n self.actionAdd_Folder_Site.triggered.connect(self.addFolderSiteDia)\n self.actionRemove_Folder_Site.triggered.connect(self.removeFolderSiteDia)\n\n self.actionRefresh_Folders.triggered.connect(self.refreshFolderSites)\n\n self.artist_lw.currentItemChanged.connect(self.fillAlbum)\n self.album_lw.currentItemChanged.connect(self.fillSong)\n self.song_lw.currentItemChanged.connect(self.printSongInfo)\n\n self.addPL_but.clicked.connect(self.addToPlaylist)\n self.removePL_but.clicked.connect(self.removeFromPlaylist)\n self.printPL_but.clicked.connect(self.printPlaylist)\n\n self.playlist_comB.setContextMenuPolicy(Qt.CustomContextMenu)\n self.playlist_comB.customContextMenuRequested.connect(self.playlistContext)\n\n self.playlist_comB.currentIndexChanged.connect(self.loadPlaylist)\n \n self.fillSites()\n self.fillPlaylist()\n self.readSettings()\n self.noDelete = True", "def StockUpdate(list,SaveOnly=False):\n if list: # Checks to see if list has items. 
[] = false, while [1,2] = true!\n if not SaveOnly:\n for component in StockLibrary:\n for item in list:\n if component.get(item):\n component.update({item:component.get(item)-1})\n \n # Saving to CSV file. \n with open(\"shopStock.csv\",\"w+\",newline='') as document:\n Writer = csv.writer(document,delimiter=',')\n for part in StockLibrary:\n listAp = []\n for key in part.keys():\n line = \"{} : {}\".format(key,part[key])\n listAp.append(line)\n Writer.writerow(listAp)\n document.close()\n else:\n print(\"[STOCK MODULE ERROR] No items listed to update!\")", "def current_listing(request, auction_id):\n \n # if user is not logged in, display an error message\n if not request.user.is_authenticated:\n return render(request, 'auctions/apology.html', {\n 'message': \"You must be logged in to see this listing.\"\n })\n \n else:\n # query for watchlist status of the selected listing\n watchlist_item = Watchlist.objects.filter(user = request.user, auction_listing_id = auction_id)\n # query for the selected listing's data in the database\n listing = Auction_listing.objects.get(pk = auction_id)\n # if data is submitted\n if request.method == 'POST':\n # if user submits form via the watchlist button\n if request.POST.get('Watchlist_delete') or request.POST.get('Watchlist_add'):\n # check whether listing is on watchlist, if not add it, if yes remove it from watchlist\n if watchlist_item:\n watchlist_item.delete()\n else:\n watchlist = Watchlist(user = request.user, auction_listing_id = auction_id)\n watchlist.save()\n # if user submits form via the place bid button\n elif request.POST.get('min_bid') or request.POST.get('min_price'):\n # if previous bids were already made\n if request.POST.get('min_bid'):\n # if user provided amount is greater than the current highest bid\n if Decimal(request.POST.get('min_bid')) > Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max'):\n bid = Bid(user = request.user, auction_listing_id = auction_id, bid = request.POST.get('min_bid'))\n bid.save()\n # return an error message if user tries to bypass HTML verification\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Looks you tried to bypass the HTML verification. Unfortunately, your hacker level is too low to break this site.\"\n })\n # if no bids were made yet \n elif request.POST.get('min_price'):\n # if user provided amount is greater than or equal to the starting price\n if Decimal(request.POST.get('min_price')) >= listing.price:\n bid = Bid(user = request.user, auction_listing_id = auction_id, bid = request.POST.get('min_price'))\n bid.save()\n # return an error message if user tries to bypass HTML verification\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Looks you tried to bypass the HTML verification. 
Unfortunately, your hacker level is too low to break this site.\"\n })\n # if user submits form via the post comment button \n elif request.POST.get('post'):\n form = CommentForm(request.POST)\n # verify form is valid\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.auction_listing_id = auction_id\n instance.save()\n # else return an error message\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Form is invalid.\"\n })\n # if user submits form via the close auction button\n elif request.POST.get('close'):\n listing.active = False\n listing.save()\n \n return HttpResponseRedirect(reverse(\"current_listing\", kwargs={'auction_id': auction_id }))\n \n # if reached via URL\n else:\n form = CommentForm()\n # check if bid exists for current auction listing\n if Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max'):\n # query for the current bid in current listing\n current_bid = round((Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max')), 2)\n # find the user who made the current bid\n max_price = Bid.objects.get(auction_listing_id = auction_id, bid = Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max'))\n winner = max_price.user\n # if not bids were made, initiliaze both variables to 0 \n else:\n current_bid = 0\n winner = 0\n return render(request, 'auctions/current_listing.html', {\n 'listing': listing,\n 'price': listing.price,\n 'watchlist': watchlist_item,\n \"bid_count\": Bid.objects.filter(auction_listing_id = auction_id).count(),\n \"min_bid\": current_bid + Decimal(0.01),\n \"current_bid\": current_bid,\n \"winner\": winner,\n \"form\": form,\n \"comments\": Comment.objects.filter(auction_listing_id = auction_id),\n \"user\": request.user\n })", "def edit():\n\n curitem = treeview.focus().strip(\"#\")\n select_values = series_dict[curitem]\n editent2var.set(\"thumbnails\\\\\")\n\n def raging_fire():\n \"\"\"call back function for edit button in the edit window\"\"\"\n\n if editspin1.get() != '0': # season\n select_values[0] = int(editspin1.get())\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editspin2.get() != '0': # episode\n select_values[1] = int(editspin2.get())\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editentvar.get() != curitem: # name\n series_dict[editentvar.get().title()] = series_dict.pop(curitem) # update the modify date\n select_values[3] = \"{}\".format(datetime.datetime.now())\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editent2var.get() != select_values[2]: # pic\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n select_values[2] = editent2var.get()\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n edittop.destroy()\n\n if curitem != \"\": # test if an item is highlighted first\n \"\"\"the actual ecit window widgets\"\"\"\n edittop = Toplevel()\n\n editlab1 = Label(edittop, text=\"Current Tv-Series title : \")\n editlab1.grid(row=1, column=1, sticky=W, pady=4)\n\n editent = Entry(edittop, textvariable=editentvar, width=30)\n editentvar.set(curitem)\n editent.grid(row=1, column=2, sticky=W, pady=4)\n\n editlab2 = 
Label(edittop, text=\"Current Season {}, chance to : \".format(select_values[0]))\n editlab2.grid(row=2, column=1, sticky=W, pady=4)\n\n editspin1 = Spinbox(edittop, from_=0, to=1000, width=5)\n editspin1.grid(row=2, column=2, sticky=W, pady=4)\n\n editlab3 = Label(edittop, text=\"Current Episode {}, change to : \".format(select_values[1]))\n editlab3.grid(row=3, column=1, sticky=W, pady=4)\n\n editspin2 = Spinbox(edittop, from_=0, to=1000, width=5)\n editspin2.grid(row=3, column=2, sticky=W, pady=4)\n\n editlab4 = Label(edittop, text=\"Change image to : \")\n editlab4.grid(row=4, column=1, sticky=W, pady=4)\n\n editent2 = Entry(edittop, textvariable=editent2var, width=35)\n editent2var.set(select_values[2])\n editent2.grid(row=4, column=2, sticky=E, pady=4)\n\n editbut = Button(edittop, text='Edit', command=raging_fire)\n editbut.grid(row=5, column=1, sticky=W, pady=4, padx=20)\n\n download_thumbbut = Button(edittop, text=\"Download The thumbnail\", command=download_thumb)\n download_thumbbut.grid(row=5, column=2, sticky=W, pady=4, padx=20)\n\n edittop.geometry(\"400x200+200+300\")\n edittop.title(\"Edit properties of {} \".format(curitem).upper())" ]
[ "0.58458495", "0.5819087", "0.55722135", "0.5433836", "0.5122696", "0.5097534", "0.5068661", "0.5053849", "0.5053286", "0.4996129", "0.49931148", "0.49425825", "0.48860303", "0.4858759", "0.48571426", "0.48514402", "0.48511544", "0.48433307", "0.4835179", "0.4833105", "0.48207664", "0.48124912", "0.47942343", "0.47919598", "0.47919378", "0.47875842", "0.4775617", "0.47393182", "0.47054708", "0.4693919" ]
0.62513083
0
Initialise given the type and value.
def __init__(self, type, value): self.type = type self.value = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, type, value):\r\n self._type = type\r\n self._value = value", "def __init__(self, type, value,):\n self.type = type\n self.value = value", "def __init__(self, type_: Union[ConstraintTypes, str], value: Any):\n self.type = ConstraintTypes(type_)\n self.value = value\n enforce(self.check_validity(), \"ConstraintType initialization inconsistent.\")", "def initialize(self, value):\n if self.paramValidationPref:\n if not iscompatible(value, self.value):\n raise MechanismError(\"Initialization value ({}) is not compatiable with value of {}\".\n format(value, append_type_to_name(self)))\n self.value[0] = value\n self._update_output_states()", "def __init__(self, spec_type, values):\r\n self._values = values\r\n self._spec_type = spec_type", "def _initialize(self, index, value):\n # by default we just set corresponding value\n self.setvalue(index, value)", "def __init__(self, value):\n\n\t\tLOGGER.debug(\"> Initializing '{0}()' class.\".format(self.__class__.__name__))\n\n\t\t# --- Setting class attributes. ---\n\t\tself.__value = value", "def __init__(self, value=None):\r\n if value is not None:\r\n if isinstance(value, int):\r\n value = self.field(value)\r\n elif isinstance(value, self.field):\r\n pass\r\n elif isinstance(value, Future):\r\n pass # NB: for internal use in runtime only\r\n else:\r\n if isinstance(value, finfields.FiniteFieldElement):\r\n raise TypeError(f'incompatible finite field {type(value).__name__} '\r\n f'for {type(self).__name__}')\r\n\r\n raise TypeError('None, int, or finite field element required')\r\n\r\n super().__init__(value)", "def __init__(self, var_type='', value=''):\n self.cid = None\n self.var_type = var_type\n self.value = value", "def __init__(__self__, *,\n value: str):\n pulumi.set(__self__, \"value\", value)", "def __init__(self, tokenType, value = None):\r\n\t\tif not tokenType in VALID_TOKEN_TYPES:\r\n\t\t\traise ValueError(\"Invalid token type '{}' given\".format(tokenType))\r\n\r\n\t\tself.type = tokenType\r\n\t\tif value:\r\n\t\t\tself.value = value\r\n\t\telse:\r\n\t\t\tself.value = self.type", "def __init__(self, value=None):\n self._value = value\n self._list = []\n self._is_int = True if value is not None else False", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value=None):\r\n if value is None:\r\n value = (self.significand_type(None), self.exponent_type(None))\r\n else:\r\n if isinstance(value, (int, float)):\r\n e = math.ceil(math.log(abs(value), 2)) if value else 0\r\n s = value / 2**e\r\n assert s == 0 or 0.5 <= abs(s) <= 1, (value, s, e)\r\n value = (self.significand_type(s, integral=False), self.exponent_type(e))\r\n elif isinstance(value, tuple):\r\n if len(value) != 2 or \\\r\n not isinstance(value[0], self.significand_type) or \\\r\n not isinstance(value[1], self.exponent_type):\r\n raise TypeError('Significand/exponent pair required')\r\n\r\n else:\r\n raise TypeError('None, int, float, or significand/exponent pair required')\r\n\r\n super().__init__(value)", "def __init__(self, value=None):\r\n if value is not None:\r\n if isinstance(value, int):\r\n value = self.field(value)\r\n elif isinstance(value, self.field):\r\n pass\r\n# elif isinstance(value, Future):\r\n# pass # NB: for internal use in runtime only\r\n else:\r\n if 
isinstance(value, finfields.FiniteFieldElement):\r\n raise TypeError(f'incompatible finite field {type(value).__name__} '\r\n f'for {type(self).__name__}')\r\n\r\n raise TypeError('None, int, or finite field element required')\r\n\r\n super().__init__(value)", "def __init__(self, type, value, **kwargs):\n\n if isinstance(type, (int, long)):\n if type < 0 or type > 255:\n raise ValueError('IE type must be unsigned 8-bit integer')\n self._type = type\n else:\n if type in ie_name_to_type:\n self._type = ie_name_to_type[type]\n else:\n raise ValueError('Provided IE type ({}) not understood'.format(type))\n\n if 'raw' in kwargs and kwargs['raw']:\n self._encoded_value = value\n self._decoded_value = ie_decoders[ie_types[self._type]](value)\n else:\n self._decoded_value = value\n self._encoded_value = ie_encoders[ie_types[self._type]](value)\n\n if self._type in ie_types:\n value = ie_encoders[ie_types[self._type]](value)\n\n self._encoded_value = value\n\n self._length = len(value)\n self._encoded_length = self._length + 4", "def __init__(self, value):\r\n self.value = value", "def _init(self, store_value):\n self.store_value = store_value", "def __init__(self, value: Any, log: Log) -> None:\n super().__init__()\n\n self._value = (value, log)", "def __init__(self, currency, value):\n assert isinstance(value, six.integer_types)\n assert not isinstance(currency, six.string_types)\n self.currency = currency\n self.value = value", "def __init__(self, value=None):\n self.set(value)", "def __init__(self, data_type=None):\n self.type = data_type", "def __init__(self, value):\n self.value = value", "def __init__(self, value=None):\n if isinstance(value, Iterable):\n self.value = [NestedInteger(v) for v in value]\n elif isinstance(value, NestedInteger):\n self.value = value.value\n else:\n self.value = value", "def __init__(self, type_):\n\n self.type = type_" ]
[ "0.7659906", "0.7468749", "0.6850241", "0.68287164", "0.65762985", "0.6521753", "0.64979446", "0.6404977", "0.6335378", "0.6326324", "0.63115156", "0.6291628", "0.6281383", "0.6281383", "0.6281383", "0.6281383", "0.6281383", "0.6281383", "0.62743884", "0.626497", "0.6212518", "0.62107044", "0.61776394", "0.61582863", "0.6144123", "0.6127733", "0.6122953", "0.61215556", "0.61037815", "0.60700846" ]
0.7627943
1
Sets the pthid of this Thread.
def pthid(self, pthid): self._pthid = pthid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thid(self, thid):\n\n self._thid = thid", "def thread_id(self, thread_id):\n\n self._thread_id = thread_id", "def thread_id(self, thread_id):\n\n self._thread_id = thread_id", "def thread_id(self, thread_id):\n\n self._thread_id = thread_id", "def pid(self, pid):\n\n self._pid = pid", "def pid(self, pid):\n\n self._pid = pid", "def def_pid(self,pid):\n self.pid=int(pid)", "def set_tid(self, tid):\n self.__tid = tid", "def setParentID(self, pid='0'):\n self.PUID = pid\n logger.debug('parentID set to: %s' % self.PID)", "def set_id(self, id):\n self.id = id\n print(\"self id = \" + str(self.id))", "def setID(self, id):\r\n raise NotImplementedError(\"must be implemented in subclass\")", "def set_id(self, player_id):\n pass", "def set_id(self, id):\n self.__id = id", "def set_id(self, id_):\n\n self.id_ = id_", "def process_id(self, process_id):\n\n self._process_id = process_id", "def process_id(self, process_id):\n\n self._process_id = process_id", "def id(self, _id):\n self._id = _id", "def pmid(self, pmid):\n\n self._pmid = pmid", "def _set_id(self):\n raise NotImplementedError()", "def set_node_id(self, node_id):\n self._node_id = node_id", "def set_id(self, id_=None):\n if id_ is None:\n self.id = id(self)\n else:\n self.id = id_", "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def set_id(self, id):\n self.data['id'] = id", "def id(self, id):\n self._id = id", "def id(self, id):\n self._id = id", "def id(self, id):\n self._id = id", "def id(self, id):\n self._id = id", "def id(self, id):\n self._id = id", "def id(self, id):\n self._id = id", "def id(self, id):\n self._id = id" ]
[ "0.72480106", "0.6926966", "0.6926966", "0.6926966", "0.6742504", "0.6742504", "0.6700898", "0.66840965", "0.63728267", "0.627035", "0.622268", "0.6181539", "0.61787826", "0.6133578", "0.6112368", "0.6112368", "0.6035746", "0.59666836", "0.5958457", "0.59483874", "0.5893692", "0.5862362", "0.58516103", "0.58281493", "0.58281493", "0.58281493", "0.58281493", "0.58281493", "0.58281493", "0.58281493" ]
0.8066558
0
Sets the received_orders of this Thread.
def received_orders(self, received_orders): self._received_orders = received_orders
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def orders(self, orders):\n\n self._orders = orders", "def orders(self, orders):\n\n self._orders = orders", "async def on_orders_replaced(self, orders: List[MetatraderOrder]):\n self._orders = orders", "def received_order(self, order):\n\t\tif order.direction == ORDERDIR.IN:\n\t\t\tself.set_button_light(order.floor, OUTPUT.IN_LIGHTS, 1)\n\t\telse:\n\t\t\tself.startedOrderQueue.put(order)\n\t\tself.orderQueue.add_order(order)\n\t\tself.update_and_send_elevator_info()\n\t\tself.should_drive()", "def on_received(self, order):\n self.received_messages.append(order)\n self.bm.on_interesting_shit()", "def set_room_order(self, room_orders):\n orders = []\n self.room_orders = ';'.join(['%d-%d' % \\\n (item[0], item[1]) for item in room_orders.items()])", "def setOrder(self, order):\n\t\tself.orderInData = order", "def received(self, received):\n\n self._received = received", "def sender_order(self, sender_order):\n\n self._sender_order = sender_order", "def set_order(self, order):\n self.order = order", "def set_order(self, order):\n self.order = order", "def SetOrder(self, order):\n if self.__order != order:\n self.__order = order\n self.Modified()", "def orderWatch(self, order):\r\n\t\tself.pair.orders.append(order)", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "async def on_order_updated(self, order: MetatraderOrder):\n for i in range(len(self._orders)):\n if self._orders[i]['id'] == order['id']:\n self._orders[i] = order\n break\n else:\n self._orders.append(order)", "def order(self, order):\n self._order = order", "def orderWatch(self, order):\r\n\t\tself.orders.append(order)", "def place_orders(self):\n buy_orders = []\n sell_orders = []\n buy_stop_order = {}\n sell_stop_order = {}\n order_status = 0\n \"\"\"order_status参数说明\n 0: running_qty为0, 维持原样\n 1: self.running_qty > 0, 买卖都变化, 买单按照offset2, 卖单按照offset3\n 2: 买单维持不变, 卖单按照offset3\n 3: self.running_qty < 0, 买卖都变化, 买单按照offset3, 卖单按照offset2\n 4: 卖单维持不变, 买单按照offset3\n 5: 追加指定订单\n 6: 取消指定订单\n 7: self.running_qty > 0, 买单按照offset2, 卖单不变\n 8: self.running_qty < 0, 买单不变, 卖单按照offset2\n \"\"\"\n # Create orders from the outside in. This is intentional - let's say the inner order gets taken;\n # then we match orders from the outside in, ensuring the fewest number of orders are amended and only\n # a new order is created in the inside. 
If we did it inside-out, all orders would be amended\n # down and a new order would be created at the outside.\n position_grade = self.get_position_grade()\n avgCostPrice = self.exchange.get_position()['avgCostPrice']\n print ('position_grade: %s ' % position_grade)\n print ('running_qty: %s ' % self.running_qty)\n print ('ORDER_START_SIZE: %s ' % self.ORDER_START_SIZE)\n schedule.run_pending()\n\n if(self.countdown == True): #设置倒数计时, 60秒后delay_order_check设为True, 可以重新挂非清仓方向的价格\n self.cycleclock = self.cycleclock - 1\n if(self.cycleclock <= 0):\n if(self.check_last_price_upordown() == True):\n self.cycleclock = 5\n else:\n self.countdown = False\n self.delay_order_check = True\n\n if(self.get_ticker()['last'] > STOP_PRICE and self.buy_only_flag == False):\n self.buy_only_flag = True\n if(self.running_qty < 0):\n self.clear_position(buy_orders, sell_orders)\n return self.converge_orders(buy_orders, sell_orders, order_status)\n\n if(self.get_5th_max_MA15_defference(getmessage = 1) > 100):\n self.stop_market_maker_flag = True\n self.cancel_all_orders_flag = True\n self.buy_only_flag = False\n self.sell_only_flag = False\n tg_important_message('上涨差值超过100,暂停交易')\n\n if(self.stop_market_maker_flag == True and self.cancel_all_orders_flag == True):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Cancel all orders\")\n elif(self.stop_market_maker_flag == True and self.clear_position_flag == True):\n if(self.running_qty != 0):\n self.clear_position(buy_orders, sell_orders)\n else:\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. No orders, no positions now\")\n elif(self.stop_market_maker_flag == True):\n if(self.running_qty > 0):\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 4\n elif(self.running_qty < 0):\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 2\n elif(self.running_qty == 0 and self.last_running_qty == 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. 
No orders, no positions now\")\n\n elif(self.running_qty == 0 and self.restart_flag == False):\n if(self.check_last_price_upordown() == True):\n self.restart_flag = True\n self.countdown_restart = 5\n return\n self.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION #新算法, 每次初始交易重新设定ORDER_START_SIZE\n order_status = 0\n if not(self.sell_only_flag == True):\n buy_orders.append(self.prepare_order(-1, order_status))\n if not(self.buy_only_flag == True):\n sell_orders.append(self.prepare_order(1, order_status))\n self.countdown = False\n self.restart_flag = True\n self.countdown_restart = 30\n\n elif(self.running_qty == 0 and self.restart_flag == True):\n self.countdown_restart = self.countdown_restart - 1\n if(self.countdown_restart <= 0):\n self.restart_flag = False\n return\n\n elif(self.running_qty != 0 and self.running_qty != self.last_running_qty): #仓位变动后开始倒计时60秒, 60秒后delay_order_check为True, 可以重新挂非清仓方向的价格\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n self.cycleclock = 60\n self.countdown = True\n self.restart_flag = False\n self.delay_order_check = False\n\n elif(self.running_qty != 0 and self.running_qty == self.last_running_qty and self.delay_order_check == True): #可以重新挂非清仓方向的价格\n i = abs(self.running_qty) // (self.ORDER_START_SIZE//4) + 1\n if(self.running_qty > 0):\n order_status = 7\n if(i <= 3):\n buy_orders.append(self.prepare_order(-i, order_status))\n if(self.running_qty < 0):\n order_status = 8\n if(i <= 3):\n sell_orders.append(self.prepare_order(i, order_status))\n self.cycleclock = 30\n self.countdown = True\n self.delay_order_check = False\n\n else:\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n\n if(self.last_running_qty != self.running_qty):\n self.send_tg_message()\n self.last_running_qty = self.running_qty\n self.reset = False\n buy_orders = list(filter(None.__ne__, buy_orders)) #去除None\n sell_orders = list(filter(None.__ne__, sell_orders)) #去除None\n print('BXBT_MA15: %s' % self.get_BXBT_MA15())\n print(buy_orders)\n print(sell_orders)\n if((self.last_buy_orders == buy_orders and self.last_sell_orders == sell_orders) or (buy_orders == [] and sell_orders == [])):\n print('order no change, return')\n return\n else:\n self.last_buy_orders = buy_orders\n self.last_sell_orders = sell_orders\n self.converge_stop_order(buy_stop_order, sell_stop_order)\n return self.converge_orders(buy_orders, sell_orders, order_status)", "def set_task_order(self, order):\n for task in self.tasks:\n task.order = order", "def _place_orders_onto_queue(self, order_list: List[OrderEvent]):\n for order_event in order_list:\n self._events.add_event(order_event)", "def add_orders(self, response_data):\n orders = response_data[self.DATA][self.DATA]\n for order in orders:\n self.orders.append(self.process_order_data(order))", "def clearOrderList(self):\r\n\t\tself.pair.orders = []", "def set_filled_orders(self, filled_orders):\n # set to class property\n self._filled_orders = filled_orders\n\n # start spread analysis\n spread = Spread(filled_orders=filled_orders)\n spread.get_name()\n spread.get_spread()\n\n filled_order = self._filled_orders[0]\n \"\"\":type: FilledOrder\"\"\"\n\n underlying = filled_order.underlying\n future = filled_order.future\n forex 
= filled_order.forex\n\n # primary field\n if self.position_set.name == '':\n self.position_set.name = spread.name\n\n if self.position_set.spread == '':\n self.position_set.spread = spread.spread\n\n self.position_set.status = 'OPEN'\n if not self.position_set.start_date:\n self.position_set.start_date = filled_order.trade_summary.date\n\n self.position_set.underlying = underlying\n self.position_set.future = future\n self.position_set.forex = forex\n\n # position stages\n if spread.get_spread() != 'CUSTOM':\n stage_module = import_module(\n '%s.%s' % (self.stage_path, spread.get_name(module=True, lower=True))\n )\n class_name = 'Stage%s' % spread.get_spread_module()\n class_obj = getattr(stage_module, class_name)\n self.stages = class_obj(filled_orders=filled_orders).create_stages()", "def clearOrderList(self):\r\n\t\tself.orders = []", "def set_filled_order(self):\n self.set_values(\n start_phrase='Filled Orders',\n end_phrase=None,\n start_with=2,\n end_until=-1,\n prop_keys=self.filled_order_keys,\n prop_name='filled_order'\n )\n\n self.filled_order = map(self.del_empty_keys, self.filled_order)\n self.fillna_dict_with_exists(\n self.filled_order,\n 'exec_time',\n ('exec_time', 'spread', 'order')\n )\n\n self.replace_nan(self.filled_order)\n self.convert_type(self.filled_order, 'exec_time', self.convert_datetime, 0)\n\n self.convert_type(self.filled_order, 'quantity', int, 0)\n self.convert_type(self.filled_order, 'strike', float, 0.0)\n self.convert_type(self.filled_order, 'price', float, 0.0)\n self.convert_type(self.filled_order, 'net_price', float, 0.0)\n self.convert_type(self.filled_order, 'expire_date', str, '')", "def __init__(self, *args, **kwargs):\n self._orders = None\n super().__init__(*args, **kwargs)", "def send_order(self, p_order, p_in_out, count):\n pass", "async def _listen_on_orders(self):\n # The lock is used to make sure the websocket is setup before using it.\n await self._orders_sock_info.connected_event.wait()\n try:\n async for message in self._orders_sock_info.ws:\n self._orders_sock_info.ready.set()\n if self._orders_sock_info.queue.qsize() >= 100:\n log.warning(\"Websocket message queue is has \"\n f\"{self._orders_sock_info.queue.qsize()} pending \"\n \"messages.\")\n await self._orders_sock_info.queue.put(message)\n finally:\n await self._orders_sock_info.ws.close()" ]
[ "0.6786346", "0.6786346", "0.6680266", "0.6170726", "0.61563283", "0.603664", "0.5962039", "0.58325493", "0.5801888", "0.57870305", "0.57870305", "0.5752953", "0.56680435", "0.56383055", "0.56383055", "0.56383055", "0.5635519", "0.558436", "0.5562859", "0.55175805", "0.5458763", "0.5456671", "0.5443021", "0.54248446", "0.5423419", "0.539403", "0.5374378", "0.5355051", "0.5280426", "0.5221879" ]
0.82472265
0
Sets the sender_order of this Thread.
def sender_order(self, sender_order): self._sender_order = sender_order
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_task_order(self, order):\n for task in self.tasks:\n task.order = order", "def set_order(self, order):\n self.order = order", "def set_order(self, order):\n self.order = order", "def order(self, order):\n self._order = order", "def SetOrder(self, order):\n if self.__order != order:\n self.__order = order\n self.Modified()", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def setOrder(self, order):\n\t\tself.orderInData = order", "def setPlayerOrder(self):\n\n player_order_functions = {\n \"BuyPrivateCompany\": PlayerTurnOrder,\n \"BiddingForPrivateCompany\": PrivateCompanyInitialAuctionTurnOrder,\n \"StockRound\": PlayerTurnOrder,\n \"StockRoundSellPrivateCompany\": None,\n \"OperatingRound\": None\n }\n\n player_order_generator = player_order_functions.get(self.minigame_class)(self.getState())\n\n if player_order_generator.stacking_type:\n self.player_order_fn_list.append(player_order_generator)\n\n if player_order_generator.overwrite_type:\n # An overwrite type function usually clears the full stack of player order functions.\n # The only case in which we don't is if we are \"resuming\" a player stack.\n try:\n self.player_order_fn_list.pop()\n except IndexError:\n logging.warning(\"No old player order function available\")\n\n if len(self.player_order_fn_list) > 0 and \\\n self.get_player_order_fn().__class__.__name__ == player_order_generator.__class__.__name__:\n logging.warning(\"keeping old player order generator\")\n else:\n self.player_order_fn_list = [player_order_generator]", "def set_window_order(self, order: int = 0) -> None:\n if order == 0:\n setval = QMdiArea.CreationOrder\n if order == 1:\n setval = QMdiArea.StackingOrder\n if order == 2:\n setval = QMdiArea.ActivationHistoryOrder\n\n self.setActivationOrder(setval)", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender_name(self, sender_name):\n\n self._sender_name = sender_name", "def sender(self, sender: str):\n if sender is None:\n raise ValueError(\"Invalid value for `sender`, must not be `None`\") # noqa: E501\n\n self._sender = sender", "def sender(self, sender: Address) -> None:\n enforce(\n isinstance(sender, str), f\"Sender must be string. 
Found '{type(sender)}'\"\n )\n self._sender = sender", "def set_document_order(self, order):\n self.set_value_into_input_field(self.order_text_field_locator, order)", "def setOrder(self, verbose = 1):\n\n self.order = np.arange(self.atoms.shape[0])\n if verbose > 0:\n string = \"Updated the saved order\"\n ut.infoPrint(string)", "def sort_order(self, sort_order):\n\n self._sort_order = sort_order", "def set_byte_order(self, byteorder='little'):\n self._byteorder = byteorder", "def set_thread_priority(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_thread_priority(self, *args, **kwargs)", "def setOrder(self, *args):\n return _libsbml.CompartmentGlyph_setOrder(self, *args)", "def __set_sender_id(self, sender_id):\n if not isinstance(sender_id, int):\n raise TypeError('It has to be an integer identifier')\n if sender_id < 0:\n raise ValueError('There are not negative identifiers')\n self.__sender_id = sender_id", "def set_thread_priority(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr_set_thread_priority(self, *args, **kwargs)", "def set_thread_priority(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_thread_priority(self, *args, **kwargs)", "def sort_order(self, sort_order: int):\n\n self._sort_order = sort_order", "def order_by(self, order_by):\n\n self._order_by = order_by" ]
[ "0.6738106", "0.67285603", "0.67285603", "0.641746", "0.6392167", "0.63920426", "0.63920426", "0.63920426", "0.6216461", "0.596148", "0.5932994", "0.5864125", "0.5864125", "0.5864125", "0.5864125", "0.5864125", "0.5649165", "0.55945116", "0.5574294", "0.5551362", "0.5378497", "0.5357545", "0.53355765", "0.5287306", "0.5263054", "0.5247023", "0.52443814", "0.5206895", "0.5196841", "0.51899046" ]
0.83562696
0
Sets the thid of this Thread.
def thid(self, thid): self._thid = thid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thread_id(self, thread_id):\n\n self._thread_id = thread_id", "def thread_id(self, thread_id):\n\n self._thread_id = thread_id", "def thread_id(self, thread_id):\n\n self._thread_id = thread_id", "def set_tid(self, tid):\n self.__tid = tid", "def pthid(self, pthid):\n\n self._pthid = pthid", "def set_id(self, id):\n self.id = id\n print(\"self id = \" + str(self.id))", "def set_id(self, id):\n self.__id = id", "def task_id(self, task_id):\n self._task_id = task_id", "def set_node_id(self, node_id):\n self._node_id = node_id", "def setID(self, id):\r\n raise NotImplementedError(\"must be implemented in subclass\")", "def trace_id(self, trace_id):\n\n self._trace_id = trace_id", "def trace_id(self, trace_id):\n\n self._trace_id = trace_id", "def task_id(self, task_id):\n\n self._task_id = task_id", "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def setID(self, idNb):\n self._id = idNb", "def ticket_id(self, ticket_id):\n self._ticket_id = ticket_id", "def set_id(self, id_):\n\n self.id_ = id_", "def id(self, _id):\n self._id = _id", "def set_husb_id(self, h, line_number=0):\n self.husb_id = h\n self._husb_id_line = line_number", "def pid(self, pid):\n\n self._pid = pid", "def pid(self, pid):\n\n self._pid = pid", "def set_id(self, player_id):\n pass", "def thread(self, thread):\n\n self._thread = thread", "def class_id(self, class_id):\n\n self._class_id = class_id", "def node_id(self, node_id):\n\n self._node_id = node_id", "def kill(self, threadid):\n self.rpc.call(MsfRpcMethod.CoreThreadKill, [threadid])", "def trace_id_set(trace_id: tuple[str, str]) -> None:\n trace_id_cv.set(trace_id)", "def trace_set_child_id(child_key: str, child_run_id: str) -> None:\n node = cast(TraceElement, trace_stack_top(trace_stack_cv))\n if node:\n node.set_child_id(child_key, child_run_id)", "def id(self, id):\n self._id = id", "def id(self, id):\n self._id = id" ]
[ "0.71798706", "0.71798706", "0.71798706", "0.7097371", "0.6712861", "0.5938794", "0.57075554", "0.5689583", "0.5666702", "0.5609876", "0.56035346", "0.56035346", "0.559792", "0.55645794", "0.5563854", "0.5559026", "0.5547788", "0.55468583", "0.5533373", "0.55246586", "0.55246586", "0.54863435", "0.5481676", "0.54622316", "0.54403204", "0.5431918", "0.54310405", "0.54096746", "0.5398044", "0.5398044" ]
0.8122559
0
construct a test method based on the notebook name, path, and nbconvert Preprocessor options Required Inputs
def get_test(nbname, nbpath, timeout=600): # use nbconvert to execute the notebook def test_func(self): cwd = os.getcwd() passing = True print( "\n---------------------" " Testing {0}.ipynb " "---------------------".format(nbname) ) if (nbname in self.ignore) or ( nbname in self.py2_ignore and sys.version_info[0] == 2 ): print(" Skipping {}".format(nbname)) return run_path = os.path.sep.join(nbpath.split(os.path.sep)[:-1]) os.chdir(run_path) clear_output = ClearOutputPreprocessor() with open(nbpath) as nbfile: notebook = nbformat.read(nbfile, as_version=4) clear_output.preprocess(notebook, {}) execute = ExecutePreprocessor( timeout=timeout, kernel_name="python{}".format(sys.version_info[0]), allow_errors=True, ) out = execute.preprocess(notebook, {}) os.chdir(cwd) for cell in out[0]["cells"]: if "outputs" in cell.keys(): for output in cell["outputs"]: if output["output_type"] == "error": passing = False err_msg = [] for traceback in output["traceback"]: err_msg += ["{}".format(traceback)] err_msg = "\n".join(err_msg) msg = """ \n ... {} FAILED \n {} in cell [{}] \n-----------\n{}\n-----------\n """.format( nbname, output["ename"], cell["execution_count"], cell["source"], ) traceback = """ ----------------- >> begin Traceback << ----------------- \n {}\n \n----------------- >> end Traceback << -----------------\n """.format( err_msg ) print(u"{}".format(msg + traceback)) assert passing, msg print(" ... {0} Passed \n".format(nbname)) return test_func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_test_func(nb_name, nb_path, clearoutput=True):\n\n nb_func = f'\\ndef test_{nb_name}():\\n'\\\n f' fpath_rel = {nb_path.split(os.sep)[1:]}\\n'\\\n ' fname = os.path.join(nb_dir, *fpath_rel)\\n'\\\n ' tf.run_notebook(fname, clearoutput=False)\\n'\\\n ' return 0\\n'\n\n return nb_func", "def _notebook_run(path):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n\n # Create a temporary file to write the notebook to.\n # 'with' method is used so the file is closed by tempfile\n # and free to be overwritten.\n # with tempfile.NamedTemporaryFile('w', suffix=\".ipynb\") as fout:\n with tempfile.NamedTemporaryFile(\n \"w\", suffix=\".nbconvert.ipynb\", delete=False\n ) as fout:\n nbpath = fout.name\n\n jupyter_exec = shutil.which(\"jupyter\")\n\n # recent version (~7.3.1) requires output without extension\n out_path = os.path.join(\n os.path.dirname(nbpath), os.path.basename(nbpath).split(\".\", 1)[0]\n )\n args = [\n jupyter_exec,\n \"nbconvert\",\n path,\n \"--output\",\n out_path,\n \"--to\",\n \"notebook\",\n \"--execute\",\n \"--ExecutePreprocessor.timeout=60\",\n ]\n subprocess.check_call(args)\n\n assert os.path.exists(nbpath), \"nbconvert used different output filename\"\n\n nb = nbformat.read(nbpath, nbformat.current_nbformat)\n\n errors = [\n output\n for cell in nb.cells\n if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"\n ]\n\n # Remove the temp file once the test is done\n if os.path.exists(nbpath):\n os.remove(nbpath)\n\n return nb, errors", "def runTest(self):\n self.setUp()\n self.test_JupyterNotebooks1()", "def check(nb_or_test_path, test_name=None, global_env=None):\n if test_name is None:\n test = OKTestFile.from_file(nb_or_test_path)\n else:\n test = NotebookMetadataOKTestFile.from_file(nb_or_test_path, test_name)\n\n if global_env is None:\n # Get the global env of our callers - one level below us in the stack\n # The grade method should only be called directly from user / notebook\n # code. 
If some other method is calling it, it should also use the\n # inspect trick to pass in its parents' global env.\n global_env = inspect.currentframe().f_back.f_globals\n\n test.run(global_env)\n\n return test", "def test_generate_nb_testing(self):\n pass", "def test_generate_nb(self):\n pass", "def grade_notebook(submission_path, *, tests_glob=None, name=None, ignore_errors=True, script=False, \n cwd=None, test_dir=None, seed=None, seed_variable=None, log=None, variables=None, \n plugin_collection=None):\n if not script:\n nb = nbformat.read(submission_path, as_version=NBFORMAT_VERSION)\n\n else:\n with open(submission_path) as f:\n nb = f.read()\n\n nb = script_to_notebook(nb)\n\n if plugin_collection is not None:\n nb = plugin_collection.before_execution(nb)\n\n # remove any ignored cells from the notebook\n if not script:\n nb = filter_ignored_cells(nb)\n\n secret = id_generator()\n results_array = \"check_results_{}\".format(secret)\n initial_env = {\n results_array: []\n }\n\n if name:\n initial_env[\"__name__\"] = name\n\n if log is not None:\n global_env = execute_log(\n nb, log, secret, initial_env, ignore_errors=ignore_errors, cwd=cwd, test_dir=test_dir, \n variables=variables)\n\n else:\n global_env = execute_notebook(\n nb, results_array, initial_env, ignore_errors=ignore_errors, cwd=cwd, test_dir=test_dir, \n seed=seed, seed_variable=seed_variable)\n\n if plugin_collection is not None:\n plugin_collection.run(\"after_execution\", global_env)\n\n tests_run = global_env[results_array]\n\n # Check for tests which were not included in the notebook and specified by tests_globs\n # Allows instructors to run notebooks with additional tests not accessible to user\n if tests_glob:\n # unpack list of paths into a single list\n tested_set = [test.path for test in tests_run]\n extra_tests = []\n for t in sorted(tests_glob):\n include = True\n for tested in tested_set:\n if tested in t or t in tested: # e.g. 
if 'tests/q1.py' is in /srv/repo/lab01/tests/q1.py\n include = False\n\n if include:\n extra_tests.append(OKTestFile.from_file(t))\n extra_tests[-1].run(global_env)\n\n tests_run += extra_tests\n\n results = GradingResults(tests_run)\n\n if plugin_collection is not None:\n plugin_collection.run(\"after_grading\", results)\n \n return results", "def notebook():\n pass", "def notebook():\n pass", "def create_exercise(exam_date, num, path_ex_folder, path_yaml):\n global images_to_add\n global REL_PATH_IMAGES\n REL_PATH_IMAGES = 'img_' + exam_date\n images_to_add = []\n path_mode_free = path_ex_folder + '/modo_libero/' # new folder for the considered submission mode\n os.mkdir(path_mode_free)\n exer = read_exercise_yaml(path_yaml) # reading the given yaml\n notebook = nb.v4.new_notebook() # creating the new notebook\n #print(exer['name'])\n if exer['name'] in ('graphs_flow','graphs_trees', 'graphs_planarity','graphs_paths'):\n insert_graph_import(notebook) #required graph import\n insert_no_scroll(notebook) #no scroll of output div\n else:\t\n insert_import_mode_free(notebook) # required import\n insert_start_button(notebook) # start button to run cells with tag 'run_start'\n insert_hide_code(notebook) # hide all code cells\n insert_user_bar_lib(notebook,path_ex_folder) # insert user_bar.py in a code cell\n insert_heading(notebook, exer['title']) # heading with title\n insert_description1(notebook, exer['description1'], exam_date, path_ex_folder) # description 1\n if 'description2' in exer:\n insert_description2(notebook, exer['description2']) # description 2\n insert_tasks(notebook, exer['tasks']) # inserting the several tasks\n if exer['name'] in ('lp_duality', 'lp_interactive', 'lp_modelling', 'lp_two_phases'): # other libraries needed for some types of exercises\n insert_needed_import(notebook, exer['name'])\n if int(num) >= 10: # writing the notebook and saving it in the correct folder\n note_name = 'Esercizio_' + num + '.ipynb'\n prev_folder = 'esercizio_' + num\n else:\n note_name = 'Esercizio_0' + num + '.ipynb'\n prev_folder = 'esercizio_0' + num\n insert_rendition(notebook, note_name)\n nb.write(notebook, note_name)\n os.rename(os.getcwd()+ '/' + note_name, path_mode_free + '/' + note_name)\n os.system(\"jupyter trust \" + path_mode_free + note_name) # signing the notebook in order to make it trusted\n insert_suppl_folders(path_mode_free) # inserting the supplementary folders (i.e., 'allegati', 'img')\n if exer['name'] in ('graphs_flow','graphs_trees', 'graphs_planarity','graphs_paths'):\n insert_graph_folder(path_mode_free)\n if 'tags' in exer:\n e_dict = {'title':exer['title'],'tags':exer['tags'],'tot_points':0,'link':'http://127.0.0.1:8888/notebooks/'+prev_folder+'/modo_libero/' + note_name, 'tasks':exer['tasks']}\n else:\n\t e_dict = {'title':exer['title'],'tags':[],'tot_points':0,'link':'http://127.0.0.1:8888/notebooks/'+prev_folder+'/modo_libero/' + note_name, 'tasks':exer['tasks']}\n return e_dict", "def create_test_modules(nbdir, name):\n\n if os.path.exists(f'test_{name}.py'):\n ans = input(\n f'test file already exist, overwrite test_{name}.py? 
[y/n]')\n if ans.lower() == 'y':\n print(f'overwriting test_{name}.py')\n else:\n print('abort')\n return None\n\n notebook_lst = get_notebooks(nbdir)\n\n with open(f'test_{name}.py', 'w') as f:\n f.write(_create_test_module_heading(nbdir))\n\n nb_names = []\n for nb_path in notebook_lst:\n # check if output has to be cleared\n if os.path.split(nb_path)[-1] in _keep_output_list:\n clearoutput = False\n else:\n clearoutput = True\n\n # get proper name for function\n nb_name = os.path.splitext(os.path.split(nb_path)[-1])[0]\n nb_name = nb_name.replace(' ', '_').replace('-', '_')\n while nb_name in nb_names:\n nb_name += '_'\n nb_names.append(nb_name)\n\n # write function\n with open(f'test_{name}.py', 'a') as f:\n f.write(_create_test_func(nb_name, nb_path, clearoutput))", "def test_should_handle_notebook_with_invalid_python_name_with_conf(work_dir, mocker):\n mocked_check_output = mocker.patch('subprocess.check_output', return_value=work_dir.encode())\n notebook_path = gen_notebook(cells=[('code', 'pass')], tmp_dir=work_dir, file_name='01_(test) nb.ipynb')\n\n # Create conf in a freshly init git repo\n conf_data = write_conf(work_dir=work_dir, conf_path=join(work_dir, DEFAULT_CONF_FILENAME),\n ignore_keys=['# Ignore', 'remove='])\n\n cmd_arguments = ['-n', notebook_path]\n IPynbToPython().run(*cmd_arguments)\n\n # This path is generated using the conf script_dir and the notebook name\n output_script_path = join(work_dir, conf_data['path']['python_script_root_dir'], 'mlvtools_01__test_nb.py')\n assert exists(output_script_path)\n\n with open(output_script_path, 'r') as fd:\n file_content = fd.read()\n\n # Ensure generated file syntax is right\n compile(file_content, output_script_path, 'exec')\n\n assert mocked_check_output.mock_calls == [mocker.call(\n ['git', 'rev-parse', '--show-toplevel'],\n cwd=work_dir)]", "def test_notebook():\n jupyter_notebooks = os.getenv('PYNQ_JUPYTER_NOTEBOOKS')\n\n # Try and find the notebook\n if os.path.isdir(f\"{jupyter_notebooks}/pynq-helloworld\"):\n if os.path.isfile(f\"{jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\"): \n result = run_notebook(f\"{jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\")\n else:\n raise CannotFindNotebook(f\"unable to locate the helloworld notebook, expecting it at {jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\")\n else:\n raise CannotFindNotebook(f\"unable to locate the helloworld directory, expecting it at {jupyter_notebooks}/pynq-helloworld\")", "def _run_notebook(\n config: cconfig.Config,\n notebook_file: str,\n num_attempts: int,\n abort_on_error: bool,\n publish: bool,\n) -> Optional[int]:\n cdtfut.setup_experiment_dir(config)\n # Prepare the destination file.\n idx = config[(\"meta\", \"id\")]\n experiment_result_dir = config[(\"meta\", \"experiment_result_dir\")]\n dst_file = os.path.join(\n experiment_result_dir,\n os.path.basename(notebook_file).replace(\".ipynb\", \".%s.ipynb\" % idx),\n )\n _LOG.info(\"dst_file=%s\", dst_file)\n dst_file = os.path.abspath(dst_file)\n # Export config function and its `id` to the notebook.\n config_builder = config[(\"meta\", \"config_builder\")]\n dst_dir = config[(\"meta\", \"dst_dir\")]\n cmd = [\n f'export __CONFIG_BUILDER__=\"{config_builder}\";',\n f'export __CONFIG_IDX__=\"{idx}\";',\n f'export __CONFIG_DST_DIR__=\"{dst_dir}\"',\n f\"; jupyter nbconvert {notebook_file}\",\n \"--execute\",\n \"--to notebook\",\n f\"--output {dst_file}\",\n \"--ExecutePreprocessor.kernel_name=python\",\n # From https://github.com/ContinuumIO/anaconda-issues/issues/877\n 
\"--ExecutePreprocessor.timeout=-1\",\n ]\n cmd = \" \".join(cmd)\n # Prepare the log file.\n log_file = os.path.join(experiment_result_dir, \"run_notebook.%s.log\" % idx)\n log_file = os.path.abspath(os.path.abspath(log_file))\n _LOG.info(\"log_file=%s\", log_file)\n # TODO(gp): Repeating a command n-times is an idiom that we could move to\n # system_interaction.\n # Try running the notebook up to `num_attempts` times.\n dbg.dassert_lte(1, num_attempts)\n rc = None\n for n in range(1, num_attempts + 1):\n if n > 1:\n _LOG.warning(\n \"Run the notebook: %d / %d attempt\",\n n,\n num_attempts,\n )\n _LOG.info(\"cmd='%s'\", cmd)\n rc = si.system(cmd, output_file=log_file, abort_on_error=False)\n if rc == 0:\n _LOG.info(\"Running notebook was successful\")\n break\n if rc != 0:\n # The notebook run wasn't successful.\n _LOG.error(\"Execution failed for experiment %d\", idx)\n if abort_on_error:\n dbg.dfatal(\"Aborting\")\n else:\n _LOG.error(\"Continuing execution of next experiments\")\n else:\n # Mark as success.\n cdtfut.mark_config_as_success(experiment_result_dir)\n # Convert to HTML and publish.\n if publish:\n _LOG.info(\"Publishing notebook %d\", idx)\n html_subdir_name = os.path.join(\n os.path.basename(dst_dir), experiment_result_dir\n )\n # TODO(gp): Look for the script.\n cmd = (\n \"python amp/dev_scripts/notebooks/publish_notebook.py\"\n + f\" --file {dst_file}\"\n + f\" --subdir {html_subdir_name}\"\n + \" --action publish\"\n )\n log_file = log_file.replace(\".log\", \".html.log\")\n si.system(cmd, output_file=log_file)\n return rc", "def main_convert(args):\n try:\n file_path = args.file_name # os.path.join(static_path, args.file_name)\n if args.slides:\n config_path = os.path.join(static_path, \"config\", \"slides_config.py\")\n output = subprocess.check_output(\n [\n \"jupyter\",\n \"nbconvert\",\n file_path,\n \"--to\",\n \"slides\",\n \"--CSSHTMLHeaderPreprocessor.style=colorful\",\n \"--reveal-prefix\",\n args.reveal_prefix,\n \"--config\",\n config_path,\n ],\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n else:\n config_path = os.path.join(static_path, \"config\", \"nb_config.py\")\n output = subprocess.check_output(\n [\n \"jupyter\",\n \"nbconvert\",\n file_path,\n \"--to\",\n \"html\",\n \"--config\",\n config_path,\n ],\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n print(output.rstrip())\n _name = get_out_name(args)\n # _name = output.split(\" \")[-1].rstrip()\n if args.c:\n with open(_name, \"r\") as f:\n clean_file = clean_html(f.read())\n with open(_name, \"w\") as f:\n f.write(clean_file)\n if args.bib_name is not None:\n add_ref(_name, args.bib_name, keep_label=args.l, slides=args.slides)\n else:\n with open(_name, \"r\") as f:\n clean_file = clean_html_refs(clean_file)\n with open(_name, \"w\") as f:\n f.write(clean_file)\n except IndexError:\n print(\"Provide the name of the notebook.\")", "def _notebook_run(path):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n with tempfile.NamedTemporaryFile(suffix=\".ipynb\") as fout:\n args = [\"jupyter\", \"nbconvert\", \"--to\", \"notebook\", \"--execute\",\n \"--ExecutePreprocessor.timeout=600\",\n \"--output\", fout.name, path]\n subprocess.check_call(args)\n\n # fout.seek(0)\n nb = nbformat.read(fout.name, nbformat.current_nbformat)\n\n errors = [output\n for cell in nb.cells if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"]\n\n return nb, errors", "def _test_generator(notebook):\n \n def test(self):\n nb, errors = run_notebook(notebook, 
kernel_name=self.kernel_name)\n \n message = ''\n if len(errors) > 0:\n for error in errors:\n message += '%s: %s\\n' % (error['ename'], error['evalue'])\n for line in error['traceback']:\n message += ' %s\\n' % line\n self.assertEqual(errors, [], message)\n \n return test", "def extract_tests(nb_path: PathLike):\n nb = read_nb(nb_path)\n\n root = get_config().config_path\n TEST_ROOT = root / \"test\"\n\n package_root = get_config().path(\"lib_path\")\n package_path = package_root.relative_to(root)\n package = str(package_path)\n\n default = find_default_export(nb[\"cells\"])\n if default is None:\n print(f\"{nb_path}: No export default found => SKIPPED\")\n return\n\n module = f\"{package}.{default}\"\n module_path = package_path.joinpath(*default.split(\".\")).with_suffix(\".py\")\n\n test_path = TEST_ROOT.joinpath(*default.split(\".\"))\n test_path = test_path.with_name(f\"test_{test_path.name}.py\")\n\n test_sources = [\n format_cell_test_code(cell)\n for idx, cell in enumerate(nb[\"cells\"])\n if is_test_export(cell, idx=idx)\n ]\n\n if not test_sources:\n print(f\"{nb_path}: No Test cases => SKIPPED\")\n return\n\n # We need to use \"/\" in paths to avoid weird excaped characters\n test_code = TEST_TEMPLATE.format(\n module_path=module_path.as_posix(),\n nb_path=Path(nb_path).resolve().relative_to(root).as_posix(),\n module=module,\n test_sources=\"\\n\\n\".join(test_sources),\n )\n\n # We need to create the directory if needed\n test_path.parent.mkdir(parents=True, exist_ok=True)\n test_path.write_text(test_code, encoding=\"utf-8\")\n black_format(test_path)\n\n print(f\"{nb_path}: tests to {test_path.relative_to(root)} => EXTRACTED\")", "def test_make_macrobes(self):\n basic_test_runner(self, 'macrobes')", "def test_JupyterNotebooks1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n # TODO: implement test\n\n self.delayDisplay('Test passed!')", "def notebook_run(path):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n with tempfile.NamedTemporaryFile(suffix=\".ipynb\") as fout:\n args = [\n \"jupyter\",\n \"nbconvert\",\n \"--to\",\n \"notebook\",\n \"--execute\",\n \"--ExecutePreprocessor.timeout=60\",\n \"--output\",\n fout.name,\n path,\n ]\n subprocess.check_call(args)\n\n fout.seek(0)\n nb = nbformat.read(fout, nbformat.current_nbformat)\n\n errors = [\n output\n for cell in nb.cells\n if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"\n ]\n\n return nb, errors", "def validate_nb(nb):\n print(\"[nbjekyll] Running test on {}\".format(os.path.split(nb)[1]))\n return validation_code(pytest.main([nb, '--nbval-lax']))", "def pytest_collect_file(path, parent):\n if path.fnmatch(\"*.ipynb\") and parent.config.option.nbval:\n return IPyNbFile(path, parent)", "def create_and_write_exam_instance(output_dir, nb_name, num_questions):\n test_dir = output_dir / 'tests'\n\n if Exam.config.get(\"public_tests\", False):\n os.makedirs(test_dir, exist_ok=True)\n else:\n os.makedirs(output_dir, exist_ok=True)\n\n student = nbformat.v4.new_notebook()\n\n # create autograder config file for this dir\n if Exam.otter():\n gen_otter_file(output_dir / nb_name)\n ok_path = None\n elif Exam.ok():\n ok_path = gen_dot_ok(output_dir / nb_name, Exam.config[\"endpoint\"])\n \n # init cell\n if Exam.config.get(\"init_cell\", True):\n student.cells.append(gen_init_cell(ok_path))\n \n # introduction\n student.cells.extend(Exam.introduction)\n\n # get question indices\n question_idx = list(range(len(Exam.questions)))\n np.random.shuffle(question_idx)\n 
question_idx = question_idx[:num_questions]\n\n # questions\n for i in range(num_questions):\n student.cells.append(gen_question_header_cell(i + 1))\n\n question = Exam.questions[question_idx[i]]\n version = question.choose_version()\n student.cells.extend(version.get_cells(False))\n\n if not question.manual and Exam.config.get(\"public_tests\", False):\n student.cells.append(gen_test_cell(\n version.get_hash(),\n question.points,\n version.tests,\n test_dir\n ))\n \n # remove hidden tests\n if Exam.config.get(\"public_tests\", False):\n remove_hidden_tests(test_dir)\n \n # conclusion\n student.cells.extend(Exam.conclusion)\n\n # check all cell\n if Exam.config.get(\"check_all_cell\", True) and Exam.config.get(\"public_tests\", False):\n student.cells.extend(gen_check_all_cell())\n\n # export cell\n if Exam.config.get(\"export_cell\", True):\n export_cell = Exam.config.get(\"export_cell\", True)\n if export_cell is True:\n export_cell = {}\n\n student.cells.extend(gen_export_cells(\n export_cell.get('instructions', ''), \n pdf = export_cell.get('pdf', True),\n filtering = export_cell.get('filtering', True)\n ))\n\n # remove output\n remove_output(student)\n \n # write notebooks\n nbformat.write(student, output_dir / nb_name)", "def run_nb(ju_nb):\n\n\tif len(sys.argv)>2:\n\t\tos.environ[\"JUPYTER_PARAMETER\"] = sys.argv[2]\n\telse:\n\t\tos.environ[\"JUPYTER_PARAMETER\"] = \"\"\n\t# the jupyter notebook can retrieve os.environ[\"JUPYTER_PARAMETER\"]\n\tnb = nbformat.read(open(ju_nb), as_version=4)\n\tep = ExecutePreprocessor(timeout=600, kernel_name='python3')\n\tep.preprocess(nb, {'metadata': {'path': os.path.dirname(ju_nb)}})\n\t# write sometimes destroys the notebook!\n\t# better just write a copy\n\tnew_nb_name=os.path.splitext(ju_nb)[0]+'_last_run.ipynb'\n\tnbformat.write(nb, open(new_nb_name, mode='wt'))", "def _run_notebook(nb_path):\n\n with open(nb_path) as f:\n nb = nbformat.read(f, as_version=4)\n ep = ExecutePreprocessor(allow_errors=True, kernel_name='python3',\n timeout=3600)\n ep.preprocess(nb, {'metadata': {'path': os.path.dirname(nb_path)}})\n\n # Check for any errors and return a list of error cells\n err = [out for cell in nb.cells if \"outputs\" in cell\n for out in cell[\"outputs\"]\\\n if out.output_type == \"error\"]\n\n return err", "def main(args):\n replace_ipynb(args.root)", "def generate_test_method(test_name):\n\n def run_test(self):\n # backup any existing files with our expected output_name\n output_name = \"{}.png\".format(test_name)\n backup_name = output_name + \".backup\"\n if os.path.isfile(output_name):\n os.rename(output_name, backup_name)\n self.addCleanup(cleanup_backup, backup_name, output_name)\n\n # run the test\n ret = subprocess.call(\"python {}.py\".format(test_name), shell=True)\n self.assertEqual(ret, 0)\n\n output_exists = os.path.isfile(output_name)\n if output_exists:\n self.addCleanup(cleanup_output, output_name)\n\n ps_output_name = \"{}.ps\".format(test_name)\n if os.path.isfile(ps_output_name):\n # some tests may also generate postscript files which need to be deleted\n self.addCleanup(cleanup_output, ps_output_name)\n\n self.assertTrue(output_exists)\n\n return run_test", "def execute(self, args=None):\n parser = argparse.ArgumentParser(description='run tests on jupyter notebook')\n\n parser.add_argument('notebook', type=str, help='the jupyter notebook to test')\n parser.add_argument('-t', '--target', type=str, metavar='', help='where to store results')\n parser.add_argument('-c', '--context', type=str, metavar='', help='context 
directory')\n parser.add_argument('-v', '--verbose', action='count', default=0, help='verbosity level')\n\n args = parser.parse_args(args)\n\n logger.setLevel(loglevel(args.verbose))\n logger.debug(f'args: {args}')\n\n results = self._grade_notebook(\n Path(args.notebook).absolute(),\n target_dir=Path(args.target).absolute() if args.target else None,\n context=Path(args.context).absolute() if args.context else None\n )\n\n return results.summary().failed", "def test_otter_check_notebook(self):\n # run for each individual test\n for file in glob(TEST_FILES_PATH + \"tests/*.py\"):\n # capture stdout\n output = StringIO()\n with contextlib.redirect_stdout(output):\n\n # mock block_print otherwise it interferes with capture of stdout\n with mock.patch(\"otter.check.block_print\"):\n check(\n TEST_FILES_PATH + \"test-nb.ipynb\", \n question = os.path.split(file)[1][:-3],\n tests_path = os.path.split(file)[0],\n )\n\n if os.path.split(file)[1] != \"q2.py\":\n self.assertEqual(\n output.getvalue().strip().split(\"\\n\")[-1].strip(), \n \"All tests passed!\", \n \"Did not pass test at {}\".format(file)\n )\n\n # run the file for all questions\n output = StringIO()\n with contextlib.redirect_stdout(output):\n with mock.patch(\"otter.check.block_print\"):\n check(\n TEST_FILES_PATH + \"test-nb.ipynb\", \n tests_path = os.path.split(file)[0],\n )\n\n self.assertEqual(\n output.getvalue().strip(), \n dedent(\"\"\"\\\n [0. 0.02002002 0.04004004 0.06006006 0.08008008]\n q1 results: All test cases passed!\n q2 results:\n q2 - 1 result:\n Trying:\n 1 == 1\n Expecting:\n False\n **********************************************************************\n Line 2, in q2 0\n Failed example:\n 1 == 1\n Expected:\n False\n Got:\n True\n\n q2 - 2 result:\n Test case passed!\n q3 results: All test cases passed!\n q4 results: All test cases passed!\n q5 results: All test cases passed!\"\"\"), \n \"Did not pass correct tests\"\n )" ]
[ "0.6850153", "0.61747766", "0.60934234", "0.6022499", "0.6000619", "0.5956608", "0.5937648", "0.58694273", "0.58694273", "0.5833995", "0.58074576", "0.5718149", "0.57117265", "0.5710831", "0.5669661", "0.56397384", "0.5630445", "0.5626195", "0.5597108", "0.55695677", "0.5523695", "0.5513838", "0.55035985", "0.5452221", "0.5407235", "0.53861374", "0.535995", "0.5359189", "0.5328924", "0.5327919" ]
0.6909046
0
Reads in lines from urls.txt, gives the status and other information on them
def readLines(): with open("text/urls.txt", "r") as f: urls = f.readlines() # cleans up the strings urls = [url.replace("\n", "") for url in urls] urls = [url.replace(" ", "") for url in urls] writeLines(urls)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_textfile(inf):\n list_of_urls_to_check = [line.rstrip() for line in inf.readlines()]\n return list_of_urls_to_check", "def read_urls(filename):\n \n urls = []\n with open(filename, 'r') as f:\n for line in f:\n if 'puzzle' in line:\n match = re.search(r'GET\\s(.*)HTTP', line)\n url = match.group(1)\n urls.append(url.strip())\n sorted_urls = sorted(set(urls))\n for url in sorted_urls:\n print (url[-8:-4])\n return sorted_urls", "def analyze_urls(filename, topic):\n # Initialize an empty list. Note that I store my urls and references\n # in a sort of strange way. Each element in result_list is a list of two\n # elements, the first element being the url, and the second element\n # being a list of all the references to the url\n result_list = []\n\n # Using the with...as construct to open the file in read mode\n with open(filename, \"r\", encoding=\"utf-8\") as files:\n # Iterate over each line (each is a url)\n for line in files:\n # Use the try ... except construct\n try:\n # Try to open each url\n with urllib.request.urlopen(line) as url_file:\n # Read the page\n page = url_file.read()\n # Decode the page\n decoded_page = page.decode(\"UTF-8\")\n # Regex expression to find the places which open\n # with a > then have some stuff, then the topic, then\n # close with a <\n pattern = fr\">[^<]*\\b{topic}\\b.*?<\"\n\n # Use the findall method from re to find all of the\n # occurrences of pattern in decoded_page as a list\n # The flags are IGNORECASE and DOTALL\n my_list = re.findall(pattern, decoded_page,\n re.IGNORECASE | re.DOTALL)\n\n # If my_list is not empty\n if my_list:\n # Slice off the the closing and opening angle\n # brackets using a list comprehension\n new_list = [word[1:-1] for word in my_list]\n # Append a new list of two elements to result_list,\n # where the first element of the list is the url,\n # and the second element of the list is the list of\n # references\n result_list.append([line, new_list])\n # One possible error is the urllib.error.URLError\n except urllib.error.URLError as url_err: # Catch the error\n # Print a message, url, and the error\n print(\"Error opening url:\", line, url_err)\n # Another possible error is the UnicodeDecodeError\n except UnicodeDecodeError as dec_err: # Catch the error\n # Print a message, and url\n print(\"Error decoding url:\", line)\n # Print the error\n print(dec_err)\n # Except all other errors\n except:\n pass\n # Return the result_list\n return result_list", "def load_links(self) -> Tuple[List[str], List[str]]:\n\n with open(URL_FILE, 'r') as txt_file:\n lines = txt_file.read().split()\n\n urls = []\n for line in lines:\n urls.append(line.split(',')[0])\n \n return lines, urls", "def read_urls(file):\r\n with open(file, \"r+\") as url_file:\r\n url_list = url_file.readlines()\r\n return url_list", "def read_urls(filename):\n # +++your code here+++\n\n res=utility(filename)\n for i in res:\n \tprint i", "def monitor_urls():\n all_files = [f for f in listdir(\n settings.CSV_PATH) if isfile(join(settings.CSV_PATH, f))]\n if all_files:\n for url in get_url_data(settings.CSV_PATH + all_files[0]):\n check_status(url[0], url[1])", "def process_from_file():\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = 
[url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\"", "def extract_URLs(self, input_file_name):\n file = open(input_file_name, 'r')\n lines = []\n for line in file.readlines():\n # Don't add empty lines.\n if len(line.strip()) > 0:\n lines.append(line.strip())\n return lines", "def check_url_status(self, urls):\n\n url_results = {}\n\n for item in urls:\n if item not in self.checked_links:\n self.checked_links.append(item)\n\n url_response = requests.head(item)\n from time import sleep\n sleep(0.5)\n\n url_results[item] = url_response.status_code\n self.logger.debug('[ %d ] %s', url_response.status_code, item)\n\n return url_results", "def read_urls(filename):\n # +++your code here+++\n result = []\n if not path_exists(filename):\n print 'Path ' + filename + ' doesn\\'t exist!'\n sys.exit(1)\n \n # get base url from the filename\n match = re.search(r'\\S*_(\\S*)', filename)\n host = 'http://' + match.group(1)\n \n # read file for urls\n file = open(filename, 'rU')\n for line in file:\n match = re.search(r'\\S*puzzle\\S*.jpg', line)\n if match:\n result.append(host + match.group())\n file.close()\n # sort the list and remove duplicates (-> set)\n return sorted(set(result), key=sortedFn)\n #return sorted(set(result))", "def fileReader(filename):\n try:\n openfile = open(filename, 'r')\n urls = openfile.read()\n openfile.close()\n return urls\n except IOError:\n print \"File tidak ada\"\n exit()", "def read_urls(filename):\n with open(filename, 'r') as f:\n line = f.readline()\n pattern = \"GET\" + \"(.+?)\"+ \"jpg\"\n result = []\n\n while len(line) > 0:\n end_point = re.search(pattern, line)\n if end_point != None and end_point.group(0)[4:] not in result:\n if \"no_picture\" not in end_point.group(0)[4:]:\n result.append(end_point.group(0)[4:])\n line = f.readline()\n return sorted(result, key = lambda x: x.split(\"/\")[-1].split(\"-\")[-1])", "def gather_headlines(urls):\n pass", "def getindex(url):\n try:\n req = urllib2.Request(url)\n f = urllib2.urlopen(req)\n return [ line.strip().split(':') for line in f.readlines() ] \n except:\n print \"Does the URL exist?\"", "def get_urls(path):\n urls = []\n with open(path, 'rb') as f:\n urls = [x.strip() for x in f.readlines()]\n\n # Prune urls that are not working\n goodUrls = []\n badUrls = []\n for url in urls:\n if prescreenUrl(url):\n print \"PASS prescreen: \" + str(url)\n goodUrls.append(url)\n else:\n print \"FAIL prescreen: \" + str(url)\n badUrls.append(url)\n\n with open('bad_urls', 'wb') as f:\n f.write('\\n'.join(badUrls))\n f.close()\n\n with open(path, 'wb') as f:\n f.write('\\n'.join(goodUrls))\n f.close()\n\n return goodUrls", "def 
__read_file(self):\n try:\n with open(self.filename) as fh:\n for line in fh:\n if self.__input_data_ok(line.strip()):\n timestamp, url = line.strip().split(\"|\")\n LOGGER.debug(\"%s %s\" %(timestamp, url))\n self.__create_record(self.__get_date_string(timestamp), url)\n else:\n LOGGER.warn(\"URLCrawler Malformed Line (Skipping): \\\"%s\\\"\" %line)\n\n LOGGER.debug(json.dumps(self.record_d, indent=4, separators=(',',':')))\n return True\n\n except Exception as e:\n LOGGER.error(\"URLCrawler File Read Exception: %s\" %(e))\n return False", "def check_url(url, read_lines=False):\n lines = None\n try:\n # Access URL\n url_stream = urllib2.urlopen(url, timeout=2)\n\n # Read lines\n if read_lines is True:\n lines = url_stream.readlines()\n except urllib2.URLError as url_error:\n url_stream = url_error\n except socket.timeout:\n return False, 'Time out. Try again!'\n\n # Return result\n if url_stream.code in (200, 401):\n url_good = True\n else:\n url_good = False\n\n # Close connect\n url_stream.close()\n\n # Return\n if read_lines is True:\n return url_good, lines\n if url_good is False:\n error_message = 'Unable to access %s. Check internet access. Code %d' % (url, url_stream.code)\n else:\n error_message = ''\n\n return url_good, error_message", "def check_URL(link, line):\n try:\n urllib2.urlopen(link)\n# g = urllib2.urlopen(ChUrl)\n# print(g.getcode())\n except urllib2.HTTPError as e:\n# print(e.code) # can add counter or line number variable here\n# print(e.reason)\n if e.code == 404 or 403:\n print(\"Status code {} returned, check your hyperlink: {} at line {}\".format(e.code, link, line))\n except urllib2.URLError as f:\n print(\"check your hyperlink: {} at line {}, {}\".format(link,line,f.args))", "def _read_image_urls(self):\n if not os.path.isfile(self._image_urls_file_name):\n raise IOError, \"'%s' is not found\" % self._image_urls_file_name\n if os.path.getsize(self._image_urls_file_name) == 0:\n raise IOError, \"'%s' is empty\" % self._image_urls_file_name\n for line in open(self._image_urls_file_name, 'r'):\n self._image_urls.append(line.strip())", "def read_urls(filename, server_name='http://code.google.com/'):\n # Construct unique URLs from file as - http://code.google.com/<url from file>\n animal_list = []\n ordered_list = []\n src_file = open(filename, 'rU')\n for line in src_file :\n animal_path = re.search( 'GET\\s+/(.+jpg)', line )\n if animal_path is not None :\n if animal_path.group(1) not in animal_list :\n animal_list.append( animal_path.group(1) )\n ordered_list = sorted(animal_list,key=sort_img_name)\n # Used in in range loop to operate on ordered_list rather than shallow copy, e.g. 
for path in ordered_list\n for i in range(0, len(ordered_list), 1) :\n ordered_list[i] = server_name + ordered_list[i]\n return ordered_list", "def CheckURLs(self):\n\t\tself.updated_count = 0\n\t\tself.app.setExitFlag(False)\n\t\ttry:\n\t\t\tparsed_params = self.urls\n\t\t\twhile (parsed_params):\n\t\t\t\tself.active_threads = []\n\t\t\t\twhile True:\n\t\t\t\t\twhile len(self.active_threads) < self.processes and len(parsed_params) > 0:\n\t\t\t\t\t\turlItem = parsed_params.pop()\n\t\t\t\t\t\tif urlItem != None:\n\t\t\t\t\t\t\tthread = Fetcher(self.app, urlItem)\n\t\t\t\t\t\t\tthread.start()\n\t\t\t\t\t\t\tself.active_threads.append( thread )\n\t\t\t\t\tif self.app.getExitFlag():\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif len( self.active_threads ) == 0:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor thread in self.active_threads:\n\t\t\t\t\t\t\tif not thread.isAlive():\n\t\t\t\t\t\t\t\tthread.printStatus()\n\t\t\t\t\t\t\t\tself.collectStat(thread)\n\t\t\t\t\t\t\t\tself.active_threads.remove(thread)\n\t\t\t\tif self.app.getExitFlag():\n\t\t\t\t\tbreak\n\t\texcept KeyboardInterrupt as e:\n\t\t\tself.app.setExitFlag(True)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(traceback.format_exc())", "def get_file_lines(url):\n\n # Download the file over the internet\n response = requests.get(url, stream=True)\n lines = []\n\n for line in response.iter_lines():\n lines.append(line.decode(\"UTF-8\"))\n return lines", "def get_file_lines(url):\n\n\t# Download the file over the internet\n\tresponse = requests.get(url, stream=True)\n\n\t# Decode all lines into strings\n\tlines = []\n\tfor line in response.iter_lines():\n\t\tlines.append(line.decode(\"UTF-8\"))\n\treturn lines", "def job(url):\n\n from urllib.parse import urlparse\n try:\n if urlparse(url).netloc.split('.')[-1] != 'org':\n raise TypeError(\"Nonvalid url: top level domain is not '.org': {}\".format(url))\n return [get_meta_data(url), None]\n except requests.exceptions.RequestException as e:\n # print('############Connection Error#########')\n return [None, url]\n except TypeError:\n filename = 'problematic_urls.txt'\n f = open(filename, 'a')\n f.write(url + '\\n')\n f.close()\n return [None, None]", "def dorkScanner():\n pysearch.PySearch()\n openfile = open(\"sites.txt\", 'r')\n urls = openfile.read()\n openfile.close()\n return urls", "def main(file_path, urls):\n # format urls input\n with open(urls, 'r') as file:\n urls = file.read().replace('\\n', '')\n\n urls = urls.strip('[]')\n urls = re.findall(r'\\([^\\)\\(]*\\)', urls)\n\n for file in urls:\n\n file_name, url = tuple(file.strip('()').split(', '))\n\n # check if file is already downloaded\n if os.path.exists(os.path.join(file_path, file_name)):\n print(\"%s already exists.\\n\" % file_name)\n continue\n else:\n print(\"Starting download for %s...\\n\" % file_name)\n\n # Create the data subdirectory if it doesn't exist\n os.makedirs(file_path, exist_ok=True)\n\n # create response object\n r = requests.get(url, stream=True)\n widgets = [\"Progress: \",\n progressbar.DataSize(), \"| \",\n progressbar.Timer()]\n bar = progressbar.ProgressBar(widgets=widgets,\n max_value=progressbar.UnknownLength)\n value = 0\n # download started\n with open(os.path.join(file_path, file_name), 'wb') as f:\n for chunk in r.iter_content(chunk_size=64*1024):\n if chunk:\n f.write(chunk)\n value += len(chunk)\n bar.update(value)\n\n print(\"\\n%s downloaded!\\n\" % file_name)\n\n print(\"All files downloaded!\")", "def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = 
f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)", "def fetch_web_cont(self):\n with open(self.input_file) as input_file:\n data = yaml.load(input_file, yaml.FullLoader)\n url_list = data.get(self.url_access)\n regex_list = data.get(self.regex_access)\n\n print('Fetching data:')\n\n for url in url_list:\n # This restores the same behavior as before.\n # Enabling certificate verification by default for stdlib http clients\n context = ssl._create_unverified_context()\n run_time = datetime.now().strftime(\"Date: %d-%m-%Y Time: %I:%M:%S:%f_%p\")\n start = time.perf_counter()\n web_resp = request.urlopen(url, context=context)\n respData = web_resp.read()\n resp_time = '%0.2f s' % (time.perf_counter() - start)\n\n for regex in regex_list:\n contents = re.findall(regex, str(respData))\n with open(self.output_file, 'a') as file:\n if not contents:\n print(run_time, ' | URL: ', url, '| content not found with this regex: ', regex,\n file=file)\n\n else:\n for content in contents:\n print(run_time, ' | URL: ', url, ' | Response Time: ', resp_time,\n url, ' | Contents: ', content, file=file)\n \n with open(self.output_file, 'a') as file:\n \n print('\\n#################################\\n', file=file)", "def get_names_url(i):\n urls = list()\n with open('./urls/fall11_urls_train_'+str(i)+'.txt','r',encoding=\"Latin-1\") as f:\n for line in f:\n urls.append(line)\n urls = [url.strip('\\n') for url in urls]\n urls1 = [url.split('\\t')[1] for url in urls]\n names = [url.split('\\t')[0] for url in urls]\n return urls1,names" ]
[ "0.7018254", "0.66988343", "0.66491723", "0.6455066", "0.6397311", "0.6393156", "0.6373262", "0.6252797", "0.6109572", "0.6093909", "0.6017847", "0.5956489", "0.59421873", "0.5919607", "0.5848102", "0.5759251", "0.5747901", "0.57373804", "0.5727888", "0.56903315", "0.568525", "0.56816685", "0.56712776", "0.5643564", "0.56170195", "0.55531687", "0.5550961", "0.5544043", "0.55220586", "0.5521412" ]
0.6895754
1
Checks the status of the url: what the status number is, and whether it is an Index Of page. If there is an error (the website doesn't load, for example), return that error
def checkStatus(url): def checkForIndexPage(r): """Checks whether it a given url is actually an Index Of page. Takes in a Request object""" soup = BeautifulSoup(r.text, 'lxml') head = soup.find('h1') if head != None and head.string != None and ("Index of " in head.string): return "Shows 'Index Of' page ✘" else: return "Displays properly ✓" returnString = "" try: r = requests.get(url) returnString += str(r.status_code) if r.status_code == 200: # if the page is accessible, then check whether it displays properly returnString += "\n\t" + checkForIndexPage(r) return returnString except Exception as e: return(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)", "def url_was_found(url=\"localhost:5000/health\"):\n res = requests.get(url).json()\n\n if res['status_code'] == 200:\n return True\n elif res['status_code'] == 404:\n return False\n else:\n raise UnexpectedResponseError(\"Expected 200 OK or 404, got {}.\\n\".format(res['status']), \"Full response : {}\".format(res))", "def _error(self, url, soup, status, site, log_url=False):\n unexpect = False\n if status == 0:\n print('Unable to connect to website: ' + url)\n elif status >= 400 or soup is None:\n print(str(status) + ' | Can\\'t open website: ' + url)\n else:\n if site < 0:\n print('Unexpected website: ' + url)\n unexpect = True\n else:\n return False # No error\n if log_url and not unexpect:\n self._re_urls.append(url)\n return True # error", "def check_site_availability(url):\n\n try:\n conn = urllib.request.urlopen(url)\n except urllib.error.HTTPError as e:\n # Return code error (e.g. 404, 501, ...)\n print('HTTPError: {}'.format(e.code))\n logging.info('HTTPError: {}'.format(e.code))\n return int(e.code)\n except urllib.error.URLError as e:\n # Not an HTTP-specific error (e.g. connection refused)\n print('URLError: {}'.format(e.reason))\n logging.info('URLError: {}'.format(e.reason))\n return -7\n except Exception as e:\n # other reasons such as \"your connection is not secure\"\n print(e)\n logging.info(e)\n return -8\n\n # check if redirected\n if conn.geturl() != url:\n print(\"Redirected to {}\".format(conn.geturl()))\n logging.info(\"Redirected to {}\".format(conn.geturl()))\n return 302\n\n # reaching this point means it received code 200\n print(\"Return code 200\")\n logging.info(\"Return code 200\")\n return 200", "def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:", "def check_url(url):\n # see also http://stackoverflow.com/questions/2924422\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def checkForIndexPage(r):\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"", "def check_url(url_link):\n res = requests.get(url_link, allow_redirects =True)\n if res.status_code == 200:\n print('valid URL \\n')\n return url_link\n else:\n print('Oupps there is something wrong with your URL. Run the program again!! 
')\n return res.status_code", "def check_status(site: str, url: str):\n try:\n response = requests.get(url)\n if response.status_code == 200:\n log_status(site, url, True)\n else:\n log_status(site, url, False)\n except ConnectionError:\n log_status(site, url, False)", "def check_url(url):\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def get_server_status_code(url):\n host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]\n print \"host : \", host, \" | path : \", path\n try:\n conn = httplib.HTTPConnection(host)\n conn.request('HEAD', path)\n print \" | status : \" , conn.getresponse().status\n return conn.getresponse().status\n except StandardError as e:\n print \"StandardError : \", e\n return None", "def check_URL(link, line):\n try:\n urllib2.urlopen(link)\n# g = urllib2.urlopen(ChUrl)\n# print(g.getcode())\n except urllib2.HTTPError as e:\n# print(e.code) # can add counter or line number variable here\n# print(e.reason)\n if e.code == 404 or 403:\n print(\"Status code {} returned, check your hyperlink: {} at line {}\".format(e.code, link, line))\n except urllib2.URLError as f:\n print(\"check your hyperlink: {} at line {}, {}\".format(link,line,f.args))", "def url_checker(url_str):\n file_msg = fd.Program_Msg(__file__)\n ## Checking input parameters\n if not (isinstance(url_str, str)):\n msg = '{0} `url_str` ({1}) is not a STRING!'.format(file_msg,\n type(url_str))\n raise LSSUtils_Error(msg)\n ##\n ## Checking Website\n request_url = requests.get(url_str)\n if (request_url.status_code != 200):\n msg = '{0} `url_str` ({1}) does not exist!'.format(file_msg, url_str)\n raise LSSUtils_Error(msg)", "def url_check_tester(client, url, status_code):\n response = client.get(url)\n assert response.status_code == status_code, \\\n f'Unexpected status code for {url}'\n assert response.data == b''", "def get_site_status(url):\n repCode=None\n\n if url is \"\":\n return(\"Not Valid\")\n\n try:\n t1 = datetime.now()\n response = get_response(url)\n t2 = datetime.now()\n\n elapsed = t2-t1\n\n retTime = elapsed.microseconds\n\n repCode = getattr(response, 'status')\n\n logResponse(url,repCode,\"\",str(retTime))\n\n if repCode == 200:\n return 'up'\n except Exception as e:\n logResponse(url,repCode,e)\n pass\n return 'down'", "def check_mitm_status_page(self, check_url):\n response = requests.get(check_url)\n if response.status_code == 200:\n return response\n else:\n sys.exit(2)", "def url_health():\n return \"OK\"", "def test_get_404(self):\n url = self.baseurl + \"/do-not-implement-this-page-it-is-not-found\"\n try:\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( False, \"Should have thrown an HTTP Error!\")\n except urllib2.HTTPError as e:\n self.assertTrue( e.getcode() == 404 , (\"404 Not FOUND! 
%d\" % e.getcode()))\n else:\n self.assertTrue( False, \"Another Error was thrown!\")", "def _check_idx(self, url):\n if not url.endswith('.idx'):\n url += '.idx'\n return requests.head(url).ok", "def is_ok(url: str) -> bool:\n try:\n resp = requests.get(url)\n except:\n return False\n return True if math.floor(resp.status_code / 100) == 2 else False", "def validate_url(url):\n response, content = get_response_from_file(url)\n\n if response == None and content == None:\n response, content = get_response_and_content(url)\n\n if response == None:\n return url, url, 0, \"\", \"N\", \"N\", \"N\", hit(\"No Response\"), \"false\"\n else:\n #print(url, get_visible_text(content))\n return evaluate_content_for_200s(response, url, content)", "def validate_url(url: str) -> None:\n if not is_valid_url(url):\n raise ValueError(f\"Validation Error. Provided url '{url}' is not valid.\")\n try:\n response = requests.get(url)\n except Exception as e:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")\n else:\n if response.status_code != status.HTTP_200_OK:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")", "def check_connection():\n status_code = urllib.request.urlopen(local_settings.DAFT_URL).getcode()\n\n if status_code == 200:\n on_or_404 = 'OK'\n else:\n on_or_404 = 'NOT OK'\n \n return on_or_404", "def check_url(url: str) -> bool:\n try:\n potential_error = driver.find_element_by_xpath(\"/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div\").text\n if '403' in potential_error:\n return True\n except:\n return False", "def get_server_status_code(url):\n # http://stackoverflow.com/questions/1140661\n host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]\n try:\n conn = httplib.HTTPConnection(host)\n conn.request('HEAD', path)\n return conn.getresponse().status\n except StandardError:\n return None", "def handleStatus_404(self):\n log.err('HTTP Error 404')", "def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)", "def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']", "def check_status():\n try:\n return HTTPClient().fetch(\"https://api.random.org/\").code == 200\n except Exception: # pylint: disable=broad-except\n return False", "def error_404(error):\n return 'Bummer, there is nothing at this URL.'" ]
[ "0.7355547", "0.7118734", "0.7070988", "0.70274246", "0.6892752", "0.6865774", "0.68003565", "0.67811525", "0.67793566", "0.6762191", "0.6718683", "0.6700998", "0.6630522", "0.6618586", "0.65520173", "0.65444887", "0.65439695", "0.64959097", "0.6469197", "0.6456228", "0.6449693", "0.6433169", "0.64305615", "0.641569", "0.64053583", "0.6398845", "0.63976073", "0.6397548", "0.6305024", "0.63010746" ]
0.8258082
0
Checks whether a given url is actually an Index Of page. Takes in a Request object
def checkForIndexPage(r): soup = BeautifulSoup(r.text, 'lxml') head = soup.find('h1') if head != None and head.string != None and ("Index of " in head.string): return "Shows 'Index Of' page ✘" else: return "Displays properly ✓"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_idx(self, url):\n if not url.endswith('.idx'):\n url += '.idx'\n return requests.head(url).ok", "def is_store_page(entry):\n pattern = re.compile(\"^/view\\d*/.*$\")\n return entry[\"method\"] == \"GET\" and pattern.match(entry[\"uri\"]) != None", "def checkStatus(url):\n def checkForIndexPage(r):\n \"\"\"Checks whether it a given url is actually an Index Of page. Takes in a Request object\"\"\"\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"\n\n returnString = \"\"\n try:\n r = requests.get(url)\n returnString += str(r.status_code) \n if r.status_code == 200: # if the page is accessible, then check whether it displays properly\n returnString += \"\\n\\t\" + checkForIndexPage(r)\n return returnString\n except Exception as e:\n return(e)", "def is_page(self, url):\n netloc = urlparse.urlparse(url).netloc.lower()\n return any(map(lambda domain: netloc.endswith(domain), self.allowed_domains))", "def is_indexed(self, url):\n query = self.con.execute(\"select rowid from urllist where url='%s'\" % url).fetchone()\n if query is not None:\n # Check if it actually has been crawled\n crawled = self.con.execute('select * from wordlocation where urlid=%d'\n % query[0]).fetchone()\n if crawled is not None:\n return True\n return False", "def isHomePage(self):\n home = self.getHome(self.url)\n if home == self.url:\n return True\n if home == self.url + '/':\n return True\n return False", "def _is_current_page(self, **kwargs):\n if kwargs:\n # do a lookup to get the object i\n object_id = self._get_object(**kwargs)[\"Id\"]\n pattern = r\"/lightning/r/{}/{}/view$\".format(self.object_name, object_id)\n else:\n # no kwargs means we should just verify we are on a detail\n # page without regard to which object\n pattern = r\"/lightning/r/{}/.*/view$\".format(self.object_name)\n\n location = self.selenium.get_location()\n if not re.search(pattern, location):\n raise Exception(\n \"Location '{}' didn't match pattern {}\".format(location, pattern)\n )", "def test_page(self, url):\n if self.get_url_components(url):\n components = self.get_url_components(url)\n else:\n return False\n for extension in self.file_extensions_only_first:\n self.check_in_front(components, extension)\n for extension in self.file_extensions_only_last:\n self.check_in_back(components, extension)\n for extension in self.file_extensions_all_possibilities:\n self.check_in_front(components, extension + '.')\n self.check_in_middle(components, '.' + extension)\n self.check_in_back(components, '.' 
+ extension)", "def is_embedded(request):\n hx_current_url = request.headers.get('HX-Current-URL', None)\n if not hx_current_url:\n return False\n return request.path != urlparse(hx_current_url).path", "def isTopHomePage(self):\n domain = self.getDomain()\n if self.url == \"http://\" + domain + \"/\":\n return True\n if self.url == \"http://www.\" + domain + \"/\":\n return True\n if self.url == \"http://\" + domain:\n return True\n if self.url == \"http://www.\" + domain:\n return True\n return False", "def index_exists(self, index):\n req = requests.head(\n urljoin(self.base_url, '{0}'.format(index)),\n verify=self.verify_certs)\n return req.status_code == 200", "def __inzone(self, url):\n # Returns 0 if the URL is in zone\n if self.scope == self.SCOPE_PAGE:\n if url == self.scopeURL:\n return 0\n else:\n return 1\n if url.startswith(self.scopeURL):\n return 0\n else:\n return 1", "def check_url(url):\n # see also http://stackoverflow.com/questions/2924422\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def is_link(self, url):\n return not self.is_page(url)", "def _is_request_to_token_url(self, request):\n if not self.token_url:\n return False\n\n if self.token_url == request.path:\n return True\n\n request.match(self.token_url)\n\n if request.matchdict:\n return True\n\n return False", "def exists(self, page: str) -> bool:\n\n if \"-1\" in requests.get(self.apiurl.format(page)).json()[\"query\"][\"pages\"]:\n return False\n return True", "def test_page_existence(self):\r\n for page in self.pages:\r\n page.visit()", "def index(request):\r\n badRequest(\"Url not found\")", "def _is_current_page(self):\n self.selenium.wait_until_location_contains(\"/list\",timeout=60, message=\"Records list view did not load in 1 min\")\n self.selenium.location_should_contain(\"General_Accounting_Unit__c\",message=\"Current page is not a DataImport List view\")", "def can_handle(self, url):\n return self.url_re.match(url)", "def test_view_url_exists_at_desired_location(self):\n response = self.client.get('')\n self.assertEqual(response.status_code, 200)", "def check_url(url):\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def exists(self, url):\n return (self.base_path / url).exists()", "def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()", "def valid_request(environ):\n if environ.get('REQUEST_METHOD') != 'GET':\n return False\n if environ.get('QUERY_STRING'):\n return False\n requested = environ.get('PATH_INFO', '/').lstrip('/')\n if not requested:\n return True\n if len(requested) > 40: # Larger than git SHA reference.\n return False\n if VALID_REQUEST.match(requested) is None:\n return False\n return True", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('slideshare.net')", "def is_htmx(request):\n return 'Hx-Request' in request.headers", "def _url_exists(self, url):\n return url_exists(url)", "def test_index_exists(self):\n name_exists = 'index' in self.views_module_listing\n is_callable = callable(self.views_module.index)\n \n self.assertTrue(name_exists, f\"{FAILURE_HEADER}index() view does not exist{FAILURE_FOOTER}\")\n self.assertTrue(is_callable, f\"{FAILURE_HEADER}index() function does not exist or will not execute{FAILURE_FOOTER}\")", "def 
check_if_not_visited(url):\n return (url not in VISITED_LINKS)" ]
[ "0.6797171", "0.6227941", "0.6205323", "0.6184429", "0.6150051", "0.57591945", "0.5743825", "0.57144886", "0.5621335", "0.5575922", "0.5543459", "0.5508243", "0.5484011", "0.5478684", "0.54784095", "0.54734105", "0.54732513", "0.5468332", "0.54342926", "0.5427616", "0.5397525", "0.5394938", "0.53784627", "0.5370917", "0.5348304", "0.5331041", "0.53069824", "0.52928776", "0.5288162", "0.52835274" ]
0.6903802
0
Test when issues do not have a parent issue
def test_get_updated_issues_without_parent(self): with open("issues_without_parent.json", "r") as issues_file: mock_response = issues_file.read() with requests_mock.Mocker() as m: m.register_uri('GET', '/rest/api/2/search', text=mock_response) issues = jiratimereport.get_updated_issues("https://jira_url", "user_name", "api_token", "MYB", "2020-01-10", "2020-01-20", "") issues_expected_result = [ Issue(10005, "MYB-5", "Summary of issue MYB-5", None, None, 3600, 900, None)] self.assertListEqual(issues_expected_result, issues, "Issues lists are unequal")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_lacking_parent(self):\n pass", "def test_parent_does_not_exist(self):\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')", "def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)", "def hasFailingAncestor(self):\n parent = self.parent\n if parent is None:\n return\n # TODO: Temporarily disabled.\n return\n return parent.hasFailed or parent.hasFailingAncestor()", "def test_no_parent():\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_test_no_parent_dag\", schedule_interval=None, start_date=start_date)\n op1 = DummyOperator(task_id=\"op1\", dag=dag)\n\n ti1 = TaskInstance(op1, start_date)\n\n with create_session() as session:\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(ti1, session, DepContext()))) == 0\n assert dep.is_met(ti1, session)\n assert ti1.state != State.SKIPPED", "def test_pod_invalid_parent(self):\n session = self.login_to_apic()\n parent = Node('1','101','Switch')\n self.assertRaises(TypeError, Pod.get, session, parent)", "def test_issue_search_issues(self):\n pass", "def orphaned(self):\n return (self.parent is None)", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def test_issue_list_issues(self):\n pass", "def test_node_bad_parent(self):\n pod_id = '1'\n self.assertRaises(TypeError, Node, '1', '2', 'Spine1', role='leaf', parent=pod_id)", "def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )", "def test_issue_get_issue(self):\n pass", "def test_parent_images_unresolved(tmpdir, docker_tasker):\n dfp = df_parser(str(tmpdir))\n dfp.content = \"FROM spam\"\n\n workflow = mock_workflow()\n workflow.builder.set_df_path(dfp.dockerfile_path)\n workflow.builder.base_image = ImageName.parse('eggs')\n # we want to fail because some img besides base was not resolved\n workflow.builder.parent_images = {\n ImageName.parse('spam'): ImageName.parse('eggs'),\n ImageName.parse('extra:image'): None\n }\n\n with pytest.raises(ParentImageUnresolved):\n ChangeFromPlugin(docker_tasker, workflow).run()", "def test_issue_create_issue(self):\n pass", "def test_validate_parent(self):\n with self.assertRaises(ValidationError):\n self.project.parent = self.project\n self.project.save()", "def test_parent_not_executed():\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_parent_not_executed_dag\", schedule_interval=None, start_date=start_date)\n op1 = BranchPythonOperator(task_id=\"op1\", python_callable=lambda: \"op3\", dag=dag)\n op2 = DummyOperator(task_id=\"op2\", dag=dag)\n op3 = DummyOperator(task_id=\"op3\", dag=dag)\n op1 >> [op2, op3]\n\n ti2 = TaskInstance(op2, start_date)\n\n with create_session() as session:\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0\n assert dep.is_met(ti2, session)\n assert ti2.state == State.NONE", "def test_single_issue():\n pass", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def test_remove_parent_marking_failure(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n incident = Incident(title=\"Test\")\n package.add_incident(incident)\n \n indicator = 
Indicator(title=\"Test\")\n incident.related_indicators.append(indicator)\n \n container.add_marking(incident, red_marking, descendants=True)\n self.assertTrue(container.is_marked(incident, red_marking))\n self.assertTrue(container.is_marked(indicator, red_marking))\n self.assertRaises(errors.MarkingRemovalError, container.remove_marking, indicator, red_marking, True)", "def test_get_parent_goals_for_goal(self):\n pass", "def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None", "def test_issue_edit_issue(self):\n pass", "def test_pod_valid_parent(self):\n session = self.login_to_apic()\n parent = PhysicalModel()\n pod = Pod.get(session, parent)\n children = parent.get_children()\n self.assertEqual(pod, children)", "def has_parent(self):\n return False", "def test_check_no_circular_references(self):\n workflow = self.get_workflow(\n \"\"\"file://A <- file://B\n\nfile://B <- file://C\n \"\"\")\n assert not workflow.circular_references()", "def checkReissues(self):\n return None", "def test_set_parent_when_not_provided():\n\n # GIVEN that no parent is provided\n parent = None\n\n # WHEN running \"set_parent_if_missing\"\n validated_parent: str = set_parent_if_missing(parent=parent)\n\n # THEN the returned string should have been set to RelationshipStatus.HAS_NO_PARENT\n assert validated_parent == RelationshipStatus.HAS_NO_PARENT", "def test_parent_images_missing(tmpdir, docker_tasker):\n dfp = df_parser(str(tmpdir))\n dfp.content = dedent(\"\"\"\\\n FROM first:parent AS builder1\n FROM second:parent AS builder2\n FROM monty\n \"\"\")\n\n workflow = mock_workflow()\n workflow.builder.set_df_path(dfp.dockerfile_path)\n workflow.builder.parent_images = {ImageName.parse(\"monty\"): ImageName.parse(\"build-name:3\")}\n workflow.builder.base_image = ImageName.parse(\"build-name:3\")\n\n with pytest.raises(ParentImageMissing):\n ChangeFromPlugin(docker_tasker, workflow).run()" ]
[ "0.720484", "0.6333494", "0.61962134", "0.6195259", "0.6146895", "0.60996616", "0.60707366", "0.6057688", "0.6027769", "0.59993476", "0.5989757", "0.59845847", "0.5968011", "0.5962105", "0.59547395", "0.5866312", "0.58635676", "0.57914597", "0.574251", "0.574251", "0.57309926", "0.57089126", "0.56946504", "0.5691223", "0.56900704", "0.56847215", "0.567638", "0.5673706", "0.56628364", "0.564935" ]
0.6518267
1
Test the conversion of json issues to object issues
def test_convert_json_to_issues(self): with open("convert_json_to_issues.json", "r") as issues_file: response_json = json.loads(issues_file.read()) issues = jiratimereport.convert_json_to_issues(response_json) issues_expected_result = [ Issue(10005, "MYB-5", "Summary of issue MYB-5", "MYB-3", "Summary of the parent issue of MYB-5", 3600, 900, datetime(2020, 1, 20)), Issue(10004, "MYB-4", "Summary of issue MYB-4", "MYB-3", "Summary of the parent issue of MYB-4", None, None, None)] self.assertListEqual(issues_expected_result, issues, "Issues lists are unequal")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty, dicty],\n Base.from_json_string(json.dumps([dicty, dicty])))", "def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")", "def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")", "def test_simplef(self):\n samp1 = JObject(keys = ['status', 'result'])\n j = json.loads('{\"status\": \"success\", \"resultd\": \"yes\"}')\n self.assertFalse(check_json_object(j, samp1))", "def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty], Base.from_json_string(json.dumps([dicty])))", "def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")", "def test_to_plain_python_obj_error():\n\n class FailingObject:\n pass\n\n output = r.to_plain_python_obj(FailingObject())\n with pytest.raises(TypeError):\n json.dumps(output)", "def test_simple(self):\n samp1 = JObject(keys = ['status', 'result'])\n j = json.loads('{\"status\": \"success\", \"result\": \"yes\"}')\n self.assertTrue(check_json_object(j, samp1))", "def test_json_string(self):\n Base._Base__nb_objects = 0\n d1 = {\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8}\n d2 = {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0}\n json_string = Base.to_json_string([d1, d2])\n self.assertTrue(type(json_string) is str)\n d = json.loads(json_string)\n self.assertEqual(d, [d1, d2])", "def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def test_from_json_string(self):\n json_str = '[{\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8}, \\\n {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0}]'\n jason_list = Base.from_json_string(json_str)\n self.assertTrue(type(jason_list) is list)\n self.assertEqual(len(jason_list), 2)\n self.assertTrue(type(jason_list[0]) is dict)\n self.assertTrue(type(jason_list[1]) is dict)\n self.assertEqual(jason_list[0],\n {\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8})\n self.assertEqual(jason_list[1],\n {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0})", "def test_invalid_data_types(self):\n 
response=self.check_invalid_data_type()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def test_json_to_python(self):\n\n # There seems to be a problem with Flask-Login setting the current_user proxy\n # in api/models.py, which we need t run this test.\n if False:\n self.login_test_user()\n\n location = {\n 'address' : '123 Main St.',\n 'lat' : '127.0', # forgive numbers coming as strings\n 'lng' : -42,\n 'name' : 'nowhere',\n 'id' : str(ObjectId())\n }\n\n expanded = Location.from_json(location)\n\n # these should all be the same\n self.assertEqual(expanded['address'], location['address'])\n self.assertEqual(expanded['lat'], location['lat'])\n self.assertEqual(expanded['lng'], location['lng'])\n self.assertEqual(expanded['name'], location['name'])\n\n # owner should be set by the currently logged in location\n self.assertEqual(expanded['owner'], self.test_location.id)\n\n # id should be renamed from id to _id, and expanded\n self.assertTrue(expanded.has_key('_id'))\n self.assertFalse(expanded.has_key('id'))\n self.assertEqual(str(expanded['_id']), location['id'])", "def test_encode(self):\n result = json.loads(self.cls.objects.to_json())\n for index, item in enumerate(result):\n self.assertNotIn(\n \"to_json_exclude\", item,\n (\"to_json_exclude found at index {}\").format(index)\n )\n self.assertNotIn(\n \"json_exclude\", item,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIn(\n \"from_json_exclude\", item,\n (\"from_json_exclude not found at index {}\").format(index)\n )\n self.assertIn(\n \"required\", item,\n (\"required not found at index {}\").format(index)\n )", "def test_data_to_json(self):\n\n from rubber.instanceutils import data_to_json\n\n # with a dict\n data = {'foo':'bar'}\n json_data = json.dumps(data)\n self.assertEquals(json_data, data_to_json(data))\n\n # with a string\n json_data = json.dumps(data)\n self.assertEquals(json_data, data_to_json(json_data))\n\n # try a class that implements to_indexed_json\n class Foo(object):\n def to_indexed_json(self):\n return json_data\n self.assertEquals(json_data, data_to_json(Foo()))\n\n # try a django model\n try:\n from django.db import models\n class TestModel(models.Model):\n foo = models.CharField(max_length=3)\n bar = TestModel(foo='bar')\n self.assertEquals(json_data, data_to_json(bar))\n except ImportError:\n pass", "def test_encode(self):\n data = [json.loads(item.to_json()) for item in self.docs]\n self.assertEqual(\n json.loads(self.doc_cls.objects.to_json()), data\n )", "def test_decode(self):\n models = self.cls.objects.from_json(json.dumps(self.test_data))\n for index, model in enumerate(models):\n self.assertIsNone(\n model.json_exclude,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIsNone(\n model.from_json_exclude,\n (\"from_json_exclude found at index {}\").format(index)\n )\n self.assertIsNotNone(\n model.to_json_exclude,\n (\"to_json_exclude not found at index {}\").format(index)\n )\n self.assertIsNotNone(\n model.required,\n (\"required not found at index {}\").format(index)\n )", "def test_encode(self):\n data = [item.to_dict() for item in self.docs[0:2]]\n actual = self.doc_cls.objects[0:2]\n self.assertEqual(\n json.loads(actual.to_json()), data\n )", "def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", 
\"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))", "def test_panda_to_json_from_person_json(self):\n json_person = self.test_person.to_json()\n with self.assertRaises(ValueError):\n self.test_panda.from_json(json_person)", "def test_json_decode_error(self):\n with self.assertRaises(json.JSONDecodeError):\n int_t.Ind.from_json('')\n with self.assertRaises(ValueError):\n int_t.Ind.from_json('{}')\n with self.assertRaises(ValueError):\n int_t.Ind.from_json('[{}]')", "def testtojson2(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual(json.dumps([dicty, dicty]),\n Base.to_json_string([dicty, dicty]))", "def test_to_plain_python_obj_mixed(test_input):\n # It's enough that we don't get an exception here\n output = r.to_plain_python_obj(test_input)\n # We should not get a json conversion error\n json.dumps(output)", "def test_circular_dict(self):\n obj = {}\n obj[\"obj\"] = obj\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)", "def test_lti20_good_json(self):\r\n for json_str, expected_comment in self.GOOD_JSON_INPUTS:\r\n score, comment = self.xmodule.parse_lti_2_0_result_json(json_str)\r\n self.assertEqual(score, 0.1)\r\n self.assertEqual(comment, expected_comment)", "def test_json(self):\n for item in self.test_data:\n json_data = json.dumps(item['data'])\n ind = int_t.Ind.from_json(json_data)\n self.assertEqual(ind.to_json(), json_data)", "def test_validation_error_json():\n error = ValidationError(\n type=\"Syntax Error\",\n data={\"data\": [1, 2, 3]},\n )\n\n assert ValidationError(**json.loads(error.json())) == error", "def test_to_json_from_json(self):\n json_panda = self.test_panda.to_json()\n new_panda = self.test_panda.from_json(json_panda)\n self.assertEqual(self.test_panda, new_panda)" ]
[ "0.6965997", "0.6955709", "0.6955709", "0.6949413", "0.6914408", "0.6651135", "0.65893173", "0.6551188", "0.6508977", "0.6478302", "0.64725137", "0.64644873", "0.6453786", "0.6436468", "0.6400618", "0.6383816", "0.6380621", "0.6368812", "0.63525397", "0.6345754", "0.632622", "0.6307183", "0.63059413", "0.6304255", "0.6302798", "0.629576", "0.62888485", "0.6288373", "0.6287648", "0.6280702" ]
0.7233345
0
Test the single page response when retrieving Jira issues
def test_get_updated_issues_one_page(self): with open("issues_one_page.json", "r") as issues_file: mock_response = issues_file.read() with requests_mock.Mocker() as m: m.register_uri('GET', '/rest/api/2/search', text=mock_response) issues = jiratimereport.get_updated_issues("https://jira_url", "user_name", "api_token", "MYB", "2020-01-10", "2020-01-20", "") issues_expected_result = [ Issue(10005, "MYB-5", "Summary of issue MYB-5", "MYB-3", "Summary of the parent issue of MYB-5", 3600, 900, datetime(2020, 1, 20)), Issue(10004, "MYB-4", "Summary of issue MYB-4", "MYB-3", "Summary of the parent issue of MYB-4", 7200, 600, None)] self.assertListEqual(issues_expected_result, issues, "Issues lists are unequal")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_existing_issue_passes(self):\n response = self.client.get(self.url)\n response_json = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_json[\"name\"], TEST_ISSUE_NAME)", "def test_issues_list(self):\n response = self.client.get(url_for('issues.issuesresource'))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def test_issue_get_issue(self):\n pass", "def test_issue_detail(self):\n response = self.client.get(url_for(\n 'issues.issuedetailresource',\n issue_number=self.TARGET_ISSUE_NUMBER))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def test_api_can_get_filtered_issues_list(self):\n path = '/issues/?language=python&tech_stack=django&experience_needed=moderate'\n response = self.client.get(path, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertGreater(len(api_response_issues), len(json.loads(response.content)))", "def test_api_can_get_issues_list(self):\n response = self.client.get('/issues/', format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(api_response_issues), len(json.loads(response.content)))", "def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n self.assertEqual('JScript Vulnerabilities', issues[0].group)\n self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)", "def test_get_updated_issues_multiple_pages(self):\n with open(\"issues_multiple_first_page.json\", \"r\") as issues_first_file:\n mock_response_first_page = issues_first_file.read()\n\n with open(\"issues_multiple_second_page.json\", \"r\") as issues_second_file:\n mock_response_second_page = issues_second_file.read()\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/search', [{'text': mock_response_first_page},\n {'text': mock_response_second_page}])\n issues = jiratimereport.get_updated_issues(\"https://jira_url\", \"user_name\", \"api_token\", \"MYB\",\n \"2020-01-10\", \"2020-01-20\", \"\")\n\n issues_expected_result = [\n Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None),\n Issue(10006, \"MYB-6\", \"Summary of issue MYB-6\", \"MYB-3\", \"Summary of the parent issue of MYB-6\", 3600, 900, None)]\n\n self.assertListEqual(issues_expected_result, issues, \"Issues lists are unequal\")", "def test_issue_list_issues(self):\n pass", "def test_get_sprint_dates_success(self):\n team = \"swen90013-2020-sp\"\n response = self.client.get('/api/v1/jira/' + team + '/issues_per_sprint')\n #print(response.json()[\"data\"])\n self.assertEqual(response.json()[\"code\"], RespCode.success.value.key, \"response is not success\")", "def test_get_all_issues_passes(self):\n # Act: no issues\n response = 
self.client.get(self.url)\n response_json = response.get_json()\n # test\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_json[\"categories\"]), 0)\n self.assertEqual(response_json[\"categories\"], [])\n\n # Act: add 1 issue\n self.test_issue = create_canned_mapping_issue()\n response = self.client.get(self.url)\n response_json = response.get_json()\n # test\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_json[\"categories\"]), 1)\n self.assertEqual(response_json[\"categories\"][0][\"name\"], TEST_ISSUE_NAME)", "def test_get_work_logs_one_page(self):\n with open(\"work_logs_first_issue_one_page.json\", \"r\") as first_issue_file:\n mock_response_first_issue = first_issue_file.read()\n\n with open(\"work_logs_second_issue_one_page.json\", \"r\") as second_issue_file:\n mock_response_second_issue = second_issue_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', text=mock_response_first_issue)\n m.register_uri('GET', '/rest/api/2/issue/MYB-4/worklog/', text=mock_response_second_issue)\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\"),\n WorkLog(\"MYB-4\", datetime(2020, 1, 12), 3600, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 18)\n issue_myb_4 = Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\",\n 7200, 600, None)\n issue_myb_4.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5,\n issue_myb_4]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def getIssuesFromAPI():\n issues = []\n pageNumber = 1\n numberOfIssuesReturned = 1\n while numberOfIssuesReturned != 0:\n issuesResponse = requests.get(\n \"https://api.github.com/repos/greenelab/covid19-review/issues?state=all&per_page=50&page=\" +\n str(pageNumber), headers=headers)\n issues_page = json.loads(issuesResponse.text)\n issues = issues + issues_page\n numberOfIssuesReturned = len(issues_page)\n pageNumber += 1\n return issues", "def test_api_can_request_issues(self):\n payload = request_github_issues('razat249', 'github-view')\n self.assertEqual(payload['error'], False)\n self.assertLess(payload['status_code'], 400)", "def test_obtain_issues_no_query(self, mock_url_read):\n mock_url_read.side_effect = \\\n [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}', '<CxXMLResults />']\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def test_get_work_logs_multiple_pages(self):\n with open(\"work_logs_multiple_first_page.json\", \"r\") as issues_first_file:\n 
mock_response_first_page = issues_first_file.read()\n\n with open(\"work_logs_multiple_second_page.json\", \"r\") as issues_second_file:\n mock_response_second_page = issues_second_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20))]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', [{'text': mock_response_first_page},\n {'text': mock_response_second_page}])\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 12), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)", "def test_issue_search_issues(self):\n pass", "def test_obtain_issues_response_error(self, mock_error, mock_url_read):\n mock_url_read.return_value = 'non-json'\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)\n self.assertEqual(mock_error.call_args[0][0], \"Error loading json: %s.\")\n self.assertIsInstance(mock_error.call_args[0][1], ValueError)", "def test_obtain_issues_json_error(self, mock_error, mock_url_read):\n mock_url_read.return_value = '{}'\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)\n self.assertEqual(mock_error.call_args[0][0], \"Tag %s could not be found.\")\n self.assertIsInstance(mock_error.call_args[0][1], KeyError)", "def test_obtain_issues_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def test_api_can_sort_issues_correctly(self):\n issues_list = Issue.objects.values_list('experience_needed').order_by('experience_needed')\n response = self.client.get('/issues/?ordering=experience_needed', format=\"json\")\n response_content = json.loads(response.content)\n for i in xrange(len(issues_list)):\n self.assertEqual(issues_list[i][0], response_content[i]['experience_needed'])", "def get_issues(request, project):\n\n try:\n api_response = requests.get(constants.GITHUB_API_GET_ISSUES_URL.format(project_name=project))\n api_response_json = api_response.json()\n if api_response.status_code == 404:\n error_message = \"Repository does not exist\"\n return 
Response(error_message, status=status.HTTP_404_NOT_FOUND)\n if api_response.status_code == 401:\n raise Exception(\"Authentication fails. Invalid github access token.\")\n response = []\n for issue in api_response_json:\n labels_length = len(issue['labels'])\n tags = []\n # Making custom dictionary for tags\n for i in range(0, labels_length):\n # Searching inside \"labels\" key for tag_name\n for tag, tag_name in issue[\"labels\"][i].items():\n if tag in [\"name\"]:\n label = tag_name\n tags.append(label)\n result = IssueResponse(\n title=issue['title'],\n created_at=issue['created_at'],\n comments=issue['comments'],\n issue_number=issue['number'],\n repository_url=issue['repository_url'],\n labels=tags\n )\n result_as_json = result.to_json()\n response.append(result_as_json)\n\n except Exception:\n return DOWNSTREAM_ERROR_RESPONSE\n\n return Response(response)", "async def _get_pull_requests_for_issue(self, session: ClientSession, issue: dict, data: dict) -> dict:\n url = self._build_url('dev-status/1.0/issue/detail')\n async with session.get(url, params=data) as resp:\n response = await resp.json()\n response['detail'][0].update(issue)\n return response", "def get_issue(self, context):", "def getIssues(url):\n r = requests.get(url)\n if r.status_code != 200:\n raise \"There call to the GitHub issues API failed\"\n return r.json()", "def get_issues(self, start=None):\n\n page = 0 # current page\n last_page = None # last page\n url_next = self.__get_issues_url(start)\n\n logger.debug(\"Get GitHub issues from \" + url_next)\n r = self.__send_request(url_next, self.__get_payload(start),\n self.__get_headers())\n issues = r.text\n page += 1\n\n if 'last' in r.links:\n last_url = r.links['last']['url']\n last_page = last_url.split('&page=')[1].split('&')[0]\n last_page = int(last_page)\n logger.debug(\"Page: %i/%i\" % (page, last_page))\n\n while issues:\n yield issues\n\n issues = None\n\n if 'next' in r.links:\n url_next = r.links['next']['url'] # Loving requests :)\n r = self.__send_request(url_next, self.__get_payload(start), self.__get_headers())\n page += 1\n issues = r.text\n logger.debug(\"Page: %i/%i\" % (page, last_page))", "def test_issue(self):\n issue = Checkmarx.Issue('a_group', 'the_name', 'http://url', 3, 'New')\n\n self.assertEqual('a group', issue.group)\n self.assertEqual('the name', issue.title)\n self.assertEqual('http://url', issue.display_url)\n self.assertEqual(3, issue.count)\n self.assertEqual('New', issue.status)", "def allUnresolved(request, page=1):\n objects = im.Issue.objects.filter(resolved_state__isnull=True).reverse()\n \n \n args = utils.generatePageList(request, objects, page)\n args['issues'] = args['objects']\n \n args['no_results'] = args['page'].object_list.count() < 1\n\n return render_to_response(\"issue_list.html\", args,\n context_instance=RequestContext(request))", "def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 
'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues" ]
[ "0.74503416", "0.7094505", "0.7088346", "0.7062276", "0.6944259", "0.6938249", "0.6899283", "0.68890667", "0.6797187", "0.67301536", "0.67243993", "0.66074836", "0.6581828", "0.65035576", "0.65014946", "0.6477712", "0.64711165", "0.645216", "0.6413023", "0.64115626", "0.63370067", "0.63261306", "0.63054365", "0.62423563", "0.6186912", "0.6141193", "0.61143523", "0.6095149", "0.60849196", "0.6031175" ]
0.77025616
0
Test the multiple pages response when retrieving Jira issues (pagination)
def test_get_updated_issues_multiple_pages(self): with open("issues_multiple_first_page.json", "r") as issues_first_file: mock_response_first_page = issues_first_file.read() with open("issues_multiple_second_page.json", "r") as issues_second_file: mock_response_second_page = issues_second_file.read() with requests_mock.Mocker() as m: m.register_uri('GET', '/rest/api/2/search', [{'text': mock_response_first_page}, {'text': mock_response_second_page}]) issues = jiratimereport.get_updated_issues("https://jira_url", "user_name", "api_token", "MYB", "2020-01-10", "2020-01-20", "") issues_expected_result = [ Issue(10005, "MYB-5", "Summary of issue MYB-5", "MYB-3", "Summary of the parent issue of MYB-5", 3600, 900, datetime(2020, 1, 20)), Issue(10004, "MYB-4", "Summary of issue MYB-4", "MYB-3", "Summary of the parent issue of MYB-4", 7200, 600, None), Issue(10006, "MYB-6", "Summary of issue MYB-6", "MYB-3", "Summary of the parent issue of MYB-6", 3600, 900, None)] self.assertListEqual(issues_expected_result, issues, "Issues lists are unequal")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_updated_issues_one_page(self):\n with open(\"issues_one_page.json\", \"r\") as issues_file:\n mock_response = issues_file.read()\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/search', text=mock_response)\n issues = jiratimereport.get_updated_issues(\"https://jira_url\", \"user_name\", \"api_token\", \"MYB\",\n \"2020-01-10\", \"2020-01-20\", \"\")\n\n issues_expected_result = [\n Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n self.assertListEqual(issues_expected_result, issues, \"Issues lists are unequal\")", "def test_get_work_logs_multiple_pages(self):\n with open(\"work_logs_multiple_first_page.json\", \"r\") as issues_first_file:\n mock_response_first_page = issues_first_file.read()\n\n with open(\"work_logs_multiple_second_page.json\", \"r\") as issues_second_file:\n mock_response_second_page = issues_second_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20))]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', [{'text': mock_response_first_page},\n {'text': mock_response_second_page}])\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 12), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def getIssuesFromAPI():\n issues = []\n pageNumber = 1\n numberOfIssuesReturned = 1\n while numberOfIssuesReturned != 0:\n issuesResponse = requests.get(\n \"https://api.github.com/repos/greenelab/covid19-review/issues?state=all&per_page=50&page=\" +\n str(pageNumber), headers=headers)\n issues_page = json.loads(issuesResponse.text)\n issues = issues + issues_page\n numberOfIssuesReturned = len(issues_page)\n pageNumber += 1\n return issues", "def test_pagination(self):\n self.check_pagination()", "def test_get_work_logs_one_page(self):\n with open(\"work_logs_first_issue_one_page.json\", \"r\") as first_issue_file:\n mock_response_first_issue = first_issue_file.read()\n\n with open(\"work_logs_second_issue_one_page.json\", \"r\") as second_issue_file:\n mock_response_second_issue = second_issue_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', text=mock_response_first_issue)\n m.register_uri('GET', '/rest/api/2/issue/MYB-4/worklog/', 
text=mock_response_second_issue)\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\"),\n WorkLog(\"MYB-4\", datetime(2020, 1, 12), 3600, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 18)\n issue_myb_4 = Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\",\n 7200, 600, None)\n issue_myb_4.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5,\n issue_myb_4]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def test_pagination(self):\n self.register_get_user_response(self.user)\n self.register_get_thread_response(make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"thread_type\": \"discussion\",\n \"resp_total\": 10,\n }))\n response = self.client.get(\n self.url,\n {\"thread_id\": self.thread_id, \"page\": \"18\", \"page_size\": \"4\"}\n )\n self.assert_response_correct(\n response,\n 404,\n {\"developer_message\": \"Page not found (No results on this page).\"}\n )\n self.assert_query_params_equal(\n httpretty.httpretty.latest_requests[-2],\n {\n \"resp_skip\": [\"68\"],\n \"resp_limit\": [\"4\"],\n \"user_id\": [str(self.user.id)],\n \"mark_as_read\": [\"False\"],\n \"recursive\": [\"False\"],\n \"with_responses\": [\"True\"],\n }\n )", "def test_api_can_get_filtered_issues_list(self):\n path = '/issues/?language=python&tech_stack=django&experience_needed=moderate'\n response = self.client.get(path, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertGreater(len(api_response_issues), len(json.loads(response.content)))", "def test_get_pagination(mockclient_cl1):\n # There should be 600 statements in testset.\n r = mockclient_cl1.get(TEST_URL + \"?size=700\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 600\n\n # Get the first 500\n r = mockclient_cl1.get(TEST_URL + \"?size=500\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 500\n\n # Get the remaining 100\n r = mockclient_cl1.get(TEST_URL + \"?size=500&page=2\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 100", "def do_case(self, objects, page_num, num_pages, expected):\n request = RequestFactory().get(\"/test\")\n paginator = DiscussionAPIPagination(request, page_num, num_pages)\n actual = paginator.get_paginated_response(objects)\n assert actual.data == expected", "def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)", "def test_paging(mocker, testclient):\n # Patch to return list_alerts json data\n with open(\"./test_data/list_alerts_paged.json\") as list_alerts_paged:\n list_alerts_response = json.load(list_alerts_paged)\n with open(\"./test_data/list_alerts_empty.json\") as list_alerts_empty:\n mocker.patch.object(Client, \"_http_request\", side_effect=[\n list_alerts_response,\n json.load(list_alerts_empty),\n ])\n data = testclient.get_paged(40, url_suffix=\"/not_real\", method=\"GET\")\n\n assert len(data) 
== 29\n calls = [\n call(url_suffix=\"/not_real\", method=\"GET\"),\n call(full_url=list_alerts_response.get(\"paging\").get(\"next\"),\n url_suffix=\"/not_real\",\n method=\"GET\",\n )\n ]\n Client._http_request.assert_has_calls(calls)", "def test_api_can_get_issues_list(self):\n response = self.client.get('/issues/', format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(api_response_issues), len(json.loads(response.content)))", "def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)", "def test_issue_list_issues(self):\n pass", "def test_404_questions_pagination_beyond_existing_pages(self):\n res = self.client().get('/questions?page=100')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], \"Resource Not Found.\")", "def _inner_paginate(request, issues, template, extra_template_params):\n visible_issues = [i for i in issues if i.view_allowed]\n _optimize_draft_counts(visible_issues)\n _load_users_for_issues(visible_issues)\n params = {\n 'issues': visible_issues,\n 'limit': None,\n 'newest': None,\n 'prev': None,\n 'next': None,\n 'nexttext': '',\n 'first': '',\n 'last': '',\n }\n if extra_template_params:\n params.update(extra_template_params)\n return respond(request, template, params)", "def _inner_paginate(request, issues, template, extra_template_params):\n visible_issues = [i for i in issues if i.view_allowed]\n _optimize_draft_counts(visible_issues)\n _load_users_for_issues(visible_issues)\n params = {\n 'issues': visible_issues,\n 'limit': None,\n 'newest': None,\n 'prev': None,\n 'next': None,\n 'nexttext': '',\n 'first': '',\n 'last': '',\n }\n if extra_template_params:\n params.update(extra_template_params)\n return respond(request, template, params)", "def get_issues(self, start=None):\n\n page = 0 # current page\n last_page = None # last page\n url_next = self.__get_issues_url(start)\n\n logger.debug(\"Get GitHub issues from \" + url_next)\n r = self.__send_request(url_next, self.__get_payload(start),\n self.__get_headers())\n issues = r.text\n page += 1\n\n if 'last' in r.links:\n last_url = r.links['last']['url']\n last_page = last_url.split('&page=')[1].split('&')[0]\n last_page = int(last_page)\n logger.debug(\"Page: %i/%i\" % (page, last_page))\n\n while issues:\n yield issues\n\n issues = None\n\n if 'next' in r.links:\n url_next = r.links['next']['url'] # Loving requests :)\n r = self.__send_request(url_next, self.__get_payload(start), self.__get_headers())\n page += 1\n issues = r.text\n logger.debug(\"Page: %i/%i\" % (page, last_page))", "def test_issues_list(self):\n response = self.client.get(url_for('issues.issuesresource'))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def test_get_multiple_pages_lro(client):\n from azure.mgmt.core.polling.arm_polling import ARMPolling\n poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0, request_id=\"test\"))\n pager = poller.result()\n\n items = list(pager)\n\n assert len(items) == 10\n assert items[0][\"properties\"][\"id\"] == 1\n assert items[1][\"properties\"][\"id\"] == 2", "def 
test_pagination(self):\n self.register_get_user_response(self.user)\n cs_comment_child = self.make_comment_data(\"test_child_comment\", self.comment_id, children=[])\n cs_comment = self.make_comment_data(self.comment_id, None, [cs_comment_child])\n cs_thread = make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"children\": [cs_comment],\n })\n self.register_get_thread_response(cs_thread)\n self.register_get_comment_response(cs_comment)\n response = self.client.get(\n self.url,\n {\"comment_id\": self.comment_id, \"page\": \"18\", \"page_size\": \"4\"}\n )\n self.assert_response_correct(\n response,\n 404,\n {\"developer_message\": \"Page not found (No results on this page).\"}\n )", "def test_execute_get_success_with_multiple_pages():\n response_queue = queue.Queue()\n message = FakeMessage()\n message.raw_payload = json.dumps(TestData.JOB_TEMPLATE_PAYLOAD_ALL_PAGES)\n headers = {\"Content-Type\": \"application/json\"}\n\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE1_RESPONSE),\n headers=headers,\n )\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL_PAGE_2,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE2_RESPONSE),\n headers=headers,\n )\n worker.execute(message, TestData.RECEPTOR_CONFIG, response_queue)\n\n validate_get_response(\n response_queue.get(),\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_1, TestData.JOB_TEMPLATE_2],\n )\n validate_get_response(\n response_queue.get(),\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_3],\n )", "def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues", "def test_jsonHasPagination(self):\n\n elements = [\n 'TLE',\n 'CatalogEntry'\n ]\n\n for element in elements:\n # Dynamicly instanciate the view class\n request = self.factory.get('/api/v1/%s/?format=json' % element.lower())\n view_class = globals()['%sViewSet' % element]\n view = view_class.as_view({'get': 'list'})\n response = view(request).render()\n json_data = response.content.decode('utf8')\n\n self.assertIn('\"count\":', json_data)\n self.assertIn('\"next\":', json_data)\n self.assertIn('\"previous\":', json_data)\n self.assertIn('\"results\":', json_data)", "def test_get_paginated_questions(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(len(data['questions']))", "def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": 
{\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n self.assertEqual('JScript Vulnerabilities', issues[0].group)\n self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)", "def test_get_paginated_questions(self):\n\n # get response and load data\n response = self.client().get('/questions')\n data = json.loads(response.data)\n\n # check status code and message\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['success'], True)\n\n # check that total_questions and questions return data\n self.assertTrue(data['total_questions'])\n self.assertTrue(len(data['questions']))", "def test_api_can_sort_issues_correctly(self):\n issues_list = Issue.objects.values_list('experience_needed').order_by('experience_needed')\n response = self.client.get('/issues/?ordering=experience_needed', format=\"json\")\n response_content = json.loads(response.content)\n for i in xrange(len(issues_list)):\n self.assertEqual(issues_list[i][0], response_content[i]['experience_needed'])", "def allBroken(request, page=1):\n objects = im.Issue.objects.filter(resolved_state__isnull=True)\n args = utils.generatePageList(request, objects, page)\n args['issues'] = args['objects']\n issues_list = {'Issues on Unusable Machines':[]}\n for issue in args['issues']:\n iss_id = issue.item.item_id\n machine = mac.Item.objects.get(item_id=iss_id)\n\n if machine.unusable:\n issues_list['Issues on Unusable Machines'].append(issue)\n\n args['object_list'] = issues_list.items() \n args['no_results'] = args['page'].object_list.count() < 1\n return render_to_response(\"grouped_issue_list.html\", args,\n context_instance=RequestContext(request))", "def test_get_all_issues_passes(self):\n # Act: no issues\n response = self.client.get(self.url)\n response_json = response.get_json()\n # test\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_json[\"categories\"]), 0)\n self.assertEqual(response_json[\"categories\"], [])\n\n # Act: add 1 issue\n self.test_issue = create_canned_mapping_issue()\n response = self.client.get(self.url)\n response_json = response.get_json()\n # test\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_json[\"categories\"]), 1)\n self.assertEqual(response_json[\"categories\"][0][\"name\"], TEST_ISSUE_NAME)" ]
[ "0.7250544", "0.71837443", "0.6884279", "0.68545246", "0.68167686", "0.67703754", "0.6695603", "0.6637052", "0.6611962", "0.65908766", "0.6514894", "0.6496308", "0.64865696", "0.6438874", "0.63897264", "0.63649464", "0.63649464", "0.63618314", "0.6316557", "0.6290466", "0.6276343", "0.62473154", "0.6239023", "0.6231069", "0.6218834", "0.6190436", "0.61721057", "0.61550206", "0.6141915", "0.6137114" ]
0.75254667
0
Test the single page response when retrieving Jira work logs
def test_get_work_logs_one_page(self): with open("work_logs_first_issue_one_page.json", "r") as first_issue_file: mock_response_first_issue = first_issue_file.read() with open("work_logs_second_issue_one_page.json", "r") as second_issue_file: mock_response_second_issue = second_issue_file.read() issues = [Issue(10005, "MYB-5", "Summary of issue MYB-5", "MYB-3", "Summary of the parent issue of MYB-5", 3600, 900, datetime(2020, 1, 20)), Issue(10004, "MYB-4", "Summary of issue MYB-4", "MYB-3", "Summary of the parent issue of MYB-4", 7200, 600, None)] with requests_mock.Mocker() as m: m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', text=mock_response_first_issue) m.register_uri('GET', '/rest/api/2/issue/MYB-4/worklog/', text=mock_response_second_issue) work_logs, issues = jiratimereport.get_work_logs("https://jira_url", "user_name", "api_token", "2020-01-10", "2020-01-20", "", issues) work_logs_expected_result = [WorkLog("MYB-5", datetime(2020, 1, 18), 3600, "John Doe"), WorkLog("MYB-5", datetime(2020, 1, 18), 5400, "John Doe"), WorkLog("MYB-4", datetime(2020, 1, 12), 3600, "John Doe")] self.assertListEqual(work_logs_expected_result, work_logs, "Work Log lists are unequal") issue_myb_5 = Issue(10005, "MYB-5", "Summary of issue MYB-5", "MYB-3", "Summary of the parent issue of MYB-5", 3600, 900, datetime(2020, 1, 20)) issue_myb_5.issue_start_date = datetime(2020, 1, 18) issue_myb_4 = Issue(10004, "MYB-4", "Summary of issue MYB-4", "MYB-3", "Summary of the parent issue of MYB-4", 7200, 600, None) issue_myb_4.issue_start_date = datetime(2020, 1, 12) issues_expected_result = [issue_myb_5, issue_myb_4] self.assertListEqual(issues_expected_result, issues, "Issue lists are unequal")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_work_logs_multiple_pages(self):\n with open(\"work_logs_multiple_first_page.json\", \"r\") as issues_first_file:\n mock_response_first_page = issues_first_file.read()\n\n with open(\"work_logs_multiple_second_page.json\", \"r\") as issues_second_file:\n mock_response_second_page = issues_second_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20))]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', [{'text': mock_response_first_page},\n {'text': mock_response_second_page}])\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 12), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def test_log_with_results(self):\n log('hello')\n r = self.client.get('/log')\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.data, 'html.parser')\n self.assertIs(soup.find(id='no-results'), None)\n self.assertIsNot(soup.find(id='log'), None)", "def test_get_event_logs(event_log_api_setup):\n api_response = event_log_api_setup.get_event_logs(limit=100, offset=0)\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def test_log_no_results(self):\n r = self.client.get('/log')\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.data, 'html.parser')\n self.assertIsNot(soup.find(id='no-results'), None)\n self.assertIs(soup.find(id='log'), None)", "def test_get_updated_issues_one_page(self):\n with open(\"issues_one_page.json\", \"r\") as issues_file:\n mock_response = issues_file.read()\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/search', text=mock_response)\n issues = jiratimereport.get_updated_issues(\"https://jira_url\", \"user_name\", \"api_token\", \"MYB\",\n \"2020-01-10\", \"2020-01-20\", \"\")\n\n issues_expected_result = [\n Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n self.assertListEqual(issues_expected_result, issues, \"Issues lists are unequal\")", "def test_get_sprint_dates_success(self):\n team = \"swen90013-2020-sp\"\n response = self.client.get('/api/v1/jira/' + team + '/issues_per_sprint')\n #print(response.json()[\"data\"])\n self.assertEqual(response.json()[\"code\"], RespCode.success.value.key, \"response is not success\")", "def test_get_answers_to_log(self):\r\n requests = [\r\n {\"event\": \"my_event\", \"event_type\": \"my_event_type\", \"page\": \"my_page\"},\r\n {\"event\": \"{'json': 'object'}\", \"event_type\": unichr(512), \"page\": \"my_page\"}\r\n ]\r\n with 
mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_SQL_TRACKING_LOGS': True}):\r\n for request_params in requests:\r\n try: # because /event maps to two different views in lms and cms, we're only going to test lms here\r\n response = self.client.get(reverse(user_track), request_params)\r\n except NoReverseMatch:\r\n raise SkipTest()\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.content, 'success')\r\n tracking_logs = TrackingLog.objects.order_by('-dtcreated')\r\n log = tracking_logs[0]\r\n self.assertEqual(log.event, request_params[\"event\"])\r\n self.assertEqual(log.event_type, request_params[\"event_type\"])\r\n self.assertEqual(log.page, request_params[\"page\"])", "def test_get_answers_to_log(self):\r\n requests = [\r\n {\"event\": \"my_event\", \"event_type\": \"my_event_type\", \"page\": \"my_page\"},\r\n {\"event\": \"{'json': 'object'}\", \"event_type\": unichr(512), \"page\": \"my_page\"}\r\n ]\r\n with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_SQL_TRACKING_LOGS': True}):\r\n for request_params in requests:\r\n response = self.client.get(reverse(cms_user_track), request_params)\r\n self.assertEqual(response.status_code, 204)", "def worklog(accountable):\n worklog = accountable.issue_worklog()\n headers = ['author_name', 'comment', 'time_spent']\n if worklog:\n rows = [[v for k, v in sorted(w.items()) if k in headers]\n for w in worklog]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho(\n 'No worklogs found for {}'.format(accountable.issue_key),\n fg='red'\n )", "def status():\n _request('worklog/status/')", "def test_get_daily_change_log(self):\n msg = \"Response status is not 200\"\n response = self.api.get_daily_change_log(self.year, self.month, self.day)\n self.assertEqual(response.status_code, 200, msg)", "def test_get_event_log(event_log_api_setup):\n api_response = event_log_api_setup.get_event_log(\n event_log_id=1,\n )\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def test_work(self):\n res = self.client.get(\"/work\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Work\" in data", "def test_list(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.get(\n reverse('retreat:waitqueuenotification-list'),\n format='json',\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'count': 1,\n 'next': None,\n 'previous': None,\n 'results': [{\n 'created_at': response_data['results'][0]['created_at'],\n 'id': self.wait_queue_notif.id,\n 'retreat':\n 'http://testserver/retreat/retreats/' +\n str(self.retreat.id),\n 'url': 'http://testserver/retreat/'\n 'wait_queue_notifications/' +\n str(self.wait_queue_notif.id),\n 'user': 'http://testserver/users/' + str(self.user2.id)\n }]\n }\n\n self.assertEqual(response_data, content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)", "def test_response_in_logs(self):\n operator = SimpleHttpOperator(\n task_id='test_HTTP_op',\n method='GET',\n endpoint='/',\n http_conn_id='HTTP_EXAMPLE',\n log_response=True,\n )\n\n with 
patch.object(operator.log, 'info') as mock_info:\n operator.execute(None)\n mock_info.assert_called_with(AnyStringWith('Example Domain'))", "def get_log(request, **kwargs):\n\n #Creating the command for the logs \n try:\n\tprint(kwargs)\n\tprint(request.GET['project_id'])\n\toutputStr = sidecar.events.test_logs(project_id=request.GET['project_id'])\n\tlog_data = outputStr.log_data\n\toutputStr = \" <br>\".join(log_data.split(\"\\n\"))\n except Exception, e:\n outputStr = \"Updating the logs...\"\t\n #Making the output\n context = {\n \"page_title\": _(\"Test Details\"),\n \"test_lists\": 'report_list', #tests_list\n \"log_data\": outputStr\n }\n return render(request, 'rally_dashboard/events/test_logs.html', context)", "def test_successful_list(self):\n\n url = '/%s/job-types/%s/%s/revisions/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n result = result['results']\n self.assertEqual(len(result), 2)\n self.assertTrue(isinstance(result[0], dict), 'result must be a dictionary')\n self.assertEqual(result[0]['job_type']['name'], self.job_type.name)\n self.assertEqual(result[0]['revision_num'], 2)\n self.assertEqual(result[0]['docker_image'], 'fake')", "def getLog():\n with open(webapp.config['LOGFILE'], 'r') as logfile:\n output = logfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def print_response(response):\n for report in response.get('reports', []):\n rows = report.get('data', {}).get('rows', [])\n for row in rows:\n print(row)", "def test_retrieval(self):\n response = {'activities-steps': [1, 2, 3]}\n steps = self._mock_time_series(response=response)\n self.assertEqual(steps, response['activities-steps'])", "def test_recent_querys(self):\n CreateMatch()\n\n response = self.app.get(\"/\", follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n self.assertIn(\"TESTURL1\", res_txt)\n self.assertIn(\"TESTURL2\", res_txt)", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def test_get_single_report(self): \n from rest_framework.test import APIClient\n client = APIClient()\n \n response = self.client.get('/api/reports/epic/',\n HTTP_AUTHORIZATION='Token ' + self.token_admin,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"message\"], \"You have no permissions\")\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_existing_issue_passes(self):\n response = self.client.get(self.url)\n response_json = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_json[\"name\"], TEST_ISSUE_NAME)", "def test_get(self):\n with self.login(self.user):\n response = self.client.get(\n reverse(\n 'timeline:ajax_detail_site',\n kwargs={'projectevent': self.event.sodar_uuid},\n ),\n )\n self.assertEqual(response.status_code, 200)\n expected = {\n 'app': self.event.app,\n 'name': self.event.event_name,\n 'user': self.user.username,\n 'timestamp': self._format_ts(self.event.get_timestamp()),\n 'status': [\n {\n 'type': 'OK',\n 'class': get_status_style(self.event_status_ok),\n 'description': DEFAULT_MESSAGES['OK'],\n 'timestamp': 
self._format_ts(\n self.event_status_ok.timestamp\n ),\n },\n {\n 'type': 'INIT',\n 'class': get_status_style(self.event_status_init),\n 'description': DEFAULT_MESSAGES['INIT'],\n 'timestamp': self._format_ts(\n self.event_status_init.timestamp\n ),\n },\n ],\n }\n self.assertEqual(response.data, expected)", "def test_get_journal_entries(self):\n url = reverse('journal')\n data = {\n 'game': {\n 'id': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'coverId': self.game.cover_id,\n 'backdropId': self.game.backdrop_id\n },\n 'date': '2019-06-28',\n 'review': 'cool game',\n 'spoilers': False,\n 'liked': True,\n 'rating': 5,\n 'entry_type': 'Finished',\n 'platform': 'PC',\n }\n expected = [{\n 'year': 2019,\n 'months': [\n {\n 'month': 6,\n 'entries': [\n {\n 'id': 1,\n 'game': {\n 'igdb': 1074,\n 'name': 'Super Mario 64',\n 'slug': 'super-mario-64',\n 'cover_id': 'iwe8jlk21lmf',\n 'backdrop_id': 'i43a2ksd901R43'\n },\n 'date': '2019-06-28',\n 'review': 'cool game',\n 'spoilers': False,\n 'liked': True,\n 'rating': '5.0',\n 'user': 1,\n 'entry_type': 'Finished',\n 'platform': 'PC',\n }\n ]\n }\n ]\n }]\n response = self.client.post(url, data, format='json')\n response = self.client.get(url, {'username': 'testing'}, format='json')\n\n self.assertEqual(response.data, expected)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_gitlogs(self):\r\n\r\n self._setstaff_login()\r\n self._mkdir(getattr(settings, 'GIT_REPO_DIR'))\r\n\r\n self._add_edx4edx()\r\n response = self.client.get(reverse('gitlogs'))\r\n\r\n # Check that our earlier import has a log with a link to details\r\n self.assertIn('/gitlogs/MITx/edx4edx/edx4edx', response.content)\r\n\r\n response = self.client.get(\r\n reverse('gitlogs_detail', kwargs={\r\n 'course_id': 'MITx/edx4edx/edx4edx'}))\r\n\r\n self.assertIn('======&gt; IMPORTING course',\r\n response.content)\r\n\r\n self._rm_edx4edx()", "def test_pagination(self):\n self.register_get_user_response(self.user)\n self.register_get_thread_response(make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"thread_type\": \"discussion\",\n \"resp_total\": 10,\n }))\n response = self.client.get(\n self.url,\n {\"thread_id\": self.thread_id, \"page\": \"18\", \"page_size\": \"4\"}\n )\n self.assert_response_correct(\n response,\n 404,\n {\"developer_message\": \"Page not found (No results on this page).\"}\n )\n self.assert_query_params_equal(\n httpretty.httpretty.latest_requests[-2],\n {\n \"resp_skip\": [\"68\"],\n \"resp_limit\": [\"4\"],\n \"user_id\": [str(self.user.id)],\n \"mark_as_read\": [\"False\"],\n \"recursive\": [\"False\"],\n \"with_responses\": [\"True\"],\n }\n )", "def test_journal_route_has_200_response(self):\n self.client.login(username='mike', password='password')\n response = self.client.get(reverse_lazy('journal'))\n self.assertEqual(response.status_code, 200)" ]
[ "0.6750942", "0.6672471", "0.6070962", "0.6000604", "0.59791046", "0.59626675", "0.596023", "0.593353", "0.5840506", "0.58361435", "0.5819851", "0.58129126", "0.5790163", "0.5786302", "0.576526", "0.5739747", "0.5691096", "0.5661728", "0.5644197", "0.5636788", "0.5630645", "0.5626524", "0.56250983", "0.5621824", "0.5620559", "0.55947524", "0.55942273", "0.5571625", "0.5549815", "0.554686" ]
0.6994508
0
Test the multiple pages response when retrieving Jira work logs (pagination)
def test_get_work_logs_multiple_pages(self): with open("work_logs_multiple_first_page.json", "r") as issues_first_file: mock_response_first_page = issues_first_file.read() with open("work_logs_multiple_second_page.json", "r") as issues_second_file: mock_response_second_page = issues_second_file.read() issues = [Issue(10005, "MYB-5", "Summary of issue MYB-5", "MYB-3", "Summary of the parent issue of MYB-5", 3600, 900, datetime(2020, 1, 20))] with requests_mock.Mocker() as m: m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', [{'text': mock_response_first_page}, {'text': mock_response_second_page}]) work_logs, issues = jiratimereport.get_work_logs("https://jira_url", "user_name", "api_token", "2020-01-10", "2020-01-20", "", issues) work_logs_expected_result = [WorkLog("MYB-5", datetime(2020, 1, 12), 3600, "John Doe"), WorkLog("MYB-5", datetime(2020, 1, 18), 3600, "John Doe"), WorkLog("MYB-5", datetime(2020, 1, 18), 5400, "John Doe")] self.assertListEqual(work_logs_expected_result, work_logs, "Work Log lists are unequal") issue_myb_5 = Issue(10005, "MYB-5", "Summary of issue MYB-5", "MYB-3", "Summary of the parent issue of MYB-5", 3600, 900, datetime(2020, 1, 20)) issue_myb_5.issue_start_date = datetime(2020, 1, 12) issues_expected_result = [issue_myb_5] self.assertListEqual(issues_expected_result, issues, "Issue lists are unequal")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_work_logs_one_page(self):\n with open(\"work_logs_first_issue_one_page.json\", \"r\") as first_issue_file:\n mock_response_first_issue = first_issue_file.read()\n\n with open(\"work_logs_second_issue_one_page.json\", \"r\") as second_issue_file:\n mock_response_second_issue = second_issue_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', text=mock_response_first_issue)\n m.register_uri('GET', '/rest/api/2/issue/MYB-4/worklog/', text=mock_response_second_issue)\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\"),\n WorkLog(\"MYB-4\", datetime(2020, 1, 12), 3600, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 18)\n issue_myb_4 = Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\",\n 7200, 600, None)\n issue_myb_4.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5,\n issue_myb_4]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)", "def test_pagination(self):\n self.register_get_user_response(self.user)\n self.register_get_thread_response(make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"thread_type\": \"discussion\",\n \"resp_total\": 10,\n }))\n response = self.client.get(\n self.url,\n {\"thread_id\": self.thread_id, \"page\": \"18\", \"page_size\": \"4\"}\n )\n self.assert_response_correct(\n response,\n 404,\n {\"developer_message\": \"Page not found (No results on this page).\"}\n )\n self.assert_query_params_equal(\n httpretty.httpretty.latest_requests[-2],\n {\n \"resp_skip\": [\"68\"],\n \"resp_limit\": [\"4\"],\n \"user_id\": [str(self.user.id)],\n \"mark_as_read\": [\"False\"],\n \"recursive\": [\"False\"],\n \"with_responses\": [\"True\"],\n }\n )", "def test_pagination(self):\n self.check_pagination()", "def test_get_pagination(mockclient_cl1):\n # There should be 600 statements in testset.\n r = mockclient_cl1.get(TEST_URL + \"?size=700\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 600\n\n # Get the first 500\n r = mockclient_cl1.get(TEST_URL + \"?size=500\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 500\n\n # Get the remaining 100\n r = mockclient_cl1.get(TEST_URL + \"?size=500&page=2\")\n assert 
r.status_code == 200\n assert len(r.json[\"statements\"]) == 100", "def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)", "def test_get_updated_issues_multiple_pages(self):\n with open(\"issues_multiple_first_page.json\", \"r\") as issues_first_file:\n mock_response_first_page = issues_first_file.read()\n\n with open(\"issues_multiple_second_page.json\", \"r\") as issues_second_file:\n mock_response_second_page = issues_second_file.read()\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/search', [{'text': mock_response_first_page},\n {'text': mock_response_second_page}])\n issues = jiratimereport.get_updated_issues(\"https://jira_url\", \"user_name\", \"api_token\", \"MYB\",\n \"2020-01-10\", \"2020-01-20\", \"\")\n\n issues_expected_result = [\n Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None),\n Issue(10006, \"MYB-6\", \"Summary of issue MYB-6\", \"MYB-3\", \"Summary of the parent issue of MYB-6\", 3600, 900, None)]\n\n self.assertListEqual(issues_expected_result, issues, \"Issues lists are unequal\")", "def test_paging(mocker, testclient):\n # Patch to return list_alerts json data\n with open(\"./test_data/list_alerts_paged.json\") as list_alerts_paged:\n list_alerts_response = json.load(list_alerts_paged)\n with open(\"./test_data/list_alerts_empty.json\") as list_alerts_empty:\n mocker.patch.object(Client, \"_http_request\", side_effect=[\n list_alerts_response,\n json.load(list_alerts_empty),\n ])\n data = testclient.get_paged(40, url_suffix=\"/not_real\", method=\"GET\")\n\n assert len(data) == 29\n calls = [\n call(url_suffix=\"/not_real\", method=\"GET\"),\n call(full_url=list_alerts_response.get(\"paging\").get(\"next\"),\n url_suffix=\"/not_real\",\n method=\"GET\",\n )\n ]\n Client._http_request.assert_has_calls(calls)", "def fetch_pages(query_val, page_num):\n \n for page_id in range(1 + page_num + 1):\n try:\n output = fetch_data(query_val, page_id)\n for j in output:\n print(str(j))\n \n except Exception as e:\n print(e)", "def iterResponsePages(service, payload, verbose, slow_down):\n token = 0\n next_page = True\n data = {'reports': []}\n\n\n while next_page:\n if verbose:\n print(f'Fetching rows starting at position: {token}')\n if slow_down > 0:\n time.sleep(slow_down)\n \n data_tmp = service.reports().batchGet(body=payload).execute()\n token = data_tmp.get('reports')[0].get('nextPageToken')\n\n if token != None:\n payload.get('reportRequests')[0].update({'pageToken': token})\n else:\n next_page = False\n payload.get('reportRequests')[0].update({'pageToken': '0'})\n\n for report in data_tmp.get('reports'):\n data.get('reports').append(report)\n\n return data", "def test_pagination(self):\n self.register_get_user_response(self.user)\n cs_comment_child = self.make_comment_data(\"test_child_comment\", self.comment_id, children=[])\n cs_comment = self.make_comment_data(self.comment_id, None, [cs_comment_child])\n cs_thread = make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"children\": [cs_comment],\n })\n self.register_get_thread_response(cs_thread)\n self.register_get_comment_response(cs_comment)\n response = self.client.get(\n self.url,\n {\"comment_id\": self.comment_id, \"page\": \"18\", \"page_size\": \"4\"}\n )\n self.assert_response_correct(\n 
response,\n 404,\n {\"developer_message\": \"Page not found (No results on this page).\"}\n )", "def test_get_multiple_pages_lro(client):\n from azure.mgmt.core.polling.arm_polling import ARMPolling\n poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0, request_id=\"test\"))\n pager = poller.result()\n\n items = list(pager)\n\n assert len(items) == 10\n assert items[0][\"properties\"][\"id\"] == 1\n assert items[1][\"properties\"][\"id\"] == 2", "def test_get_sms_messages_paginated(self):\n pass", "def test_pagination_epic(self):\n\n # Mary creates a private library and\n # 1. Gives it a name.\n # 2. Gives it a description.\n\n # Create stub data for:\n # 1. the user, named Mary\n # 2. a library, prefilled with name, description, and bibcodes\n user_mary = UserShop()\n stub_bibcodes = {\n '2010MNRAS': {},\n '2012MNRAS': {},\n '2012MNRAS': {},\n '2014MNRAS': {},\n }\n solr_docs_page_1 = [{'bibcode': '2010MNRAS'}, {'bibcode': '2011MNRAS'}]\n solr_docs_page_2 = [{'bibcode': '2012MNRAS'}, {'bibcode': '2014MNRAS'}]\n\n docs_page_1 = ['2010MNRAS', '2011MNRAS']\n docs_page_2 = ['2012MNRAS', '2014MNRAS']\n\n stub_library = LibraryShop(want_bibcode=True, bibcode=stub_bibcodes)\n\n # Make the library by using the /library POST end point\n url = url_for('userview')\n response = self.client.post(\n url,\n data=stub_library.user_view_post_data_json,\n headers=user_mary.headers\n )\n self.assertStatus(response, 200)\n\n # Library ID is returned from this POST request\n library_id = response.json['id']\n\n # Now we check that we can retrieve the first 20 paginated documents\n # First set up the parameters for pagination\n params = {\n 'start': 0,\n 'rows': 2,\n }\n # Then send the GET request\n url = url_for('libraryview', library=library_id)\n with MockSolrBigqueryService(solr_docs=solr_docs_page_1) as BQ, \\\n MockEndPoint([user_mary]) as EP:\n response = self.client.get(\n url,\n headers=user_mary.headers,\n query_string=params\n )\n self.assertStatus(response, 200)\n self.assertEqual(docs_page_1, response.json['documents'])\n\n # Then ask for the second page\n params = {\n 'start': 2,\n 'rows': 2\n }\n url = url_for('libraryview', library=library_id)\n with MockSolrBigqueryService(solr_docs=solr_docs_page_2) as BQ, \\\n MockEndPoint([user_mary]) as EP:\n response = self.client.get(\n url,\n headers=user_mary.headers,\n query_string=params\n )\n self.assertStatus(response, 200)\n self.assertEqual(docs_page_2, response.json['documents'])", "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data", "def test_jsonHasPagination(self):\n\n elements = [\n 'TLE',\n 'CatalogEntry'\n ]\n\n for element in elements:\n # Dynamicly instanciate the view class\n request = self.factory.get('/api/v1/%s/?format=json' % element.lower())\n view_class = globals()['%sViewSet' % element]\n view = view_class.as_view({'get': 'list'})\n response = view(request).render()\n json_data = response.content.decode('utf8')\n\n self.assertIn('\"count\":', json_data)\n self.assertIn('\"next\":', json_data)\n self.assertIn('\"previous\":', json_data)\n self.assertIn('\"results\":', json_data)", "async def test_get_multiple_pages():\n writer = SimpleWriter()\n work_queue = asyncio.Queue()\n await 
work_queue.put(TestData.JOB_TEMPLATE_PAYLOAD_ALL_PAGES)\n worker = tower_api_worker.TowerApiWorker(TestData.config, writer, work_queue)\n headers = {\"Content-Type\": \"application/json\"}\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE1_RESPONSE),\n headers=headers,\n )\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL_PAGE_2,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE2_RESPONSE),\n headers=headers,\n )\n res = await worker.start()\n assert writer.data[\"count\"] == 3\n assert writer.called == 2", "def test_execute_get_success_with_multiple_pages():\n response_queue = queue.Queue()\n message = FakeMessage()\n message.raw_payload = json.dumps(TestData.JOB_TEMPLATE_PAYLOAD_ALL_PAGES)\n headers = {\"Content-Type\": \"application/json\"}\n\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE1_RESPONSE),\n headers=headers,\n )\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL_PAGE_2,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE2_RESPONSE),\n headers=headers,\n )\n worker.execute(message, TestData.RECEPTOR_CONFIG, response_queue)\n\n validate_get_response(\n response_queue.get(),\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_1, TestData.JOB_TEMPLATE_2],\n )\n validate_get_response(\n response_queue.get(),\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_3],\n )", "def extract_page_urls(self, _):\n url = \"https://mossadams.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal=4160751617\"\n page_num = 1\n last_count = 0\n this_count = 0\n\n while True:\n last_count = len(self.urls_to_scrape)\n payload = PAYLOAD + '\"pageNo\":' + str(page_num) + \"}\"\n json_data = self.post_request(url, out_format='json', headers=HEADERS, data=payload)\n\n for job in json_data['requisitionList']:\n job_url = \"https://mossadams.taleo.net/careersection/6/jobdetail.ftl?job=\" + job['contestNo']\n self.urls_to_scrape.add(job_url)\n\n # check to see if any new records were scraped; if not, I've reach the end\n this_count = len(self.urls_to_scrape)\n if last_count == this_count:\n break\n else:\n last_count = this_count\n page_num += 1", "def test_get_event_logs(event_log_api_setup):\n api_response = event_log_api_setup.get_event_logs(limit=100, offset=0)\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def do_case(self, objects, page_num, num_pages, expected):\n request = RequestFactory().get(\"/test\")\n paginator = DiscussionAPIPagination(request, page_num, num_pages)\n actual = paginator.get_paginated_response(objects)\n assert actual.data == expected", "def _GetAllTestRuns(self, ispy):\n template = JINJA.get_template('list_view.html')\n data = {}\n max_keys = 1000\n marker = 'failures/%s' % self.request.get('marker')\n test_runs = list([path.split('/')[1] for path in\n ispy.GetAllPaths('failures/', max_keys=max_keys,\n marker=marker, delimiter='/')])\n base_url = '/?test_run=%s'\n next_url = '/?marker=%s' % test_runs[-1]\n data['next_url'] = next_url\n data['links'] = [(test_run, base_url % test_run) for test_run in test_runs]\n self.response.write(template.render(data))", "def test_get_html_paginated(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'video_upload_pagination')", "def _get_pages(self,url,params,section):\n if self.verbose:\n 
print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results", "def test_get_updated_issues_one_page(self):\n with open(\"issues_one_page.json\", \"r\") as issues_file:\n mock_response = issues_file.read()\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/search', text=mock_response)\n issues = jiratimereport.get_updated_issues(\"https://jira_url\", \"user_name\", \"api_token\", \"MYB\",\n \"2020-01-10\", \"2020-01-20\", \"\")\n\n issues_expected_result = [\n Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n self.assertListEqual(issues_expected_result, issues, \"Issues lists are unequal\")", "def test_iter_paging(self):\n ref = mock.Mock()\n ref.side_effect = [\n {'rows': [x for x in range(100)]},\n {'rows': []}\n ]\n rslt = Result(ref, page_size=10)\n collection = [x for x in rslt]\n self.assertEqual(len(collection), 100)", "def iterate_new_reports(title, hikeurl,last_scrape):\n # lists how many reports are on the page\n r = requests.get(hikeurl + '/@@related_tripreport_listing').text\n soup = BeautifulSoup(r, 'lxml')\n numit = math.ceil(float(soup.find('div', {'id': 'count-data'}).text) / 5)\n for i in range(int(numit)):\n get_new_trail_report(title, hikeurl, last_scrape, params={'b_start:int': str(i * 5)})\n return None", "def test_pagination(self):\n\n # Add channel\n for i in range(22):\n self.create_channel('test-channel-' + str(i))\n\n # There should now be one page (20) + 2 channels\n response = self.client.get('/api/channels/2')\n self.assertEqual(response.json['total-channels'], 22)\n self.assertEqual(len(response.json['channels']), 2)\n\n # And the third page should not exist\n response = self.client.get('/api/channels/3')\n self.assert404(response)", "def test_project_list_pagination(self):\n # Add enough projects so that pagination is required.\n # project_list should show 5 projects per page, so 15\n # projects will be split up over 3 pages.\n for i in range(15):\n add_project(title='{0}'.format(i), description='{0}'.format(i))\n\n url = reverse('portfolio:project_list')\n\n # Check buttons on first page.\n response = self.client.get(url)\n self.assertNotContains(response, 'Previous')\n self.assertContains(response, 'Next')\n\n # Check buttons on second page.\n response = self.client.get('{url}?page=2'.format(url=url))\n self.assertContains(response, 'Previous')\n self.assertContains(response, 'Next')\n\n # Check buttons on third page.\n response = self.client.get('{url}?page=3'.format(url=url))\n self.assertContains(response, 'Previous')\n self.assertNotContains(response, 'Next')", "def test_journals_paged_fields(self, api_client):\n rv = api_client.get(\"/journals-paged\")\n json_data = rv.get_json()\n sample = next(\n (item for item in json_data[\"results\"] if item[\"issn_l\"] == \"1907-1760\"),\n 
None,\n )\n top_level_keys = [\n \"id\",\n \"issn_l\",\n \"issns\",\n \"title\",\n \"publisher\",\n \"previous_issn_ls\",\n \"other_titles\",\n \"journal_metadata\",\n \"total_dois\",\n \"dois_by_issued_year\",\n \"sample_dois\",\n \"subscription_pricing\",\n \"apc_pricing\",\n \"open_access\",\n \"status\",\n \"status_as_of\",\n ]\n\n i = 0\n for key in sample.keys():\n assert key == top_level_keys[i]\n i += 1" ]
[ "0.7200031", "0.66805685", "0.6650853", "0.6601061", "0.6430808", "0.63457406", "0.6279656", "0.6261222", "0.621969", "0.6196999", "0.612971", "0.6080827", "0.605526", "0.59867334", "0.59738255", "0.5971564", "0.5957453", "0.5955607", "0.59216136", "0.5888905", "0.58683413", "0.58531123", "0.58109486", "0.5807042", "0.5795422", "0.57950425", "0.5781573", "0.57706505", "0.57457894", "0.57449025" ]
0.743094
0
Test the formatting of the time field when the time is greater than several days
def test_format_optional_time_field(self): formatted_time = jiratimereport.format_optional_time_field(99960, "") expected_result = "27:46:00" self.assertEqual(expected_result, formatted_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_filter(target_time, format, delta_hours):\n return datetime.strptime(target_time, format) + timedelta(hours=delta_hours) >= datetime.utcnow()", "def time_is_valid(request, day, time, name):\n\n\tif ((day != '0' and day != '6') and time.hour == 21) or time.minute != 0:\n\t\treturn False\n\n\t# George's time\n\tif name != \"George Yeh\" and day == '6' and time.hour >= 9 and time.hour < 12:\n\t\treturn False\n\n\treturn True", "def format_evening(self, data):\n return (self.input['start_time'].hour - 12) >= profile.evening_threshold", "def validtimefilter(self, hito):\n\t\tif self.horadesde == \"\" and self.horahasta == \"\":\n\t\t\treturn True\n\t\telse:\n\n\t\t\thora = hito.fechahora[hito.fechahora.index(\" / \")+3:]\n\n\t\t\thora_hito = datetime.datetime.strptime(hora, \"%H:%M\")\n\t\t\tif self.horadesde != \"\":\n\t\t\t\tif self.isPrimerHitoDelDia(hito):\n\t\t\t\t\thora_desde = datetime.datetime.strptime(self.horadesde, \"%H:%M\")\n\t\t\t\t\tif hora_desde > hora_hito:\n\t\t\t\t\t\treturn False\n\n\t\t\tif self.horahasta != \"\":\n\t\t\t\tif self.isUltimoHitoDelDia(hito):\n\t\t\t\t\thora_hasta = datetime.datetime.strptime(self.horahasta, \"%H:%M\")\n\t\t\t\t\t#print(\"%s --- %s = %s --- %s\" % (self.horahasta,str(hora_hasta),hora_hito, str(hora_hito)))\n\t\t\t\t\tif hora_hasta < hora_hito:\n\t\t\t\t\t\treturn False\n\n\t\t\treturn True", "def test_time_field():", "def properTimeInput(time_):\r\n if not time_.isdigit() or len(time_) > 4 or len(time_) < 4 or int(time_) > 2400 or int(time_) < 0 or int(time_[2])>5:\r\n print(\"'\",time_, \"' is an invalid input for the time. Use 24 hr format.\\nExamples: 8 a.m = 0800, 1 p.m = 1300, 2:30 = 1430, 12:50 a.m = 0050\\n\")\r\n return False\r\n return True", "def test_as_time(self):\n self.assertEqual(\n time_display.as_time(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n with_msec=True),\n '23:59:30.357')", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def is_time(self) -> bool:\n return self.times > 1", "def test_format_date_time(self):\r\n formatted_date = date_formatter.format_date_time(\"190501:0902\")\r\n self.assertEqual(formatted_date, \"2019-05-01 09:02\")", "def _valid_day(self, date_find):\n try:\n datetime.strptime(date_find, settings.TIME_FORMAT)\n valid = True\n except ValueError:\n valid = False\n return valid", "def _validate_time_fields(cls, item):\n if item.time_started_msec and (\n item.time_queued_msec > item.time_started_msec):\n cls._add_error(\n 'time queued check',\n 'Entity id %s: time queued %s is greater '\n 'than time started %s' % (\n item.id, item.time_queued_msec, item.time_started_msec))\n\n if item.time_finished_msec and (\n item.time_started_msec > item.time_finished_msec):\n cls._add_error(\n 'time started check',\n 'Entity id %s: time started %s is greater '\n 'than time finished %s' % (\n item.id, item.time_started_msec, item.time_finished_msec))\n\n current_time_msec = utils.get_current_time_in_millisecs()\n if item.time_finished_msec > current_time_msec:\n cls._add_error(\n 'time finished check',\n 'Entity id %s: time finished %s is greater '\n 'than the current time' % (\n item.id, item.time_finished_msec))", "def important_event(time: int) -> bool:\n last_event = get_events(True)[0]\n try:\n time_event = int(last_event.split('\\n')[0].strip(\"'\"))\n except ValueError:\n time_event = int(last_event.split('\\n')[-1].strip(\"'\"))\n if time - time_event < 60:\n return 'gol' in 
last_event or 'cartão' in last_event\n return False", "def is_complete_hour(text):\n for fmt in ['%H:%M:%S', '%H:%M']:\n try:\n strptime(text, fmt)\n return True \n except ValueError:\n pass\n return False", "def _matchTime(self, time: float):\n return self._comparator['Time'] < time", "def verify_date_or_time(css, date_or_time):\r\n # We need to wait for JavaScript to fill in the field, so we use\r\n # css_has_value(), which first checks that the field is not blank\r\n assert_true(world.css_has_value(css, date_or_time))", "def check_time(self,data,data_orginal):\n if data['start_time'] > data['end_time']:\n raise ValidationError('event end time should be greater than start time.')", "def test_long_not_configured(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 'longdatetimeformat': '',\n }\n assert (dt.datetime(2017, 1, 1), True) == guessdatetimefstr(\n '2017-1-1'.split(), locale=locale, default_day=dt.datetime.today())\n assert (dt.datetime(2017, 1, 1, 16, 30), False) == guessdatetimefstr(\n '2017-1-1 16:30'.split(), locale=locale, default_day=dt.datetime.today())", "def test_format_time():\n number = random.randint(10000, 99999)\n formatted_number = race.format_time(str(number))\n assert type(formatted_number) == str\n assert \":\" in formatted_number and \".\" in formatted_number", "def _validate_time_fields(cls, item):\n if item.last_started_msec > item.last_finished_msec and (\n item.last_started_msec > item.last_stopped_msec):\n cls._add_error(\n 'last started check',\n 'Entity id %s: last started %s is greater '\n 'than both last finished %s and last stopped %s' % (\n item.id, item.last_started_msec, item.last_finished_msec,\n item.last_stopped_msec))\n\n current_time_msec = utils.get_current_time_in_millisecs()\n if item.last_finished_msec > current_time_msec:\n cls._add_error(\n 'last finished check',\n 'Entity id %s: last finished %s is greater '\n 'than the current time' % (\n item.id, item.last_finished_msec))\n\n if item.last_stopped_msec > current_time_msec:\n cls._add_error(\n 'last stopped check',\n 'Entity id %s: last stopped %s is greater '\n 'than the current time' % (\n item.id, item.last_stopped_msec))", "def check_wrong_time(self, cr, uid, att, context=None):\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n \n \n \n \n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', 
att.employee_id.id),\n ('expected_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ], context=context)\n if not working_hour_ids:\n return True\n return False", "def valid(t):\n return float(t) > time.time()", "def test_strftimeEx_07():\n t = 7.9996\n fmt = \"%S %(ms_)\"\n result = strftimeEx(fmt, t)\n expected = \"07 999\"\n print 'result = \"%s\" expected = \"%s\"' % (result, expected)\n assert result == expected", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def test_format_datetime(self):\n assert BaseTSVWriter.format_datetime(FIXED_DATETIME) == '2016/05/15 15:02:55'", "def test_strftimeEx_06():\n t = 7.9996\n fmt = \"%S %(ms)\"\n result = strftimeEx(fmt, t)\n expected = \"08 000\"\n print 'result = \"%s\" expected = \"%s\"' % (result, expected)\n assert result == expected", "def valid_format(self):\n\n # If candidate is None, return true\n if not self.dt:\n print \"dt empty\"\n return True\n\n # Verify if time format is ok and stores in into a time-tuple format\n try:\n stime = datetime.strptime(self.dt, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n return False\n else:\n return True", "def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()", "def check_time(self, m, s):\r\n if m*60 + s > 5400:\r\n self.unit.s = 0\r\n self.unit.m = 90\r\n return\r\n if s < 0:\r\n s = 0\r\n if m < 0:\r\n m = 0\r\n self.unit.s = s\r\n self.unit.m = m" ]
[ "0.6860934", "0.6165199", "0.61254984", "0.6100006", "0.6025755", "0.594662", "0.5908246", "0.58880615", "0.58691204", "0.5866743", "0.5844216", "0.5768878", "0.5734076", "0.5729486", "0.5691129", "0.5662217", "0.56508267", "0.5647204", "0.5638489", "0.56327003", "0.5612369", "0.5602961", "0.55566597", "0.5555742", "0.5555742", "0.5547056", "0.55442125", "0.5543762", "0.55433846", "0.5540797" ]
0.6492797
1
Required operations for ETL XML to CSV.
def etl_operations(): tap = SQLTaps(db_type='mysql', username='root', password='', host='localhost', db_name='ETLtestDb') conn = tap.get_connection() query = 'SELECT id, filename, student_xml FROM StudentsData' rows = tap.get_rows(conn, query) rows_json = tap.covert_ResultProxy_to_JSON(rows) result_list = rows_json.get('result') converter = Convert() csv_row_list = list() headers = list() for row in result_list: xml_content = base64.b64decode(row.get('student_xml').encode()) csv_content = converter.xml_to_csv(xml_content) headers = csv_content.get('columns') csv_row_list.append(csv_content.get('values')) csv_target('students.csv', csv_row_list, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(_):\n # Create an XML data frame, which represents the CSV format\n xml_df = xml_to_df(FLAGS.xml_input)\n print(FLAGS.xml_input)\n print(FLAGS.output_path)\n # Convert the data frame into CSV format, and save to the output path\n xml_df.to_csv(FLAGS.output_path, index=None)\n print('Successfully converted xml to csv!')", "def process_document(as_xml, as_csv):\n tree = ET.parse(as_xml)\n lists = xml_to_list(tree.getroot())\n list_to_csv(lists, as_csv)\n # raise NotImplementedError()\n pass", "def write_data_to_csv(xml_files, csv_file):\n output = {}\n for xml_file in xml_files:\n with open(xml_file, 'r') as xml_output:\n xml_string = xml_output.read()\n try:\n root = ET.fromstring(xml_string)\n except ET.ParseError:\n LOGGER.warning('Could not parse %s, adding closing tag.' % xml_file)\n root = ET.fromstring(xml_string + Output.close_tag)\n \n for trade_tag in root.findall('Entity'):\n items = {}\n for item in FILE_HEADER:\n value = ''\n try:\n value = trade_tag.find(item).text\n except AttributeError:\n LOGGER.exception('Failed to find \"%s\" attribute.' % item)\n \n if item == 'PLImpact':\n try:\n items[item] = float(value)\n except ValueError:\n items[item] = float('nan')\n elif item == 'SourceType':\n items[item] = SOURCE_TYPE_MAPPING[value]\n else:\n items[item] = value\n row_key = get_key(items)\n if row_key in output:\n output[row_key]['PLImpact'] += items['PLImpact']\n else:\n output[row_key] = items\n \n output_ordered = OrderedDict(sorted(output.items(), key=lambda t: t[1]['UpdateTime']))\n with open(csv_file, 'w') as f:\n csvwriter = csv.DictWriter(f, FILE_HEADER, lineterminator='\\n')\n csvwriter.writeheader()\n for item in output_ordered.values():\n csvwriter.writerow(item)", "def csvdata(nodelist):\n\n data = \"\"\n for subnode in nodelist:\n if (subnode.nodeType == subnode.ELEMENT_NODE):\n try:\n data = data + \",\" + subnode.childNodes[0].data\n except:\n data = data+ \",\"\n return data[1:] + \"\\n\"", "def xml_to_csv(xml_folder, output_file=None):\n\n xml_list = []\n # Loop through every XML file\n for xml_file in glob(xml_folder + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n filename = root.find('filename').text\n size = root.find('size')\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n\n # Each object represents each actual image label\n for member in root.findall('object'):\n box = member.find('bndbox')\n label = member.find('name').text\n\n # Add image file name, image size, label, and box coordinates to CSV file\n row = (filename, width, height, label, int(box[0].text),\n int(box[1].text), int(box[2].text), int(box[3].text))\n xml_list.append(row)\n\n # Save as a CSV file\n column_names = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_names)\n\n if output_file is not None:\n xml_df.to_csv(output_file, index=None)\n\n return xml_df", "def parse_xml_to_csv(self, fName, tag=\"Row\"):\n\n output_name = fName.replace('.XML', '.csv')\n converter = xml2csv(fName, output_name, encoding=\"utf-8\")\n converter.convert(tag=tag)\n\n return self", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n 
self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def parse_to_csv(data,namee):\n pth = BASE_DIR + '/reports/' + csv_name\n if not os.path.isfile(namee):\n csv_file = open(namee, 'wb')\n csv_writer = csv.writer(csv_file)\n top_row = [\n 'IP', 'Host', 'os', 'Proto', 'Port',\n 'Service','Service_version', 'Product', 'Service FP',\n 'NSE Script ID', 'NSE Script Output', 'Notes'\n ]\n csv_writer.writerow(top_row)\n print('\\n[+] The file {} does not exist. New file created!\\n'.format(\n csv_name))\n # else:\n # # try:\n # csv_file = open(csv_name, 'w')\n\n # csv_writer = csv.writer(csv_file)\n # print('\\n[+] {} exists. Appending to file!\\n'.format(csv_name))\n\n \n for item in data:\n csv_writer.writerow(item)\n csv_file.close()", "def csv(self, destination_path):\n # todo - test for single and duplicate base cases\n to_csv(self._axl_data, destination_path)", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def convert2csv(contacts, output_path):\n\n print(\"[!] 
not implemented yet\")", "def dlCsvReport(self):\r\n requestElems = {'xf': 'csv'}\r\n requestElems.update(self.getReportConfig())\r\n \r\n csvdata = self.sendRequest(self.reportFormURL, self.fileOpener,\r\n requestElems, 'POST').read()\r\n\r\n self.writeExportFile('csv', csvdata)", "def xml_to_csv(path):\n\n xml_list = []\n f = open(path, \"r\")\n basepath = osp.dirname(osp.dirname(osp.dirname(path)))\n while True:\n line = f.readline().strip()\n if not line: break\n xml_file = osp.join(basepath, 'Annotations', line + '.xml')\n jpg_file = osp.join(basepath, 'JPEGImages', line + '.jpg')\n tree = ET.parse(xml_file)\n root = tree.getroot()\n if os.path.isfile(jpg_file): file_name = jpg_file\n else:\n file_name = root.find('filename').text\n for member in root.findall('object'):\n if member[4].tag == 'difficult': idx = 5\n else: idx = 4\n value = (file_name,\n int(root.find('size')[0].text),\n int(root.find('size')[1].text),\n member[0].text,\n int(member[idx][0].text),\n int(member[idx][1].text),\n int(member[idx][2].text),\n int(member[idx][3].text)\n )\n xml_list.append(value)\n column_name = ['filename', 'width', 'height',\n 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n f.close()\n return xml_df", "def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())", "def ToXMLElement(self):\n table_element = xml.etree.ElementTree.Element('table')\n table_element.set('id', self.table_id)\n\n for column in self.columns:\n table_element.append(column.ToXMLElement())\n\n table_data = xml.etree.ElementTree.Element('data')\n table_data_file = xml.etree.ElementTree.Element('file')\n table_data_file.set('encoding', 'utf-8')\n table_data_file.set('format', 'csv')\n table_data_file.text = self.file_name\n\n table_data.append(table_data_file)\n\n table_element.append(table_data)\n\n return table_element", "def importxml(db, xmlinput):\n\n from io import StringIO\n import xml.dom.minidom\n\n try:\n doc = xml.dom.minidom.parseString(xmlinput)\n except:\n raise Exception(\"XML parse error\")\n\n parent = doc.childNodes[0].tagName\n csvout = csvheader(parent, doc.childNodes[0].childNodes)\n for subnode in doc.childNodes:\n csvout = csvout + csvdata(subnode.childNodes)\n fh = StringIO()\n fh.write(csvout)\n fh.seek(0, 0)\n db[parent].import_from_csv_file(fh)", "def _populate_from_xml_file(self, xml):\n '''\n example from API: http://www.ga.gov.au/www/argus.argus_api.survey?pSurveyNo=921\n\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n 
<RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n # turn the XML doc into a Python object\n root = objectify.fromstring(xml)\n\n if hasattr(root.ROW, 'SURVEYNAME'):\n self.survey_name = root.ROW.SURVEYNAME\n if hasattr(root.ROW, 'STATE'):\n self.state = root.ROW.STATE\n if hasattr(root.ROW, 'OPERATOR'):\n self.operator = root.ROW.OPERATOR\n if hasattr(root.ROW, 'CONTRACTOR'):\n self.contractor = root.ROW.CONTRACTOR\n if hasattr(root.ROW, 'PROCESSOR'):\n self.processor = root.ROW.PROCESSOR\n if hasattr(root.ROW, 'SURVEY_TYPE'):\n self.survey_type = root.ROW.SURVEY_TYPE\n if hasattr(root.ROW, 'DATATYPES'):\n self.data_types = root.ROW.DATATYPES\n if hasattr(root.ROW, 'VESSEL'):\n self.vessel = root.ROW.VESSEL\n if hasattr(root.ROW, 'VESSEL_TYPE'):\n self.vessel_type = root.ROW.VESSEL_TYPE\n if hasattr(root.ROW, 'RELEASEDATE'):\n self.release_date = datetime.strptime(root.ROW.RELEASEDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.RELEASEDATE.text is not None else None\n if hasattr(root.ROW, 'ONSHORE_OFFSHORE'):\n self.onshore_offshore = root.ROW.ONSHORE_OFFSHORE\n if hasattr(root.ROW, 'STARTDATE'):\n self.start_date = datetime.strptime(root.ROW.STARTDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.STARTDATE.text is not None else None\n if hasattr(root.ROW, 'ENDDATE'):\n self.end_date = datetime.strptime(root.ROW.ENDDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.ENDDATE.text is not None else None\n if hasattr(root.ROW, 'WLONG'):\n self.w_long = root.ROW.WLONG\n if hasattr(root.ROW, 'ELONG'):\n self.e_long = root.ROW.ELONG\n if hasattr(root.ROW, 'SLAT'):\n self.s_lat = root.ROW.SLAT\n if hasattr(root.ROW, 'NLAT'):\n self.n_lat = root.ROW.NLAT\n if hasattr(root.ROW, 'LINE_KM'):\n self.line_km = root.ROW.LINE_KM\n if hasattr(root.ROW, 'TOTAL_KM'):\n self.total_km = root.ROW.TOTAL_KM\n if hasattr(root.ROW, 'LINE_SPACING'):\n self.line_spacing = root.ROW.LINE_SPACING\n if hasattr(root.ROW, 'LINE_DIRECTION'):\n self.line_direction = root.ROW.LINE_DIRECTION\n if hasattr(root.ROW, 'TIE_SPACING'):\n self.tie_spacing = root.ROW.TIE_SPACING\n if hasattr(root.ROW, 'SQUARE_KM'):\n self.square_km = root.ROW.SQUARE_KM\n if hasattr(root.ROW, 'CRYSTAL_VOLUME'):\n self.crystal_volume = root.ROW.CRYSTAL_VOLUME\n if hasattr(root.ROW, 'UP_CRYSTAL_VOLUME'):\n self.up_crystal_volume = root.ROW.UP_CRYSTAL_VOLUME\n if hasattr(root.ROW, 'DIGITAL_DATA'):\n self.digital_data = root.ROW.DIGITAL_DATA\n if hasattr(root.ROW, 'GEODETIC_DATUM'):\n self.geodetic_datum = root.ROW.GEODETIC_DATUM\n if hasattr(root.ROW, 'ASL'):\n self.asl = root.ROW.ASL\n if hasattr(root.ROW, 'AGL'):\n self.agl = root.ROW.AGL\n if hasattr(root.ROW, 'MAG_INSTRUMENT'):\n self.mag_instrument = root.ROW.MAG_INSTRUMENT\n if hasattr(root.ROW, 'RAD_INSTRUMENT'):\n self.rad_instrument = root.ROW.RAD_INSTRUMENT", "def export_html(self, model_view='gapd'):\n '''\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n 
<LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n if model_view == 'prov':\n prov_turtle = self.export_rdf('prov', 'text/turtle')\n g = Graph().parse(data=prov_turtle, format='turtle')\n\n view_html = render_template(\n 'survey_prov.html',\n visjs=self._make_vsjs(g),\n prov_turtle=prov_turtle,\n )\n else: # model_view == 'gapd':\n view_html = render_template(\n 'survey_gapd.html',\n survey_no=self.survey_no,\n survey_name=self.survey_name,\n state=self.state,\n operator=self.operator,\n contractor=self.contractor,\n processor=self.processor,\n survey_type=self.survey_type,\n data_types=self.data_types,\n vessel=self.vessel,\n vessel_type=self.vessel_type,\n release_date=self.release_date,\n onshore_offshore=self.onshore_offshore,\n start_date=self.start_date,\n end_date=self.end_date,\n line_km=self.line_km,\n total_km=self.total_km,\n line_spacing=self.line_spacing,\n line_direction=self.line_direction,\n tie_spacing=self.tie_spacing,\n area=self.square_km,\n crystal_volume=self.crystal_volume,\n up_crystal_volume=self.up_crystal_volume,\n digital_data=self.digital_data,\n geodetic_datum=self.geodetic_datum,\n asl=self.asl,\n agl=self.agl,\n mag_instrument=self.mag_instrument,\n rad_instrument=self.rad_instrument,\n wkt_polygon=self.wkt_polygon\n )\n\n return render_template(\n 'page_survey.html',\n view_html=view_html,\n survey_no=self.survey_no,\n end_date=self.end_date,\n survey_type=self.survey_type,\n date_now=datetime.now().strftime('%Y-%m-%d'),\n centroid_lat=self.centroid_lat,\n centroid_lon=self.centroid_lon,\n n_lat=self.n_lat,\n s_lat=self.s_lat,\n w_long=self.w_long,\n e_long=self.e_long,\n gm_key=config.GOOGLE_MAPS_API_KEY\n )", "def csv_data():\r\n for row in data:\r\n writer.writerow(row)\r\n csv_data = read_and_flush()\r\n yield csv_data", "def process(self, element, *_args, **_kwargs):\n fieldnames = self.fieldnames\n filtered_element = {\n key: value.encode('utf-8')\n for (key, value) in element.iteritems()\n if key in fieldnames\n }\n with io.BytesIO() as stream:\n writer = csv.DictWriter(stream, fieldnames)\n writer.writerow(filtered_element)\n csv_string = stream.getvalue().strip('\\r\\n')\n\n yield csv_string", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def _CsvFunc(self, obj=None, verbose=False, use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(obj, verbose=verbose, recursive=False,\n format_name='csv')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)", "def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = 
open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale (2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += 
str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" 
max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" </source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" 
</source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn", "def get_csv_string(self, **kwargs):\n ...", "def _csv(content):\n response = _data_frame(content).to_csv(index=False)\n return response", "def generateCsvData(self, context, obj, entity):\n raise NotImplementedError()", "def create_preprocessed_csv(self):\n self.process_report_data().to_csv(self.out_file_path, index=False)", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data" ]
[ "0.6848973", "0.6119344", "0.5600611", "0.5541763", "0.5541486", "0.5538107", "0.55232733", "0.54312", "0.5399392", "0.5382571", "0.5359434", "0.5281513", "0.5275438", "0.5271903", "0.526789", "0.52156895", "0.5205032", "0.5199951", "0.5174725", "0.51475275", "0.5137194", "0.51361585", "0.5124136", "0.5100558", "0.50538206", "0.50514895", "0.50512236", "0.5051127", "0.5027852", "0.5022038" ]
0.6973972
0
Function for filtering regions which are smaller than minLength, that is, regions which are too small to contain an intergenic gene.
def filterFunction(region): inset = abs(region.stopGene.location[1] - region.stopGene.location[0])/2 if region.stopGene else 0 return region.stop + inset - region.start > minLength
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_contigs(contig_file, min_length):\n basename_original = os.path.splitext(contig_file)[0] \n basename_filtered = basename_original + \"_min_len_\" + str(min_length)\n contigs_filtered = basename_filtered + \".fasta\"\n num_reads = fasta_filter.seq_length_greater(contig_file, contigs_filtered, min_length)\n if num_reads == 0:\n print \"All of the contigs are smaller than \" + str(min_length) + \" nucleotide.\"\n print \"NOTHING is going to be annotated. (Decrease length or drop the argument for annotation.)\"\n sys.exit(0)\n return (basename_filtered, contigs_filtered)", "def filter_by_length(genes, transcripts, min_length):\n filtered_transcripts = {}\n filtered_genes = {}\n\n for transcript_id in transcripts:\n curr_transcript = transcripts[transcript_id]\n length = curr_transcript.get_length()\n\n if length >= min_length:\n filtered_transcripts[transcript_id] = curr_transcript\n gene_id = curr_transcript.gene_id\n if gene_id in genes:\n filtered_genes[gene_id] = genes[gene_id]\n\n return filtered_genes, filtered_transcripts", "def filter(self, roi_lengths):\n return roi_lengths >= self.min_roi_length_for_fragmentation", "def filter_rare_genes(data, *extra_data, cutoff=0, min_cells=5):\n gene_sums = measure.gene_capture_count(data, cutoff=cutoff)\n keep_genes_idx = gene_sums >= min_cells\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data", "def filter_by_seq_len(self, min_len):\n if not isinstance(min_len, numbers.Number):\n raise TypeError(\"min_len must be a number\")\n\n # Iterate over the nodes and remove any nodes shorter than min_len\n old_nodes = set(self.nodes)\n for n in old_nodes:\n comp_name = n[:-2]\n if self.get_component_len(comp_name) < min_len:\n self.remove_node(n)", "def trim_region(self, start, stop):\n if stop > len(self.bases):\n sys.stderr.write(\"Sequence.trim called on sequence that is too short; doing nothing.\\n\")\n return\n # Remove any genes that are overlap the trimmed region\n genes_to_remove = [g for g in self.genes if overlap([start, stop], g.indices)]\n self.genes = [g for g in self.genes if g not in genes_to_remove]\n # Remove bases from sequence\n self.bases = self.bases[:start - 1] + self.bases[stop:]\n # Adjust indices of remaining genes\n bases_removed = stop - start + 1\n for g in self.genes:\n g.adjust_indices(-bases_removed, start)\n return genes_to_remove", "def filter_instances_by_size(self, im, unique_instances, min_building_size):\n # create array to store building instances to ignore\n ignored_instances = np.array([])\n # if min_building_size is negative, error\n if min_building_size < 0:\n raise ValueError(\"Building size filter cannot be a negative number\")\n # return list of instances to check and list of instances to ignore\n # if min_building_size is 0, return original array of instances, ignored_instances is empty\n if min_building_size == 0:\n return unique_instances, ignored_instances\n else:\n for i in range(len(unique_instances)):\n _, current_building_size = self.get_current_building_mask(im, unique_instances[i])\n if current_building_size < min_building_size:\n ignored_instances = np.append(ignored_instances, i)\n return np.setdiff1d(unique_instances, ignored_instances), ignored_instances", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep 
= np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n\n return keep", "def filter(args):\n p = OptionParser(filter.__doc__)\n p.add_option(\n \"--less\",\n default=False,\n action=\"store_true\",\n help=\"filter the sizes < certain cutoff [default: >=]\",\n )\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n fastafile, cutoff = args\n try:\n cutoff = int(cutoff)\n except ValueError:\n sys.exit(not p.print_help())\n\n f = Fasta(fastafile, lazy=True)\n\n fw = must_open(opts.outfile, \"w\")\n for name, rec in f.iteritems_ordered():\n\n if opts.less and len(rec) >= cutoff:\n continue\n\n if (not opts.less) and len(rec) < cutoff:\n continue\n\n SeqIO.write([rec], fw, \"fasta\")\n fw.flush()\n\n return fw.name", "def filterPotentialChimeras(self, min_length=30, flag=0, target=None):\n logger.debug('Filtering {} for potential chimeras'.format(target))\n target = '{}.filter.fasta'.format(target.rpartition(\".\")[0])\n if os.path.exists(target):\n logger.info('Skipping filtering for {}'.format(target))\n else:\n with open(target, 'w') as oH:\n with open(self.fileName) as iH:\n for row in csv.reader(iH, delimiter=\"\\t\"):\n if not row[0].startswith('@') and row[1] == str(flag):\n if len(row[9]) >= 30:\n print(textwrap.fill('>%s' % row[0], width=80), file=oH)\n print(textwrap.fill('%s' % row[9], width=80), file=oH)\n logger.debug('Filtering finished')\n return target", "def _filter_boxes(self, boxes, min_size, im_info):\n # Scale min_size to match image scale\n min_size *= im_info[2]\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n x_ctr = boxes[:, 0] + ws / 2.\n y_ctr = boxes[:, 1] + hs / 2.\n keep = np.where((ws >= min_size) & (hs >= min_size) &\n (x_ctr < im_info[1]) & (y_ctr < im_info[0]))[0]\n return keep", "def filter(self, minLength=None, maxLength=None, removeGaps=False,\n whitelist=None, blacklist=None,\n titleRegex=None, negativeTitleRegex=None,\n truncateTitlesAfter=None, indices=None, head=None,\n removeDuplicates=False, modifier=None):\n return Reads(self._filter(\n minLength=minLength, maxLength=maxLength, removeGaps=removeGaps,\n whitelist=whitelist, blacklist=blacklist, titleRegex=titleRegex,\n negativeTitleRegex=negativeTitleRegex,\n truncateTitlesAfter=truncateTitlesAfter, indices=indices,\n head=head, removeDuplicates=removeDuplicates, modifier=modifier))", "def filter_toofew_toolong(df, min_each_group, max_length):\n df = df[~(df.question.apply(lambda x : len(x)) > max_length)]\n\n counts = df[\"index\"].value_counts()\n idxs = np.array(counts.index)\n \n # index numbers of groups with count >= mineachgroup\n list_idx = [i for i, c in zip(idxs, counts) if c >= min_each_group]\n\n # filter out data with \"index\" in list_idx \n df = df[df[\"index\"].isin(list_idx)]\n return df", "def filter_by_minimum_length(\n self, min_length: int, output_file: Path = None, point_to_new_file: bool = True\n ) -> None:\n if output_file is None:\n output_file = (\n Path(self._input_file.parent)\n / f\"{self._input_file.stem}_minlength{self._input_file.suffix}\"\n )\n else:\n output_file = Path(output_file)\n fasta = pyfastx.Fasta(\n 
self.file_path.as_posix(), build_index=False, full_name=True\n )\n with open(output_file, \"w+\", encoding=\"UTF-8\") as outfile:\n for record_name, record_seq in fasta:\n if len(record_seq) >= min_length:\n outfile.write(f\">{record_name}\\n{record_seq}\\n\")\n if point_to_new_file:\n self.file_path = output_file", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_boxes2(boxes, max_size, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n if max_size > 0:\n keep = np.where(np.minimum(ws, hs) < max_size)[0]\n elif min_size > 0:\n keep = np.where(np.maximum(ws, hs) > min_size)[0]\n return keep", "def filter_by_size(img_segm,mini_nb_pix):\n numbers = np.zeros(int(np.max(img_segm)))\n for i in range(1,int(np.max(img_segm))+1):\n numbers[i-1] = np.count_nonzero(img_segm==i)\n \n indexes = np.arange(1,np.max(img_segm)+1)\n #indexes = indexes[numbers>np.mean(numbers)] #Deletes the 1-pixel elements\n indexes = indexes[numbers>mini_nb_pix] #Deletes the 1-pixel elements\n \n segm_filtered = np.zeros(img_segm.shape)\n j=1\n for i in (indexes):\n segm_filtered[img_segm==i] = j\n j+=1\n return segm_filtered", "def _filter_imgs(self, min_size=32):\r\n valid_inds = []\r\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\r\n for i, img_info in enumerate(self.img_infos):\r\n if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:\r\n continue\r\n if min(img_info['width'], img_info['height']) >= min_size:\r\n valid_inds.append(i)\r\n return valid_inds", "def remove_below_lower_length_limit(self) -> None:\n for column_name in self.data:\n threshold_executor = TrimUtils.remove_text_below_lower_length_threshold(\n self.config[f'{column_name}_lower_length_limit']\n )\n self.data = self.data[self.data[column_name].map(threshold_executor)]\n self.data.reset_index(drop=True, inplace=True)", "def _filter_imgs(self, min_size=32):\n\n valid_inds = []\n for i, img_info in enumerate(self.data_infos):\n if min(img_info[\"width\"], img_info[\"height\"]) < min_size:\n continue\n if self.filter_empty_gt and len(img_info[\"ann\"][\"bboxes\"]) > 0:\n valid_inds.append(i)\n else:\n valid_inds.append(i)\n\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n 
return valid_inds", "def subsettter(clipsegments, lengthtype):\n if lengthtype == 'twothirds':\n clipsegments.remove('AR8')\n clipsegments.remove('AF13')\n elif lengthtype == 'abouthalf':\n clipsegments.remove('AR8')\n clipsegments.remove('AF13')\n clipsegments.remove('AF7')\n return clipsegments", "def supportingReadsFilter(spot, args):\n if spot.tags[\"label\"] == \"INS\":\n errId = 1\n errLab = 'insertion'\n elif spot.tags[\"label\"] == \"DEL\":\n errId = 2\n errLab = 'deletion'\n else:#don't worry about other types\n return False\n\n begin, ending = spot.fetchbounds()\n begin -= args.buffer #abs(begin-ending)*.5\n ending += args.buffer #abs(begin-ending)*.5\n #do the hard work\n reads = args.bam.fetch(str(spot.chrom), begin, ending)\n totSizes = []\n coverage = 0\n nReadsErr = 0\n #For tandem\n strandCnt = {True: 0, False: 0}\n \n #count reads and errSizes\n for i in reads:\n mySize = 0\n coverage += 1\n start = i.pos - 1\n cigar = expandCigar(i.cigar)\n curSize = 0\n extraSize = 0\n readHasErr = False\n \n #What if I just intersect any stretches of errors with my boundaries.\n #Then for insertions I'll keep coordinates\n #For deletions I'll user outer bounds?\n for code in cigar: \n if code != 1:\n start += 1\n #must be in region\n if start < begin:\n continue\n if start >= ending:\n break\n \n if code == errId:\n curSize += 1\n if curSize != 0 and code != errId:\n if curSize >= args.minIndelErr:\n readHasErr = True\n mySize += curSize\n elif curSize > 1:#1bp errors will inflate\n extraSize += curSize\n curSize = 0\n \n\n if readHasErr and mySize >= args.minIndelSize:\n nReadsErr += 1\n totSizes.append(mySize + extraSize)\n strandCnt[i.is_reverse] += 1\n \n spot.tags[\"strandCnt\"] = \"%d,%d\" % (strandCnt[False], strandCnt[True])\n if len(totSizes) == 0:\n logging.debug(\"no %s found!? %s\" % (errLab, str(spot)))\n return True # true you should filter\n \n if len(totSizes) < max(math.ceil(coverage * args.minIndelPct), args.minErrReads):\n logging.debug(\"not large cnt %s found %s \" % (errLab, str(spot)))\n return True\n \n totSizes.sort()\n totSizes = numpy.array(totSizes)\n mean = totSizes.mean()\n median = numpy.percentile(totSizes, 50)\n firstQ = numpy.percentile(totSizes, 25)\n thirdQ = numpy.percentile(totSizes, 75)\n \n logging.debug(\"PassFilt %s\" % (str(spot))) \n logging.debug(\"cov %d\" % coverage )\n logging.debug(\"size %d %s\" % (len(totSizes), str(totSizes)))\n logging.debug(\"mean %d\" % mean )\n logging.debug(\"median %d\" % median)\n logging.debug(\"firstQ %d\" % firstQ)\n logging.debug(\"thirdQ %d\" % thirdQ)\n \n spot.tags[\"szCount\"] = int(nReadsErr)\n spot.tags[\"szMean\"] = int(mean)\n spot.tags[\"szMedian\"] = int(median)\n spot.tags[\"sz1stQ\"] = int(firstQ)\n spot.tags[\"sz3rdQ\"] = int(thirdQ)\n return False", "def filter_by_size(img_segm):\n \n numbers = np.zeros(np.max(img_segm-1))\n for i in range(1,np.max(img_segm)):\n numbers[i-1] = np.sum(img_segm==i)\n \n indexes = np.arange(1,np.max(img_segm))\n #indexes = indexes[numbers>np.mean(numbers)] #Deletes the 1-pixel elements\n indexes = indexes[numbers>500] #Deletes the 1-pixel elements\n \n segm_filtered = np.zeros(img_segm.shape)\n j=1\n for i in (indexes):\n segm_filtered[img_segm==i] = j\n j+=1\n return segm_filtered" ]
[ "0.6610667", "0.6599001", "0.6448203", "0.63962674", "0.5995766", "0.5735804", "0.56986856", "0.5616286", "0.560173", "0.560173", "0.5585641", "0.5506691", "0.54659474", "0.54585403", "0.54376686", "0.54318166", "0.53870505", "0.53555745", "0.53555745", "0.53540766", "0.53393954", "0.53309417", "0.5325533", "0.525063", "0.52323365", "0.52323365", "0.52323365", "0.52249974", "0.5211537", "0.5197688" ]
0.7461413
0
Function converts Jy/beam to Jy/pix
def jyperbeam_to_jyperpix(header, data): convert = np.pi/180 # deg to rads fwhm_to_sigma = 1./(8*np.log(2))**0.5 bmaj = header['BMAJ']*convert bmin = header['BMIN']*convert beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma*2) # in staradians Jyperpix_data = data/beam_area return Jyperpix_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cgs2jyperbeam(beamsize):\n constant = 1e23#cgs 2 MJy/str\n beamsize *= np.pi/180./3600. # Arcseconds to radians\n beamsolid = beamsize**2*np.pi/(4*np.log(2))\n return constant*beamsolid", "def J_J(h):\n\n h = MTS(h)\n hdot = h.dot\n J_𝒥 = 0.5j * 𝔇inverseLaplacianinverse(\n 0.125 * (3 * h * hdot.bar.ethbar - 3 * hdot * h.bar.ethbar + hdot.bar * h.ethbar - h.bar * hdot.ethbar).eth.im\n ).ethbar.ethbar\n\n return J_𝒥", "def counts2jy_galex(counts, cal, pix_as):\n # first convert to abmag\n abmag = -2.5 * np.log10(counts) + cal\n\n # then convert to Jy\n f_nu = 10**(abmag/-2.5) * 3631.\n\n # then to MJy\n f_nu *= 1e-6\n\n # then to MJy/sr\n pix_rad = np.radians(pix_as / 3600.) # pixel scale coverted from arcsec to radians\n pix_sr = pix_rad ** 2. # pixel scale converted from radians to steradians\n val = f_nu / pix_sr\n\n return val", "def applyJET(img):\n\treturn applyColorMap(img, \"jet\")", "def magToJy(mag,emag,wband,zpFile=None):\n if zpFile == None:\n zpFile = Path(os.environ['SED_BUILDER']) / Path('zero_points.dat')\n zpWave, zpF0 = read_zp(zpFile)\n F0 = zpF0[wband]\n jy = (10**(-float(mag)/2.5))*F0\n if emag != '--':\n ejy = (float(emag)/2.5)*jy*log(10)\n else:\n ejy = np.nan\n \n return jy, ejy", "def gm2code(arr, info):\n return (arr / info.pboxsize + 0.5)# * 0.999783599", "def Y_ret(img):\r\n return solveJ(img, 1, tc(img, ignore_ch=2))", "def yiq2rgb(im_yiq):\n return multiply_by_left_matrix(np.linalg.inv(YIQ_MATRIX), im_yiq)", "def mK_to_Jy(z, cellsize, distances):\r\n\r\n nu = redshifts2frequencies(z)\r\n\r\n wvlngth = const.c / (nu / un.s)\r\n\r\n intensity = 2 * const.k_B * 1e-3 * un.K / wvlngth ** 2\r\n\r\n flux_density = 1e26 * intensity.to(un.W / (un.Hz * un.m ** 2))\r\n \r\n return flux_density.value * (( cellsize ) / distances)**2", "def jy_2_k(freq: float, theta_major: float,\n theta_minor: float, intensity: float) -> float:\n conv = 1.222E3 * (freq ** -2) / theta_minor / theta_major\n return intensity * conv", "def galaxy(img):\n return img[420:490, 710:770]", "def beam(xb,yb,zb,wx,wy,wavelen):\n\n zRx = np.pi * wx**2 / wavelen\n zRy = np.pi * wy**2 / wavelen \n \n sqrtX = np.sqrt( 1 + np.power(zb/zRx,2) ) \n sqrtY = np.sqrt( 1 + np.power(zb/zRy,2) ) \n intensity = np.exp( -2.*( np.power(xb/(wx*sqrtX ),2) \\\n + np.power(yb/(wy*sqrtY),2) )) / sqrtX / sqrtY\n return intensity", "def sky2pix(self, sky):\n\t\treturn self.wcs.wcs_sky2pix([sky], 0)[0]", "def AB_zero_Jy(self):\n c = 1e-8 * Constants.c.to('m/s').value\n f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.AB_zero_flux.value\n return f * Unit('Jy')", "def rgb2yiq(imRGB):\n return np.dot(imRGB, TRANSFORM.T.copy())", "def img_jet(img):\n if len(img.shape) == 2:\n normalized_img = img / 255.0\n jet = np.array(cm.jet(1 - normalized_img)[:, :, :3] * 255, np.uint8)\n else:\n jet = img\n return jet", "def rgb2yiq(imRGB):\n return np.dot(imRGB, np.array(MATRIX).T)", "def jadeite():\n\n rho = 3330.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 274.; C[0,1] = 94.; C[0,2] = 71.; C[0,3] = 0.; C[0,4] = 4.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 253.; C[1,2] = 82.; C[1,3] = 0.; C[1,4] = 14.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 282.; C[2,3] = 0.; C[2,4] = 28.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 88.; C[3,4] = 0.; C[3,5] = 13.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 94.\n\n return C, rho", "def 
plotxy_image(self, *args, **kws):\n _beam = kws.get('beam', 'star.01')\n _col1 = kws.get('col1', 1)\n _col2 = kws.get('col1', 3)\n _nbins = kws.get('nbins', 101)\n _nolost = kws.get('nolost', 1)\n _title = kws.get('title', r'Image')\n _xtitle = kws.get('xtitle', 'x - sagittal (Hor. focusing) [cm]')\n _ytitle = kws.get('ytitle', 'z - meridional (E dispersion) [cm]')\n _xrange = kws.get('xrange', None)\n _yrange = kws.get('yrange', None) \n _calfwhm = kws.get('calfwhm', 1)\n _level = kws.get('level', 15)\n _noplot = kws.get('noplot', 0)\n _contour = kws.get('contour', 6)\n return self.plotxy(_beam, _col1, _col2, nbins=_nbins, nolost=_nolost,\\\n title=_title, xtitle=_xtitle, ytitle=_ytitle,\\\n xrange=_xrange, yrange=_yrange,\\\n calfwhm=_calfwhm, noplot=_noplot,\\\n contour=_contour, level=_level)", "def beamarea_pix(self):\n beamsigma1 = self.header['BMAJ'] / self.wcs.wcs.cdelt[0]\n beamsigma2 = self.header['BMIN'] / self.wcs.wcs.cdelt[0]\n return (np.pi * beamsigma1 * beamsigma2) / (4 * np.log(2))", "def world_to_npimage(world):\r\n coefs = np.array([2,25,25]).reshape((3,1,1))\r\n accentuated_world = 255*coefs*world['SIR']\r\n image = accentuated_world[::-1].swapaxes(0,2).swapaxes(0,1)\r\n return np.minimum(255, image)", "def jacobin(y):\n\n df = np.zeros((3,3))\n\n df[0,0] = 77.27*(1.0 - y(1) -2.*8.375e-6*y(0))\n df[0,1] = 77.27*(1.0 -y(0) )\n df[0,2] = 0.0;\n df[1,0] = -1.0/77.27;\n df[1,1] = (-1.0/77.27)*(1.0+y(0))\n df[1,2] = 1.0/77.27\n df[2,0] = 0.161\n df[2,1] = 0.0\n df[2,2] = -0.161\n\n return df", "def hz2mel(hz):\n return 2595 * pylab.log10(1+hz/700.0)", "def kelvin_to_jansky(self):\n this_unit = self.stokes.unit\n if self.component_type == \"point\":\n if this_unit.is_equivalent(\"Jy\"):\n return\n\n else:\n if this_unit.is_equivalent(\"Jy/sr\"):\n return\n\n if self.spectral_type == \"spectral_index\" or (\n self.spectral_type == \"flat\" and self.reference_frequency is not None\n ):\n conv_factor = 1 / skyutils.jy_to_ksr(self.reference_frequency)\n conv_factor = np.repeat(\n np.repeat(conv_factor[np.newaxis, np.newaxis, :], 4, axis=0),\n self.Nfreqs,\n axis=1,\n )\n elif self.freq_array is not None:\n conv_factor = 1 / skyutils.jy_to_ksr(self.freq_array)\n conv_factor = np.repeat(\n np.repeat(conv_factor[np.newaxis, :, np.newaxis], 4, axis=0),\n self.Ncomponents,\n axis=2,\n )\n else:\n raise ValueError(\n \"Either reference_frequency or freq_array must be set to convert to Jy.\"\n )\n\n self.stokes = self.stokes * conv_factor\n if self.stokes_error is not None:\n self.stokes_error = self.stokes_error * conv_factor\n\n if self.stokes.unit.is_equivalent(\"Jy\"):\n # need the `to(units.Jy)` call because otherwise even though it's in Jy,\n # the units are a CompositeUnit object which doesn't have all the same\n # functionality as a Unit object\n self.stokes = self.stokes.to(units.Jy)\n if self.stokes_error is not None:\n self.stokes_error = self.stokes_error.to(units.Jy)\n\n if self.frame_coherency is not None:\n self.calc_frame_coherency()", "def J(t,y):\n return np.array( [ [lam] ] )", "def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(np.array(MATRIX).T))", "def preprocess(image):\n return (image / 255) * 2 - 1", "def cie_lab(self):\n K = Fraction(1, 3) * Fraction(29, 6) ** 2\n e = Fraction(6, 29) ** 3\n x, y, z = (n / m for n, m in zip(self.cie_xyz, D65))\n fx, fy, fz = (\n n ** Fraction(1, 3) if n > e else K * n + Fraction(4, 29)\n for n in (x, y, z)\n )\n return (116 * fy - 16, 500 * (fx - fy), 200 * (fy - fz))", "def jet(x):\r\n r = (x >= 3.0 / 8.0 and x < 5.0 / 
8.0) * (4.0 * x - 3.0 / 2.0) + (x >=\r\n 5.0 / 8.0 and x < 7.0 / 8.0) + (x >= 7.0 / 8.0) * (-4.0 * x + 9.0 / 2.0)\r\n g = (x >= 1.0 / 8.0 and x < 3.0 / 8.0) * (4.0 * x - 1.0 / 2.0) + (x >= 3.0 /\r\n 8.0 and x < 5.0 / 8.0) + (x >= 5.0 / 8.0 and x < 7.0 / 8.0) * (-4.0 * x + 7.0 / 2.0)\r\n b = (x < 1.0 / 8.0) * (4.0 * x + 1.0 / 2.0) + (x >= 1.0 / 8.0 and x <\r\n 3.0 / 8.0) + (x >= 3.0 / 8.0 and x < 5.0 / 8.0) * (-4.0 * x + 5.0 / 2.0)\r\n\r\n return (255.0 * r, 255.0 * g, 255.0 * b)", "def acc_j2(U,comp):\n if comp == 'x' or comp == 'y':\n return (1 - 1.5*J2*(R/r(U))**2*(5*(U[2]/r(U))**2-1))\n elif comp =='z':\n return (1 - 1.5*J2*(R/r(U))**2*(5*(U[2]/r(U))**2-3))" ]
[ "0.6268127", "0.5918865", "0.58972245", "0.583188", "0.57700664", "0.55387676", "0.5529212", "0.5513946", "0.54779565", "0.545998", "0.543896", "0.54380053", "0.543641", "0.5419115", "0.5406976", "0.5398323", "0.53837305", "0.5373626", "0.5364091", "0.5348948", "0.5344056", "0.5328909", "0.5327277", "0.5322401", "0.5321497", "0.531903", "0.531389", "0.5300666", "0.52940285", "0.52909005" ]
0.7008477
0
Benchmark numpy module import.
def test_Numpy_import(benchmark): def Benchmark(): import numpy as np a = np.ndarray(1) del a benchmark(Benchmark)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_numpy_core_multiarray(finder, module):\n module.AddGlobalName(\"arange\")", "def load_numpy_core_umath(finder, module):\n module.AddGlobalName(\"add\")\n module.AddGlobalName(\"absolute\")\n module.AddGlobalName(\"arccos\")\n module.AddGlobalName(\"arccosh\")\n module.AddGlobalName(\"arcsin\")\n module.AddGlobalName(\"arcsinh\")\n module.AddGlobalName(\"arctan\")\n module.AddGlobalName(\"arctanh\")\n module.AddGlobalName(\"bitwise_and\")\n module.AddGlobalName(\"bitwise_or\")\n module.AddGlobalName(\"bitwise_xor\")\n module.AddGlobalName(\"ceil\")\n module.AddGlobalName(\"conj\")\n module.AddGlobalName(\"conjugate\")\n module.AddGlobalName(\"cosh\")\n module.AddGlobalName(\"divide\")\n module.AddGlobalName(\"fabs\")\n module.AddGlobalName(\"floor\")\n module.AddGlobalName(\"floor_divide\")\n module.AddGlobalName(\"fmod\")\n module.AddGlobalName(\"greater\")\n module.AddGlobalName(\"hypot\")\n module.AddGlobalName(\"invert\")\n module.AddGlobalName(\"isfinite\")\n module.AddGlobalName(\"isinf\")\n module.AddGlobalName(\"isnan\")\n module.AddGlobalName(\"less\")\n module.AddGlobalName(\"left_shift\")\n module.AddGlobalName(\"log\")\n module.AddGlobalName(\"logical_and\")\n module.AddGlobalName(\"logical_not\")\n module.AddGlobalName(\"logical_or\")\n module.AddGlobalName(\"logical_xor\")\n module.AddGlobalName(\"maximum\")\n module.AddGlobalName(\"minimum\")\n module.AddGlobalName(\"multiply\")\n module.AddGlobalName(\"negative\")\n module.AddGlobalName(\"not_equal\")\n module.AddGlobalName(\"power\")\n module.AddGlobalName(\"remainder\")\n module.AddGlobalName(\"right_shift\")\n module.AddGlobalName(\"sign\")\n module.AddGlobalName(\"sinh\")\n module.AddGlobalName(\"sqrt\")\n module.AddGlobalName(\"tan\")\n module.AddGlobalName(\"tanh\")\n module.AddGlobalName(\"true_divide\")", "def load_numpy_distutils_misc_util(finder, module):\n module.IgnoreName(\"numscons\")", "def _test1():\n sys.argv.append('--Numeric')\n from . 
import numpytools as N\n verify(N)\n sys.argv[-1] = '--numarray'\n reload(N)\n verify(N)\n sys.argv[-1] = '--numpy'\n reload(N)\n verify(N)", "def load_Numeric(finder, module):\n module.IgnoreName(\"dotblas\")", "def test_imports():\n import pylablib.core.fileio.binio\n import pylablib.core.fileio.datafile\n import pylablib.core.fileio.dict_entry\n import pylablib.core.fileio.loadfile\n import pylablib.core.fileio.location\n import pylablib.core.fileio.logfile\n import pylablib.core.fileio.parse_csv\n import pylablib.core.fileio.savefile", "def performance_test():\n from timeit import Timer\n t = Timer(\"test()\", \"from __main__ import test\")\n print t.timeit(number=1)", "def test_load_numpy_file(save_npz) -> None:\n filename, data = save_npz\n result = loader.load_numpy_file(filename)\n\n for k, v in data.items():\n assert np.array_equal(v, result[k])", "def load_numpy_random_mtrand(finder, module):\n module.AddGlobalName(\"rand\")\n module.AddGlobalName(\"randn\")", "def _load_objects():\n global DataArray, DataFrame, Series, Index, ndarray\n ndarray = np.ndarray\n DataArray = getattr(sys.modules.get('xarray', None), 'DataArray', ndarray)\n DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)\n Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)\n Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)", "def numpy_extension():\n jsonpickle.ext.numpy.register_handlers()\n yield # control to the test function.\n jsonpickle.ext.numpy.unregister_handlers()", "def load_numpy_core_numerictypes(finder, module):\n module.AddGlobalName(\"bool_\")\n module.AddGlobalName(\"cdouble\")\n module.AddGlobalName(\"complexfloating\")\n module.AddGlobalName(\"csingle\")\n module.AddGlobalName(\"double\")\n module.AddGlobalName(\"float64\")\n module.AddGlobalName(\"float_\")\n module.AddGlobalName(\"inexact\")\n module.AddGlobalName(\"intc\")\n module.AddGlobalName(\"int32\")\n module.AddGlobalName(\"number\")\n module.AddGlobalName(\"single\")", "def test_compute_glycemic_load(self):\n pass", "def load_numpy_distutils_system_info(finder, module):\n module.IgnoreName(\"Numeric\")", "def run_performance( pkg_mod_iter ):\n for package, module_iter in pkg_mod_iter:\n print( package )\n print( \"=\"*len(package ) )\n print()\n for filename, modname in module_iter:\n print( filename, modname )\n try:\n module= __import__( package+\".\"+modname, fromlist=(modname,\"performance\") )\n module.performance()\n except AttributeError:\n pass # no performance() function in the module.", "def timing_test(dtype, trans_a, trans_b, n, k, trials):\n as_matrix = True\n\n np_time = 0.0\n bp_time = 0.0\n\n for i in range(trials):\n\n # create random scalars and matrices to test\n alpha = uniform(SCAL_MIN, SCAL_MAX)\n beta = uniform(SCAL_MIN, SCAL_MAX)\n A = random_matrix((n if trans_a == 'n' else k), (k if trans_a == 'n' else n), dtype, as_matrix)\n B = random_matrix((k if trans_b == 'n' else n), (n if trans_b == 'n' else k), dtype, as_matrix)\n C = random_matrix(n, n, dtype, as_matrix)\n\n # create copies/views for NumPy\n A_2 = A if trans_a == 'n' else A.T\n B_2 = B if trans_b == 'n' else B.T\n C_2 = copy(C)\n\n if i % 2 == 0:\n\n # BLASpy first\n start = time.time()\n gemm(A, B, C, trans_a, trans_b, alpha, beta)\n end = time.time()\n bp_time += end - start\n\n # then NumPy\n start = time.time()\n beta * C_2 + alpha * dot(A_2, B_2)\n end = time.time()\n np_time += end - start\n\n else:\n\n # NumPy first\n start = time.time()\n beta * C_2 + alpha * dot(A_2, B_2)\n end = 
time.time()\n np_time += end - start\n\n # then BLASpy\n start = time.time()\n gemm(A, B, C, trans_a, trans_b, alpha, beta)\n end = time.time()\n bp_time += end - start\n\n return bp_time / trials, np_time / trials", "def timeit_2vector(nb_element=1e6, nb_repeat=3, nb_call=int(1e2), expr=\"a**2 + b**2 + 2*a*b\", do_unalign=False, do_amd=True):\r\n rval = dict() \r\n print\r\n print \"timeit_2vector(nb_element=%(nb_element)s,nb_repeat=%(nb_repeat)s,nb_call=%(nb_call)s, expr=%(expr)s, do_unalign=%(do_unalign)s)\"%locals()\r\n\r\n if do_unalign:\r\n init = \"import numpy as np; a = np.empty(%(nb_element)s, dtype='b1,f8')['f1'];b = np.empty(%(nb_element)s, dtype='b1,f8')['f1'];a[:] = np.arange(len(a));b[:] = np.arange(len(b));\"%locals()\r\n else:\r\n init = \"import numpy as np; a = np.arange(%(nb_element)s);b = np.arange(%(nb_element)s)\"%locals()\r\n t1 = timeit.Timer(\"%(expr)s\"%locals(),\"from numpy import exp; %(init)s\"%locals())\r\n numpy_times = np.asarray(t1.repeat(nb_repeat,nb_call))\r\n print \"NumPy time: each time=\",numpy_times, \"min_time=\", numpy_times.min()\r\n rval['numpy'] = numpy_times\r\n\r\n t2 = timeit.Timer(\"\"\"ne.evaluate(\"%(expr)s\")\"\"\"%locals(),\r\n \"import numexpr as ne; %(init)s\"%locals())\r\n numexpr_times=np.asarray(t2.repeat(nb_repeat,nb_call))\r\n rval['numexpr'] = numexpr_times\r\n print \"Numexpr time: each time=\",numexpr_times,'min_time=', numexpr_times.min()\r\n\r\n theano.config.lib.amdlibm = False\r\n theano_times = timeit_2vector_theano(init, nb_element,nb_repeat,nb_call,expr)\r\n print \"Theano time: each time=\",theano_times, 'min_time=',theano_times.min()\r\n rval['theano'] = theano_times\r\n\r\n if do_amd:\r\n theano.config.lib.amdlibm = True\r\n theanoamd_times = timeit_2vector_theano(init, nb_element,nb_repeat,nb_call,expr)\r\n print \"Theano+amdlibm time\",theanoamd_times, theanoamd_times.min()\r\n rval['theano_amd'] = theanoamd_times\r\n\r\n print \"time(NumPy) / time(numexpr) = \",numpy_times.min()/numexpr_times.min()\r\n print \"time(NumPy) / time(Theano)\",numpy_times.min()/theano_times.min()\r\n print \"time(numexpr) / time(Theano)\",numexpr_times.min()/theano_times.min()\r\n if do_amd:\r\n print \"time(NumPy) / time(Theano+amdlibm)\",numpy_times.min()/theanoamd_times.min()\r\n print \"time(numexpr) / time(Theano+amdlibm)\",numexpr_times.min()/theanoamd_times.min()\r\n return rval", "def test_c_extensions_import():\n import storm_analysis.dbscan.dbscan_c\n \n import storm_analysis.fista.fista_fft_c\n \n import storm_analysis.frc.frc_c\n \n import storm_analysis.L1H.homotopy_imagea_c\n\n import storm_analysis.rolling_ball_bgr.rolling_ball_lib_c\n\n import storm_analysis.sa_library.cs_decon_utilities_c\n import storm_analysis.sa_library.dao_fit_c\n import storm_analysis.sa_library.grid_c\n import storm_analysis.sa_library.ia_utilities_c\n import storm_analysis.sa_library.matched_filter_c\n\n import storm_analysis.sa_utilities.fitz_c\n\n import storm_analysis.simulator.pf_math_c\n import storm_analysis.simulator.draw_gaussians_c\n \n import storm_analysis.spliner.cubic_spline_c\n import storm_analysis.spliner.cubic_fit_c", "def benchmark():\n print defer.Deferred.__module__\n for func, args, iter in benchmarkFuncs:\n print func.__name__, args, timeit(func, iter, *args)", "def test_import_multiple(monkeypatch, modpath, numtimes):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i 
in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n for i in range(numtimes):\n newmod = import_module(randpath)\n assert newmod is taskfile", "def get_array_module(arr):\n # TODO: also check for __array_interface__ attribute and not\n # __cuda_array_interface__?\n if have_cupy:\n return cupy.get_array_module(arr)\n else:\n return np", "def with_numpy(func):\r\n return func", "def load_matplotlib_numerix(finder, module):\n for name in (\"ma\", \"fft\", \"linear_algebra\", \"random_array\", \"mlab\"):\n finder.IncludeModule(\"%s.%s\" % (module.name, name))", "def load_numpy_linalg(finder, module):\n finder.IncludeModule(\"numpy.linalg.lapack_lite\")", "def test_imports():\n import sys\n import src\n assert 'sklearn.feature_extraction' not in sys.modules.keys()", "def np(self, *args, **kwargs):\n raise NotImplementedError('numpy is unavailable on your system. Please install numpy before calling plist.np().')", "def test_Keras_import(benchmark):\n\n def Benchmark():\n from keras import models\n m = models.Sequential()\n del m\n\n benchmark(Benchmark)", "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * 
ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n 
PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def test_dtype_cache(self):\r\n\r\n start, stop, step = iscalars('start', 'stop', 'step')\r\n out1 = arange(start, stop, step)\r\n out2 = arange(start, stop, step, dtype=out1.dtype)\r\n out3 = arange(start, stop, 2., dtype=out1.dtype)\r\n out4 = arange(start, stop, 2.)\r\n\r\n assert out1.owner.op is out2.owner.op\r\n assert out2.owner.op is out3.owner.op\r\n assert out3.owner.op is not out4.owner.op", "def with_numpy(func):\r\n def my_func():\r\n raise nose.SkipTest('Test requires numpy')\r\n return my_func" ]
[ "0.6325043", "0.6092226", "0.60902053", "0.6086288", "0.60037786", "0.57273877", "0.54694366", "0.5443474", "0.54396516", "0.5428644", "0.5428636", "0.53996533", "0.53130186", "0.5310884", "0.53034955", "0.5291817", "0.52899164", "0.5268922", "0.5227571", "0.52256954", "0.52089626", "0.5203733", "0.51786125", "0.5176473", "0.5158059", "0.50898695", "0.50884044", "0.50766873", "0.5042354", "0.5023368" ]
0.7774883
0
Benchmark keras module import.
def test_Keras_import(benchmark): def Benchmark(): from keras import models m = models.Sequential() del m benchmark(Benchmark)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_keras() -> Any:\n import keras\n keras.losses.weighted_log_loss = weighted_log_loss\n keras.metrics.false_pos = false_pos\n keras.metrics.false_positives = false_pos\n keras.metrics.false_neg = false_neg\n return keras", "def main():\n setup_keras()\n\n args = parse()\n\n train_settings = common.load_settings(args.settings_path, default_conf_name='train.yml')\n train_settings['store'] = args.store\n\n feature_settings = common.load_settings(args.settings_path, default_conf_name='feature.yml')\n model_settings = common.load_settings(args.settings_path, default_conf_name=train_settings['model_conf'])\n\n train_df, val_df = load_training_data(dict(train_settings, **feature_settings))\n assert train_df.shape[0] > val_df.shape[0] * 4.5, f'training data {train_df.shape[0]} should be much larger than validation {val_df.shape[0]}'\n\n sample_featurizer = AudioFeature(feature_settings)\n\n if args.load_name:\n model_name = args.load_name\n print('Loading existing model', model_name)\n m = keras.models.load_model(model_name)\n else:\n t = datetime.datetime.now().strftime('%Y%m%d-%H%M')\n model_name = f\"model-{model_settings['model']}_hop{feature_settings['hop_length']}_{t}\"\n m = models.build(dict(model_settings, **feature_settings))\n m.summary()\n\n output_dir = os.path.join(args.model_store, model_name)\n\n print(f\"Training model: '{model_name}'\", json.dumps(train_settings, indent=1))\n\n combined_settings = dict(train_settings, **model_settings, **feature_settings)\n\n h = train_model(output_dir, train_df, val_df,\n model=m,\n sample_featurizer=sample_featurizer,\n settings=combined_settings)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, 
n_samples_test=10000, n_features=28 * 28)", "def run(layers):", "def test_Numpy_import(benchmark):\n\n def Benchmark():\n import numpy as np\n a = np.ndarray(1)\n del a\n\n benchmark(Benchmark)", "def test_Tensorflow_import(benchmark):\n\n def Benchmark():\n import tensorflow as tf\n a = tf.Variable(1)\n del a\n\n benchmark(Benchmark)", "def test_machine_learning():", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def test():\n return _make_modules(is_train=False)", "def test_compute_glycemic_load(self):\n pass", "def run_performance( pkg_mod_iter ):\n for package, module_iter in pkg_mod_iter:\n print( package )\n print( \"=\"*len(package ) )\n print()\n for filename, modname in module_iter:\n print( filename, modname )\n try:\n module= __import__( package+\".\"+modname, fromlist=(modname,\"performance\") )\n module.performance()\n except AttributeError:\n pass # no performance() function in the module.", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_KerasLayers:\", self._testMethodName)\n\t\tself.model = VGG16() \n\t\tself.last_layer = self.model.submodules[-1]\n\t\tww.weightwatcher.keras = keras", "def train():\n import trace\n trace.train()", "def _get_backend_module(name):\n if name == \"numpy\":\n import numpy as np\n\n return np\n if name == \"numpy.ma\":\n import numpy as np\n\n return np.ma\n if name == \"torch\":\n import torch\n\n return torch\n if name == \"jax\":\n import jax\n import jax.numpy as jnp\n\n _JAX_KEY = jax.random.PRNGKey(0)\n return jnp\n if name == \"tensorflow\":\n import tensorflow as tf\n\n return tf", "def test_imports():\n import sys\n import src\n assert 'sklearn.feature_extraction' not in sys.modules.keys()", "def test_deep_learning_models():\n atom = ATOMClassifier(*mnist, n_rows=0.1, random_state=1)\n pytest.raises(PermissionError, atom.clean)\n atom.run(KerasClassifier(neural_network, epochs=1, batch_size=512, verbose=0))", "def main():\n options = get_options()\n dataset, test, fs = get_dataset(options)\n\n def eval_all(folder):\n \"\"\"evaluates all optimizers and all models on given dataset, and saves\n info pictures to folder\n\n Args:\n folder: folder to save results\n \"\"\"\n optimizers = [\n tf.keras.optimizers.Adadelta(learning_rate=0.01),\n tf.keras.optimizers.Adagrad(learning_rate=0.002),\n tf.keras.optimizers.Adam(learning_rate=0.0001),\n tf.keras.optimizers.Adamax(learning_rate=0.0005),\n tf.keras.optimizers.Ftrl(learning_rate=0.002),\n tf.keras.optimizers.Nadam(learning_rate=0.001),\n tf.keras.optimizers.RMSprop(learning_rate=0.0005),\n tf.keras.optimizers.SGD(learning_rate=0.003),\n ]\n\n epochs = [\n 500, 120, 80, 150, 300, 60, 100, 500\n ]\n\n biased_randomized = [\n (models.DefaultModel, False),\n (models.BiasedModel, False),\n (models.NeuralModel, False),\n (models.DefaultModel, True),\n (models.BiasedModel, True),\n (models.NeuralModel, True),\n ]\n\n for optimizer, n_epochs in zip(optimizers, epochs):\n for model, rndmz in biased_randomized:\n eval_optimizer(folder,\n model,\n optimizer,\n n_epochs,\n rndmz)\n\n def eval_complicated(folder):\n optimizers = [\n tf.keras.optimizers.Adadelta,\n tf.keras.optimizers.Adagrad,\n tf.keras.optimizers.Adam,\n 
tf.keras.optimizers.Adamax,\n tf.keras.optimizers.Ftrl,\n tf.keras.optimizers.Nadam,\n tf.keras.optimizers.RMSprop,\n tf.keras.optimizers.SGD,\n ]\n\n type_eph_lrate = [\n (models.Deep2Hidden, 15, 0.00003),\n (models.Deep11Hidden, 15, 0.00003)\n ]\n\n for opt in optimizers:\n for model, epochs, lrate in type_eph_lrate:\n eval_optimizer(folder,\n model,\n opt(learning_rate=lrate),\n epochs,\n True)\n\n def eval_big(folder):\n optimizers_filter = [\n (tf.keras.optimizers.Adadelta(learning_rate=1e-3), 200),\n (tf.keras.optimizers.Adagrad(learning_rate=1e-3), 200),\n (tf.keras.optimizers.SGD(learning_rate=1e-3), 200)\n ]\n optimizers_layer = [\n (tf.keras.optimizers.Adam(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Adamax(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Ftrl(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Nadam(learning_rate=1e-5), 30),\n (tf.keras.optimizers.RMSprop(learning_rate=1e-5), 30)\n ]\n optimizers_deep = [\n (tf.keras.optimizers.Adam(learning_rate=3e-6), 15),\n (tf.keras.optimizers.Adamax(learning_rate=3e-6), 15),\n (tf.keras.optimizers.RMSprop(learning_rate=3e-6), 15)\n ]\n mdls = [\n models.DefaultModel,\n models.NeuralModel,\n models.Deep1Hidden\n ]\n\n for (opt, model) in zip((optimizers_filter,\n optimizers_layer,\n optimizers_deep), mdls):\n for (optimizer, epochs) in opt:\n randomize = False if model is models.DefaultModel else True\n eval_optimizer(folder,\n model,\n optimizer,\n epochs,\n randomize)\n\n def eval_optimizer(folder,\n model, optimizer, epochs, randomize):\n \"\"\"Evaluates given model on given dataset\n\n Evaluates model on given dataset, optimizes result by optimizer, and saves\n info image to given folder\n\n Args:\n folder: folder to save info images\n model: tf.keras.Model model for evaluation\n optimizer: tf.keras optimizer\n epochs (int): epochs of training\n randomize (bool): tandomize initial weights and biases\n\n \"\"\"\n class2name = {\n models.DefaultModel: \"default\",\n models.BiasedModel: \"biased\",\n models.NeuralModel: \"neural\",\n models.NeuralSTD: \"neuralSTD\",\n models.Deep1Hidden: \"deep1h\",\n models.Deep2Hidden: \"deep2h\",\n models.Deep11Hidden: \"deep1_1\"\n }\n\n # prepare for training\n layer_len = len(dataset.take(1).as_numpy_iterator().next()[0][0])\n optimizer_conf = optimizer.get_config()\n fname = \"/%s_%s_%deph_%.5flrate_%s\" % \\\n (class2name[model],\n optimizer_conf[\"name\"],\n epochs,\n optimizer_conf[\"learning_rate\"],\n \"rnd\" if randomize else \"nornd\")\n\n pic_name = folder + fname + \".png\"\n file_name = folder + \"/models\" + fname + \".model\"\n model_obj = model(layer_len, randomize)\n model_obj.compile(optimizer=optimizer, loss=models.SimpleLoss())\n\n # prepare data from test dataset for result visualization\n train_sample = None\n no_train_sample = None\n samples = []\n labels = []\n for features, label in test.as_numpy_iterator():\n samples.append(features)\n labels.append(label)\n if train_sample is None and label == 1:\n train_sample = features\n if no_train_sample is None and label == 0:\n no_train_sample = features\n samples = np.array(samples)\n labels = np.array(labels, dtype=np.bool)\n # save untrained classification, for result visualization\n untrained_predicted_labels = model_obj(samples).numpy()\n # train model\n history = model_obj.fit(x=dataset, epochs=epochs)\n train_filtered = model_obj.filter_single(train_sample)\n no_train_filtered = model_obj.filter_single(no_train_sample)\n predicted_labels = model_obj(samples).numpy()\n\n # result visualization and saving\n fig = 
plt.figure(figsize=(15., 7.))\n loss_ax = fig.add_subplot(3, 1, 1)\n loss_ax.set_title(\"ход обучения\")\n loss_ax.set_xlabel(\"эпоха\")\n loss_ax.set_ylabel(\"ф-я потерь\")\n sig_untrained_ax = fig.add_subplot(3, 2, 3)\n sig_untrained_ax.set_title(\"примеры сигналов\")\n sig_untrained_ax.set_xlabel(\"время, сек\")\n sig_untrained_ax.set_ylabel(\"ускорение, мкм/сек^2\")\n sig_trained_ax = fig.add_subplot(3, 2, 4)\n sig_trained_ax.set_title(\"отфильтрованные сигналы\")\n sig_trained_ax.set_xlabel(\"время, сек\")\n sig_trained_ax.set_ylabel(\"ускорение, мкм/сек^2\")\n # sig_trained_ax.set_ylim(-1, 1)\n label_untrained_ax = fig.add_subplot(3, 2, 5)\n label_untrained_ax.set_title(\"классификация необученной моделью\")\n label_untrained_ax.set_xlabel(\"вероятность, что сигнал от поезда\")\n label_trained_ax = fig.add_subplot(3, 2, 6)\n label_trained_ax.set_title(\"классификация обученной моделью\")\n label_trained_ax.set_xlabel(\"вероятность, что сигнал от поезда\")\n\n loss_ax.plot(history.history[\"loss\"])\n train_ax_label, = sig_untrained_ax.plot(\n np.linspace(0, len(train_sample)/fs, len(train_sample)),\n train_sample,\n \"g\", label=\"сигнал с поездом\")\n no_train_ax_label, = sig_untrained_ax.plot(\n np.linspace(0, len(no_train_sample)/fs, len(no_train_sample)),\n no_train_sample,\n \"r\", label=\"сигнал без поезда\")\n sig_untrained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label, = sig_trained_ax.plot(\n np.linspace(0, len(train_filtered)/fs, len(train_filtered)-1),\n train_filtered[1:],\n \"g\", label=\"сигнал с поездом\")\n no_train_ax_label, = sig_trained_ax.plot(\n np.linspace(0, len(no_train_filtered)/fs, len(no_train_filtered)-1),\n no_train_filtered[1:],\n \"r\", label=\"сигнал без поезда\")\n sig_trained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label = label_untrained_ax.scatter(\n untrained_predicted_labels[labels],\n np.array(range(0, len(labels)))[labels],\n color='green', marker='.', label=\"сигнал с поездом\")\n no_train_ax_label = label_untrained_ax.scatter(\n untrained_predicted_labels[np.invert(labels)],\n np.array(range(0, len(labels)))[np.invert(labels)],\n color='red', marker='.', label=\"сигнал без поезда\")\n label_untrained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label = label_trained_ax.scatter(\n predicted_labels[labels],\n np.ma.array(range(0, len(labels)))[labels],\n color='green', marker='.', label=\"сигнал с поездом\")\n no_train_ax_label = label_trained_ax.scatter(\n predicted_labels[np.invert(labels)],\n np.array(range(0, len(labels)))[np.invert(labels)],\n color='red', marker='.', label=\"сигнал без поезда\")\n label_trained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n fig.tight_layout(w_pad=3, h_pad=2,\n rect=[0.0225, 0.0225, 0.95, 0.95])\n #plt.show()\n plt.savefig(pic_name)\n with open(file_name, \"w\") as f:\n f.write(str(model_obj))\n\n for i in range(0, 20):\n path = \"tmp/%i\" % i\n if not os.path.exists(\"%s/models\" % path):\n os.makedirs(\"%s/models\" % path)\n eval_optimizer(\n path,\n models.DefaultModel,\n tf.keras.optimizers.Adagrad(learning_rate=0.001),\n 150,\n False,\n )", "def train(entropy_fn):\n del entropy_fn # unused\n return _make_modules(is_train=True)", "def train_and_score(genome, dataset):\n logging.info(\"Getting Keras datasets\")\n\n if dataset == 'cifar10_mlp':\n nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs = get_cifar10_mlp()\n elif dataset == 'cifar10_cnn':\n nb_classes, batch_size, input_shape, x_train, 
x_test, y_train, y_test, epochs = get_cifar10_cnn()\n elif dataset == 'mnist_mlp':\n nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs = get_mnist_mlp()\n elif dataset == 'mnist_cnn':\n nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs = get_mnist_cnn()\n\n logging.info(\"Compling Keras model\")\n\n if dataset == 'cifar10_mlp':\n model = compile_model_mlp(genome, nb_classes, input_shape)\n elif dataset == 'cifar10_cnn':\n model = compile_model_cnn(genome, nb_classes, input_shape)\n elif dataset == 'mnist_mlp':\n model = compile_model_mlp(genome, nb_classes, input_shape)\n elif dataset == 'mnist_cnn':\n model = compile_model_cnn(genome, nb_classes, input_shape)\n\n history = LossHistory()\n\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs, \n # using early stopping so no real limit - don't want to waste time on horrible architectures\n verbose=1,\n validation_data=(x_test, y_test),\n #callbacks=[history])\n callbacks=[early_stopper])\n\n score = model.evaluate(x_test, y_test, verbose=0)\n\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\n K.clear_session()\n #we do not care about keeping any of this in memory - \n #we just need to know the final scores and the architecture\n \n return score[1] # 1 is accuracy. 0 is loss.", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = 
images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, \n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, elapsed_time_full_dataset)\n tf.reset_default_graph()", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def _main():\n\n # setup paths\n json_model_path = osp.join(FLAGS.input_dir, FLAGS.json_model_fname)\n weights_path = osp.join(FLAGS.input_dir, FLAGS.weights_fname)\n save_path = osp.splitext(json_model_path)[0][:-6] + \"graph_w\" + str(weights_path.split(\"_\")[-1][:-3]) + \".pb\"\n print(\"Loading Model: \" + json_model_path)\n print(\"Loading Weights: \" + weights_path)\n\n # Set keras to test phase\n k.set_learning_phase(0)\n\n # Load json and weights, then compile model\n with open(json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\n model = model_from_json(loaded_model_json)\n model.load_weights(weights_path)\n model.compile(loss='mse', optimizer='sgd')\n\n # Freeze graph\n frozen_graph = freeze_session(k.get_session(), output_names=[out.op.name for out in model.outputs])\n\n # Write graph to protobuf file\n tf.train.write_graph(frozen_graph, \"model\", save_path, as_text=False)\n print(\"Written Graph to: \" + save_path)", "def with_cpu(ops, model):\n ...", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The 
features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n 
version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && {cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n 
mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def keras_model_fn_cpu(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = model_config['learning_rate']\n \n with tf.device('/cpu:0'):\n ## build model\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.LSTM(lstm_hs,recurrent_activation = 'sigmoid', return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.GRU(gru_hs, reset_after = True, recurrent_activation = 'sigmoid', return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name 
= 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n\n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer=ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics=['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)])\n return model", "def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # ================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = 
sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = \"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))" ]
[ "0.6922674", "0.6126091", "0.59120166", "0.5763644", "0.56895125", "0.56394553", "0.5586993", "0.55789703", "0.55661064", "0.554108", "0.55030227", "0.54506975", "0.5448948", "0.54210556", "0.5392779", "0.5350082", "0.5346639", "0.5344349", "0.5337076", "0.5336877", "0.5334215", "0.5307431", "0.52947116", "0.5288093", "0.52727145", "0.5260122", "0.525958", "0.5257157", "0.52444845", "0.5244153" ]
0.7570074
0
Benchmark tensorflow module import.
def test_Tensorflow_import(benchmark): def Benchmark(): import tensorflow as tf a = tf.Variable(1) del a benchmark(Benchmark)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def try_tensorflow_import(verbose=False):\n import os\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n import tensorflow as tf\n\n tf.compat.v1.disable_eager_execution()\n\n if verbose:\n tf.debugging.set_log_device_placement(True) # logs what device is being used\n gpus = tf.config.experimental.list_physical_devices(\"GPU\")\n if not gpus:\n return\n\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n if verbose:\n logical_gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")", "def test_Keras_import(benchmark):\n\n def Benchmark():\n from keras import models\n m = models.Sequential()\n del m\n\n benchmark(Benchmark)", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n 
samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def _try_import_strategy():\n import tensorflow as tf\n return tf.distribute.experimental.MultiWorkerMirroredStrategy", "def import_tf(device_id=-1, verbose=False):\n # set visible gpu, -1 is cpu\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if device_id < 0 else str(device_id)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' if verbose else '3'\n import tensorflow as tf\n tf.logging.set_verbosity(tf.logging.DEBUG if verbose else tf.logging.ERROR)\n return tf", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && {cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. 
PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples", "def test_compute_glycemic_load(self):\n pass", "def run(config_file):\n config = load_config(config_file)\n config_global = config['global']\n\n # setup a logger\n logger = logging.getLogger('experiment')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler_stdout = logging.StreamHandler(sys.stdout)\n handler_stdout.setLevel(config['logger']['level'])\n handler_stdout.setFormatter(formatter)\n logger.addHandler(handler_stdout)\n\n if 'path' in config['logger']:\n handler_file = logging.FileHandler(config['logger']['path'])\n handler_file.setLevel(config['logger']['level'])\n handler_file.setFormatter(formatter)\n logger.addHandler(handler_file)\n\n logger.setLevel(config['logger']['level'])\n\n # Allow the gpu to be used in parallel\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n if 'max_threads' in config_global:\n sess_config.intra_op_parallelism_threads = config_global['max_threads']\n\n # we allow to set the random seed in the config file for reproducibility. However, when running on GPU, results\n # will still be nondeterministic (due to nondeterministic behavior of tensorflow)\n if 'random_seed' in config_global:\n seed = config_global['random_seed']\n logger.info('Using fixed random seed'.format(seed))\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n with tf.Session(config=sess_config) as sess:\n # We are now fetching all relevant modules. 
It is strictly required that these module contain a variable named\n # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,\n # experiment.Trainer or experiment.Evaluator\n data_module = config['data-module']\n model_module = config['model-module']\n training_module = config['training-module']\n evaluation_module = config.get('evaluation-module', None)\n\n # The modules are now dynamically loaded\n DataClass = importlib.import_module(data_module).component\n ModelClass = importlib.import_module(model_module).component\n TrainingClass = importlib.import_module(training_module).component\n EvaluationClass = importlib.import_module(evaluation_module).component if evaluation_module else None\n\n # We then wire together all the modules and start training\n data = DataClass(config['data'], config_global, logger)\n model = ModelClass(config['model'], config_global, logger)\n training = TrainingClass(config['training'], config_global, logger)\n\n # setup the data (validate, create generators, load data, or else)\n logger.info('Setting up the data')\n data.setup()\n # build the model (e.g. compile it)\n logger.info('Building the model')\n model.build(data, sess)\n # start the training process\n logger.info('Starting the training process')\n training.start(model, data, sess)\n\n # perform evaluation, if required\n if EvaluationClass:\n logger.info('Evaluating')\n evaluation = EvaluationClass(config['evaluation'], config_global, logger)\n evaluation.start(model, data, sess)\n else:\n logger.info('No evaluation')\n\n logger.info('DONE')", "def setUp(self):\n self._logger = mock_lib.MockBenchmarkLogger()\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.train.create_global_step()\n self.train_op = tf.assign_add(tf.train.get_global_step(), 1)\n self.global_step = tf.train.get_global_step()", "def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)", "def _benchmark_cnn(self):\n self.single_session = False\n (image_producer_ops, enqueue_ops, fetches) = self._build_model()\n fetches_list = nest.flatten(list(fetches.values()))\n main_fetch_group = tf.group(*fetches_list)\n execution_barrier = None\n \n\n global_step = tf.train.get_global_step()\n with tf.device(self.global_step_device):\n with tf.control_dependencies([main_fetch_group]):\n fetches['inc_global_step'] = global_step.assign_add(1)\n\n\n local_var_init_op = tf.local_variables_initializer()\n variable_mgr_init_ops = [local_var_init_op]\n with tf.control_dependencies([local_var_init_op]):\n variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())\n local_var_init_op_group = tf.group(*variable_mgr_init_ops)\n\n summary_op = tf.summary.merge_all()\n is_chief = (not self.job_name or self.task_index == 0)\n summary_writer = None\n \n # We run the summaries in the same thread as the training operations by\n # passing in None for summary_op to avoid a summary_thread being started.\n # Running summaries and training operations in parallel could run out of\n # GPU memory.\n saver = tf.train.Saver(\n self.variable_mgr.savable_variables(), 
save_relative_paths=True)\n ready_for_local_init_op = None\n \n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=self.params.train_dir,\n ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_var_init_op_group,\n saver=saver,\n global_step=global_step,\n summary_op=None,\n save_model_secs=self.params.save_model_secs,\n summary_writer=summary_writer)\n\n step_train_times = []\n start_standard_services = (\n self.params.summary_verbosity >= 1 or\n self.dataset.queue_runner_required())\n target = self.cluster_manager.get_target() if self.cluster_manager else ''\n with sv.managed_session(\n master=target,\n config=create_config_proto(self.params),\n start_standard_services=start_standard_services) as sess:\n image_producer = cnn_util.ImageProducer(sess, image_producer_ops,\n self.batch_group_size)\n image_producer.start()\n for i in xrange(len(enqueue_ops)):\n sess.run(enqueue_ops[:(i + 1)])\n image_producer.notify_image_consumption()\n self.init_global_step, = sess.run([global_step])\n if not self.single_session:\n global_step_watcher = GlobalStepWatcher(\n sess, global_step,\n self.num_workers * self.num_warmup_batches +\n self.init_global_step,\n self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)\n global_step_watcher.start()\n \n\n log_fn('Running warm up')\n local_step = -1 * self.num_warmup_batches\n done_fn = global_step_watcher.done\n loop_start_time = time.time()\n while not done_fn():\n if local_step == 0:\n log_fn('Done warm up')\n \n header_str = 'Step\\tImg/sec\\tloss'\n \n log_fn(header_str)\n \n # reset times to ignore warm up batch\n step_train_times = []\n loop_start_time = time.time()\n \n fetch_summary = None\n summary_str = benchmark_one_step(\n sess, fetches, local_step,\n self.batch_size * (self.num_workers if self.single_session else 1),\n step_train_times, self.trace_filename, image_producer, self.params,\n fetch_summary)\n \n local_step += 1\n loop_end_time = time.time()\n # Waits for the global step to be done, regardless of done_fn.\n \n num_steps = global_step_watcher.num_steps()\n elapsed_time = global_step_watcher.elapsed_time()\n\n average_wall_time = elapsed_time / num_steps if num_steps > 0 else 0\n images_per_sec = ((self.num_workers * self.batch_size) / average_wall_time\n if average_wall_time > 0 else 0)\n\n log_fn('-' * 64)\n log_fn('total images/sec: %.2f' % images_per_sec)\n log_fn('-' * 64)\n image_producer.done()\n #if is_chief:\n # store_benchmarks({'total_images_per_sec': images_per_sec}, self.params)\n # Save the model checkpoint.\n \n sv.stop()\n return {\n 'num_workers': self.num_workers,\n 'num_steps': num_steps,\n 'average_wall_time': average_wall_time,\n 'images_per_sec': images_per_sec\n }", "def train():\n import trace\n trace.train()", "def import_tf(gpu_list):\n import tensorflow as tf\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(gpu_list)\n\n return tf", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % 
(depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, 
\"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def test_Numpy_import(benchmark):\n\n def Benchmark():\n import numpy as np\n a = np.ndarray(1)\n del a\n\n benchmark(Benchmark)", "def test_import():\n assert tfio is not None", "def run_performance( pkg_mod_iter ):\n for package, module_iter in pkg_mod_iter:\n print( package )\n print( \"=\"*len(package ) )\n print()\n for filename, modname in module_iter:\n print( filename, modname )\n try:\n module= __import__( package+\".\"+modname, fromlist=(modname,\"performance\") )\n module.performance()\n except AttributeError:\n pass # no performance() function in the module.", "def test_make_benchmark_single_ll():\n benchmark = llvm.make_benchmark(INVALID_IR_PATH)\n assert str(benchmark.uri).startswith(\"benchmark://user-v0/\")\n assert benchmark.uri.scheme == \"benchmark\"\n assert benchmark.uri.dataset == \"user-v0\"", "def main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n counts = defaultdict(lambda: 0)\n total_sentences = 0\n for filename in sys.stdin:\n filename = filename.strip()\n reader = tf.python_io.tf_record_iterator(filename)\n n_sentences = 0\n for record in reader:\n x = tf.train.Example()\n x.ParseFromString(record)\n tokens = [int(i) for i in x.features.feature[FLAGS.field].int64_list.value]\n counts[len(tokens)] += 1\n n_sentences += 1\n tf.logging.info(\"Read %d sentences from %s.\", n_sentences, filename)\n total_sentences += n_sentences\n\n tf.logging.info(\"Statistics for %s:\", FLAGS.field)\n sorted_counts = [(l, f) for l, f in counts.iteritems()]\n sorted_counts.sort()\n acc = 0\n for l, f in sorted_counts:\n acc += f\n tf.logging.info(\"<=%d: %d/%d (%.3f%%)\", l, acc, total_sentences, 100.0 * acc / total_sentences)", "def test_basic_tf(self):\n def _map_fun(args, ctx):\n import tensorflow as tf\n x = tf.constant(args['x'])\n y = tf.constant(args['y'])\n sum = tf.add(x, y)\n with tf.Session() as sess:\n result = sess.run([sum])\n assert result[0] == 3\n\n args = {'x': 1, 'y': 2}\n cluster = TFCluster.run(self.sc, _map_fun, tf_args=args, num_executors=self.num_workers, num_ps=0)\n cluster.shutdown()", "def run_training(argv=None):\n # parse args\n args = parse_arguments(sys.argv if argv is None else argv)\n logging.info('getting the ML model...')\n model = getattr(models, args.model)(nr_predictors=24, nr_classes=2)\n\n # get the data\n logging.info('getting the data...')\n temp_folder = 'data'\n if not os.path.exists(temp_folder):\n os.mkdir(temp_folder)\n file_path = os.path.join(temp_folder, 'data.csv')\n storage_helper.download_blob(args.bucket, args.blob_path, file_path)\n time_series = pd.read_csv(file_path)\n training_test_data = preprocess.train_test_split(time_series, 0.8)\n\n\n # define training objective\n logging.info('defining the training objective...')\n sess = tf.Session()\n feature_data = tf.placeholder(\"float\", [None, 24])\n actual_classes = tf.placeholder(\"float\", [None, 2])\n\n model = model.build_model(feature_data)\n cost = -tf.reduce_sum(actual_classes * tf.log(model))\n train_opt = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # train model\n correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(actual_classes, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n logging.info('training the model...')\n time_dct = {}\n time_dct['start'] = time.time()\n for i in range(1, args.epochs):\n sess.run(\n 
train_opt,\n feed_dict={\n feature_data: training_test_data['training_predictors_tf'].values,\n actual_classes: training_test_data['training_classes_tf'].values.reshape(\n len(training_test_data['training_classes_tf'].values), 2)\n }\n )\n if i % 5000 == 0:\n print(i, sess.run(\n accuracy,\n feed_dict={\n feature_data: training_test_data['training_predictors_tf'].values,\n actual_classes: training_test_data['training_classes_tf'].values.reshape(\n len(training_test_data['training_classes_tf'].values), 2)\n }\n ))\n time_dct['end'] = time.time()\n logging.info('training took {0:.2f} sec'.format(time_dct['end'] - time_dct['start']))\n\n # print results of confusion matrix\n logging.info('validating model on test set...')\n feed_dict = {\n feature_data: training_test_data['test_predictors_tf'].values,\n actual_classes: training_test_data['test_classes_tf'].values.reshape(\n len(training_test_data['test_classes_tf'].values), 2)\n }\n metrics.tf_confusion_matrix(model, actual_classes, sess, feed_dict)\n\n # create signature for TensorFlow Serving\n logging.info('Exporting model for tensorflow-serving...')\n\n export_path = os.path.join(\"model\", args.version)\n tf.saved_model.simple_save(\n sess,\n export_path,\n inputs={'predictors': feature_data},\n outputs={'prediction': tf.argmax(model, 1),\n 'model-version': tf.constant([str(args.version)])}\n )\n\n # save model on GCS\n logging.info(\"uploading to \" + args.bucket + \"/\" + export_path)\n storage_helper.upload_to_storage(args.bucket, export_path)\n\n # remove local files\n shutil.rmtree(export_path)\n shutil.rmtree(temp_folder)", "def main(_):\n train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()\n train_data = train_data[0:int(100000/2)] # for fast testing\n print('len(train_data) {}'.format(len(train_data))) # 929589\n print('len(valid_data) {}'.format(len(valid_data))) # 73760\n print('len(test_data) {}'.format(len(test_data))) # 82430\n print('vocab_size {}'.format(vocab_size)) # 10000\n\n config = SmallConfig()\n eval_config = SmallConfig()\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n\n\n # train_data = [i for i in range(1000)]\n # for batch in ptb_iterator(train_data, batch_size=10, num_steps=20):\n # x, y = batch\n # print(len(x), len(x[0]), len(y), len(y[0]))\n # print('x:\\n', x)\n # print('y:\\n',y)\n # exit()\n\n\n sess = tf.InteractiveSession()\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n m = PTBModel(is_training=True, config=config)\n with tf.variable_scope(\"model\", reuse=True, initializer=initializer):\n mvalid = PTBModel(is_training=False, config=config)\n mtest = PTBModel(is_training=False, config=eval_config)\n\n sess.run(tf.initialize_all_variables())\n\n for i in range(config.max_max_epoch):\n lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n m.assign_lr(sess, config.learning_rate * lr_decay)\n start_time = time.time()\n print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, sess.run(m.lr)))\n train_perplexity = run_epoch(sess, m, train_data, m.train_op, verbose=True)\n print(\" Train Perplexity: %.3f took %fs\" % (train_perplexity, time.time() - start_time))\n valid_perplexity = run_epoch(sess, mvalid, valid_data, tf.no_op())\n print(\" Valid Perplexity: %.3f\" % (valid_perplexity))\n\n start_time = time.time()\n test_perplexity = run_epoch(sess, mtest, test_data, tf.no_op())\n print(\"Test Perplexity: %.3f took %fs\" % (test_perplexity, time.time() - 
start_time))", "def speed():\r\n\r\n algo = ['logistic_sgd', 'logistic_cg', 'mlp', 'convolutional_mlp',\r\n 'dA', 'SdA', 'DBN', 'rbm', 'rnnrbm']\r\n to_exec = [True] * len(algo)\r\n# to_exec = [False] * len(algo)\r\n# to_exec[-1] = True\r\n do_float64 = True\r\n do_float32 = True\r\n do_gpu = True\r\n\r\n algo_executed = [s for idx, s in enumerate(algo) if to_exec[idx]]\r\n #Timming expected are from the buildbot that have an i7-920 @\r\n # 2.67GHz with hyperthread enabled for the cpu, 12G of ram. An GeForce GTX\r\n # 285 for the GPU. OS=Fedora 14, gcc=4.5.1, python/BLAS from EPD\r\n # 7.1-2 (python 2.7.2, mkl unknow). BLAS with only 1 thread.\r\n\r\n expected_times_64 = numpy.asarray([10.0, 22.5, 76.1, 73.7, 116.4,\r\n 346.9, 381.9, 558.1, 186.3])\r\n expected_times_32 = numpy.asarray([11.6, 29.6, 42.5, 66.5, 71,\r\n 191.2, 226.8, 432.8, 176.2])\r\n\r\n # Number with just 1 decimal are new value that are faster with\r\n # the Theano version 0.5rc2 Other number are older. They are not\r\n # updated, as we where faster in the past!\r\n # TODO: find why and fix this!\r\n\r\n# Here is the value for the buildbot on February 3th 2012.\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n# gpu times[3.72957802, 9.94316864, 29.1772666, 9.13857198, 25.91144657,\r\n# 18.30802011, 53.38651466, 285.41386175]\r\n# expected [3.076634879, 7.555234910, 18.99226785, 9.58915591, 24.130070450,\r\n# 24.77524018, 92.66246653, 322.340329170]\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n#expected/get [0.82492841, 0.75984178, 0.65092691, 1.04930573, 0.93125138\r\n# 1.35324519 1.7356905 1.12937868]\r\n expected_times_gpu = numpy.asarray([3.07663488, 7.55523491, 18.99226785,\r\n 9.6, 24.13007045,\r\n 20.4, 56, 302.6, 315.4])\r\n expected_times_64 = [s for idx, s in enumerate(expected_times_64)\r\n if to_exec[idx]]\r\n expected_times_32 = [s for idx, s in enumerate(expected_times_32)\r\n if to_exec[idx]]\r\n expected_times_gpu = [s for idx, s in enumerate(expected_times_gpu)\r\n if to_exec[idx]]\r\n\r\n def time_test(m, l, idx, f, **kwargs):\r\n if not to_exec[idx]:\r\n return\r\n print algo[idx]\r\n ts = m.call_time\r\n try:\r\n f(**kwargs)\r\n except Exception, e:\r\n print >> sys.stderr, 'test', algo[idx], 'FAILED', e\r\n l.append(numpy.nan)\r\n return\r\n te = m.call_time\r\n l.append(te - ts)\r\n\r\n def do_tests():\r\n m = theano.compile.mode.get_default_mode()\r\n l = []\r\n time_test(m, l, 0, logistic_sgd.sgd_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 1, logistic_cg.cg_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 2, mlp.test_mlp, n_epochs=5)\r\n time_test(m, l, 3, convolutional_mlp.evaluate_lenet5, n_epochs=5,\r\n nkerns=[5, 5])\r\n time_test(m, l, 4, dA.test_dA, training_epochs=2,\r\n output_folder='tmp_dA_plots')\r\n time_test(m, l, 5, SdA.test_SdA, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 6, DBN.test_DBN, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 7, rbm.test_rbm, training_epochs=1, batch_size=300,\r\n n_chains=1, n_samples=1, output_folder='tmp_rbm_plots')\r\n time_test(m, l, 8, rnnrbm.test_rnnrbm, num_epochs=1)\r\n return numpy.asarray(l)\r\n\r\n #test in float64 in FAST_RUN mode on the cpu\r\n import theano\r\n if do_float64:\r\n theano.config.floatX = 'float64'\r\n theano.config.mode = 'FAST_RUN'\r\n float64_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 
'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n\r\n #test in float32 in FAST_RUN mode on the cpu\r\n theano.config.floatX = 'float32'\r\n if do_float32:\r\n float32_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n\r\n #test in float32 in FAST_RUN mode on the gpu\r\n import theano.sandbox.cuda\r\n if do_gpu:\r\n theano.sandbox.cuda.use('gpu')\r\n gpu_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n\r\n if (do_float64 + do_float32 + do_gpu) > 1:\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n if do_float64:\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n if do_float32:\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n if do_gpu:\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64 and do_float32:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n if do_float64 and do_gpu:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n print >> sys.stderr, 'expected float64/gpu', (\r\n expected_times_64 / gpu_times)\r\n if do_float32 and do_gpu:\r\n print >> sys.stderr, 'float32/gpu', float32_times / gpu_times\r\n print >> sys.stderr, 'expected float32/gpu', (\r\n expected_times_32 / gpu_times)\r\n\r\n def compare(x, y):\r\n ratio = x / y\r\n # If there is more then 5% difference between the expected\r\n # time and the real time, we consider this an error.\r\n return sum((ratio < 0.95) + (ratio > 1.05))\r\n\r\n if do_float64:\r\n err = compare(expected_times_64, float64_times)\r\n print >> sys.stderr, 
'speed_failure_float64=' + str(err)\r\n if do_float32:\r\n err = compare(expected_times_32, float32_times)\r\n print >> sys.stderr, 'speed_failure_float32=' + str(err)\r\n if do_gpu:\r\n err = compare(expected_times_gpu, gpu_times)\r\n print >> sys.stderr, 'speed_failure_gpu=' + str(err)\r\n\r\n assert not numpy.isnan(gpu_times).any()", "def init_tf(FLAGS):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n visible_gpus = []\n if gpus and not FLAGS.cpu:\n visible_gpus = gpus[hvd.local_rank()]\n tf.config.experimental.set_visible_devices(visible_gpus, 'GPU')\n\n if FLAGS.amp:\n policy = tf.keras.mixed_precision.Policy(\"mixed_float16\")\n tf.keras.mixed_precision.set_global_policy(policy)\n\n tf.config.run_functions_eagerly(FLAGS.run_eagerly)\n\n if FLAGS.tfdata_debug:\n tf.data.experimental.enable_debug_mode()\n\n if FLAGS.inter_op_parallelism:\n tf.config.threading.set_inter_op_parallelism_threads(FLAGS.inter_op_parallelism)\n\n if FLAGS.intra_op_parallelism:\n tf.config.threading.set_intra_op_parallelism_threads(FLAGS.intra_op_parallelism)\n\n tf.random.set_seed(hash((FLAGS.seed, hvd.rank())))", "def with_cpu(ops, model):\n ...", "def test_imports():\n import sys\n import src\n assert 'sklearn.feature_extraction' not in sys.modules.keys()", "def tf_bind_10(local_dir, cpus, gpus, num_parallel, num_samples):\n\n # Final Version\n\n from design_baselines.mins import mins\n ray.init(num_cpus=cpus,\n num_gpus=gpus,\n include_dashboard=False,\n _temp_dir=os.path.expanduser('~/tmp'))\n tune.run(mins, config={\n \"logging_dir\": \"data\",\n \"task\": \"TFBind10-Exact-v0\",\n \"task_kwargs\": {\"relabel\": False, \"dataset_kwargs\": {\"max_samples\": 10000}},\n \"val_size\": 200,\n \"offline\": True,\n \"normalize_ys\": True,\n \"normalize_xs\": False,\n \"base_temp\": 0.1,\n \"keep\": 0.99,\n \"start_temp\": 5.0,\n \"final_temp\": 1.0,\n \"method\": \"wasserstein\",\n \"use_conv\": False,\n \"gan_batch_size\": 128,\n \"hidden_size\": 1024,\n \"num_layers\": 1,\n \"bootstraps\": 1,\n \"initial_max_std\": 0.2,\n \"initial_min_std\": 0.1,\n \"oracle_lr\": 0.001,\n \"oracle_batch_size\": 128,\n \"oracle_epochs\": 100,\n \"latent_size\": 32,\n \"critic_frequency\": 10,\n \"flip_frac\": 0.,\n \"fake_pair_frac\": 0.0,\n \"penalty_weight\": 10.,\n \"generator_lr\": 2e-4,\n \"generator_beta_1\": 0.0,\n \"generator_beta_2\": 0.9,\n \"discriminator_lr\": 2e-4,\n \"discriminator_beta_1\": 0.0,\n \"discriminator_beta_2\": 0.9,\n \"initial_epochs\": 100,\n \"epochs_per_iteration\": 0,\n \"iterations\": 0,\n \"exploration_samples\": 0,\n \"exploration_rate\": 0.,\n \"thompson_samples\": 0,\n \"solver_samples\": 128, \"do_evaluation\": True},\n num_samples=num_samples,\n local_dir=local_dir,\n resources_per_trial={'cpu': cpus // num_parallel,\n 'gpu': gpus / num_parallel - 0.01})", "def test_all_tf_execution_regimes(test_case):\n if BACKEND == 'backend_tensorflow':\n return test_util.test_all_tf_execution_regimes(test_case)\n else:\n return test_case", "def performance_test():\n from timeit import Timer\n t = Timer(\"test()\", \"from __main__ import test\")\n print t.timeit(number=1)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = 
model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%" ]
[ "0.63114333", "0.6208205", "0.605488", "0.5974686", "0.5898292", "0.5859008", "0.58150566", "0.57757914", "0.57524824", "0.5717537", "0.5694916", "0.5694258", "0.5665611", "0.5643591", "0.5643398", "0.5629247", "0.56084234", "0.5588382", "0.5516901", "0.551364", "0.54921466", "0.54900944", "0.548223", "0.5476932", "0.5473687", "0.54565185", "0.5437372", "0.54368824", "0.54357255", "0.54357207" ]
0.76280653
0
rotatePivot(Vector rotation) This function will take a vector rotation and rotate the curently selected objects pivot to match without affecting their physical display Make sure that you rotate around the cursor and have the entire mesh selected or this will not work. This operates on an xyz euler.
def rotatePivot(rotation): # Rotate in object mode X bpy.ops.object.mode_set(mode='OBJECT') bpy.ops.transform.rotate(value=rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL') # rotate in edit mode X bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_all(action='SELECT') bpy.ops.transform.rotate(value=-rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL') # Rotate in object mode Y bpy.ops.object.mode_set(mode='OBJECT') bpy.ops.transform.rotate(value=rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL') # rotate in edit mode Y bpy.ops.object.mode_set(mode='EDIT') bpy.ops.transform.rotate(value=-rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL') # Rotate in object mode Z bpy.ops.object.mode_set(mode='OBJECT') bpy.ops.transform.rotate(value=rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL') # rotate in edit mode Z bpy.ops.object.mode_set(mode='EDIT') bpy.ops.transform.rotate(value=-rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL') # return to object mode bpy.ops.object.mode_set(mode='OBJECT')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_rotation_pivot(self, rotation_pivot: Pivot):\n pass", "def Pivot(rotation, axis, angle):\n # Check for an invalid coordinate axis.\n if axis not in [0, 1, 2]:\n raise Error('Invalid axis {}. Must be [0, 1, 2].'.format(axis))\n\n radians = math.radians(angle)\n c = math.cos(radians)\n s = math.sin(radians)\n\n # We need to maintain the \"right-hand\" rule, no matter which\n # axis was selected. That means we pick (i, j, k) axis order\n # such that the following vector cross product is satisfied:\n # i x j = k\n i = (axis + 1) % 3\n j = (axis + 2) % 3\n k = axis\n\n rot = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n rot[i][i] = c*rotation.rot[i][i] - s*rotation.rot[i][j]\n rot[i][j] = s*rotation.rot[i][i] + c*rotation.rot[i][j]\n rot[i][k] = rotation.rot[i][k]\n\n rot[j][i] = c*rotation.rot[j][i] - s*rotation.rot[j][j]\n rot[j][j] = s*rotation.rot[j][i] + c*rotation.rot[j][j]\n rot[j][k] = rotation.rot[j][k]\n\n rot[k][i] = c*rotation.rot[k][i] - s*rotation.rot[k][j]\n rot[k][j] = s*rotation.rot[k][i] + c*rotation.rot[k][j]\n rot[k][k] = rotation.rot[k][k]\n\n return RotationMatrix(rot)", "def rotate(self, point, rotation):\n\n self.rotation = self.rotation + rotation\n\n # get the point before the rotation\n ptBeforeScale = self.mapToScene(point)\n\n # rotate the view\n QGraphicsView.translate(self, point.x(), point.y())\n QGraphicsView.rotate(self, rotation)\n QGraphicsView.translate(self, -point.x(), -point.y())\n\n # counter rotate the selection point\n t = QTransform()\n t.rotate(-rotation)\n ptAfterScale = t.map(ptBeforeScale)\n\n # calculate the offset and update\n offset = ptBeforeScale - ptAfterScale\n newCenter = self.centerPoint - offset\n self.setCenterPoint(newCenter)", "def RotateVector(rotation, vector):\n return Vector(\n rotation.rot[0][0]*vector.x + rotation.rot[1][0]*vector.y + rotation.rot[2][0]*vector.z,\n rotation.rot[0][1]*vector.x + rotation.rot[1][1]*vector.y + rotation.rot[2][1]*vector.z,\n rotation.rot[0][2]*vector.x + rotation.rot[1][2]*vector.y + rotation.rot[2][2]*vector.z,\n vector.t\n )", "def rotate_about(self, rotation, point):\n\t\tif not isinstance(point,Vector3d):\n\t\t\tpoint = Vector3d(*point)\n\t\treturn (self - point).rotate(rotation) + point", "def rotate(surface, angle, pivot, offset):\n rotated_image = pg.transform.rotozoom(surface, -angle, 1) # Rotate the image.\n rotated_offset = offset.rotate(angle) # Rotate the offset vector.\n # Add the offset vector to the center/pivot point to shift the rect.\n rect = rotated_image.get_rect(center=pivot+rotated_offset)\n return rotated_image, rect # Return the rotated image and shifted rect.", "def rotate(self, vect, angle):\n self.pl.Rotation = Rotation(vect, angle)\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def rotate(initial_vector, rotated_vector, other_vectors):\n\n init_vec_norm = normalize(initial_vector)\n rot_vec_norm = normalize(np.asarray(rotated_vector))\n middle_vec_norm = normalize(init_vec_norm + rot_vec_norm)\n first_reflector = init_vec_norm - middle_vec_norm\n second_reflector = middle_vec_norm - rot_vec_norm\n Q1 = householder(first_reflector)\n Q2 = householder(second_reflector)\n reflection_matrix = np.matmul(Q2, Q1)\n rotated_vectors = np.matmul(other_vectors, np.transpose(reflection_matrix))\n return rotated_vectors", "def _rotate_offset(shape, angle, pivot):\n if angle == 0:\n return\n cx = shape.Left + shape.Width / 2\n cy = shape.Top + shape.Height / 2\n\n theta = -angle / 180 * np.pi # Sign of angle.\n\n # Rotation matrix.\n rotmat = np.array(\n 
[[np.cos(theta), -np.sin(theta)], [+np.sin(theta), np.cos(theta)]]\n )\n px, py = pivot\n # Pivot's position after Rotation.\n tx, ty = rotmat @ np.array([px - cx, py - cy]) + np.array([cx, cy])\n # Pivot is equal in before and after.\n shape.Left += px - tx\n shape.Top += py - ty\n shape.Rotation = -angle # Sign of angle.\n\n \"\"\" # A candidate of code.\n However, it is a little diffuclt.\n rotmat = np.array([[1 - np.cos(theta), + np.sin(theta) ],\n [- np.sin(theta), 1 - np.cos(theta)]])\n px, py = pivot\n tx, ty = rotmat @ np.array([px - cx, py - cy])\n shape.Left += tx\n shape.Top += ty\n shape.Rotation = - angle # Definition of Rotation.\n \"\"\"", "def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self", "def rotate(self, vector):\n if isinstance(vector, Quaternion):\n return self._rotate_quaternion(vector)\n q = Quaternion(vector=vector)\n a = self._rotate_quaternion(q).vector\n if isinstance(vector, list):\n l = [x for x in a]\n return l\n elif isinstance(vector, tuple):\n l = [x for x in a]\n return tuple(l)\n else:\n return a", "def rotate(vector, angle):\n return np.cos(angle) * vector[0] + np.sin(angle) * vector[1], \\\n -np.sin(angle) * vector[0] + np.cos(angle) * vector[1]", "def rotate(self, rotation):\n\t\tif not isinstance(rotation,Rotation):\n\t\t\trotation = Rotation(*rotation)\n\t\treturn rotation.matrix() * self", "def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def rotate(self,angle):\n origin = copy.deepcopy(self._current)\n\n q = [origin.orientation.x,\n origin.orientation.y,\n origin.orientation.z,\n origin.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)\n\n atTarget=False\n\n currentAngle=yaw\n angle=angle+currentAngle\n\n if(angle==currentAngle):\n w=0\n elif(angle>currentAngle):\n w=1\n elif(angle<currentAngle):\n w=-1\n\n move_msg=Twist()\n move_msg.linear.x=0\n move_msg.angular.z=w\n\n\n stop_msg =Twist()\n stop_msg.linear.x=0\n stop_msg.angular.z=0\n\n while(not atTarget and not rospy.is_shutdown()):\n if(currentAngle>=angle):\n atTarget=True\n self._vel_pub.publish(stop_msg)\n print('rotate: stoped')\n else:\n origin = copy.deepcopy(self._current)\n\n q = [origin.orientation.x,\n origin.orientation.y,\n origin.orientation.z,\n origin.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)\n\n currentAngle=yaw\n self._vel_pub.publish(move_msg)\n rospy.sleep(.15)\n print('rotate: moving')\n print('angle: '+str(angle)+'currentAngle: '+str(currentAngle))", "def apply_rotation_only(self, vector):\n return np.dot(self.rotation_matrix, vector)", "def make_rotation(self, rotation):\n if rotation == \"r\":\n self.facing += 1\n else:\n self.facing -= 1\n\n if self.facing > 3:\n self.facing = self.facing - 4\n elif self.facing < 0:\n self.facing = self.facing + 4", "def vecRot(data, seq, euler_angles, **kwargs):\n from scipy.spatial.transform import Rotation as R\n r = R.from_euler(seq, euler_angles, **kwargs)\n return r.apply(data)", "def xy_rotation(vector,theta):\r\n R = np.array([[np.cos(theta), -np.sin(theta),0],\r\n [np.sin(theta), np.cos(theta),0],\r\n [0,0,1]\r\n ])\r\n return np.dot(R,vector)", "def rotation_pivot_to_center(self):\n pass", "def rotate(self,angle):\n radians = (angle * math.pi)/180\n self.direction += angle\n for object in self.objects:\n y = object.position[0]\n x = 
object.position[1]\n\n object.position[0] = x * math.sin(radians) + y * math.cos(radians)\n object.position[1] = x * math.cos(radians) - y * math.sin(radians)", "def test_rotate_vec_x(self):\n\n quat = Quat.from_axis_angle_deg(Vec3(1, 0, 0), 90.)\n vec = Vec3(1, 1, 1)\n\n rotated_vec = quat.rotate_vec(vec)\n\n # 90 deg around x moves y from positive to negative\n self.assertAlmostEqual(1.0, rotated_vec.x)\n self.assertAlmostEqual(-1.0, rotated_vec.y)\n self.assertAlmostEqual(1.0, rotated_vec.z)", "def qrotate(points, axis, theta):\n q = Quaternion.rotator(axis, theta)\n return q.rotate(points)", "def test_rotate_vec(self):\n\n quat = Quat.from_axis_angle_deg(Vec3(-1, -1, -1), 180.)\n vec = Vec3(1, 0, 0)\n\n rotated_vec = quat.rotate_vec(vec)\n\n self.assertAlmostEqual(-1/3.0, rotated_vec.x)\n self.assertAlmostEqual(2/3.0, rotated_vec.y)\n self.assertAlmostEqual(2/3.0, rotated_vec.z)", "def convert_translation_rotation_to_pose(translation, rotation):\n return Pose(position=Point(x=translation[0],y=translation[1],z=translation[2]), orientation=Quaternion(x=rotation[0],y=rotation[1],z=rotation[2],w=rotation[3]))", "def polyEditUVShell(*args, angle: Union[float, bool]=0.0, pivotU: Union[float, bool]=0.0,\n pivotV: Union[float, bool]=0.0, relative: bool=True, rotateRatio:\n Union[float, bool]=1.0, rotation: bool=True, scale: bool=True, scaleU:\n Union[float, bool]=0.0, scaleV: Union[float, bool]=0.0, uValue: Union[float,\n bool]=0.0, uvSetName: Union[AnyStr, bool]=\"\", vValue: Union[float,\n bool]=0.0, q=True, query=True, **kwargs)->Union[bool, Any]:\n pass", "def _rotate(polyreg, i=None, j=None, u=None, v=None, theta=None, R=None):\n # determine the rotation matrix based on inputs\n if R is not None:\n logger.debug(\"rotate: R=\\n{}\".format(R))\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n elif i is not None and j is not None and theta is not None:\n logger.info(\"rotate via indices and angle.\")\n if R is not None:\n raise ValueError(R)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n if i == j:\n raise ValueError(\"Must provide two unique basis vectors.\")\n R = givens_rotation_matrix(i, j, theta, polyreg.dim)\n elif u is not None and v is not None:\n logger.info(\"rotate via 2 vectors.\")\n if R is not None:\n raise ValueError(R)\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n R = solve_rotation_ap(u, v)\n else:\n raise ValueError(\"R or (i and j and theta) or (u and v) \"\n \"must be defined.\")\n if isinstance(polyreg, Polytope):\n # Ensure that half space is normalized before rotation\n n, p = _hessian_normal(polyreg.A, polyreg.b)\n # Rotate the hyperplane normals\n polyreg.A = np.inner(n, R)\n polyreg.b = p\n else:\n # Rotate subregions\n for poly in polyreg.list_poly:\n _rotate(poly, None, None, R=R)\n # transform bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (np.inner(polyreg.bbox[0].T, R).T,\n np.inner(polyreg.bbox[1].T, R).T)\n if polyreg._chebXc is not None:\n polyreg._chebXc = np.inner(polyreg._chebXc, R)\n return R", "def rotate_vector(vector, angle, anchor=(0, 0)):\n x = vector.x - anchor[0]\n y = vector.y - anchor[1]\n\n cos_theta = cos(angle)\n sin_theta = sin(angle)\n\n nx = x*cos_theta - y*sin_theta\n ny = x*sin_theta + y*cos_theta\n\n nx = nx + anchor[0]\n ny = 
ny + anchor[1]\n\n return Vector2D(nx, ny)", "def Rotation(v, theta):\n\n v = np.array(v)\n if v.shape != (3,) or abs(v.dot(v) - 1.0) > 1e-8 or not np.all(np.isreal(v)):\n raise ValueError('Rotation vector v should be a 3D real unit vector.')\n\n return np.cos(theta/2) * Identity() - 1j * np.sin(theta/2) * (\n v[0] * PauliX() + v[1] * PauliY() + v[2] * PauliZ())" ]
[ "0.74697024", "0.6728124", "0.6568968", "0.6513034", "0.63447696", "0.6299606", "0.6126983", "0.5978732", "0.59771883", "0.5927698", "0.59080005", "0.58852196", "0.5876758", "0.58229953", "0.58206", "0.5766183", "0.5760986", "0.57403606", "0.57333255", "0.5731726", "0.57096374", "0.5697425", "0.56862557", "0.5679549", "0.56504464", "0.5648226", "0.56392294", "0.5626105", "0.5615558", "0.5610142" ]
0.84249556
0
world2local(Vector a, Vector b, float depth) This function will create a vector to translate between points a and b based on a given depth.
def world2local(a,b, depth): # calculate angles based on 3 dimensional trigonometry thetaA = atan( (a.z-b.z)/( sqrt( pow(a.x-b.x,2) + pow(a.y-b.y,2) ) ) ) thetaB = atan( (a.y-b.y)/(a.x-b.x) ) # calculate the vector distVec = Vector([cos(thetaB) * cos(thetaA) * depth, sin(thetaB) * cos(thetaA) * depth, sin(thetaA) * depth]) # return the vector return distVec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def world2local(*, current_location: np.ndarray, current_rotation: np.ndarray,\n world_locations: np.ndarray) -> np.ndarray:\n # Prepares interfaces.\n assert current_location.shape == (3,)\n assert current_rotation.shape == (3,)\n assert len(world_locations.shape) < 3\n world_locations = np.atleast_2d(world_locations)\n\n # Builds the rotation matrix.\n R = rot2mat(current_rotation)\n # Transforms world coordinates to local coordinates.\n local_locations = np.dot(a=R, b=(world_locations - current_location).T).T\n\n return np.squeeze(local_locations)", "def local2world(*, current_location: np.ndarray, current_rotation: np.ndarray,\n local_locations: np.ndarray) -> np.ndarray:\n # Prepares interfaces.\n assert current_location.shape == (3,)\n assert current_rotation.shape == (3,)\n assert len(local_locations.shape) < 3\n local_locations = np.atleast_2d(local_locations)\n\n # Builds the inverse rotation matrix.\n R_inv = np.linalg.inv(rot2mat(current_rotation))\n # Transforms local coordinates to world coordinates.\n global_locations = np.dot(a=R_inv, b=local_locations.T).T + current_location\n\n return global_locations", "def body_2_world(orientation, vector):\n\n vector = np.append(vector, 0)\n orientation_inv = quaternion_inverse(orientation)\n new_vector = quaternion_multiply(orientation, quaternion_multiply(vector, orientation_inv))\n return new_vector[:3]", "def localToGlobal(local_pos, local_rot):\n worldCoordToLocal = positionVectorToTranslationMatrix(local_pos)\n worldCoordToLocal = np.linalg.inv(worldCoordToLocal) \n localCoordToWorld = np.linalg.inv(worldCoordToLocal)\n\n worldRotToLocal = eulerAnglesToRotationMatrix(local_rot)\n rot = np.linalg.inv(worldRotToLocal)\n\n localRotToWorld = np.array([\n [\n [rot[0][0], rot[0][1], rot[0][2], 0],\n [rot[1][0], rot[1][1], rot[1][2], 0],\n [rot[2][0], rot[2][1], rot[2][2], 0],\n [0, 0, 0, 1]\n ],\n ])\n\n return localCoordToWorld.dot(localRotToWorld)\n # return localRotToWorld.dot(localCoordToWorld)", "def vector_two_nodes(a, b, normalize=False):\n vector = a - b\n if not normalize:\n return vector\n return normalize_vector_numpy(vector)", "def camera_2_world(self, o, d):\r\n wo = self.camera2world_point @ ti.Vector([o.x, o.y, o.z, 1.0])\r\n wd = self.camera2world_vec @ d\r\n return ti.Vector([wo.x,wo.y,wo.z]), wd", "def translate(self, vector):\n if self.blender_object:\n self.blender_object.location = vector", "def to_world(self, uv):\n return self._projective_transform(self.A, uv)", "def gen_world2local(normal):\n last_dim_i = normal.ndim - 1\n\n z = np.array((0, 0, 1), dtype=float)\n\n # Tangents\n t = np.cross(normal, z)\n if (t == 0).all(axis=-1).any():\n raise ValueError((\n \"Found (0, 0, 0) tangents! 
Possible reasons: normal colinear with \"\n \"(0, 0, 1); normal is (0, 0, 0)\"))\n t = normalize_vec(t, axis=last_dim_i)\n\n # Binormals\n # No need to normalize because normals and tangents are orthonormal\n b = np.cross(normal, t)\n\n # Rotation matrices\n rot = np.stack((t, b, normal), axis=last_dim_i)\n # So that at each location, we have a 3x3 matrix whose ROWS, from top to\n # bottom, are world tangents, binormals, and normals\n\n return rot", "def normal_to_world(self, local_normal: Vector) -> Vector:\n # This will convert to one group space up if there is a parent group.\n normal = self.transform.inverse().transpose() * local_normal\n normal.w = 0\n normal = normal.normalize()\n\n if self.parent:\n normal = self.parent.normal_to_world(normal)\n return normal", "def global2local(self, gobal_coord, start, end, strand):\n return global2local(gobal_coord, start, end, strand)", "def project_vectors_ab(a, b):\n # print('dot = ', np.dot(a,b))\n # print('norm = ', np.linalg.norm(b))\n return np.dot(a, b) / np.linalg.norm(b)", "def local2global(local_coord, start, end, strand):\n\n # swap if strands disagree\n if strand == 1:\n return local_coord + start\n else:\n return end - local_coord", "def vector_between_points(a, b):\n vector_1 = Vector(*a)\n vector_2 = Vector(*b)\n return vector_1 - vector_2", "def local2global(self, local_coord, start, end, strand):\n local2global(local_coord, start, end, strand)", "def project_point_along_2Dvector(): \n \n # 2d vector \n a = vec2( 1 , 1 )\n b = vec2( -1 , -1 )\n com = vec2() \n\n #fb = pixel_op() \n #fb.create_buffer(800, 800)\n #fb.graticule(pixels_per_unit)\n\n vecs = [a,b]\n pts = [com.project_pt(a, b, 2)]\n\n bloody_simple_2drender('2d_render.png', vecs=vecs, pts=pts, gridsize=40)", "def dir_vector(p1: Vec2, p2: Vec2) -> Vec2:\n return Vec2(p2.x - p1.x, p2.y - p1.y)", "def global2local(gobal_coord, start, end, strand):\n\n # swap if strands disagree\n if strand == 1:\n return gobal_coord - start\n else:\n return end - gobal_coord", "def translate(self, vector):\n \n matrix = wf.translationMatrix(*vector)\n for wireframe in self.wireframes.values():\n wireframe.transform(matrix)", "def rot_world_space_to_local_space(m_obj, parent_m_obj):\n obj_world_mat = get_world_matrix(m_obj, 0)\n parent_inv_mat = get_world_inv_matrix(parent_m_obj, 0)\n\n local_space_mat = obj_world_mat * parent_inv_mat\n trans_matrix = oMa.MTransformationMatrix(local_space_mat)\n rot = trans_matrix.rotation()\n\n return rot", "def tanp_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec", "def vector(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n\n return [x / norm, y / norm, z / norm]", "def vector(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n\n return [x / norm, y / norm, z / norm]", "def l2g(local_x, local_y, hero):\n l_hero_x = (visible_squares[0]-1)/2 #8\n l_hero_y = (visible_squares[1]-1)/2 #8\n \n global_x = local_x + hero.x - l_hero_x\n global_y = local_y + hero.y - l_hero_y \n\n #global_x = hero.x + x\n #global_y = hero.y + y\n \n return global_x, global_y", "def traj_conv_vec_vec(traj1, traj2):\r\n return Trajectory(conv_vec_vec_fast(traj1.modes, traj2.modes))", "def fromBarycentric(self, b: Vec3) -> Vec3:\n return self.a.position * b.x + self.b.position * b.y + self.c.position * b.z", 
"def to_world(self, x, y, **kwargs):", "def g2l(global_x, global_y, hero):\n \n l_hero_x = (visible_squares[0]-1)/2 #8\n l_hero_y = (visible_squares[1]-1)/2 #8\n \n \n local_x = global_x - hero.x + l_hero_x\n local_y = global_y - hero.y + l_hero_y\n \n return local_x, local_y", "def depth_to_xyz(self,u,v,depth_val):\n '''\n u - x image coordinate\n v - y image coodrinate\n depth_val - depth value at that (u,v) from depth_image\n '''\n\n fx=self.cam_intrin[0]\n fy=self.cam_intrin[4]\n cx=self.cam_intrin[2]\n cy=self.cam_intrin[5]\n\n z = float(depth_val)\n x = float((u - cx)/fx)*z\n y = float((v - cy)/fy)*z\n\n result = [x, y, z]\n return result", "def compute_relative_vector(camera: bpy.types.Camera, target: bpy.types.Object):\n cam_loc, cam_rot, _ = camera.matrix_world.decompose()\n target_loc, _, _ = target.matrix_world.decompose()\n relative_vector = target_loc - cam_loc\n\n rotation = cam_rot.to_matrix().transposed()\n relative_vector.rotate(rotation)\n\n # The camera's worldvector is norm to the horizon, we want a vector\n # down the barrel.\n camera_correction = Quaternion( ( sqrt(2.) / 2. , sqrt(2.) / 2. , 0. , 0.) )\n relative_vector.rotate(camera_correction)\n\n return relative_vector" ]
[ "0.5998272", "0.57170975", "0.56982654", "0.55855894", "0.5581537", "0.5494604", "0.5411607", "0.5398915", "0.53970957", "0.53813344", "0.5324072", "0.5313619", "0.5304791", "0.5235172", "0.5225136", "0.51992714", "0.5191363", "0.5187533", "0.51196164", "0.5077635", "0.5073738", "0.50285846", "0.50285846", "0.5010816", "0.5010674", "0.50083953", "0.49981266", "0.49756947", "0.49688423", "0.4940618" ]
0.8632064
0
run the ReHeat class
def run(self): print "\n\n\tPlease Note: Templates are generated based off" print "\t of the OS environment variables that are set." print "\t* Running ReHeat." self.set_creds() self.gen_ip() # used in template description self.gen_tenant_id() if self.reheat_error: return self.reheat_errmsg print "\t* You have opted to generate %s file[s]" % self.template_type if 'all' in self.template_type: self.gen_heat_data() self.gen_heat_template() self.gen_compute_data() return self.gen_compute_template() elif 'heat' in self.template_type: self.gen_heat_data() return self.gen_heat_template() elif 'compute' in self.template_type: self.gen_compute_data() return self.gen_compute_template() else: raise Exception("User provided an improper template type.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running runcat on pixel %d\" % (self.pixel))\n\n runcat = RunCatalog(self.config)\n if not os.path.isfile(runcat.filename):\n runcat.run(do_percolation_masking=self.config.runcat_percolation_masking)\n runcat.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()", "def Run():\r\n pass", "def run(self):\n self.__power_on()\n\n self.__main()", "def run(self):\n self.run()", "def RUN(self):", "def run(self):\n \n # shortcut for self\n s = self\n \n # shortcut to existing heating fuel\n fuel = s.exist_fuel\n\n # holds summary measures for the heat pump project (e.g. seasonal COP,\n # internal rate of return). Fill out first item: secondary fuel info.\n s.summary = {'fuel_unit': fuel.unit, 'fuel_desc': fuel.desc}\n \n # Create the home energy simulation object\n sim = HomeHeatModel(\n city_id=s.city_id,\n hp_model_id=s.hp_model_id,\n exist_heat_fuel_id=s.exist_heat_fuel_id,\n exist_heat_effic=s.exist_heat_effic,\n exist_kwh_per_mmbtu=s.exist_kwh_per_mmbtu, \n co2_lbs_per_kwh=s.co2_lbs_per_kwh,\n low_temp_cutoff=s.low_temp_cutoff,\n off_months=s.off_months_chks,\n garage_stall_count=s.garage_stall_count,\n garage_heated_by_hp=s.garage_heated_by_hp,\n bldg_floor_area=s.bldg_floor_area,\n indoor_heat_setpoint=s.indoor_heat_setpoint,\n insul_level=s.insul_level,\n pct_exposed_to_hp=s.pct_exposed_to_hp,\n doors_open_to_adjacent=s.doors_open_to_adjacent,\n bedroom_temp_tolerance=s.bedroom_temp_tolerance, \n )\n\n # If other end uses use the heating fuel, make an estimate of their annual\n # consumption of that fuel. This figure is expressed in the physical unit\n # for the fuel type, e.g. gallons of oil. Save this as an object attribute\n # so it is accessible in other routines. See Evernote notes on values (AkWarm\n # for DHW and Michael Bluejay for Drying and Cooking).\n is_electric = (s.exist_heat_fuel_id == constants.ELECTRIC_ID) # True if Electric\n s.fuel_other_uses = s.includes_dhw * 4.23e6 / fuel.dhw_effic\n s.fuel_other_uses += s.includes_dryer * (0.86e6 if is_electric else 2.15e6)\n s.fuel_other_uses += s.includes_cooking * (0.64e6 if is_electric else 0.8e6)\n s.fuel_other_uses *= s.occupant_count / fuel.btus\n\n # For elecric heat we also need to account for lights and other applicances not\n # itemized above.\n if is_electric:\n # Use the AkWarm Medium Lights/Appliances formula but take 25% off\n # due to efficiency improvements since then.\n s.lights_other_elec = 2086. + 1.20 * s.bldg_floor_area # kWh in the year\n else:\n s.lights_other_elec = 0.0\n \n # Match the existing space heating use if it is provided. 
Do so by using\n # the UA true up factor.\n if not is_null(s.exist_fuel_use):\n \n # Remove the energy use from the other end uses that use the fuel, unless\n # this is electric heat and the user indicated that the entered value is\n # just space heating.\n if is_electric and s.elec_uses=='space':\n # user explicitly indicated that the entered annual usage value is\n # just space heating.\n space_fuel_use = s.exist_fuel_use\n else:\n space_fuel_use = s.exist_fuel_use - s.fuel_other_uses - s.lights_other_elec\n\n sim.no_heat_pump_use = True\n sim.calculate()\n if is_electric:\n # For electric heat, electric use for space heat is in secondary_kwh\n fuel_use1 = sim.annual_results().secondary_kwh\n else:\n fuel_use1 = sim.annual_results().secondary_fuel_units\n \n # scale the UA linearly to attempt to match the target fuel use\n ua_true_up = space_fuel_use / fuel_use1\n sim.ua_true_up = ua_true_up\n sim.calculate()\n\n if is_electric:\n # For electric heat, electric use for space heat is in secondary_kwh\n fuel_use2 = sim.annual_results().secondary_kwh\n else:\n fuel_use2 = sim.annual_results().secondary_fuel_units\n \n # In case it wasn't linear, inter/extrapolate to the final ua_true_up\n slope = (fuel_use2 - fuel_use1)/(ua_true_up - 1.0)\n # print(space_fuel_use, fuel_use1, fuel_use2, ua_true_up)\n ua_true_up = 1.0 + (space_fuel_use - fuel_use1) / slope\n # print(ua_true_up)\n\n else:\n ua_true_up = 1.0\n \n # Set the UA true up value into the model and also save it as\n # an attribute of this object so it can be observed.\n sim.ua_true_up = ua_true_up\n s.ua_true_up = ua_true_up\n \n # Run the base case with no heat pump and record energy results.\n # This model only models the space heating end use.\n sim.no_heat_pump_use = True\n sim.calculate()\n s.df_mo_en_base = sim.monthly_results()\n s.ann_en_base = sim.annual_results()\n # print(s.ann_en_base.secondary_kwh)\n \n # Run the model with the heat pump and record energy results\n sim.no_heat_pump_use = False\n sim.calculate()\n s.df_mo_en_hp = sim.monthly_results()\n s.ann_en_hp = sim.annual_results()\n s.df_hourly = sim.df_hourly\n\n # record design heat load\n s.summary['design_heat_load'], s.summary['design_heat_temp'] = sim.design_heat_load()\n \n # Calculate some summary measures\n s.summary['cop'] = s.ann_en_hp.cop\n s.summary['hp_max_capacity_5F'] = sim.hp_max_capacity_5F()\n s.summary['max_hp_reached'] = sim.max_hp_reached\n \n # CO2 savings\n s.summary['co2_lbs_saved'] = s.ann_en_base.co2_lbs - s.ann_en_hp.co2_lbs\n s.summary['co2_driving_miles_saved'] = convert_co2_to_miles_driven(s.summary['co2_lbs_saved'])\n s.summary['hp_load_frac'] = s.ann_en_hp.hp_load_mmbtu / (s.ann_en_hp.hp_load_mmbtu + s.ann_en_hp.secondary_load_mmbtu)\n \n # Create DataFrames that hold monthly energy cost amounts\n # Results are stored as object attributes.\n self.calc_monthly_cash()\n \n # Create a multi-year Cash Flow DataFrame and summary economic measures.\n # Results are stored as object attributes.\n self.calc_cash_flow()\n\n # Save a gzipped pickle of this object using Unix time as the file name.\n # make a directory to hold the files\n save_dir = 'hpcalc_runs'\n Path(save_dir).mkdir(exist_ok=True)\n fname = f'{time.time():.2f}.pkl.gz'\n s.file_name = fname\n pickle.dump(self, gzip.open(f'{save_dir}/{fname}', 'wb'))", "def 
main():\n\t#print(scipy.__version__)\n\t#image()\n\t#heat_capacity2()\n\t#hist()\n\t#single_plot()\n\n\t#heat_capacity2()\n\t#single_plot()\n\t#plt.show()\n\t#u0_tc()\n\t#multi_heat_capacity(\"HL_DM_flux5\",True)\n\t#multi_heat_capacity2()\n\t#plot_spin()\n\t#plt.show()\n\theat_capacity2(1,2)\n\t#hist()\n\tplt.show()\n\t#potential()\n\t#plt.show()\n\t#heat_capacity(3,4)\n\t#heat_capacity(5,6)\n\t#heat_capacity(7,8)\n\t#final_spins()\n\t#plot_spin()\n\t#plot_from_csv()\n\t#difference_plot()", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def do_stuff(self):\n self.create_tourism_raster()", "def run(self):\n\t\t\n\t\tpass", "def run(self):\n \n pass", "def run(self):\n visualize_hydro_radial(self.config, self.logger)", "def run(self):\r\n pass", "def _run():\n\n temperatures_kelvins = _create_temperature_grid()\n first_derivs_kelvins_pt01 = numpy.gradient(temperatures_kelvins)\n second_derivs_kelvins_pt01 = numpy.gradient(\n numpy.absolute(first_derivs_kelvins_pt01)\n )\n\n this_ratio = (\n numpy.max(temperatures_kelvins) /\n numpy.max(first_derivs_kelvins_pt01)\n )\n\n first_derivs_unitless = first_derivs_kelvins_pt01 * this_ratio\n\n this_ratio = (\n numpy.max(temperatures_kelvins) /\n numpy.max(second_derivs_kelvins_pt01)\n )\n\n second_derivs_unitless = second_derivs_kelvins_pt01 * this_ratio\n\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n temperature_handle = axes_object.plot(\n temperatures_kelvins, color=TEMPERATURE_COLOUR, linestyle='solid',\n linewidth=SOLID_LINE_WIDTH\n )[0]\n\n second_deriv_handle = axes_object.plot(\n second_derivs_unitless, color=SECOND_DERIV_COLOUR, linestyle='solid',\n linewidth=SOLID_LINE_WIDTH\n )[0]\n\n first_deriv_handle = axes_object.plot(\n first_derivs_unitless, color=FIRST_DERIV_COLOUR, linestyle='dashed',\n linewidth=DASHED_LINE_WIDTH\n )[0]\n\n this_min_index = numpy.argmin(second_derivs_unitless)\n second_derivs_unitless[\n (this_min_index - 10):(this_min_index + 10)\n ] = second_derivs_unitless[this_min_index]\n\n tfp_handle = axes_object.plot(\n -1 * second_derivs_unitless, color=TFP_COLOUR, linestyle='dashed',\n linewidth=DASHED_LINE_WIDTH\n )[0]\n\n axes_object.set_yticks([0])\n axes_object.set_xticks([], [])\n\n x_label_string = r'$x$-coordinate (increasing to the right)'\n axes_object.set_xlabel(x_label_string)\n\n legend_handles = [\n temperature_handle, first_deriv_handle, second_deriv_handle,\n tfp_handle\n ]\n\n legend_strings = [\n TEMPERATURE_LEGEND_STRING, FIRST_DERIV_LEGEND_STRING,\n SECOND_DERIV_LEGEND_STRING, TFP_LEGEND_STRING\n ]\n\n axes_object.legend(legend_handles, legend_strings, loc='lower right')\n\n print 'Saving figure to file: \"{0:s}\"...'.format(OUTPUT_FILE_NAME)\n pyplot.savefig(OUTPUT_FILE_NAME, dpi=FIGURE_RESOLUTION_DPI)\n pyplot.close()", "def run(self):\n\n # need to think about outpath\n\n # Make sure all files are here and okay...\n\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = 
self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n self.config.logger.info(\"Running redMaPPer on pixel %d\" % (self.pixel))\n\n firstpass = RunFirstPass(self.config)\n\n if not os.path.isfile(firstpass.filename):\n firstpass.run()\n firstpass.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Firstpass file %s already present. Skipping...\" % (firstpass.filename))\n\n self.config.catfile = firstpass.filename\n\n # Clear out the firstpass memory\n del firstpass\n\n like = RunLikelihoods(self.config)\n\n if not os.path.isfile(like.filename):\n like.run()\n like.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Likelihood file %s already present. Skipping...\" % (like.filename))\n\n self.config.catfile = like.filename\n\n # Clear out the likelihood memory\n del like\n\n perc = RunPercolation(self.config)\n\n if not os.path.isfile(perc.filename):\n perc.run()\n perc.output(savemembers=True, withversion=False)\n else:\n self.config.logger.info(\"Percolation file %s already present. Skipping...\" % (perc.filename))\n\n self.config.stop_file_logging()", "def run(self):\n self.monitorTextBox.setPlainText(\"\")\n self.applyChanges()\n self.toolBox.setCurrentIndex(4)\n if self.dat.surrogateProblem == None:\n return\n tool = self.toolSelectBox.currentText()\n pg = self.dat.surrogateMethods.plugins[tool].surrogateMethod(self.dat)\n pg.loadDict(self.dat.surrogateProblem[tool])\n pg.start()\n self.pg = pg\n self.a = True\n self.timer.start(self.updateDelay)\n self.timeRunning = time.time()\n self.runButton.setEnabled(False)\n self.stopButton.setEnabled(True)\n self.setStatusBar.emit(\"Surrogate Generation Running\")" ]
[ "0.6652388", "0.64342934", "0.63346267", "0.6330017", "0.6314298", "0.6261548", "0.6249023", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.6240787", "0.62254363", "0.6210158", "0.6136843", "0.6132069", "0.61057013", "0.6062931", "0.60592586", "0.6048092" ]
0.6448171
1
instantiate heat orchestration client
def gen_heat_client(self): print "\t* Generating heat client" # request a new auth token from keystone keystone = ksclient.Client(auth_url=self.auth_url, username=self.username, password=self.password, tenant_name=self.tenant_name, region_name=self.region_name) auth_token = keystone.auth_token heat_url = 'http://%s:8004/v1/%s' % (self.ip, self.tenant_id) # instantiate client self.heatclient = hClient('1', endpoint=heat_url, token=auth_token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_client(self) -> None:\n pass", "def init_compute_clients(self):\n\n print \"\\t* instantiating clients\"\n # instantiate nova client\n self.gen_nova_client()\n\n # instantiate neutron client\n self.gen_neutron_client()\n\n # instantiate heat client (used to validate templates)\n self.gen_heat_client()", "def __init__(self, config):\n self._host = config['host']\n self._username = config['username']\n self._password = config['password']\n self._vc_name = config['Name']\n self._ingest_token = config['IngestToken']\n self._ingest_endpoint = config['IngestEndpoint']\n self._ingest_timeout = config['IngestTimeout']\n self._logger = logging.getLogger(self.get_instance_id())\n self._si = None\n self._connect()\n if self._si is None:\n raise ValueError(\"Unable to connect to host\")\n self._ingest = self._create_signalfx_ingest()\n if self._ingest is None:\n raise ValueError(\"Unable to create ingest client\")\n self._additional_dims = config.get('dimensions', None)\n if 'MORSyncInterval' not in config:\n config['MORSyncInterval'] = constants.DEFAULT_MOR_SYNC_INTERVAL\n self._mor_sync_timeout = config.get('MORSyncTimeout', constants.DEFAULT_MOR_SYNC_TIMEOUT)\n self._metric_sync_timeout = config.get('MetricSyncTimeout', constants.DEFAULT_METRIC_SYNC_TIMEOUT)\n self._inventory_mgr = inventory.InventoryManager(self._si, config['MORSyncInterval'],\n config['Name'], self.get_instance_id())\n self._inventory_mgr.start()\n if 'MetricSyncInterval' not in config:\n config['MetricSyncInterval'] = constants.DEFAULT_METRIC_SYNC_INTERVAL\n self._metric_conf = self._get_metric_config(config)\n self._metric_mgr = metric_metadata.MetricManager(self._si, config['MetricSyncInterval'],\n self._metric_conf, config['Name'], self.get_instance_id())\n self._metric_mgr.start()\n self._wait_for_sync()", "def client():\n\n client = Client()\n return client", "def test_uwsgi_client_creation(self):\n factory = get_factory('some_api_key', config={'uwsgiClient': True})\n assert isinstance(factory._get_storage('splits'), uwsgi.UWSGISplitStorage)\n assert isinstance(factory._get_storage('segments'), uwsgi.UWSGISegmentStorage)\n assert isinstance(factory._get_storage('impressions'), uwsgi.UWSGIImpressionStorage)\n assert isinstance(factory._get_storage('events'), uwsgi.UWSGIEventStorage)\n assert isinstance(factory._get_storage('telemetry'), uwsgi.UWSGITelemetryStorage)\n assert factory._apis == {}\n assert factory._tasks == {}\n assert factory._labels_enabled is True\n assert factory._impression_listener is None\n factory.block_until_ready()\n time.sleep(1) # give a chance for the bg thread to set the ready status\n assert factory.ready\n factory.destroy()", "def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)", "def client():", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def __init__(self, hostname, port, username, password, tenant_id, connect=True):\n self.cmd_gw_ws_api = HawkularWebsocketClient(\n url=\"ws://{}:{}/hawkular/command-gateway/ui/ws\".format(hostname, port),\n headers={\"Hawkular-Tenant\": tenant_id, \"Accept\": \"application/json\"},\n username=username, password=password)\n self.tenant_id = tenant_id\n if connect:\n self.cmd_gw_ws_api.connect()", "def create_client():\n hostname = \"localhost\"\n username = \"she393\"\n password = os.getenv(\"PASSWORD\")\n\n 
client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=hostname, username=username, password=password)\n return client", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def client_setup(self):\n self.client = Client()", "def __init__(self, client):\n # self.client = client\n self.sense = SenseHat()\n self.trail = [[3, 3]]\n self.direction = [1, 0] # x y\n self.length = 1\n self.apple_pos = [random.randint(0, 7), random.randint(0, 7)]\n self.pixels = [clear] * 64\n # self.send_config_xml_to_broker()\n\n # keepalive topic writer thread in every 30sec, to the broker (this device is online)\n # keepalive_thread = threading.Thread(target=thread_function, args=(self.client,))\n # keepalive_thread.start()", "def __init__(self, url = None, context = \"corbaserver\"):\n self._initOrb (url)\n self._makeClients (\"manipulation\", self.defaultClients, context)", "async def __aenter__(self) -> \"HomeAssistantClient\":\n await self.connect()\n return self", "def get_client(host, port=None, username=None,\n password=None, tenant=None,\n auth_url=None, auth_strategy=None,\n auth_token=None, region=None,\n is_silent_upload=False, insecure=False):\n\n if auth_url:\n force_strategy = 'keystone'\n else:\n force_strategy = None\n\n creds = dict(username=username,\n password=password,\n tenant=tenant,\n auth_url=auth_url,\n strategy=force_strategy or auth_strategy,\n region=region,\n )\n\n if creds['strategy'] == 'keystone' and not creds['auth_url']:\n msg = (\"--auth_url option or OS_AUTH_URL environment variable \"\n \"required when keystone authentication strategy is enabled\\n\")\n raise exception.ClientConfigurationError(msg)\n\n use_ssl = (creds['auth_url'] is not None and\n creds['auth_url'].find('https') != -1)\n\n client = HeatClient\n\n return client(host=host,\n port=port,\n use_ssl=use_ssl,\n auth_tok=auth_token,\n creds=creds,\n insecure=insecure)", "def __init__(self, client):\n super().__init__(client)", "def __init__(self, **kwargs):\n self.config = kwargs[\"config\"]\n self.cli = client.DefaultClient(app_key=self.config[\"app_key\"], app_secret=self.config[\"app_secret\"])\n self.req = None", "def __init__(self, client):\n self.client = client", "def __init__(self, entrypoint_uri='https://api.stackdriver.com/', version=API_VERSION, apikey=None, use_custom_headers=False, transport_controller=None, transport_userdata=None):\n if not apikey and not use_custom_headers and not transport_controller:\n raise KeyError('apikey must be specified when talking to the Stackdriver API')\n\n # add the version template to the entrypoint\n entrypoint_uri = entrypoint_uri.strip()\n if entrypoint_uri[-1] != '/':\n entrypoint_uri += '/'\n\n self._rest_client = RestApi(entrypoint_uri,\n version,\n apikey,\n useragent='Stackdriver Python Client %s' % __version__,\n transport_controller=transport_controller,\n transport_userdata=transport_userdata)", "def get_client():\n return Client(__address, authkey='strumamor')", "def service_client_initialization(self) -> global___Snippet.ClientInitialization:", "def __init__(self, client):\n\n self.client = client", "def __init__(self):\n super(NovaClientWrapper, self).__init__(\n retry_exceptions=(nova_exc.ConnectionRefused,\n 
nova_exc.Conflict),\n auth_exceptions=(nova_exc.Unauthorized),\n name=\"Nova\")", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def __init__(self, hostname, port, protocol, auth, tenant_id):\n HawkularService.__init__(self, hostname=hostname, port=port, protocol=protocol,\n auth=auth, tenant_id=tenant_id, entry=\"hawkular/inventory\")", "def __init__(self, hostname, port, protocol, auth, tenant_id):\n HawkularService.__init__(self, hostname=hostname, port=port, protocol=protocol,\n auth=auth, tenant_id=tenant_id, entry=\"hawkular/alerts\")", "def __init__(self):\n self.icinga_host = module.params.get(\"host\")\n self.icinga_port = module.params.get(\"port\")\n self.icinga_username = module.params.get(\"username\")\n self.icinga_password = module.params.get(\"password\")\n self.state = module.params.get(\"state\")\n self.hostname = module.params.get(\"hostname\")\n self.hostnames = module.params.get(\"hostnames\")\n self.start_time = module.params.get(\"start_time\")\n self.end_time = module.params.get(\"end_time\")\n self.duration = module.params.get(\"duration\")\n self.object_type = module.params.get(\"object_type\")\n self.all_services = module.params.get(\"all_services\")\n self.author = module.params.get(\"author\")\n self.comment = module.params.get(\"comment\")\n self.fixed = module.params.get(\"fixed\")\n self.filter_vars = None\n self.trigger_name = None\n\n self.icinga_url = \"{0}:{1}/v1\".format(self.icinga_host, self.icinga_port)\n\n self.connection = Session()\n self.connection.headers.update({'Accept': 'application/json'})\n self.connection.auth = (self.icinga_username, self.icinga_password)\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)", "def init():\n\n @click.command()\n @click.option('--cell', required=True,\n envvar='TREADMILL_CELL',\n callback=cli.handle_context_opt,\n expose_value=False)\n @click.option('--ssh', help='SSH client to use.',\n type=click.Path(exists=True, readable=True))\n @click.argument('app')\n @click.argument('command', nargs=-1)\n def ssh(ssh, app, command):\n \"\"\"SSH into Treadmill container.\"\"\"\n if ssh is None:\n ssh = _DEFAULT_SSH\n\n if app.find('#') == -1:\n # Instance is not specified, list matching and exit.\n raise click.BadParameter('Specify full instance name: xxx#nnn')\n\n app_discovery = discovery.Discovery(context.GLOBAL.zk.conn, app, 'ssh')\n app_discovery.sync()\n\n # Restore default signal mask disabled by python spawning new thread\n # for Zk connection.\n #\n # TODO: should this be done as part of zkutils.connect?\n for sig in range(1, signal.NSIG):\n try:\n signal.signal(sig, signal.SIG_DFL)\n except OSError:\n pass\n\n # TODO: not sure how to handle mutliple instances.\n for (app, hostport) in app_discovery.items():\n _LOGGER.info('%s :: %s', app, hostport)\n if 
hostport:\n host, port = hostport.split(b':')\n run_ssh(host, port, ssh, list(command))\n\n return ssh", "def __init__(self, scenario):\n client.__init__(self, scenario)\n # TODO: Your initialization, if any (not likely). Oh, and remove the next line.\n raise Exception( \"DO NOT instantiate the skeleton implementation\" )" ]
[ "0.6269356", "0.62433255", "0.6221068", "0.60977536", "0.60809094", "0.6057935", "0.60513085", "0.60292315", "0.60128397", "0.60119057", "0.6009989", "0.5977305", "0.5944054", "0.5941497", "0.5909872", "0.5892974", "0.5889855", "0.5876633", "0.587016", "0.5870129", "0.58397776", "0.5832734", "0.58212066", "0.579163", "0.57825047", "0.57753783", "0.5765288", "0.57599974", "0.57567483", "0.57377255" ]
0.7973234
0
instantiate nova compute client
def gen_nova_client(self): print "\t* Generating nova client" client = nClient.get_client_class('2') self.novaclient = client(self.username, self.password, self.tenant_name, self.auth_url, service_type='compute')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_compute_clients(self):\n\n print \"\\t* instantiating clients\"\n # instantiate nova client\n self.gen_nova_client()\n\n # instantiate neutron client\n self.gen_neutron_client()\n\n # instantiate heat client (used to validate templates)\n self.gen_heat_client()", "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)", "def __init__(self):\n super(NovaClientWrapper, self).__init__(\n retry_exceptions=(nova_exc.ConnectionRefused,\n nova_exc.Conflict),\n auth_exceptions=(nova_exc.Unauthorized),\n name=\"Nova\")", "def nova(self, obj):\n\n if self._novaclient is not None:\n return self._novaclient\n params = self._build_conn_params(obj.user, obj.project)\n self._novaclient = driver_base.SenlinDriver().compute(params)\n return self._novaclient", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def init_host(self, host):\n if self._drv_nodes is None:\n self.set_nodes([nova_conf.host])\n args = (drv_conf.tenant_id, drv_conf.client_id, drv_conf.client_secret,\n drv_conf.subscription_id)\n\n self.compute_client = utils.get_compute_client(*args)\n self.resource_client = utils.get_resource_client(*args)\n self.network_client = utils.get_network_client(*args)\n is_resource_created = utils.check_resource_existence(\n self.resource_client, drv_conf.resource_group)\n if not is_resource_created:\n utils.create_resource_group(\n self.resource_client, drv_conf.resource_group, drv_conf.region)\n\n self.flavor_info.update(\n utils.get_vm_sizes(self.compute_client, drv_conf.region))\n LOG.info(\"%s driver init with %s project, %s region\" %\n (self.name, drv_conf.tenant_id, drv_conf.region))", "def compute_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ComputeManagementClient)\n return self.client", "def __init__(self, az_account: 'account.AZAccount') -> None:\n self.az_account = az_account\n self.compute_client = compute_sdk.ComputeManagementClient(\n self.az_account.credentials,\n self.az_account.subscription_id\n ) # type: compute_sdk.ComputeManagementClient", "def __init__(self, project):\n super(NovaExtractor, self).__init__(project)\n\n self.nova = self._get_nova_client()\n self.glance = self._get_glance_client()\n self.neutron = self._get_neutron_client()\n\n self.flavors = self._get_flavors()\n self.images = self._get_images()", "def __init__(self, compute_client, resources):\n self._compute = compute_client.apitools_client\n self._messages = 
compute_client.messages\n self._http = compute_client.apitools_client.http\n self._batch_url = compute_client.batch_url\n self._resources = resources", "def _create_compute_service(self, **kwargs):\n\n dic = {'binary': 'nova-compute', 'topic': 'compute',\n 'report_count': 0, 'availability_zone': 'dummyzone'}\n dic['host'] = kwargs.get('host', 'dummy')\n s_ref = db.service_create(self.context, dic)\n if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():\n t = utils.utcnow() - datetime.timedelta(0)\n dic['created_at'] = kwargs.get('created_at', t)\n dic['updated_at'] = kwargs.get('updated_at', t)\n db.service_update(self.context, s_ref['id'], dic)\n\n dic = {'service_id': s_ref['id'],\n 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,\n 'vcpus_used': 16, 'local_gb_used': 10,\n 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,\n 'cpu_info': ''}\n dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)\n dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')\n dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)\n db.compute_node_create(self.context, dic)\n return db.service_get(self.context, s_ref['id'])", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)", "def get_nova(self, version='2.1'):\n if self.nova is None:\n self.nova = novaclient.Client(version, session=self.get_session())\n return self.nova", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def create_client(self) -> None:\n pass", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl", "def create(ctx, nova_client, **kwargs):\n\n # For possible changes by _maybe_transform_userdata()\n\n server = {\n 'name': ctx.node_id\n }\n server.update(copy.deepcopy(ctx.properties['server']))\n\n ctx.logger.debug(\n \"server.create() server before transformations: {0}\".format(server))\n\n if server.get('nics'):\n raise ValueError(\"Parameter with name 'nics' must not be passed to\"\n \" openstack provisioner 
(under host's \"\n \"properties.nova.instance)\".format(k))\n\n _maybe_transform_userdata(server)\n\n if ('management_network_name' in ctx.properties) and ctx.properties['management_network_name']:\n nc = os_common.NeutronClient().get(config=ctx.properties.get('neutron_config'))\n managemenet_network_id = nc.cosmo_get_named('network', ctx.properties['management_network_name'])['id']\n server['nics'] = [{'net-id': managemenet_network_id}]\n else:\n managemenet_network_id = None\n # print(server['nics'])\n\n # Sugar\n if 'image_name' in server:\n server['image'] = nova_client.images.find(name=server['image_name']).id\n del server['image_name']\n if 'flavor_name' in server:\n server['flavor'] = nova_client.flavors.find(name=server['flavor_name']).id\n del server['flavor_name']\n\n _fail_on_missing_required_parameters(\n server,\n ('name', 'flavor', 'image', 'key_name'),\n 'server')\n\n # Multi-NIC by networks - start\n network_nodes_runtime_properties = ctx.capabilities.get_all().values()\n if network_nodes_runtime_properties and 'management_network_name' not in ctx.properties:\n # Known limitation\n raise RuntimeError(\"Nova server with multi-NIC requires 'management_network_name' which was not supplied\")\n nics = [\n {'net-id': n['external_id']}\n for n in network_nodes_runtime_properties\n if neutron_client.cosmo_is_network(n['external_id'])\n ]\n if nics:\n server['nics'] = server.get('nics', []) + nics\n # Multi-NIC by networks - end\n\n # Multi-NIC by ports - start\n port_nodes_runtime_properties = ctx.capabilities.get_all().values()\n if port_nodes_runtime_properties and 'management_network_name' not in ctx.properties:\n # Known limitation\n raise RuntimeError(\"Nova server with multi-NIC requires 'management_network_name' which was not supplied\")\n nics = [\n {'port-id': n['external_id']}\n for n in port_nodes_runtime_properties\n if neutron_client.cosmo_is_port(n['external_id'])\n ]\n if nics:\n server['nics'] = server.get('nics', []) + nics\n # Multi-NIC by ports - end\n\n ctx.logger.debug(\n \"server.create() server after transformations: {0}\".format(server))\n\n # First parameter is 'self', skipping\n params_names = inspect.getargspec(nova_client.servers.create).args[1:]\n\n params_default_values = inspect.getargspec(\n nova_client.servers.create).defaults\n params = dict(itertools.izip(params_names, params_default_values))\n\n # Fail on unsupported parameters\n for k in server:\n if k not in params:\n raise ValueError(\"Parameter with name '{0}' must not be passed to\"\n \" openstack provisioner (under host's \"\n \"properties.nova.instance)\".format(k))\n\n for k in params:\n if k in server:\n params[k] = server[k]\n\n if not params['meta']:\n params['meta'] = dict({})\n params['meta']['cloudify_id'] = ctx.node_id\n params['meta']['cloudify_management_network_id'] = managemenet_network_id\n params['meta']['cloudify_management_network_name'] = ctx.properties.get('management_network_name')\n\n ctx.logger.info(\"Asking Nova to create server.\"\n \"Parameters: {0}\".format(str(params)))\n ctx.logger.debug(\"Asking Nova to create server. 
All possible parameters are: \"\n \"{0})\".format(','.join(params.keys())))\n\n try:\n s = nova_client.servers.create(**params)\n except nova_exceptions.BadRequest as e:\n # ctx.logger.error(e)\n if str(e).startswith(MUST_SPECIFY_NETWORK_EXCEPTION_TEXT):\n raise RuntimeError(\n \"Can not provision server: management_network_name is not \"\n \"specified but there are several networks that the server \"\n \"can be connected to.\"\n )\n raise RuntimeError(\"Nova bad request error: \" + str(e))\n # os.system(\"nova show \" + s.id)\n ctx['external_id'] = s.id", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def test_nova_create(tiny_cirros_server):\n\n assert ping_from_mnaio(tiny_cirros_server.accessIPv4)\n\n @pytest.mark.testinfra('ansible://' + tiny_cirros_server.accessIPv4)\n @pytest.mark.test_id('57f87c9e-3adb-11e9-b2ca-6a00035510c0')\n @pytest.mark.jira('ASC-31')\n def test_nova_create_connect(host):\n f = host.file('/etc/hosts')\n assert f.exists\n assert f.user == 'root'\n assert f.group == 'root'", "def __init__(__self__,\n resource_name: str,\n args: CloudServicesNetworkArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _init(config):\n if not config.cpu_model:\n logging.warning(\n \"It is strongly recommended to set a CPU model('--cloudCPUModel'). \"\n \"Otherwise the used machines and CPU models are undefined.\"\n )\n\n if not config.cloud_master:\n sys.exit(\"No URL of a VerifierCloud instance is given.\")\n\n revision = \":\".join(_get_revision(config))\n\n webclient = WebInterface(\n config.cloud_master,\n config.cloud_user,\n revision,\n user_agent=\"cpa_web_cloud.py\",\n version=__version__,\n )\n\n logging.info(\n \"Using %s version %s.\", webclient.tool_name(), webclient.tool_revision()\n )\n return webclient", "def __init__(self, compute_driver=None, *args, **kwargs):\n self.network_api = network.API()\n self.virtapi = ComputeVirtAPI(self)\n self.driver = driver.load_compute_driver(self.virtapi, compute_driver)\n self._resource_tracker_dict = {}\n self._sync_power_pool = eventlet.GreenPool()\n self._syncs_in_progress = {}\n\n super(ControllerManager, self).__init__(service_name=\"controller\", *args, **kwargs)", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "def init_compute_management_client(\n experiment_secrets: Secrets,\n experiment_configuration: Configuration) -> ComputeManagementClient:\n return __azure_client_factory(\"ComputeManagementClient\", Secrets)", "def run_example():\n #\n # Create all clients with an Application (service principal) token provider\n #\n credentials, subscription_id = get_credentials()\n resource_client = ResourceManagementClient(credentials, subscription_id)\n compute_client = ComputeManagementClient(credentials, subscription_id)\n network_client = NetworkManagementClient(credentials, subscription_id)\n try:\n # Create a NIC\n subnet_info = 
network_client.subnets.get(GROUP_NAME, VNET_NAME, SUBNET_NAME);\n nic = create_nic(network_client, subnet_info)\n\n\n #############\n # VM Sample #\n #############\n\n # Create Linux VM\n print('\\nCreating Linux Virtual Machine')\n vm_parameters = create_vm_parameters(nic.id, VM_REFERENCE['linux'])\n async_vm_creation = compute_client.virtual_machines.create_or_update(\n GROUP_NAME, VM_NAME, vm_parameters)\n async_vm_creation.wait()\n\n virtual_machine = compute_client.virtual_machines.get(\n GROUP_NAME,\n VM_NAME\n )\n except CloudError:\n print('A VM operation failed:', traceback.format_exc(), sep='\\n')\n else:\n print('All example operations completed successfully!')\n public_ip = network_client.public_ip_addresses.get(GROUP_NAME, PUBLIC_IP_NAME)\n print(public_ip.ip_address)\n finally:\n # Delete Resource group and everything in it\n print('\\nDone')", "def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client" ]
[ "0.7364414", "0.7071238", "0.6853191", "0.6742648", "0.66805136", "0.65438086", "0.6509512", "0.64971817", "0.64027095", "0.639064", "0.6300113", "0.6267286", "0.626444", "0.62389755", "0.61541337", "0.61468095", "0.6145711", "0.61278534", "0.61053056", "0.6089302", "0.6016518", "0.60113806", "0.60042274", "0.59840965", "0.5956244", "0.5955415", "0.5914785", "0.5911617", "0.5818846", "0.5748909" ]
0.7996696
0
instantiate neutron networking client
def gen_neutron_client(self): print "\t* Generating neutron client" self.neutronclient = neutronclient.Client(auth_url=self.auth_url, username=self.username, password=self.password, tenant_name=self.tenant_name, region_name=self.region_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')", "def __init__(self):\n super(NovaClientWrapper, self).__init__(\n retry_exceptions=(nova_exc.ConnectionRefused,\n nova_exc.Conflict),\n auth_exceptions=(nova_exc.Unauthorized),\n name=\"Nova\")", "def init_compute_clients(self):\n\n print \"\\t* instantiating clients\"\n # instantiate nova client\n self.gen_nova_client()\n\n # instantiate neutron client\n self.gen_neutron_client()\n\n # instantiate heat client (used to validate templates)\n self.gen_heat_client()", "def get_neutron(self, version='2'):\n if self.neutron is None:\n self.neutron = neutronclient.Client(session=self.get_session())\n return self.neutron", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def __init__(self, *args, **kwargs):\n self.driver = importutils.import_module(CONF.network_driver)\n\n self.q_conn = quantum_connection.QuantumClientConnection()\n self.m_conn = melange_connection.MelangeConnection()\n self.a_conn = aiclib_connection.AICLibConnection()\n\n # NOTE(tr3buchet): map for global uuids\n # if these should change, restart this service\n # self._nw_map will look like:\n # self._nw_map = {'0000000000-0000-0000-0000-000000000000': pub_uuid,\n # '1111111111-1111-1111-1111-111111111111': priv_uuid,\n # pub_uuid: '0000000000-0000-0000-0000-000000000000',\n # priv_uuid: '1111111111-1111-1111-1111-111111111111'}\n # there will be only one (each way) entry per label\n self._nw_map = {}\n self._rackconnect_servicenet = None\n\n if CONF.network_global_uuid_label_map:\n self._nw_map = self._get_nw_map()\n LOG.debug('the 
self._nw_map is |%s|' % self._nw_map)\n else:\n self._nw_map = {}\n\n self._rackconnect_roles = set(CONF.rackconnect_roles)\n rc_public_gateway_roles = CONF.rackconnect_public_gateway_roles\n self._rc_public_gateway_roles = set(rc_public_gateway_roles)\n\n super(QuantumManager, self).__init__(service_name='network',\n *args, **kwargs)", "def create_client(self) -> None:\n pass", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def __init__(self, *args):\n _snap.TNEANet_swiginit(self, _snap.new_TNEANet(*args))", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def setUp(self):\n self.clnt = CvpClient()\n nodes = [\"1.1.1.1\"]\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)\n self.api = CvpApi(self.clnt)", "def __init__(__self__,\n resource_name: str,\n args: CloudServicesNetworkArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)", "def __init__(self, client, network_id):\n super(NetworksMixin, self).__init__(client)\n self._network_id = network_id", "def __init__(self, project):\n super(NovaExtractor, self).__init__(project)\n\n self.nova = self._get_nova_client()\n self.glance = self._get_glance_client()\n self.neutron = self._get_neutron_client()\n\n self.flavors = self._get_flavors()\n self.images = self._get_images()", "def __init__(self, client):\n self.client = client", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def __init__(self, **kwargs):\n self.local = salt.client.LocalClient()\n self.minion_nodes = self._query()", "def client():\n\n client = Client()\n return client", "def __init__(self, url = None, context = \"corbaserver\"):\n self._initOrb (url)\n self._makeClients (\"manipulation\", self.defaultClients, context)", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def __init__(self, client):\n\n self.client = client", "def __init__(self, username, password, tenant_id, auth_url, api_protocol, api_host, api_port, api_resource):\n\n __logger__.info(\"Init CLOTO Client\")\n __logger__.debug(\"Client parameters: Username: %s, Password: %s, TenantId: %s, API protocol: %s, API host: %s, \"\n \"API port: %s, Base resource: %s\", username, password, tenant_id, api_protocol, api_host,\n api_port, api_resource)\n\n self.headers = 
dict()\n self.api_protocol = api_protocol\n self.api_host = api_host\n self.api_port = api_port\n self.api_resource = api_resource\n\n set_representation_headers(self.headers, content_type=HEADER_REPRESENTATION_JSON,\n accept=HEADER_REPRESENTATION_JSON)\n\n self._init_keystone_client(username, password, tenant_id, auth_url)\n self.token = self._get_auth_token()\n __logger__.debug(\"Token: %s\", self.token)\n\n self.headers.update({X_AUTH_TOKEN: self.token})\n self.headers.update({TENANT_ID: tenant_id})\n __logger__.debug(\"Headers with OpenStack credentials: %s\", self.headers)" ]
[ "0.72397894", "0.7001146", "0.6915458", "0.6756175", "0.66348696", "0.6515556", "0.6512618", "0.6477814", "0.6399194", "0.63689065", "0.62221146", "0.61706036", "0.61108536", "0.60914207", "0.6025074", "0.5990246", "0.5989588", "0.5977329", "0.5937462", "0.5884759", "0.5878487", "0.58779335", "0.58723474", "0.58610684", "0.5847372", "0.5841261", "0.5839244", "0.5837133", "0.5822653", "0.5799903" ]
0.796347
0
generate heat template information
def gen_heat_data(self): print "\t* Generating heat data" self.gen_heat_client() stacks = self.heatclient.stacks print "\t? Please select the stack to generate a template from" # list stacks and prompt user to select apropriate stack template stack_list = [] for idx, stack in enumerate(stacks.list()): print "\t - [%d] Stack: %s \n" % (idx, stack.stack_name) stack_list.append(stack) stack_num = int(raw_input("\t - ")) print "\t* You have selected: %s" % stack_list[stack_num].stack_name # stack id self.heat_template = stacks.template(stack_list[stack_num].id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_heat_template(self):\n\n print \"\\t* Generating heat template in file: %s\" % self.heat_filename\n if self.cmdline:\n with open(self.heat_filename, 'w') as f:\n f.write(yaml.safe_dump(self.heat_template))\n\n try:\n self.heatclient.stacks.validate(template=yaml.safe_dump(self.heat_template))\n except Exception as e:\n print \"Unfortunately your file is malformed. Received error: (%s)\" % str(e)\n print \"Exiting ...\"\n sys.exit(1)\n\n return self.heat_template", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def run(self):\n\n print \"\\n\\n\\tPlease Note: Templates are generated based off\"\n print \"\\t of the OS environment variables that are set.\"\n print \"\\t* Running ReHeat.\"\n\n self.set_creds()\n self.gen_ip() # used in template description\n self.gen_tenant_id()\n if self.reheat_error:\n return self.reheat_errmsg\n\n print \"\\t* You have opted to generate %s file[s]\" % self.template_type\n if 'all' in self.template_type:\n self.gen_heat_data()\n self.gen_heat_template()\n self.gen_compute_data()\n return self.gen_compute_template()\n 
elif 'heat' in self.template_type:\n self.gen_heat_data()\n return self.gen_heat_template()\n elif 'compute' in self.template_type:\n self.gen_compute_data()\n return self.gen_compute_template()\n else:\n raise Exception(\"User provided an improper template type.\")", "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def gen_compute_template(self):\n\n print \"\\t* Generating compute template in file %s\" % self.compute_filename\n if self.cmdline:\n with open(self.compute_filename, 'w') as f:\n f.write(yaml.safe_dump(self.compute_template))\n\n try:\n self.heatclient.stacks.validate(template=yaml.safe_dump(self.compute_template))\n except Exception as e:\n print \"Unfortunately your file is malformed. Received error: (%s)\" % str(e)\n print \"Exiting ...\"\n sys.exit(1)\n\n return self.compute_template", "def fillBackgroundTemplates(opt):\n\n totalBkg={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n for f in [os.path.join(opt.input,x) for x in os.listdir(opt.input) if 'Data13TeV' in x]:\n if 'MuonEG' in f : continue\n data.AddFile(f)\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel \n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n\n print '\\t',catName,categCut\n\n #background modelling histos\n histos=[]\n data_obs=None\n for name,pfix in [('bkg_'+catName,'mix'),('bkg_%s_bkgShape'%catName,'mixem')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),templCuts,'goff')\n h=data.GetHistogram()\n histos.append(h.Clone(name))\n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalBkg[icat]=h.Integral()\n if not opt.unblind :\n data_obs=h.Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n\n #observed data in this category if unblinding\n if opt.unblind:\n data.Draw('mmiss >> h({1},{2},{3})'.format(opt.nbins,opt.mMin,opt.mMax),categCut,'goff')\n data_obs=data.GetHistogram().Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n templates.append(data_obs)\n\n print '\\t total background:',totalBkg\n return totalBkg,templates", "def healpix_template(ncomp, nside, coordinate=None):\n\n temp = so_map()\n\n if ncomp == 3:\n temp.data = np.zeros((3, 12 * nside ** 2))\n else:\n temp.data = np.zeros((12 * nside ** 2))\n\n temp.pixel = \"HEALPIX\"\n temp.ncomp = ncomp\n temp.nside = nside\n temp.geometry = \"healpix geometry\"\n temp.coordinate = coordinate\n return temp", "def get_heat_json_from_topology_config(config, project_name='admin'):\n\n template = dict()\n template[\"heat_template_version\"] = \"2013-05-23\"\n template[\"resources\"] = dict()\n\n for network in config[\"networks\"]:\n nr = dict()\n nr[\"type\"] = \"OS::Neutron::Net\"\n\n nrp = dict()\n nrp[\"shared\"] = False\n nrp[\"name\"] = network[\"name\"]\n nrp[\"admin_state_up\"] = True\n\n nr[\"properties\"] = nrp\n\n nrs = dict()\n nrs[\"type\"] = 
\"OS::Neutron::Subnet\"\n #\n p = dict()\n p[\"cidr\"] = \"1.1.1.0/24\"\n p[\"enable_dhcp\"] = False\n p[\"gateway_ip\"] = \"\"\n p[\"name\"] = network[\"name\"] + \"_subnet\"\n if network[\"name\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n elif network[\"name\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": network[\"name\"]}\n\n nrs[\"properties\"] = p\n\n template[\"resources\"][network[\"name\"]] = nr\n template[\"resources\"][network[\"name\"] + \"_subnet\"] = nrs\n\n # cache the image_details here to avoid multiple REST calls for details about an image type\n # as many topologies have lots of the same types of images around\n image_details_dict = dict()\n\n for device in config[\"devices\"]:\n\n if device[\"imageId\"] in image_details_dict:\n image_details = image_details_dict[device[\"imageId\"]]\n else:\n image_details = imageUtils.get_image_detail(device[\"imageId\"])\n image_details_dict[device[\"imageId\"]] = image_details\n\n image_name = image_details[\"name\"]\n\n image_disk_size = 20\n\n # set the size in GB, rounding up to the nearest int\n if 'size' in image_details:\n current_size = float(image_details['size'])\n image_disk_size = int(math.ceil(current_size / 1000000000))\n\n # if the glance image asks for a minimum disk size, let's see if it's larger that what we have\n if \"min_disk\" in image_details and image_details['min_disk'] > image_disk_size:\n image_disk_size = image_details[\"min_disk\"]\n\n # if the user has specified a desired disk size, grab it here so we get the correct flavor\n if type(image_disk_size) is int and device[\"resizeImage\"] > image_disk_size:\n image_disk_size = device[\"resizeImage\"]\n\n # determine openstack flavor here\n device_ram = int(device[\"ram\"])\n device_cpu = int(device[\"cpu\"])\n\n flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project,\n device_cpu,\n device_ram,\n image_disk_size\n )\n\n flavor = flavor_detail[\"name\"]\n\n dr = dict()\n dr[\"type\"] = \"OS::Nova::Server\"\n dr[\"properties\"] = dict()\n dr[\"properties\"][\"flavor\"] = flavor\n dr[\"properties\"][\"networks\"] = []\n index = 0\n for p in device[\"interfaces\"]:\n port = dict()\n port[\"port\"] = dict()\n port[\"port\"][\"get_resource\"] = device[\"name\"] + \"_port\" + str(index)\n index += 1\n dr[\"properties\"][\"networks\"].append(port)\n\n dr[\"properties\"][\"image\"] = image_name\n dr[\"properties\"][\"name\"] = device[\"name\"]\n\n if device[\"configDriveSupport\"]:\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n metadata[\"console\"] = \"vidconsole\"\n dr[\"properties\"][\"metadata\"] = metadata\n\n # let's check all the configDriveParams and look for a junos config\n # FIXME - this may need tweaked if we need to include config drive cloud-init support for other platforms\n # right now we just need to ignore /boot/loader.conf\n for cfp in device[\"configDriveParams\"]:\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/boot/loader.conf\":\n logger.debug(\"Creating loader.conf config-drive entry\")\n template_name = cfp[\"template\"]\n loader_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n 
logger.debug('----------')\n logger.debug(loader_string)\n logger.debug('----------')\n for l in loader_string.split('\\n'):\n if '=' in l:\n left, right = l.split('=')\n if left not in metadata and left != '':\n metadata[left] = right.replace('\"', '')\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/juniper.conf\":\n logger.debug(\"Creating juniper.conf config-drive entry\")\n template_name = cfp[\"template\"]\n personality_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n dr[\"properties\"][\"personality\"] = dict()\n dr[\"properties\"][\"personality\"] = {\"/config/juniper.conf\": personality_string}\n else:\n logger.debug('No juniper.conf found here ')\n\n if device['cloudInitSupport']:\n logger.debug('creating cloud-init script')\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n dr[\"properties\"][\"metadata\"] = metadata\n # grab the prefix len from the management subnet which is in the form 192.168.122.0/24\n if '/' in configuration.management_subnet:\n management_prefix_len = configuration.management_subnet.split('/')[1]\n else:\n management_prefix_len = '24'\n\n management_ip = device['ip'] + '/' + management_prefix_len\n\n device_config = osUtils.get_cloud_init_config(device['name'],\n device['label'],\n management_ip,\n device['managementInterface'],\n device['password'])\n\n script_string = \"\"\n if \"configScriptId\" in device and device[\"configScriptId\"] != 0:\n logger.debug(\"Passing script data!\")\n try:\n script = Script.objects.get(pk=int(device[\"configScriptId\"]))\n script_string = script.script\n device_config[\"script_param\"] = device.get(\"configScriptParam\", '')\n logger.debug(script_string)\n except ObjectDoesNotExist:\n logger.info('config script was specified but was not found!')\n\n user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string)\n dr[\"properties\"][\"user_data\"] = user_data_string\n\n template[\"resources\"][device[\"name\"]] = dr\n\n for device in config[\"devices\"]:\n index = 0\n for port in device[\"interfaces\"]:\n pr = dict()\n pr[\"type\"] = \"OS::Neutron::Port\"\n p = dict()\n\n if port[\"bridge\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n\n # specify our desired IP address on the management interface\n p['fixed_ips'] = list()\n fip = dict()\n fip['ip_address'] = device['ip']\n p['fixed_ips'].append(fip)\n\n elif port[\"bridge\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": port[\"bridge\"]}\n # disable port security on all other ports (in case this isn't set globally)\n p['port_security_enabled'] = False\n\n pr[\"properties\"] = p\n template[\"resources\"][device[\"name\"] + \"_port\" + str(index)] = pr\n index += 1\n\n return json.dumps(template)", "def heatmap(pivt):\n s = pivt.style.applymap(color_vals) # returns a style object\n with open(\"temp.html\", \"w\") as f:\n f.write(s.render())", "def gen_compute_data(self):\n\n print \"\\t* Generating combined nova and neutron data\"\n self.init_compute_clients()\n self.compute_data[\"heat_template_version\"] = \"2013-05-23\"\n self.compute_data[\"description\"] = \"Generated Template %s on Project %s\" % \\\n 
(str(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")), str(self.tenant_name))\n self.compute_data[\"parameters\"] = {}\n self.compute_data[\"resources\"] = {}\n self.gen_parameters()\n self.gen_resources()\n self.compute_template = self.compute_data", "def get_mapdata():\n return render_template(\"l_heatmap.html\")", "def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)", "def fillSignalTemplates(opt):\n\n totalSig={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n data.AddFile(os.path.join(opt.input,opt.sig))\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel\n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n print '\\t',catName,categCut\n\n #signal modelling histograms\n histos=[]\n for name,pfix in [('sig_'+catName,''),('sig_%s_sigShape'%catName,'mix')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n wgtExpr='wgt*%f'%(SIGNALXSECS[opt.xangle]*opt.lumi)\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),\n '{0}*({1})'.format(wgtExpr,templCuts),\n 'goff')\n h=data.GetHistogram()\n histos.append( h.Clone(name) ) \n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalSig[icat]=h.Integral()\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n \n print '\\t total signal:',totalSig\n return totalSig,templates", "def extract_template(temp_dir, fea_type):\n kps = []\n descriptors = np.array([])\n in_path = temp_dir + 'imgs/' # images\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n print(img.shape)\n kp, des = get_des(fea_type, img)\n if descriptors.size == 0:\n kps = kp\n descriptors = des\n else:\n kps.extend(kp)\n descriptors = np.vstack((descriptors, des))\n\n print(\"template descriptors shape: \" + str(descriptors.shape))\n with open(temp_dir + fea_type + '_template_0.pickle', 'wb') as ff:\n pickle.dump(descriptors, ff)\n\n # with open(temp_dir + fea_type + '_template_0.pickle', 'rb') as f:\n # template = pickle.load(f)\n\n return", "def _generate_template(dictionary):\n task_dict = dict(dictionary)\n lines = []\n for key in sorted(TaskInfo._READ_ONLY_FIELDS):\n if key not in task_dict:\n continue\n\n value = TaskInfo._dpop(task_dict, key)\n lines.extend([\n \"# {}:\".format(key),\n \"# {}\".format(\"\\n#\".join(value.splitlines())),\n \"\",\n ])\n\n for key in sorted(task_dict.keys()):\n lines.extend([\n \"{}:\".format(key),\n str(task_dict[key]),\n \"\",\n ])\n\n return \"\\n\".join(lines)", "def 
make_heatmap(self):\n\n self.get_selected_categories_and_codes()\n codes = deepcopy(self.codes)\n if len(codes) > 40:\n codes = codes[:40]\n Message(self.app, _(\"Too many codes\"), _(\"Too many codes for display. Restricted to 40\")).exec()\n # Filters\n heatmap_type = self.ui.comboBox_heatmap.currentText()\n if heatmap_type == \"\":\n return\n title = heatmap_type + \" \" + _(\"Heatmap\")\n self.get_selected_categories_and_codes()\n y_labels = []\n for c in codes:\n y_labels.append(c['name'])\n category = self.ui.comboBox_category.currentText()\n self.ui.lineEdit_filter.setText(\"\")\n self.ui.comboBox_case.setCurrentIndex(0)\n self.ui.comboBox_file.setCurrentIndex(0)\n owner, subtitle = self.owner_and_subtitle_helper()\n\n # Get all the coded data\n data = []\n x_labels = []\n cur = self.app.conn.cursor()\n if heatmap_type == \"File\":\n if not self.attribute_file_ids:\n sql = \"select id, name from source order by name\"\n cur.execute(sql)\n files = cur.fetchall()\n else:\n attr_msg, file_ids_txt = self.get_file_ids()\n subtitle += attr_msg\n sql = \"select id, name from source where id \" + file_ids_txt + \" order by name\"\n cur.execute(sql)\n files = cur.fetchall()\n if len(files) > 40:\n files = files[:40]\n Message(self.app, _(\"Too many files\"), _(\"Too many files for display. Restricted to 40\")).exec()\n for f in files:\n x_labels.append(f[1])\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for f in files:\n code_counts.append(self.heatmap_counter_by_file_and_code(owner, f[0], code_['cid']))\n data.append(code_counts)\n if heatmap_type == \"Case\":\n if not self.attribute_case_ids_and_names: # self.attribute_file_ids:\n sql = \"select caseid, name from cases order by name\"\n cur.execute(sql)\n cases = cur.fetchall()\n if len(cases) > 40:\n cases = cases[:40]\n Message(self.app, _(\"Too many cases\"), _(\"Too many cases for display. 
Restricted to 40\")).exec()\n for c in cases:\n x_labels.append(c[1])\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for c in cases:\n cur.execute(\"SELECT fid FROM case_text where caseid=?\", [c[0]])\n fids = cur.fetchall()\n case_counts = 0\n for fid in fids:\n case_counts += self.heatmap_counter_by_file_and_code(owner, fid[0], code_['cid'])\n code_counts.append(case_counts)\n data.append(code_counts)\n else:\n attr_msg, file_ids_txt = self.get_file_ids()\n print(self.attribute_case_ids_and_names)\n for c in self.attribute_case_ids_and_names:\n x_labels.append(c[1])\n subtitle += attr_msg\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for c in self.attribute_case_ids_and_names:\n cur.execute(\"SELECT fid FROM case_text where caseid=?\", [c[0]])\n fids = cur.fetchall()\n # TODO revise fids if file parameters selected\n case_counts = 0\n for fid in fids:\n case_counts += self.heatmap_counter_by_file_and_code(owner, fid[0], code_['cid'])\n code_counts.append(case_counts)\n data.append(code_counts)\n # Create the plot\n fig = px.imshow(data,\n labels=dict(x=heatmap_type, y=\"Codes\", color=\"Count\"),\n x=x_labels,\n y=y_labels,\n title=title+subtitle\n )\n fig.update_xaxes(side=\"top\")\n fig.show()\n self.helper_export_html(fig)\n self.ui.comboBox_heatmap.blockSignals(True)\n self.ui.comboBox_heatmap.setCurrentIndex(0)\n self.ui.comboBox_heatmap.blockSignals(False)", "def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n plt.figure()\n ax = sns.heatmap(\n data,\n cmap='RdBu',\n xticklabels=2,\n yticklabels=2)\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n ax.invert_yaxis()\n ax.axhline(linewidth=4, color='black')\n ax.axvline(linewidth=4, color='black')\n ax.collections[0].colorbar.set_label('Fitness')\n plt.savefig('figures/Voltage Clamp Figure/Single VC Optimization/'\n 'heatmap.svg')", "def Template(Fenetre_largeur,Fenetre_hauteur):\r\n li= Select_ligne(\"Nombre de lignes: \",Fenetre_largeur,Fenetre_hauteur)\r\n nom=\"Template\"\r\n fich=\"Template\"\r\n version=0\r\n while Path(\"stages/\"+fich+\".txt\").is_file() == True:\r\n version+=1\r\n fich=nom+str(version)\r\n fichier=open(\"stages/\"+fich+\".txt\",'w')\r\n fichier.write(str(li))\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n for i in range(li):\r\n for j in range(10):\r\n fichier.write(\"0,0|\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"gauche: resistance, droite: bonus\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"resistance max: 3\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"6=barre+\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"7=score+\")\r\n fichier.close()", "def Build_Background_Template(numBGPhotons, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS = False,outputSize=300,angularSize=10.0):\r\n \r\n numPhotons = numBGPhotons\r\n numHigh = int(round(.32 *numPhotons))\r\n numLow = numPhotons-numHigh\r\n \r\n bgEventsX = []\r\n bgEventsY = []\r\n \r\n bgTemplate = bgTemplate *(1.0-flatLevel) + flatLevel\r\n# import matplotlib.pyplot as plt\r\n# plt.imshow(bgTemplate,'jet',vmin=0, vmax=1)\r\n# plt.colorbar()\r\n# plt.show()\r\n\r\n 
app=float(angularSize)/float(outputSize) # angle per pixel\r\n for i in range(numPhotons):\r\n x ,y = 0, 0\r\n while True:\r\n x,y = np.random.randint(0,high = len(bgTemplate)),np.random.randint(0,high = len(bgTemplate))\r\n if (np.random.ranf() < bgTemplate[y][x]):\r\n break\r\n # Shift and scale coordinates to output map and then compute PSF modification to the position.\r\n psfMod = PSF_Spread(PSFTableFront,PSFTableBack, HESS =HESS)\r\n dx = psfMod[0]*math.cos(psfMod[1]) # PSF shift in deg\r\n dy = psfMod[0]*math.sin(psfMod[1]) # PSF shift in deg\r\n \r\n bgEventsX.append((x-outputSize/2.0)*app + dx)\r\n bgEventsY.append((y-outputSize/2.0)*app + dy)\r\n \r\n return (bgEventsX, bgEventsY)", "def get_visual_attrib_template():\n return {\"conaffinity\": \"0\", \"contype\": \"0\", \"mass\": \"1e-8\", \"group\": \"1\"}", "def heatmap():\n\n team = int(request.form.get('teamname'))\n player = int(request.form.get('player'))\n attacks = request.form.get('attacks')\n data = request.form.get('datafiles')\n\n print 'Making heat map for player #%d, team #%d, attacks %s' % (player, team, attacks)\n\n if attacks.lower() == 'all':\n attacks = ['ALL']\n else:\n attacks = attacks.split(',')\n\n only_kills = request.form.get('kills')\n\n files = []\n if data == 'uploads':\n data = 'uploads/%d' % session['user_id']\n folder = 'data/' + data\n\n data_path = os.path.join(os.getcwd(), folder)\n if not os.path.isdir(data_path):\n os.makedirs(data_path)\n\n for f in os.listdir(data_path):\n if f.endswith('.dvw'):\n files.append(folder + '/' + f)\n\n locations = []\n for file_name in files:\n parser = Parser(file_name)\n new_locations = parser.get_attack_info(team, player, attacks, only_kills=only_kills)\n locations.extend(new_locations)\n\n top_caption, bottom_caption = generate_caption(team, player, attacks, only_kills)\n output_url = generate_output_filename(team, player, attacks, only_kills, session['user_id'])\n output_dict = heat_map.draw_arcs_pillow(locations, output_url, top_caption=top_caption, bottom_caption=bottom_caption)\n\n result_dict = {\n 'output_url': output_url,\n 'width': output_dict['width'],\n 'height': output_dict['height'],\n }\n print locations\n print 'Rendering finished image', output_url\n return render_template('heatmap.html', result_dict=result_dict)", "def generate_haproxy_config(template=None, instances=None):\n\n return Template(filename=template).render(instances=instances)", "def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF", "def generate_heat_model(input_model, virt_config):\n heat_template = dict(\n description='Template for deploying Ardana {}'.format(\n input_model['cloud']['name'])\n )\n\n clm_cidr = IPNetwork(input_model['baremetal']['subnet'],\n input_model['baremetal']['netmask'])\n clm_network = None\n heat_networks = heat_template['networks'] = dict()\n\n # First, add L2 neutron provider networks defined in the input\n # model's neutron configuration\n for neutron_network in itervalues(input_model['neutron-networks']):\n heat_network = dict(\n name=neutron_network['name'],\n is_conf=False,\n is_mgmt=False,\n external=neutron_network['external']\n )\n if neutron_network.get('cidr'):\n heat_network['cidr'] = neutron_network['cidr']\n if neutron_network.get('gateway'):\n heat_network['gateway'] = 
neutron_network['gateway']\n if neutron_network.get('provider'):\n provider = neutron_network['provider'][0]\n if provider['network_type'] == 'vlan':\n if not provider.get('segmentation_id'):\n # Neutron network is incompletely defined (VLAN tag is\n # dynamically allocated), so it cannot be defined as an\n # individual heat network\n continue\n heat_network['vlan'] = provider['segmentation_id']\n elif provider['network_type'] not in ['flat', 'vlan']:\n # Only layer 2 neutron provider networks are considered\n continue\n heat_networks[heat_network['name']] = heat_network\n\n # Collect all the routers required by routes configured in the input model,\n # as pairs of networks\n routers = set()\n\n # Next, add global networks\n for network in itervalues(input_model['networks']):\n cidr = None\n vlan = network['vlanid'] if network.get('tagged-vlan', True) else None\n gateway = IPAddress(\n network['gateway-ip']) if network.get('gateway-ip') else None\n if network.get('cidr'):\n cidr = IPNetwork(network['cidr'])\n\n heat_network = dict(\n name=network['name'],\n is_conf=False,\n is_mgmt=False,\n external=False\n )\n if cidr:\n heat_network['cidr'] = str(cidr)\n if gateway:\n heat_network['gateway'] = str(gateway)\n\n # There is the special case of global networks being used to implement\n # flat neutron provider networks. For these networks, we need to\n # create a heat network based on the global network parameters\n # (i.e. VLAN) and a heat subnet based on the neutron network\n # parameters\n for neutron_network in itervalues(network['network-group'].get(\n 'neutron-networks', {})):\n heat_neutron_network = heat_networks.get(neutron_network['name'])\n if not heat_neutron_network or heat_neutron_network.get('vlan'):\n # Ignore neutron networks that:\n # - were not already considered at the previous step (i.e.\n # are not fully defined or are not layer 2 based)\n # - have a vlan (i.e. 
are not flat)\n continue\n\n # Replace the heat neutron network with this global network\n # This is the same as updating the heat global network with subnet\n # attributes taken from the neutron network\n del heat_networks[neutron_network['name']]\n heat_network = heat_neutron_network\n heat_network['name'] = network['name']\n\n # Only one flat neutron provider network can be associated with a\n # global network\n break\n\n if vlan:\n heat_network['vlan'] = vlan\n\n # For each route, track down the target network\n for route in network['network-group']['routes']:\n if route == 'default':\n # The default route is satisfied by adding the network to the\n # external router\n heat_network['external'] = True\n elif 'networks' in route:\n # If the route points to a network group, track down the\n # networks associated with it\n for network_name in route['networks'].keys():\n routers.add((heat_network['name'], network_name,))\n else:\n routers.add((heat_network['name'], route['name'],))\n\n if cidr and cidr in clm_cidr:\n clm_network = heat_network\n heat_network['external'] = heat_network['is_conf'] = True\n\n # Create an address pool range that excludes the list of server\n # static IP addresses\n fixed_ip_addr_list = \\\n [IPAddress(server['ip-addr'])\n for server in itervalues(input_model['servers'])]\n if gateway:\n fixed_ip_addr_list.append(gateway)\n start_addr = cidr[1]\n end_addr = cidr[-2]\n for fixed_ip_addr in sorted(list(set(fixed_ip_addr_list))):\n if start_addr <= fixed_ip_addr <= end_addr:\n if fixed_ip_addr - start_addr < end_addr - fixed_ip_addr:\n start_addr = fixed_ip_addr + 1\n else:\n end_addr = fixed_ip_addr - 1\n heat_network['allocation_pools'] = \\\n [[str(start_addr), str(end_addr)]]\n\n elif ('component-endpoints' in network['network-group'] and 'default'\n in network['network-group']['component-endpoints']):\n heat_network['external'] = heat_network['is_mgmt'] = True\n\n # Create an address pool range that is outside of the range\n # of IP addresses allocated by Ardana\n mgmt_net_last = IPAddress(IPNetwork(network['cidr']).last)\n heat_network['allocation_pools'] = \\\n [[str(mgmt_net_last - EXTERNAL_MGMT_ADDR_RANGE),\n str(mgmt_net_last - 1)]]\n elif 'bmc' in network['name'].lower():\n heat_network['external'] = True\n elif True in [('public' in lb['roles'])\n for lb in network['network-group'].get('load-balancers',\n [])]:\n heat_network['external'] = True\n\n heat_networks[network['name']] = heat_network\n\n heat_template['routers'] = []\n for network1, network2 in routers:\n if network1 not in heat_template['networks'] or \\\n network2 not in heat_template['networks']:\n continue\n network1 = heat_template['networks'][network1]\n network2 = heat_template['networks'][network2]\n # Re-use the external router, if at least one of the networks is\n # already attached to it\n if network1['external'] or network2['external']:\n network1['external'] = network2['external'] = True\n else:\n heat_template['routers'].append([network1['name'],\n network2['name']])\n\n heat_interface_models = heat_template['interface_models'] = dict()\n\n for interface_model in itervalues(input_model['interface-models']):\n heat_interface_model = \\\n heat_interface_models[interface_model['name']] = \\\n dict(\n name=interface_model['name'],\n ports=[]\n )\n ports = dict()\n clm_ports = dict()\n for interface in itervalues(interface_model['network-interfaces']):\n devices = interface['bond-data']['devices'] \\\n if 'bond-data' in interface \\\n else [interface['device']]\n for device in 
devices:\n port_list = ports\n port = dict(\n name=device['name'],\n networks=[]\n )\n if 'bond-data' in interface:\n port['bond'] = interface['device']['name']\n port['primary'] = \\\n (device['name'] ==\n interface['bond-data']['options'].get('primary',\n device['name']))\n\n for network_group in \\\n interface.get('network-groups', []) + \\\n interface.get('forced-network-groups', []):\n\n port['networks'].extend([network['name'] for network in\n itervalues(network_group[\n 'networks'])])\n\n # Attach the port only to those neutron networks that have\n # been validated during the previous steps\n port['networks'].extend([network['name'] for network in\n itervalues(network_group.get(\n 'neutron-networks',\n dict())) if\n network['name'] in heat_networks])\n\n if clm_network['name'] in network_group['networks']:\n # if the CLM port is a bond port, then only the\n # primary is considered if configured\n if not clm_ports and port.get('primary', True):\n # Collect the CLM port separately, to put it at\n # the top of the list and to mark it as the\n # \"management\" port - the port to which the\n # server's management IP address is assigned\n port_list = clm_ports\n\n port_list[device['name']] = port\n\n # Add a port for each device, starting with those ports attached to\n # the CLM network while at the same time preserving the order of the\n # original ports. Ultimately, the port names will be re-aligned to\n # those in the input model by an updated NIC mappings input model\n # configuration\n heat_interface_model['ports'] = [p[1] for _, p in enumerate(\n sorted(clm_ports.items()) + sorted(ports.items()))]\n\n # Generate storage setup (volumes)\n #\n # General strategy:\n # - one volume for each physical volume specified in the disk model\n # - the size of each volume cannot be determined from the input model,\n # so this information needs to be supplied separately (TBD)\n\n heat_disk_models = heat_template['disk_models'] = dict()\n disks = virt_config['disks']\n\n for disk_model in itervalues(input_model['disk-models']):\n heat_disk_model = heat_disk_models[disk_model['name']] = dict(\n name=disk_model['name'],\n volumes=[]\n )\n devices = []\n for volume_group in disk_model.get('volume-groups', []):\n devices += volume_group['physical-volumes']\n for device_group in disk_model.get('device-groups', []):\n devices += [device['name'] for device in device_group['devices']]\n for device in sorted(list(set(devices))):\n if device.endswith('da_root'):\n continue\n device = device.replace('/dev/sd', '/dev/vd')\n volume_name = device.replace('/dev/', '')\n\n size = virt_config['disk_size']\n # Check if disk size is configured explicitly for the disk model\n if disk_model['name'] in disks:\n size = disks[disk_model['name']]\n if isinstance(size, dict):\n # Use the disk size specified for the volume name, or\n # the disk model default, or the global default\n size = size.get(volume_name) or \\\n size.get('default') or \\\n virt_config['disk_size']\n heat_disk_model['volumes'].append(dict(\n name=volume_name,\n mountpoint=device,\n size=size\n ))\n\n # Generate VM setup (servers)\n #\n # General strategy:\n # - one server for each server specified in the disk model\n # - the CLM server is special:\n # - identification: server hosting the lifecycle-manager\n # service component\n # - the floating IP is associated with the \"CLM\" port attached to it\n # - the image and flavor used for the server cannot be determined from\n # the input model so this information needs to be supplied separately\n\n 
heat_servers = heat_template['servers'] = []\n images = virt_config['images']\n flavors = virt_config['flavors']\n\n clm_server = None\n for server in itervalues(input_model['servers']):\n distro_id = server.get('distro-id', virt_config['sles_distro_id'])\n\n image = None\n # Check if image is configured explicitly\n # for the server or for the role\n if server['id'] in images:\n image = images[server['id']]\n elif server['role']['name'] in images:\n image = images[server['role']['name']]\n if isinstance(image, dict):\n # Use the image specified for the distribution, or\n # the global default\n image = image.get(distro_id)\n if not image:\n image = virt_config['sles_image']\n if distro_id == virt_config['rhel_distro_id']:\n image = virt_config['rhel_image']\n\n flavor = None\n # Check if image is configured explicitly\n # for the server or for the role\n if server['id'] in flavors:\n flavor = flavors[server['id']]\n elif server['role']['name'] in flavors:\n flavor = flavors[server['role']['name']]\n\n heat_server = dict(\n name=server['id'],\n ip_addr=server['ip-addr'],\n ilo_ip=server.get('ilo-ip'),\n mac_addr=server.get('mac-addr'),\n role=server['role']['name'],\n interface_model=server['role']['interface-model']['name'],\n disk_model=server['role']['disk-model']['name'],\n image=image,\n flavor=flavor,\n is_admin=False,\n is_controller=False,\n is_compute=False\n )\n # Figure out which server is the CLM host, which are controllers\n # and which are computes. This information is used e.g. to determine\n # the reboot order during the MU workflow and to identify flavors\n # unless explicitly specified for each server or server role\n service_groups = list(server['role'].get('clusters', {}).values())\n service_groups += list(server['role'].get('resources', {}).values())\n for service_group in service_groups:\n # The CLM server is the first server hosting the lifecycle-manager\n # service component.\n # Compute nodes host the nova-compute service component\n if 'nova-compute' in service_group['service-components']:\n heat_server['is_compute'] = True\n if not heat_server['flavor']:\n heat_server['flavor'] = virt_config['compute_flavor']\n # Every server that is not a compute node and hosts service\n # components other than those required by the CLM is considered\n # a controller node\n else:\n ctrl_service_components = filter(\n lambda sc: sc not in virt_config['clm_service_components'],\n service_group['service-components'])\n if list(ctrl_service_components):\n heat_server['is_controller'] = True\n if not heat_server['flavor']:\n heat_server['flavor'] = \\\n virt_config['controller_flavor']\n if not clm_server and \\\n 'lifecycle-manager' in service_group['service-components']:\n clm_server = heat_server\n heat_server['is_admin'] = True\n if not heat_server['flavor']:\n heat_server['flavor'] = virt_config['clm_flavor']\n\n heat_servers.append(heat_server)\n\n return heat_template", "def create_template(path_string) :\r\n today = datetime.now()\r\n today = today.strftime('%y%y%m%d%H%M%S')\r\n # print(today)\r\n temp_path = os.path.join(path_string, today)\r\n # temp_path = today\r\n # Create a workbook and add a worksheet.\r\n workbook = xlsxwriter.Workbook(f'{temp_path}.xlsx')\r\n worksheet0 = workbook.add_worksheet('ATR') # Defaults to Sheet1.\r\n worksheet1 = workbook.add_worksheet('ESS') # Data.\r\n worksheet2 = workbook.add_worksheet('Statistics') # Defaults to Sheet\r\n\r\n # Some data we want to write to the worksheet.\r\n Tests_List = ['Temp', 'SN', 'Output Power @ P1dBCP', 'Output 
Power Control Range/Resolution, FWD PWR Ind',\r\n 'Output IP3', 'LO Carrier Leakage', 'Sideband Suppression',\r\n 'Frequency Accuracy and Stability', 'A1 - Noise Figure vs. Gain', 'A1 - Gain variability',\r\n 'A1 - Image Suppression vs. Gain', 'Spurious',\r\n 'A2 - Noise Figure vs. Gain', 'A2 - Gain variability', 'A2 - Image Suppression vs. Gain',\r\n 'Average Power Consumption', 'Input Voltage', 'Digital Tests'\r\n ]\r\n\r\n # Start from the first cell. Rows and columns are zero indexed.\r\n row = 0\r\n # col = 0\r\n\r\n # Iterate over the data and write it out row by row.\r\n for index in range(3) :\r\n for i in range(len(Tests_List)) :\r\n worksheet0.write(row, i, Tests_List[i])\r\n worksheet1.write(row, i, Tests_List[i])\r\n worksheet2.write(row, i, Tests_List[i])\r\n # col += 1\r\n\r\n workbook.close()\r\n\r\n return today, temp_path", "def describe(self, template='population_default.txt', engine='default'):\n raise NotImplementedError", "def render_template(self):\n # create and expand commandline template\n tmpl_r1 = self.finditem.sub(r'{{\\2}}', self.raw_template)\n tmpl_r2 = jinja2.Template(tmpl_r1).render(self.variables)\n self.relation.script = tmpl_r2\n self.relation.template_sha256 = self.variables['template_sha256']", "def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = 
PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template", "def build_template_meas(name='example'):\n\n meas = templates.TemplateMeasurement(name)\n meas.set_lumi(1, 0.02)\n\n # Base shape for the signal, triangular distribution\n sig = np.array([10000, 12500, 15000, 12500, 10000], dtype=float)\n # Add a source for the signal\n src_sig = meas.new_source('sig', sig)\n src_sig.use_lumi() # impacted by luminosity\n src_sig.use_stats(.1*(10*sig)**0.5) # stat unc. from 10x MC\n src_sig.set_xsec(1, 0.95, 1.05) # cross section constrained to +/-5%\n # Add a template: under the influence of parameter p, a linear slope is\n # added to the signal\n src_sig.add_template('p', sig*[-.2, -.1, 0, +.1, +.2])\n # Add highly asymmetric systematic uncertainty which looks a lot like the\n # signal. This is a challenging model to fit.\n src_sig.add_syst('s1', sig*[-.06, -.02, 0, +.02, +.06], polarity='up')\n src_sig.add_syst('s1', sig*[-.03, -.01, 0, +.01, +.03], polarity='down')\n # Add another systematic which doesn't look like the signal or the data\n # (should be constrained)\n src_sig.add_syst('s2', sig*[+.02, +.01, 0, +.01, +.02])\n\n # Add a flat-ish background (different shape from signal)\n bg1 = np.array([1600, 1300, 1000, 1000, 1000], dtype=float)\n src_bg1 = meas.new_source('bg1', bg1)\n src_bg1.use_lumi()\n src_bg1.use_stats(.1*(10*bg1)**0.5)\n src_bg1.set_xsec(1, 0.8, 1.1)\n # It is also impacted by systematic 2\n src_bg1.add_syst('s2', bg1*[+.02, +.01, 0, +.01, +.02])\n\n # Add a background not impacted by lumi or stats (e.g. data driven)\n bg2 = np.array([1000, 1000, 1000, 1300, 1600], dtype=float)\n src_bg2 = meas.new_source('bg2', bg2)\n src_bg2.set_xsec(1, 0.9, 1.1)\n\n # Build the spectrum object\n meas.build()\n\n return meas", "def generate_overview_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Overview Tiles:\"\n\n if self.options.profile == 'garmin': # no overview tiles for 'garmin'\n return\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n zcount = 0\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n tcount += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n zcount+=1\n if self.options.resume:\n count_tiles=tcount\n zcount+=1\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n count_tiles += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n i_count = self.tile_exists(0, 0, 0,1)\n if i_count == count_tiles:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; all-tiles [\",zcount,\"] zoom-levels with tiles[\",count_tiles,\"]\"\n return\n ti = 0\n\n # querysize = tilesize * 2\n\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n i_x_column_count=((tmaxx-tminx)+1)\n i_y_column_count=((tmaxy-tminy)+1)\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 140798 ] tmaxx[ 140872 ] ; ((tmaxx-tmaxy)+1) x_tiles[ -35331 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tminx)+1) x_tiles[\",i_x_column_count,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 176204 ] tminy[ 176126 ] ; ((tmaxy-tminy)) y_tiles[ 78 ]\n print \"\\ttz=[\",tz,\"] :ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy)) y_tiles[\",i_y_column_count,\"]\"\n if 
self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n print \"\\tTile generation skipped because of --??? ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"] i_count[\",i_count,\"]\"\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"]\"\n break\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n print \"\\tTile generation skipped because of --??? ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"] i_count[\",i_count,\"]\"\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true 18-140798-176204.jpg\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None\n break\n\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume\"\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n try:\n self.write_overview_tile(tx, ty, tz,self.options.tms_osm)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None" ]
[ "0.7101407", "0.63630533", "0.6335754", "0.6310076", "0.63050276", "0.62810224", "0.613421", "0.6058323", "0.6015545", "0.60110986", "0.59929484", "0.59886414", "0.5925588", "0.58619004", "0.58209044", "0.5820023", "0.5771631", "0.5749931", "0.57344365", "0.5732815", "0.5727649", "0.5715015", "0.57102627", "0.5699488", "0.5685565", "0.568251", "0.56664133", "0.5659533", "0.56561875", "0.5655667" ]
0.7476171
0
generate all data necessary for a complete compute template
def gen_compute_data(self): print "\t* Generating combined nova and neutron data" self.init_compute_clients() self.compute_data["heat_template_version"] = "2013-05-23" self.compute_data["description"] = "Generated Template %s on Project %s" % \ (str(datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")), str(self.tenant_name)) self.compute_data["parameters"] = {} self.compute_data["resources"] = {} self.gen_parameters() self.gen_resources() self.compute_template = self.compute_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate():", "def gen_compute_template(self):\n\n print \"\\t* Generating compute template in file %s\" % self.compute_filename\n if self.cmdline:\n with open(self.compute_filename, 'w') as f:\n f.write(yaml.safe_dump(self.compute_template))\n\n try:\n self.heatclient.stacks.validate(template=yaml.safe_dump(self.compute_template))\n except Exception as e:\n print \"Unfortunately your file is malformed. Received error: (%s)\" % str(e)\n print \"Exiting ...\"\n sys.exit(1)\n\n return self.compute_template", "def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. 
Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def run(self):\n\n print \"\\n\\n\\tPlease Note: Templates are generated based off\"\n print \"\\t of the OS environment variables that are set.\"\n print \"\\t* Running ReHeat.\"\n\n self.set_creds()\n self.gen_ip() # used in template description\n self.gen_tenant_id()\n if self.reheat_error:\n return self.reheat_errmsg\n\n print \"\\t* You have opted to generate %s file[s]\" % self.template_type\n if 'all' in self.template_type:\n self.gen_heat_data()\n self.gen_heat_template()\n self.gen_compute_data()\n return self.gen_compute_template()\n elif 'heat' in self.template_type:\n 
self.gen_heat_data()\n return self.gen_heat_template()\n elif 'compute' in self.template_type:\n self.gen_compute_data()\n return self.gen_compute_template()\n else:\n raise Exception(\"User provided an improper template type.\")", "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n # changes\n changes: Dict[str, Any] = {\n 'ram': False,\n 'cpu': False,\n 'storages': False,\n }\n updates = vm_data['history'][0]\n try:\n if updates['ram_quantity'] is not None:\n # RAM is needed in MB for the updater but we take it in in GB (1024, not 1000)\n changes['ram'] = vm_data['ram'] * 1024\n except KeyError:\n pass\n try:\n if updates['cpu_quantity'] is not None:\n changes['cpu'] = vm_data['cpu']\n except KeyError:\n pass\n # Fetch the drive information for the update\n try:\n if len(updates['storage_histories']) != 0:\n Windows.logger.debug(f'Fetching drives for VM #{vm_id}')\n child_span = opentracing.tracer.start_span('fetch_drive_updates', child_of=span)\n changes['storages'] = Windows.fetch_drive_updates(vm_data)\n child_span.finish()\n except KeyError:\n pass\n # Add changes to data\n data['changes'] = changes\n data['storage_type'] = vm_data['storage_type']\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n # Get the host name of the server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host ip address not found for the server # {vm_data[\"server_id\"]}.'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n # Add the host information to the data\n data['host_name'] = host_name\n # Determine restart\n data['restart'] = vm_data['restart']\n return data", "def generate(self):", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n 
h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def run_template(template, config):\n all_results = {}\n for nodenum, wires in template:\n # Find the modules\n node = template.modules[nodenum]\n module_id = node['module'] #template.modules[node]\n module = lookup_module(module_id)\n inputs = _map_inputs(module, wires)\n \n # substitute values for inputs\n kwargs = dict((k, _lookup_results(all_results, v)) \n for k, v in inputs.items())\n \n # Include configuration information\n kwargs.update(node.get('config', {}))\n kwargs.update(config[nodenum])\n result = module.action(**kwargs)\n# print result\n all_results[nodenum] = result\n \n# return all_results\n# FIXXXXXXXXXXXXXXXXXXXXXX ***********************\n from .offspecular.instruments import convert_to_plottable\n return [convert_to_plottable(value['output']) if 'output' in value else {} for key, value in all_results.items()]", "def generatePredictorDataTemplate(self):\n self.__pdir = Predictor.directory\n self.__predictorData = PredictorData(None)\n self.save()", "def gen_network_parameters(self):\n\n print \"\\t* Adding net and subnet parameters to compute template\"\n\n # add all the routers\n all_routers = self.neutronclient.list_routers()[\"routers\"]\n self.all_ports = self.neutronclient.list_ports()[\"ports\"]\n\n self.tenant_routers = filter(lambda router: router['tenant_id'] == self.tenant_id , all_routers)\n\n for idx, router in enumerate(self.tenant_routers):\n\n router_gateway = router[\"external_gateway_info\"]\n try:\n data = {\"type\": \"string\",\n \"description\": \"ID of public network\",\n \"default\": router_gateway[\"network_id\"]\n }\n self.compute_data[\"parameters\"][\"public_net_%s\" % str(idx)] = data\n except:\n print \"\\t! 
Could not add external_gateway_info for %s\" % router[\"name\"]\n\n networks = self.neutronclient.list_networks()[\"networks\"]\n # filter all networks that match\n filtered_networks = [net for net in networks if (net[\"tenant_id\"] == self.tenant_id or\n (net[\"shared\"] is True) and net['router:external'] is False) and (net[\"name\"] != \"public\")]\n\n # obtain subnet information\n shared_net_id = 0\n for network in filtered_networks:\n for subnet in network[\"subnets\"]:\n if network[\"shared\"] != True:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n\n # generate private net\n # private name\n data = {\"type\": \"string\",\n \"description\": \"Name of network\",\n \"default\": network[\"name\"]}\n self.compute_data[\"parameters\"][\"%s_net_name\" % (network[\"name\"])] = data\n\n # private cidr\n data = {\"type\": \"string\",\n \"description\": \"Network address (CIDR notation)\",\n \"default\": subnet_info[\"cidr\"]}\n self.compute_data[\"parameters\"][\"%s_%s_cidr\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private gateway\n data = {\"type\": \"string\",\n \"description\": \"Network gateway address\",\n \"default\": subnet_info[\"gateway_ip\"]}\n self.compute_data[\"parameters\"][\"%s_%s_gateway\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool start\n data = {\"type\": \"string\",\n \"description\": \"Start of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"start\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_start\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool end\n data = {\"type\": \"string\",\n \"description\": \"End of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"end\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_end\" % (network[\"name\"], subnet_info[\"name\"])] = data\n else:\n print \"\\t* Adding shared network: %s\" % network[\"name\"]\n data = {\"type\": \"string\",\n \"description\": \"ID of detected shared network\",\n \"default\": network[\"id\"]\n }\n self.compute_data[\"parameters\"][\"shared_net_%s\" % str(shared_net_id)] = data\n shared_net_id += 1", "def gen_heat_data(self):\n\n print \"\\t* Generating heat data\"\n self.gen_heat_client()\n stacks = self.heatclient.stacks\n\n print \"\\t? 
Please select the stack to generate a template from\"\n # list stacks and prompt user to select apropriate stack template\n stack_list = []\n for idx, stack in enumerate(stacks.list()):\n print \"\\t - [%d] Stack: %s \\n\" % (idx, stack.stack_name)\n stack_list.append(stack)\n\n stack_num = int(raw_input(\"\\t - \"))\n\n print \"\\t* You have selected: %s\" % stack_list[stack_num].stack_name\n\n # stack id\n self.heat_template = stacks.template(stack_list[stack_num].id)", "def _generate_and_load_initial_batch(self, working_directory: Path):\n\n template_dir = Path(working_directory) / \"template_1\"\n template_dir.mkdir()\n # changes here should often be reflected in\n # data_generator_opts and data_loader_opts\n\n channel_decl = self.channel_configs[0]\n\n plugin_options = {\n \"pid\": \"0\",\n \"big_ids\": \"True\",\n }\n # if it's efficient to do the whole load in one go, let's just do that.\n if self.run_until.gap < MIN_PORTION_SIZE:\n num_records = self.run_until.gap\n else:\n num_records = 1 # smallest possible batch to get to parallelizing fast\n results = self._generate_and_load_batch(\n template_dir,\n channel_decl.org_config,\n {\n \"generator_yaml\": self.options.get(\"recipe\"),\n \"num_records\": num_records,\n \"num_records_tablename\": self.run_until.sobject_name or COUNT_REPS,\n \"loading_rules\": self.loading_rules,\n \"vars\": channel_decl.merge_recipe_options(self.recipe_options),\n \"plugin_options\": plugin_options,\n \"bulk_mode\": self.bulk_mode,\n },\n )\n self.update_running_totals_from_load_step_results(results)\n\n # rename directory to reflect real number of sets created.\n wd = SnowfakeryWorkingDirectory(template_dir)\n if self.run_until.sobject_name:\n self.sets_finished_while_generating_template = wd.get_record_counts()[\n self.run_until.sobject_name\n ]\n else:\n self.sets_finished_while_generating_template = num_records\n\n new_template_dir = data_loader_new_directory_name(template_dir, self.run_until)\n shutil.move(template_dir, new_template_dir)\n template_dir = new_template_dir\n\n # don't send data tables to child processes. All they\n # care about are ID->OID mappings\n wd = SnowfakeryWorkingDirectory(template_dir)\n self._cleanup_object_tables(*wd.setup_engine())\n\n return template_dir, wd.relevant_sobjects()", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def generate(self):\n self.training_data.gen_x(self.x_func)\n self.training_data.gen_a(self.a_func)\n self.training_data.gen_y(self.y_func)\n \n self.testing_data.gen_x(self.x_func)\n self.testing_data.gen_ys(self.y_func)\n self.testing_data.gen_azero(self.ytotal_func)", "def generate(self):\n\n # Load the required datapoints into memory.\n self._load_results()\n\n # Calculate datapoints statistics, like min. and max. 
values.\n self._calc_stats()\n\n # Generate the plots.\n self._generate_scatter_plots()\n self._generate_histograms()\n\n # Put together the final HTML report.\n self._generate_report()", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def generateDerivedData():\n # Empty all derivied tables, in reverse order that they are populated.\n PartOfPerspectives.deleteAll()\n PartOfs.deleteAll()\n RelationshipsTransitive.deleteAll()\n\n # Derive the transitive closure of the relationship graph\n RelationshipsTransitive.regenerateTable()\n\n # Derive the quick tree display\n PartOfs.regenerateTable()\n\n # Derive the part of perspective tree for each perspective\n PartOfPerspectives.regenerateTable()\n\n return", "def generate(self):\n pass", "def generate(self):\r\n raise NotImplementedError", "def gen_data(self,do_print=True,force_gen_inputs=False):\n\n \n if do_print:\n print\n print 'Generating corr space data, id = %s'%self.id\n \n self.post_init(force_gen_inputs=force_gen_inputs)\n self.run()\n self.post_run()", "def calc_template(template_def, config):\n template = Template(**template_def)\n #print \"template_def:\", template_def, \"config:\", config\n try:\n retvals = process_template(template, config, target=(None, None))\n except Exception:\n print(\"==== template ====\"); pprint(template_def)\n print(\"==== config ====\"); pprint(config)\n #traceback.print_exc()\n raise\n output = {}\n for rkey, rv in retvals.items():\n module_id, terminal_id = rkey\n module_key = str(module_id)\n output.setdefault(module_key, {})\n output[module_key][terminal_id] = rv.todict()\n return output", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def generateHtml(self):\n # only the master processor needs to do this\n if not self.master: return\n\n for page in self.layout.pages:\n \n # build the metric dictionary\n metrics = {}\n page.models = []\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n mname = dataset.getncattr(\"name\")\n if mname != \"Benchmark\": page.models.append(mname)\n if not dataset.groups.has_key(page.name): continue\n group = dataset.groups[page.name]\n\n # if the dataset opens, we need to add the model (table row)\n metrics[mname] = {}\n \n # each model will need to have all regions\n for region in self.regions: metrics[mname][region] = {}\n \n # columns in the table will be in the scalars group\n if not group.groups.has_key(\"scalars\"): continue\n \n # we add scalars to the model/region based on the region\n # name being in the variable name. 
If no region is found,\n # we assume it is the global region.\n grp = group.groups[\"scalars\"]\n for vname in grp.variables.keys():\n found = False\n for region in self.regions:\n if region in vname: \n found = True\n var = grp.variables[vname]\n name = vname.replace(region,\"\")\n metrics[mname][region][name] = Variable(name = name,\n unit = var.units,\n data = var[...])\n if not found:\n var = grp.variables[vname]\n metrics[mname][\"global\"][vname] = Variable(name = vname,\n unit = var.units,\n data = var[...])\n page.setMetrics(metrics)\n \n # write the HTML page\n f = file(os.path.join(self.output_path,\"%s.html\" % (self.name)),\"w\")\n f.write(str(self.layout))\n f.close()", "def _get_template_data(snapshot_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n snapshot_id = snapshot_data['id']\n Linux.logger.debug(f'Compiling template data for Snapshot #{snapshot_id}')\n data: Dict[str, Any] = {key: None for key in Linux.template_keys}\n\n data['host_sudo_passwd'] = settings.NETWORK_PASSWORD\n data['snapshot_identifier'] = f'{snapshot_data[\"vm\"][\"id\"]}_{snapshot_data[\"id\"]}'\n data['vm_identifier'] = f'{snapshot_data[\"vm\"][\"project\"][\"id\"]}_{snapshot_data[\"vm\"][\"id\"]}'\n\n # Get the ip address of the host\n host_ip = None\n for interface in snapshot_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_ip = interface['ip_address']\n break\n if host_ip is None:\n error = f'Host ip address not found for the server # {snapshot_data[\"vm\"][\"server_id\"]}'\n Linux.logger.error(error)\n snapshot_data['errors'].append(error)\n return None\n data['host_ip'] = host_ip\n return data", "def template_dataset(self):\n exp_dict = {\n 'experiment_name': 'ALLEN_all_neurons',\n 'only_process_n': None, # Set to None to process all\n 'randomize_selection': True,\n 'reference_image_key': {'proc_stimuli': 'image'},\n 'reference_label_key': {'neural_trace_trimmed': 'label'},\n 'rf_query': [{\n 'rf_coordinate_range': { # Get all cells\n 'x_min': 40,\n 'x_max': 70,\n 'y_min': 20,\n 'y_max': 50,\n },\n 'cre_line': 'Cux2',\n 'structure': 'VISp'}\n ],\n 'cross_ref': 'rf_coordinate_range_and_stimuli',\n 'store_means': [\n 'image',\n 'label'\n ],\n 'cc_repo_vars': {\n 'output_size': [2, 1], # target variable -- neural activity,\n 'model_im_size': [152, 304, 1],\n 'loss_function': 'pearson',\n 'score_metric': 'pearson',\n 'preprocess': 'resize'\n },\n # 'deconv_method': 'elephant'\n }\n exp_dict = self.add_globals(exp_dict)\n return exp_dict" ]
[ "0.6641693", "0.65589625", "0.6509333", "0.6447313", "0.63695467", "0.63346595", "0.62602806", "0.62217873", "0.61809677", "0.61094594", "0.5971302", "0.5969898", "0.59352905", "0.5894241", "0.5867586", "0.5867586", "0.5867586", "0.5855343", "0.581939", "0.57920444", "0.5762273", "0.57621014", "0.5741665", "0.57340163", "0.5723478", "0.57148325", "0.5706595", "0.569528", "0.56838804", "0.5668521" ]
0.8048141
0
instantiate nova and neutron clients
def init_compute_clients(self): print "\t* instantiating clients" # instantiate nova client self.gen_nova_client() # instantiate neutron client self.gen_neutron_client() # instantiate heat client (used to validate templates) self.gen_heat_client()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')", "def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)", "def __init__(self):\n super(NovaClientWrapper, self).__init__(\n retry_exceptions=(nova_exc.ConnectionRefused,\n nova_exc.Conflict),\n auth_exceptions=(nova_exc.Unauthorized),\n name=\"Nova\")", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def __init__(self, project):\n super(NovaExtractor, self).__init__(project)\n\n self.nova = self._get_nova_client()\n self.glance = self._get_glance_client()\n self.neutron = self._get_neutron_client()\n\n self.flavors = self._get_flavors()\n self.images = self._get_images()", "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def nova(self, obj):\n\n if self._novaclient is not None:\n return self._novaclient\n params = self._build_conn_params(obj.user, obj.project)\n self._novaclient = driver_base.SenlinDriver().compute(params)\n return self._novaclient", "def setUp(self):\n self.clnt = CvpClient()\n nodes = [\"1.1.1.1\"]\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)\n self.api = CvpApi(self.clnt)", "def get_nova(self, version='2.1'):\n if self.nova is None:\n self.nova = novaclient.Client(version, session=self.get_session())\n return self.nova", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def prepare_openstack(self):\n # init variables\n security_group = self.os_conn.create_sec_group_for_ssh()\n self.instance_keypair = self.os_conn.create_key(key_name='instancekey')\n\n self.os_conn.nova.security_group_rules.create(security_group.id,\n ip_protocol='tcp',\n from_port=1,\n to_port=65535,\n cidr='0.0.0.0/0')\n\n net, subnet = self.create_internal_network_with_subnet(\n cidr=\"10.1.1.0/24\")\n # create router\n router = self.create_router_between_nets(self.os_conn.ext_network,\n subnet)['router']\n\n self.server = self.os_conn.create_server(\n name='server01',\n key_name=self.instance_keypair.name,\n 
nics=[{'net-id': net['network']['id']}],\n security_groups=[security_group.id])\n\n # add floating ip to first server\n self.floating_ip = self.os_conn.assign_floating_ip(self.server,\n use_neutron=True)\n\n pkeys = self.convert_private_key_for_vm(\n [self.instance_keypair.private_key])\n\n self.check_vm_is_accessible_with_ssh(\n vm_ip=self.floating_ip['floating_ip_address'],\n pkeys=pkeys,\n **self.cirros_creds)\n\n yield\n\n self.server.delete()\n\n wait(lambda: not self.os_conn.nova.servers.findall(id=self.server.id),\n timeout_seconds=2 * 60,\n waiting_for=\"instance to be deleted\")\n\n self.os_conn.neutron.delete_floatingip(self.floating_ip['id'])\n\n self.os_conn.router_interface_delete(router['id'],\n subnet_id=subnet['subnet']['id'])\n self.os_conn.neutron.delete_router(router['id'])\n self.os_conn.neutron.delete_network(net['network']['id'])\n security_group.delete()\n self.instance_keypair.delete()", "def test_nova_create(tiny_cirros_server):\n\n assert ping_from_mnaio(tiny_cirros_server.accessIPv4)\n\n @pytest.mark.testinfra('ansible://' + tiny_cirros_server.accessIPv4)\n @pytest.mark.test_id('57f87c9e-3adb-11e9-b2ca-6a00035510c0')\n @pytest.mark.jira('ASC-31')\n def test_nova_create_connect(host):\n f = host.file('/etc/hosts')\n assert f.exists\n assert f.user == 'root'\n assert f.group == 'root'", "def create_client(self) -> None:\n pass", "def init_host(self, host):\n if self._drv_nodes is None:\n self.set_nodes([nova_conf.host])\n args = (drv_conf.tenant_id, drv_conf.client_id, drv_conf.client_secret,\n drv_conf.subscription_id)\n\n self.compute_client = utils.get_compute_client(*args)\n self.resource_client = utils.get_resource_client(*args)\n self.network_client = utils.get_network_client(*args)\n is_resource_created = utils.check_resource_existence(\n self.resource_client, drv_conf.resource_group)\n if not is_resource_created:\n utils.create_resource_group(\n self.resource_client, drv_conf.resource_group, drv_conf.region)\n\n self.flavor_info.update(\n utils.get_vm_sizes(self.compute_client, drv_conf.region))\n LOG.info(\"%s driver init with %s project, %s region\" %\n (self.name, drv_conf.tenant_id, drv_conf.region))", "def _init_keystone_admin_client(self, api_version):\n self.keystone_sentry = self.d.sentry['keystone'][0]\n keystone_ip = self.keystone_sentry.info['public-address']\n if self._get_openstack_release() >= self.xenial_queens:\n api_version = 3\n client_class = keystone_client.Client\n if api_version == 3:\n client_class = keystone_client_v3.Client\n session, auth = u.get_keystone_session(\n keystone_ip,\n api_version=api_version,\n username='admin',\n password='openstack',\n project_name='admin',\n user_domain_name='admin_domain',\n project_domain_name='admin_domain')\n self.keystone = client_class(session=session)\n self.keystone.auth_ref = auth.get_access(session)", "def _configure_services(self):\n neutron_ovs_config = {}\n neutron_ovs_config['enable-sriov'] = True\n neutron_ovs_config['sriov-device-mappings'] = 'physnet42:eth42'\n\n pxc_config = {\n 'dataset-size': '25%',\n 'max-connections': 1000,\n 'root-password': 'ChangeMe123',\n 'sst-password': 'ChangeMe123',\n }\n nova_cc_config = {'network-manager': 'Neutron'}\n configs = {\n 'neutron-openvswitch': neutron_ovs_config,\n 'percona-cluster': pxc_config,\n 'nova-cloud-controller': nova_cc_config,\n }\n super(NeutronOVSBasicDeployment, self)._configure_services(configs)", "def init_clients(config):\n clients = {\n MongoDBClient(), HackerNewsClient(), UnbabelClient()\n }\n\n for client in clients:\n 
client.initialize(**config)\n\n LOGGER.info(\n \"Initiated {} clients: {}.\".format(\n len(clients),\n \", \".join([type(client).__name__ for client in clients])\n )\n )", "def __init__(self):\n #config.load_kube_config('/home/ubuntu/admin.conf')\n #v1 = client.CoreV1Api()\n #v1_ext = client.ExtensionsV1beta1Api()", "def create(ctx, nova_client, **kwargs):\n\n # For possible changes by _maybe_transform_userdata()\n\n server = {\n 'name': ctx.node_id\n }\n server.update(copy.deepcopy(ctx.properties['server']))\n\n ctx.logger.debug(\n \"server.create() server before transformations: {0}\".format(server))\n\n if server.get('nics'):\n raise ValueError(\"Parameter with name 'nics' must not be passed to\"\n \" openstack provisioner (under host's \"\n \"properties.nova.instance)\".format(k))\n\n _maybe_transform_userdata(server)\n\n if ('management_network_name' in ctx.properties) and ctx.properties['management_network_name']:\n nc = os_common.NeutronClient().get(config=ctx.properties.get('neutron_config'))\n managemenet_network_id = nc.cosmo_get_named('network', ctx.properties['management_network_name'])['id']\n server['nics'] = [{'net-id': managemenet_network_id}]\n else:\n managemenet_network_id = None\n # print(server['nics'])\n\n # Sugar\n if 'image_name' in server:\n server['image'] = nova_client.images.find(name=server['image_name']).id\n del server['image_name']\n if 'flavor_name' in server:\n server['flavor'] = nova_client.flavors.find(name=server['flavor_name']).id\n del server['flavor_name']\n\n _fail_on_missing_required_parameters(\n server,\n ('name', 'flavor', 'image', 'key_name'),\n 'server')\n\n # Multi-NIC by networks - start\n network_nodes_runtime_properties = ctx.capabilities.get_all().values()\n if network_nodes_runtime_properties and 'management_network_name' not in ctx.properties:\n # Known limitation\n raise RuntimeError(\"Nova server with multi-NIC requires 'management_network_name' which was not supplied\")\n nics = [\n {'net-id': n['external_id']}\n for n in network_nodes_runtime_properties\n if neutron_client.cosmo_is_network(n['external_id'])\n ]\n if nics:\n server['nics'] = server.get('nics', []) + nics\n # Multi-NIC by networks - end\n\n # Multi-NIC by ports - start\n port_nodes_runtime_properties = ctx.capabilities.get_all().values()\n if port_nodes_runtime_properties and 'management_network_name' not in ctx.properties:\n # Known limitation\n raise RuntimeError(\"Nova server with multi-NIC requires 'management_network_name' which was not supplied\")\n nics = [\n {'port-id': n['external_id']}\n for n in port_nodes_runtime_properties\n if neutron_client.cosmo_is_port(n['external_id'])\n ]\n if nics:\n server['nics'] = server.get('nics', []) + nics\n # Multi-NIC by ports - end\n\n ctx.logger.debug(\n \"server.create() server after transformations: {0}\".format(server))\n\n # First parameter is 'self', skipping\n params_names = inspect.getargspec(nova_client.servers.create).args[1:]\n\n params_default_values = inspect.getargspec(\n nova_client.servers.create).defaults\n params = dict(itertools.izip(params_names, params_default_values))\n\n # Fail on unsupported parameters\n for k in server:\n if k not in params:\n raise ValueError(\"Parameter with name '{0}' must not be passed to\"\n \" openstack provisioner (under host's \"\n \"properties.nova.instance)\".format(k))\n\n for k in params:\n if k in server:\n params[k] = server[k]\n\n if not params['meta']:\n params['meta'] = dict({})\n params['meta']['cloudify_id'] = ctx.node_id\n 
params['meta']['cloudify_management_network_id'] = managemenet_network_id\n params['meta']['cloudify_management_network_name'] = ctx.properties.get('management_network_name')\n\n ctx.logger.info(\"Asking Nova to create server.\"\n \"Parameters: {0}\".format(str(params)))\n ctx.logger.debug(\"Asking Nova to create server. All possible parameters are: \"\n \"{0})\".format(','.join(params.keys())))\n\n try:\n s = nova_client.servers.create(**params)\n except nova_exceptions.BadRequest as e:\n # ctx.logger.error(e)\n if str(e).startswith(MUST_SPECIFY_NETWORK_EXCEPTION_TEXT):\n raise RuntimeError(\n \"Can not provision server: management_network_name is not \"\n \"specified but there are several networks that the server \"\n \"can be connected to.\"\n )\n raise RuntimeError(\"Nova bad request error: \" + str(e))\n # os.system(\"nova show \" + s.id)\n ctx['external_id'] = s.id", "def _initialize_tests(self, api_version=2):\n # Access the sentries for inspecting service units\n self.pxc_sentry = self.d.sentry['percona-cluster'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.glance_sentry = self.d.sentry['glance'][0]\n self.swift_proxy_sentry = self.d.sentry['swift-proxy'][0]\n self.swift_storage_sentry = self.d.sentry['swift-storage'][0]\n\n u.log.debug('openstack release val: {}'.format(\n self._get_openstack_release()))\n u.log.debug('openstack release str: {}'.format(\n self._get_openstack_release_string()))\n\n # Authenticate admin with keystone\n self._init_keystone_admin_client(api_version)\n\n # Authenticate admin with glance endpoint\n self.glance = u.authenticate_glance_admin(self.keystone)\n\n keystone_ip = self.keystone_sentry.info['public-address']\n keystone_relation = self.keystone_sentry.relation(\n 'identity-service', 'swift-proxy:identity-service')\n\n # Create a demo tenant/role/user\n self.demo_tenant = 'demoTenant'\n self.demo_role = 'demoRole'\n self.demo_user = 'demoUser'\n self.demo_project = 'demoProject'\n self.demo_domain = 'demoDomain'\n\n if (self._get_openstack_release() >= self.xenial_queens or\n api_version == 3):\n self.create_users_v3()\n self.demo_user_session, _ = u.get_keystone_session(\n keystone_ip,\n self.demo_user,\n 'password',\n api_version=3,\n user_domain_name=self.demo_domain,\n project_domain_name=self.demo_domain,\n project_name=self.demo_project\n )\n self.keystone_demo = keystone_client_v3.Client(\n session=self.demo_user_session)\n self.service_session, _ = u.get_keystone_session(\n keystone_ip,\n keystone_relation['service_username'],\n keystone_relation['service_password'],\n api_version=3,\n user_domain_name=keystone_relation['service_domain'],\n project_domain_name=keystone_relation['service_domain'],\n project_name=keystone_relation['service_tenant']\n )\n else:\n self.create_users_v2()\n # Authenticate demo user with keystone\n self.keystone_demo = \\\n u.authenticate_keystone_user(\n self.keystone, user=self.demo_user,\n password='password',\n tenant=self.demo_tenant)\n self.service_session, _ = u.get_keystone_session(\n keystone_ip,\n keystone_relation['service_username'],\n keystone_relation['service_password'],\n api_version=2,\n project_name=keystone_relation['service_tenant']\n )\n self.swift = swiftclient.Connection(session=self.service_session)", "def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?", "def client_setup(self):\n self.client = Client()", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n 
auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "def create_clients():\n clients = {}\n\n rospy.loginfo(\"Waiting for rubble detector\")\n clients['rubble_detect'] = actionlib.SimpleActionClient('rubble_detect',\n RubbleDetectAction)\n\n rospy.loginfo(\"Waiting for rubble checker\")\n clients['rubble_check'] = actionlib.SimpleActionClient('rubble_check',\n RubbleCheckAction)\n clients['rubble_check'].wait_for_server()\n rospy.loginfo(\"rubble_check connected\")\n\n rospy.loginfo(\"Waiting for room searcher\")\n clients['search_room'] = actionlib.SimpleActionClient('search_room',\n SearchRoomAction)\n clients['search_room'].wait_for_server()\n rospy.loginfo(\"search_room connected\")\n\n rospy.loginfo(\"Waiting for navigator\")\n clients['navigation'] = actionlib.SimpleActionClient('navigation',\n NavigateAction)\n clients['navigation'].wait_for_server()\n rospy.loginfo(\"navigation connected\")\n\n rospy.loginfo(\"Waiting for door clearer\")\n clients['rubble_clear'] = actionlib.SimpleActionClient('rubble_clear',\n RubbleClearAction)\n clients['rubble_clear'].wait_for_server()\n rospy.loginfo(\"rubble clear connected\")\n\n rospy.loginfo(\"All clients for policy executor set up\")\n\n return clients", "def __init__(self, url = None, context = \"corbaserver\"):\n self._initOrb (url)\n self._makeClients (\"manipulation\", self.defaultClients, context)", "def dvs_vcenter_multiple_nics(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n networks = []\n router = os_conn.get_router(os_conn.get_network(self.ext_net_name))\n\n self.show_step(2)\n self.show_step(3)\n for net in self.net_data:\n network = os_conn.create_network(network_name=net.keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(net.keys()[0]))\n subnet = os_conn.create_subnet(subnet_name=net.keys()[0],\n network_id=network['id'],\n cidr=net[net.keys()[0]],\n ip_version=4)\n\n # Check that network is created.\n assert_true(\n os_conn.get_network(network['name'])['id'] == network['id'])\n os_conn.add_router_interface(\n router_id=router[\"id\"],\n subnet_id=subnet[\"id\"])\n networks.append(network)\n\n nics = [{'net-id': network['id']} for network in networks]\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n\n _s_groups = os_conn.neutron.list_security_groups()\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups['security_groups']\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n self.show_step(4)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=nics,\n security_groups=[default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(5)\n for instance in instances:\n for net in networks:\n assert_true(os_conn.get_nova_instance_ip(\n instance, net_name=net['name']) is not None)\n\n net_1_name = self.net_data[0].keys()[0]\n net_2_name = self.net_data[1].keys()[0]\n 
ips = {\n net_1_name: {'ips': [], 'access_point_ip': ''},\n net_2_name: {'ips': [], 'access_point_ip': ''}\n }\n\n for net in networks:\n ips[net['name']]['ips'] = map(\n (lambda x:\n os_conn.get_nova_instance_ip(x, net_name=net['name'])),\n instances)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net['id']}],\n security_groups=[default_sg['name']])\n ips[net['name']]['access_point_ip'] = access_point_ip\n\n logger.info(pretty_log(ips))\n\n self.show_step(6)\n cmds = [\"sudo /bin/ip link set up dev eth1\",\n \"sudo /sbin/cirros-dhcpc up eth1\"]\n access_point_ip = ips[net_1_name]['access_point_ip']\n for ip in ips[net_1_name]['ips']:\n openstack.remote_execute_command(access_point_ip, ip, cmds[0])\n openstack.remote_execute_command(access_point_ip, ip, cmds[1])\n\n self.show_step(7)\n for net in networks:\n inst_ips = ips[net['name']]['ips']\n access_point_ip = ips[net['name']]['access_point_ip']\n ip_pair = {ip: [v for v in inst_ips if v != ip] for ip in inst_ips}\n openstack.check_connection_through_host(access_point_ip,\n ip_pair,\n timeout=60 * 5,\n interval=10)", "def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.pxc_sentry = self.d.sentry['percona-cluster'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.cinder_sentry = self.d.sentry['cinder'][0]\n u.log.debug('openstack release val: {}'.format(\n self._get_openstack_release()))\n u.log.debug('openstack release str: {}'.format(\n self._get_openstack_release_string()))\n self.keystone_ip = self.keystone_sentry.relation(\n 'shared-db',\n 'percona-cluster:shared-db')['private-address']\n self.set_api_version(2)\n # Authenticate keystone admin\n self.keystone_v2 = self.get_keystone_client(api_version=2)\n self.keystone_v3 = self.get_keystone_client(api_version=3)\n self.create_users_v2()", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)" ]
[ "0.7710618", "0.7441814", "0.7179079", "0.6937732", "0.68047905", "0.6802051", "0.64828235", "0.64741975", "0.6403919", "0.62170565", "0.62007624", "0.618936", "0.6181813", "0.61522347", "0.611692", "0.6109957", "0.6080008", "0.6060509", "0.5993093", "0.59743154", "0.5966757", "0.5964743", "0.5937109", "0.59094566", "0.5901354", "0.58690107", "0.58254576", "0.5811482", "0.5805494", "0.57813907" ]
0.7575982
1
generate parameters for compute template
def gen_parameters(self): print "\t* Adding parameters to compute template" # get all the server client servers = self.novaclient.servers.list() # add all key_pair_names self.gen_key_name_parameters(servers) # add all images self.gen_image_parameters(servers) # add all flavors self.gen_flavor_parameters(servers) # add all networks self.gen_network_parameters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_network_parameters(self):\n\n print \"\\t* Adding net and subnet parameters to compute template\"\n\n # add all the routers\n all_routers = self.neutronclient.list_routers()[\"routers\"]\n self.all_ports = self.neutronclient.list_ports()[\"ports\"]\n\n self.tenant_routers = filter(lambda router: router['tenant_id'] == self.tenant_id , all_routers)\n\n for idx, router in enumerate(self.tenant_routers):\n\n router_gateway = router[\"external_gateway_info\"]\n try:\n data = {\"type\": \"string\",\n \"description\": \"ID of public network\",\n \"default\": router_gateway[\"network_id\"]\n }\n self.compute_data[\"parameters\"][\"public_net_%s\" % str(idx)] = data\n except:\n print \"\\t! Could not add external_gateway_info for %s\" % router[\"name\"]\n\n networks = self.neutronclient.list_networks()[\"networks\"]\n # filter all networks that match\n filtered_networks = [net for net in networks if (net[\"tenant_id\"] == self.tenant_id or\n (net[\"shared\"] is True) and net['router:external'] is False) and (net[\"name\"] != \"public\")]\n\n # obtain subnet information\n shared_net_id = 0\n for network in filtered_networks:\n for subnet in network[\"subnets\"]:\n if network[\"shared\"] != True:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n\n # generate private net\n # private name\n data = {\"type\": \"string\",\n \"description\": \"Name of network\",\n \"default\": network[\"name\"]}\n self.compute_data[\"parameters\"][\"%s_net_name\" % (network[\"name\"])] = data\n\n # private cidr\n data = {\"type\": \"string\",\n \"description\": \"Network address (CIDR notation)\",\n \"default\": subnet_info[\"cidr\"]}\n self.compute_data[\"parameters\"][\"%s_%s_cidr\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private gateway\n data = {\"type\": \"string\",\n \"description\": \"Network gateway address\",\n \"default\": subnet_info[\"gateway_ip\"]}\n self.compute_data[\"parameters\"][\"%s_%s_gateway\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool start\n data = {\"type\": \"string\",\n \"description\": \"Start of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"start\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_start\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool end\n data = {\"type\": \"string\",\n \"description\": \"End of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"end\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_end\" % (network[\"name\"], subnet_info[\"name\"])] = data\n else:\n print \"\\t* Adding shared network: %s\" % network[\"name\"]\n data = {\"type\": \"string\",\n \"description\": \"ID of detected shared network\",\n \"default\": network[\"id\"]\n }\n self.compute_data[\"parameters\"][\"shared_net_%s\" % str(shared_net_id)] = data\n shared_net_id += 1", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def gen_compute_data(self):\n\n print \"\\t* Generating combined nova and neutron data\"\n self.init_compute_clients()\n self.compute_data[\"heat_template_version\"] = \"2013-05-23\"\n self.compute_data[\"description\"] = \"Generated Template %s on Project %s\" % \\\n (str(datetime.datetime.now().strftime(\"%A, %d. 
%B %Y %I:%M%p\")), str(self.tenant_name))\n self.compute_data[\"parameters\"] = {}\n self.compute_data[\"resources\"] = {}\n self.gen_parameters()\n self.gen_resources()\n self.compute_template = self.compute_data", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def gen_parameters(generator=2,key_size=2048,backend=backend):\n\treturn dh.generate_parameters(generator,key_size,backend)", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(9)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_log: np.ndarray = 4 * np.random.rand(3) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(3)) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n 
self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def generate_params(self, randomize=True):\n pass", "def gen_parameter(self, g, ng, p):\n pass", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_kvhh', 'g_cav', 'g_kca', 'g_nap']\n gX_log: np.ndarray = 4 * np.random.rand(5) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(5)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def gen_params(no_cultures):\n # Plate level\n kn = 0.1 # Nutrient diffusion\n ks = 0.1 # Signal diffusion\n b = 0.05 # Signal on cells effect constant\n a = 0.05 # Signal secretion constant\n # Culture level\n # Growth rate constant\n r_mean = 1.0\n r_var = 1.0\n r_params = [max(0.0, gauss(r_mean, r_var)) for i in range(no_cultures)]\n params = np.array([kn, ks, b, a] + r_params)\n return params", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n 
self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' 
+ static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' 
+ target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }", "def parameters(self):", "def get_hyperparams(self):", "def param_computation(self, param):\n result = {}\n\n result[\"V_t_V\"] = param_computation_V_t_V(param)\n\n if compute_gammas:\n result[\"V_t_g_n\"] = param_computation_V_t_g_n(param)\n if compute_lambdas:\n result[\"V_n_t_V\"] = param_computation_V_n_t_V(param)\n\n param_computation_memory_cleanup(param)\n\n return result", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_name: List[str] = list(itertools.compress(gX_name, list(self.channel_bool.values())[:9]))\n gX_log: np.ndarray = 4 * np.random.rand(len(gX_name)) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(len(gX_name))) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_name: List[str] = list(itertools.compress(gR_name, list(self.channel_bool.values())[9:12]))\n gR_log: np.ndarray = 4 * np.random.rand(len(gR_name)) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(len(gR_name))) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n\n if self.channel_bool['ca']:\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n param_dict.update(tCa_dict)\n\n return param_dict", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. 
'\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params", "def output_parameters(self):\n output_params = get_data_node(\n 'parameter',\n dict={\n 'stress': self.vasprun_obj.ionic_steps[-1]['stress'],\n 'efermi': self.vasprun_obj.efermi,\n 'energy': self.vasprun_obj.final_energy\n })\n return output_params", "def generate_parameters(nid):\n G = EcGroup(nid)\n g = G.hash_to_point(b\"g\")\n o = G.order()\n return (g, o)", "def _gen_policy_params(self, state: State) -> Tensor:\n ...", "def generate():", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def prepare_template_vals(dtype, compute_capability, rounding=False):\n template_vals = dict()\n for key in (\"inits\", \"finish\", \"stats_args\", \"mul_by_scale\", \"atomic_max\", \"cvt_out\"):\n template_vals[key] = \"\"\n\n template_vals[\"common\"] = _common_divmod\n\n if rounding:\n template_vals[\"common\"] += _common_urand_gen\n template_vals[\"common\"] += _common_round[\"nearest\"].get(dtype, \"\")\n template_vals[\"inits\"] += _init_rand_func + _init_rand_round_func\n template_vals[\"finish\"] += _finish_rand_func\n mode = \"random\"\n else:\n mode = \"nearest\"\n\n template_vals[\"common\"] += _common_round[mode].get(dtype, \"\")\n template_vals[\"common\"] += _common_max_abs\n\n if (compute_capability[0] == 3 and 
compute_capability[1] < 5) or compute_capability[0] < 3:\n template_vals[\"common\"] += _common_kepler\n\n template_vals[\"type\"] = _ew_types[dtype][\"type\"]\n template_vals[\"cvt\"] = _ew_types[dtype][\"cvt\"]\n\n if dtype == \"f2\":\n template_vals[\"common\"] += _common_fp16_to_fp32\n template_vals[\"cvt_out\"] = \"fp32_to_fp16\"\n elif dtype == flex_dtype:\n template_vals[\"stats_args\"] += \", int* maxabs\"\n template_vals[\"cvt\"] = \"(float)\"\n template_vals[\"cvt_out\"] = \"fp32_to_int16\"\n template_vals[\"atomic_max\"] += atomic_max\n elif dtype == \"f4\":\n pass\n else:\n raise ValueError(\"Did not understand clss dtype \" + str(dtype))\n\n return template_vals" ]
[ "0.69365746", "0.6322276", "0.6235999", "0.607797", "0.60738146", "0.6066625", "0.60367715", "0.6012754", "0.5991196", "0.59908175", "0.5963455", "0.59469604", "0.59338236", "0.5911403", "0.58955294", "0.58939403", "0.5887018", "0.585859", "0.5838612", "0.5831838", "0.5817446", "0.58103997", "0.580736", "0.57996935", "0.5794917", "0.5791596", "0.5784435", "0.57785165", "0.57746387", "0.5757208" ]
0.78243065
0
generate all the key_pair names and add them to compute_data
def gen_key_name_parameters(self, servers): self.set_of_keys = set(map(lambda server: server.key_name, servers)) key_idx = "" for idx, key_pair in enumerate(self.set_of_keys): data = {"type": "string", "description": "Name of keypair to assign to servers", "default": key_pair} self.compute_data["parameters"]["key_name%s" % key_idx] = data if len(self.set_of_keys) >= 1: key_idx = str(1+idx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_keys():", "def create_key ():", "def createAllKP():\n\tif not os.path.exists(keysDir):\n\t\tos.makedirs(keysDir)\n\tfor info in conf_HVM:\n\t\tkeyName = 'Key-'+info['region']+'-'+info['zone']\n\t\ttry:\n\t\t\tos.remove(keysDir+'/'+keyName+'.pem')\n\t\texcept OSError:\n\t\t\tpass\n\t\tprint \"Key creation :\",keyName\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\t# check if the key pair exists\n\t\tkps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName]\n\t\tif kps:\n\t\t\tec2.delete_key_pair(keyName)\t\n\t\tkey = ec2.create_key_pair(keyName)\n\t\tkey.save(keysDir)", "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def add_key(self, key_list: list) -> None:\n\n for key, funct, desc in key_list:\n # Force keys to be lowercase\n key = key.lower()\n \n self.key_functs[key] = funct\n self.key_satified[key] = False\n self.key_description[key] = desc\n self.key_values[key] = None", "def keys(self, data, installer_context):", "def gen_keys(lname,dsa=False):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n for n in lname:\n if not os.path.isfile('%s/%s.pem'%(d,n)):\n key = Crypto.PublicKey.DSA.generate(512, os.urandom) if dsa else Crypto.PublicKey.RSA.generate(1024,os.urandom)\n open('%s/%s.pem'%(d,n),'w').write(key.exportKey('PEM'))", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def generate_keys(self):\n\n\t\tmin_ext = 1 << self.size_ext - 1\n\t\tmax_ext = 1 << self.size_ext\n\t\t\n\t\t\t\n\t\t# step 1 : chose random primary numbers p and q\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._p = n\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\twhile(n == self._p):\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._q = n\n\n\t\t#step 2 : compute n = pq\n\t\tself.n = self._p * self._q\n\n\t\t#step 3 : compute phi(n)\n\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t#step 4 : chose the exponent\n\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\textension = extension + self.D\n\t\twhile (gcd(self._phi,n) != 1):\n\t\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\tself._d = extension\n\n\t\t#step 5 : compute d (private key)\n\t\tself.e = euclide_algorithm(self._d, 
self._phi)[\"U\"] % self._phi\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def gen_compute_data(self):\n\n print \"\\t* Generating combined nova and neutron data\"\n self.init_compute_clients()\n self.compute_data[\"heat_template_version\"] = \"2013-05-23\"\n self.compute_data[\"description\"] = \"Generated Template %s on Project %s\" % \\\n (str(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")), str(self.tenant_name))\n self.compute_data[\"parameters\"] = {}\n self.compute_data[\"resources\"] = {}\n self.gen_parameters()\n self.gen_resources()\n self.compute_template = self.compute_data", "def gen_keys_old(name):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n if not os.path.isfile('%s/%s.pem'%(d,name)):\n open('%s/%s.pem'%(d,name),'w').write(Crypto.PublicKey.RSA.generate(1024,os.urandom).exportKey('PEM'))", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def keysAll():", "def create_inbound(self, keys):", "def string_to_keypair(self, data): \n return keypair_lst", "def build_dataset(self): \n start_time = datetime.datetime.now()\n self.func_log(\"\\n\\tIn build_dataset()\")\n \n self.dict_feature = {}\n for key,value in self.key_points.items():\n category = []\n buff_time = datetime.datetime.now()\n for img in value:\n histogram = np.zeros(len(self.visual_words))\n for each_feature in img:\n ind = self.find_index(each_feature, self.visual_words)\n histogram[ind] += 1\n category.append(histogram)\n self.dict_feature[key] = category\n \n buff_time = datetime.datetime.now() - buff_time\n self.func_log(\"\\t\\tKEY: {} finish, Time cose:{}\".format(key, buff_time))\n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))", "def generate_keystream(self):", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def derive_keys(self, master_salt, master_secret):\n\n self.sender_key = self._kdf(master_salt, master_secret, self.sender_id, 'Key')\n self.recipient_key = self._kdf(master_salt, master_secret, self.recipient_id, 'Key')\n\n self.common_iv = self._kdf(master_salt, master_secret, b\"\", 'IV')", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def prepare_data_keys(primary_master_key, master_keys, 
algorithm, encryption_context):\n encrypted_data_keys = set()\n encrypted_data_encryption_key = None\n data_encryption_key = primary_master_key.generate_data_key(algorithm, encryption_context)\n _LOGGER.debug(\"encryption data generated with master key: %s\", data_encryption_key.key_provider)\n for master_key in master_keys:\n # Don't re-encrypt the encryption data key; we already have the ciphertext\n if master_key is primary_master_key:\n encrypted_data_encryption_key = EncryptedDataKey(\n key_provider=data_encryption_key.key_provider, encrypted_data_key=data_encryption_key.encrypted_data_key\n )\n encrypted_data_keys.add(encrypted_data_encryption_key)\n continue\n encrypted_key = master_key.encrypt_data_key(\n data_key=data_encryption_key, algorithm=algorithm, encryption_context=encryption_context\n )\n encrypted_data_keys.add(encrypted_key)\n _LOGGER.debug(\"encryption key encrypted with master key: %s\", master_key.key_provider)\n return data_encryption_key, encrypted_data_keys", "def make_external_key(self, data):\n return data['key']", "def generatePreKeys(start, count):\n results = []\n start -= 1\n for i in range(0, count):\n preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1\n results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair()))\n\n return results", "def add_key_points_output_locations(self, key_points,nodeIds,distanceNodes, elementIds, distanceElements): \n \n \n if (key_points != []):\n try: self.key_points.createDimension('number_of_key_points',len(key_points))\n except Exception, e: print \"WARNING: %s\" % e\n \n try: self.key_points.createDimension('dimensions',2)\n except Exception, e: print \"WARNING: %s\" % e\n \n try: nodesVar = self.key_nodes.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_key_points',))\n except Exception, e:\n nodesVar = self.key_nodes.variables['id']\n \n try: elementsVar = self.key_elements.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_key_points',))\n except Exception, e:\n elementsVar = self.key_elements.variables['id'] \n \n \n \n try: distanceNodesVar = self.key_nodes.createVariable(varname = 'distance',datatype = 'f', dimensions=('number_of_key_points',))\n except Exception, e:\n distanceNodesVar = self.key_nodes.variables['distance']\n \n \n try: distanceElementsVar = self.key_elements.createVariable(varname = 'distance',datatype = 'f', dimensions=('number_of_key_points',))\n except Exception, e:\n distanceElementsVar = self.key_elements.variables['distance'] \n \n nodesVar[:] = array(nodeIds)\n elementsVar[:] = array(elementIds) \n distanceNodesVar[:] = array(distanceNodes)\n distanceElementsVar[:] = array(distanceElements) \n \n xy = []\n description = []\n name = []\n\n for p in key_points:\n xy.append([p[0],p[1]])\n name.append(p[2])\n description.append(p[3])\n \n try: xyVar = self.key_points.createVariable(varname = 'xy',datatype = 'f', dimensions=('number_of_key_points','dimensions'))\n except Exception, e:\n xyVar = self.key_points.variables['xy']\n \n try: desciptionVar = self.key_points.createVariable(varname = 'description',datatype = str, dimensions=('number_of_key_points',))\n except Exception, e:\n desciptionVar = self.key_points.variables['description'] \n\n try: nameVar = self.key_points.createVariable(varname = 'name',datatype = str, dimensions=('number_of_key_points',))\n except Exception, e:\n nameVar = self.key_points.variables['name'] \n \n xyVar[:] = array(xy)\n \n #add the description data\n data = empty(len(description),'O')\n i = 0\n for d in description:\n 
data[i] = d\n i+=1 \n desciptionVar[:] = data \n \n data = empty(len(name),'O')\n i = 0\n for d in name:\n data[i] = d\n i+=1 \n nameVar[:] = data\n self.keyPointsAdded = True \n \n else: \n\n try: self.key_points.createDimension('number_of_key_points',0)\n except Exception, e: print \"WARNING: %s\" % e\n \n self.keyPointsAdded = True", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return" ]
[ "0.71171194", "0.6204172", "0.6186806", "0.5927455", "0.5921931", "0.58557147", "0.5815631", "0.5814711", "0.5783215", "0.5774029", "0.5745288", "0.5698118", "0.569271", "0.5669536", "0.5652372", "0.56495035", "0.5632463", "0.55735445", "0.5555118", "0.5534517", "0.5503742", "0.54966766", "0.5478865", "0.54645", "0.5439511", "0.54073656", "0.5400219", "0.53773963", "0.5352374", "0.5331304" ]
0.6606572
1
Generate all the network parameters
def gen_network_parameters(self): print "\t* Adding net and subnet parameters to compute template" # add all the routers all_routers = self.neutronclient.list_routers()["routers"] self.all_ports = self.neutronclient.list_ports()["ports"] self.tenant_routers = filter(lambda router: router['tenant_id'] == self.tenant_id , all_routers) for idx, router in enumerate(self.tenant_routers): router_gateway = router["external_gateway_info"] try: data = {"type": "string", "description": "ID of public network", "default": router_gateway["network_id"] } self.compute_data["parameters"]["public_net_%s" % str(idx)] = data except: print "\t! Could not add external_gateway_info for %s" % router["name"] networks = self.neutronclient.list_networks()["networks"] # filter all networks that match filtered_networks = [net for net in networks if (net["tenant_id"] == self.tenant_id or (net["shared"] is True) and net['router:external'] is False) and (net["name"] != "public")] # obtain subnet information shared_net_id = 0 for network in filtered_networks: for subnet in network["subnets"]: if network["shared"] != True: subnet_info = self.neutronclient.show_subnet(subnet)["subnet"] # generate private net # private name data = {"type": "string", "description": "Name of network", "default": network["name"]} self.compute_data["parameters"]["%s_net_name" % (network["name"])] = data # private cidr data = {"type": "string", "description": "Network address (CIDR notation)", "default": subnet_info["cidr"]} self.compute_data["parameters"]["%s_%s_cidr" % (network["name"], subnet_info["name"])] = data # private gateway data = {"type": "string", "description": "Network gateway address", "default": subnet_info["gateway_ip"]} self.compute_data["parameters"]["%s_%s_gateway" % (network["name"], subnet_info["name"])] = data # private pool start data = {"type": "string", "description": "Start of network IP address allocation pool", "default": subnet_info["allocation_pools"][0]["start"]} self.compute_data["parameters"]["%s_%s_pool_start" % (network["name"], subnet_info["name"])] = data # private pool end data = {"type": "string", "description": "End of network IP address allocation pool", "default": subnet_info["allocation_pools"][0]["end"]} self.compute_data["parameters"]["%s_%s_pool_end" % (network["name"], subnet_info["name"])] = data else: print "\t* Adding shared network: %s" % network["name"] data = {"type": "string", "description": "ID of detected shared network", "default": network["id"] } self.compute_data["parameters"]["shared_net_%s" % str(shared_net_id)] = data shared_net_id += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_parameters(self, graph, memory_manager):\n # Generate Node Parameters\n parameter_header = \"#ifndef NETWORK_PARAMETERS_H\\n\"\n parameter_header += \"#define NETWORK_PARAMETERS_H\\n\"\n parameter_header += \"#include \\\"pico-cnn/parameters.h\\\"\\n\\n\"\n parameter_code = \"#include \\\"network_parameters.h\\\"\\n\\n\"\n for node in graph.nodes:\n for num, input in enumerate(node.input_tensors):\n buffer = memory_manager.get_buffer(graph, input)\n data = node.input_tensors[input]\n\n # if node.op_type == \"Gemm\":\n # data = data.transpose()\n\n type_code = \"fp_t \" + buffer.name + \"[]\"\n declaration = \"// \" + str(data.shape) + \"\\n\"\n declaration += \"extern \" + type_code + \";\"\n definition = type_code + \" = {\" + \",\".join((str(x) for x in data.flatten())) + \"};\"\n\n parameter_code += definition + \"\\n\\n\"\n parameter_header += declaration + \"\\n\\n\"\n\n parameter_header += \"#endif \\n\"\n\n self.parameter_header = parameter_header\n self.parameter_code = parameter_code", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def initialize(self):\n\n\t\tparameters = {}\n\t\tL = len(self.layer_dims) # number of layers in the network\n\n\t\tfor l in range(1, L):\n\t\t\tparameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1]) * 0.01\n\t\t\tparameters['b' + str(l)] = np.zeros((self.layer_dims[l], 1))\n\n\t\t\tassert(parameters['W' + str(l)].shape == (self.layer_dims[l], self.layer_dims[l-1]))\n\t\t\tassert(parameters['b' + str(l)].shape == (self.layer_dims[l], 1))\n\n\t\treturn parameters", "def create_random(self):\n number_of_layers = random.choice(self.parameter_choices['number_of_layers'])\n neurons_per_layer = []\n dropout_per_layer = []\n self.network['number_of_layers'] = number_of_layers\n\n for i in range(number_of_layers):\n neurons_per_layer.append(random.choice(self.parameter_choices['neurons_per_layer']))\n dropout_per_layer.append(random.choice(self.parameter_choices['dropout_per_layer']))\n\n self.network['neurons_per_layer'] = neurons_per_layer\n self.network['dropout_per_layer'] = dropout_per_layer\n self.network['optimizer'] = random.choice(self.parameter_choices['optimizer'])\n self.network['activation'] = random.choice(self.parameter_choices['activation'])", "def generate_params(sw):\n\n # List of vlan ids to use for this permutation\n vlan_ids = []\n # Physical ports required for this permutation per L3 interface\n phy_ports = []\n # L3 interfaces to be created\n l3_interfaces = 0\n # List of ip address required for this permutation\n ip_address_sw = []\n # List of ip address for every host\n ip_address_hs = []\n # VxLAN interfaces to be created\n vxlan_ids = []\n # VNIs to be created\n vnis = {}\n # VTEP Peers to be created\n vtep_peers = []\n\n vlan_ids = [VLAN1, VLAN2]\n vxlan_ids = [TUN_NUM]\n vnis = {VNI: {'vlan': [VLAN1], 'vtep_peer': [H2_IP]}}\n l3_interfaces = 1\n phy_ports = [sw.vtysh_ports['if01'], sw.vtysh_ports['if02']]\n ip_address_sw = [S1_IP]\n ip_address_hs = [H1_IP, H2_IP]\n vtep_peers = [H2_IP]\n\n return {'vlan_ids': vlan_ids,\n 'vxlan_ids': vxlan_ids,\n 'vnis': vnis,\n 'vtep_peers': vtep_peers,\n 'l3_interfaces': l3_interfaces,\n 'phy_ports': 
phy_ports,\n 'ip_address_sw': ip_address_sw,\n 'ip_address_hs': ip_address_hs}", "def init_parameters(self):\n # Create the weights and biases\n for i in range(1, len(self.layer_dimensions)):\n # Initialization from He et al.\n mu = 0\n var = 2 / self.layer_dimensions[i]\n sigma = np.sqrt(var)\n weight_shape = (self.layer_dimensions[i - 1], self.layer_dimensions[i])\n weight = np.random.normal(loc=mu, scale=sigma, size=weight_shape)\n bias = np.zeros((self.layer_dimensions[i], ))\n\n # Saving in the parameters dict\n layer_weight = \"w_\" + str(i)\n self._parameters[layer_weight] = weight\n layer_b = \"b_\" + str(i)\n self._parameters[layer_b] = bias", "def get_network_params(self):\n with tf.variable_scope(\"get_network_params\"):\n baseline_vars = self.baselineNet.get_network_params()\n reflownet_vars = [\n x for x in tf.trainable_variables() if \"superflow\" in x.name\n ]\n return baseline_vars + reflownet_vars", "def generate_params(sw):\n\n # List of vlan ids to use for this permutation\n vlan_ids = []\n # Physical ports required for this permutation per L3 interface\n phy_ports = []\n # L3 interfaces to be created\n l3_interfaces = 0\n # List of ip address for every host\n ip_address_hs = []\n # VxLAN interfaces to be created\n vxlan_ids = []\n # VNIs to be created\n vnis = {}\n # VTEP Peers to be created\n vtep_peers = []\n\n vlan_ids = [VLAN1, VLAN2, VLAN3]\n vxlan_ids = [TUN_NUM]\n vnis = {VNI: {'vlan': [VLAN1], 'vtep_peer': [VTEP_PEER_IP]}}\n l3_interfaces = 1\n phy_ports = [sw.vtysh_ports['if01'], sw.vtysh_ports['if02'],\n sw.vtysh_ports['if03'], sw.vtysh_ports['if04']]\n ip_address_hs = [H3_IP, H4_IP]\n vtep_peers = [H3_IP, H4_IP]\n\n return {'vlan_ids': vlan_ids,\n 'vxlan_ids': vxlan_ids,\n 'vnis': vnis,\n 'vtep_peers': vtep_peers,\n 'l3_interfaces': l3_interfaces,\n 'phy_ports': phy_ports,\n 'ip_address_hs': ip_address_hs}", "def _write_model_params(self, sess):\n for nn in range(self.num_networks):\n self.networks[nn].write_model_params(sess)", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_name: List[str] = list(itertools.compress(gX_name, list(self.channel_bool.values())[:9]))\n gX_log: np.ndarray = 4 * np.random.rand(len(gX_name)) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(len(gX_name))) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_name: List[str] = list(itertools.compress(gR_name, list(self.channel_bool.values())[9:12]))\n gR_log: np.ndarray = 4 * np.random.rand(len(gR_name)) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(len(gR_name))) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n\n if self.channel_bool['ca']:\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n param_dict.update(tCa_dict)\n\n return param_dict", "def generate_params(self, randomize=True):\n pass", "def parameters(self):\n return NeuralNetwork.flatten([module.parameters() for module in self.modules])", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(9)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n 
gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_log: np.ndarray = 4 * np.random.rand(3) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(3)) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 0.3\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 1e-7\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind 
in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def print_networks(self, verbose):\n print('---------- Networks initialized -------------')\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n if verbose:\n print(net)\n print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n print('-----------------------------------------------')", "def parameters(self):\n res = dict()\n res[\"population_size\"] = self.population_size\n res[\"mutation_prob\"] = self.mutation_prob\n res[\"crossover\"] = self.crossover\n res[\"selection\"] = self.selection\n res[\"sigma\"] = self.sigma\n res[\"crossover_method\"] = self.crossover_method\n res[\"selection_method\"] = self.selection_method\n res[\"best_rate\"] = self.best_rate\n res[\"n_parents\"] = self.n_parents\n res[\"model_parameters\"] = self.model.total_parameters()\n res[\"IDCT_from\"] = self.IDCT_from\n res[\"elitism\"] = self.elitism\n return res", "def set_parameters(self, We1,be1, We2, be2, We3, be3, Wmu, bmu, Wstd, bstd, Wd1, bd1, Wd2, bd2, Wd3, bd3):\r\n self.en_fc1.weight=nn.Parameter(We1)\r\n self.en_fc1.bias=nn.Parameter(be1)\r\n \r\n self.en_fc2.weight=nn.Parameter(We2)\r\n self.en_fc2.bias=nn.Parameter(be2)\r\n \r\n self.en_fc3.weight=nn.Parameter(We3)\r\n self.en_fc3.bias=nn.Parameter(be3)\r\n \r\n self.en_mu.weight=nn.Parameter(Wmu)\r\n self.en_mu.bias=nn.Parameter(bmu)\r\n \r\n self.en_log.weight=nn.Parameter(Wstd)\r\n self.en_log.bias=nn.Parameter(bstd)\r\n \r\n self.de_fc1.weight=nn.Parameter(Wd1)\r\n self.de_fc1.bias=nn.Parameter(bd1)\r\n \r\n self.de_fc2.weight=nn.Parameter(Wd2)\r\n self.de_fc2.bias=nn.Parameter(bd2)\r\n \r\n self.de_fc3.weight=nn.Parameter(Wd3)\r\n self.de_fc3.bias=nn.Parameter(bd3)\r\n \r\n return", "def initialize_parameters(n_a,n_x,n_y):\n np.random.seed(1)\n Wax=np.random.randn(n_a,n_x)*0.01 #input to hidden\n Waa=np.random.randn(n_a,n_a)*0.01 #hidden to hidden\n Wya=np.random.randn(n_y,n_a)*0.01 #hidden to output\n b=np.zeros((n_a,1)) #hidden bias\n by=np.zeros((n_y,1)) #output bias\n \n parameters={\"Wax\":Wax,\"Waa\":Waa,\"Wya\":Wya,\"b\":b,\"by\":by}\n return parameters", "def get_prob_params():\n prob = Namespace()\n prob.study_name = STUDY_NAME\n if IS_DEBUG:\n prob.num_trials = 3\n prob.max_capital = 10\n else:\n prob.num_trials = NUM_TRIALS\n prob.max_capital = MAX_CAPITAL\n # Common\n prob.time_distro = TIME_DISTRO\n prob.num_workers = NUM_WORKERS\n _study_params = {\n 'branin': ('synthetic/branin/config_mf.json',\n branin_mf, cost_branin_mf, 0.1, 0, 1),\n 'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',\n hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),\n 
'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',\n hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),\n 'borehole_6': ('synthetic/borehole_6/config_mf.json',\n borehole_6_mf, cost_borehole_6_mf, 1, 0, 1),\n 'park2_4': ('synthetic/park2_4/config_mf.json',\n park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),\n 'park2_3': ('synthetic/park2_3/config_mf.json',\n park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),\n 'park1_3': ('synthetic/park1_3/config_mf.json',\n park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),\n }\n (domain_config_file_suffix, raw_func, raw_fidel_cost_func, _fc_noise_scale,\n _initial_pool_size, _) = _study_params[prob.study_name]\n domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR, domain_config_file_suffix)\n # noisy\n prob.noisy_evals = NOISY_EVALS\n if NOISY_EVALS:\n noise_type = 'gauss'\n noise_scale = _fc_noise_scale\n else:\n noise_type = 'no_noise'\n noise_scale = None\n # Create domain, function_caller and worker_manager\n config = load_config_file(domain_config_file)\n func_caller = get_multifunction_caller_from_config(raw_func, config,\n raw_fidel_cost_func=raw_fidel_cost_func, noise_type=noise_type,\n noise_scale=noise_scale)\n # Set max_capital\n if hasattr(func_caller, 'fidel_cost_func'):\n prob.max_capital = prob.max_capital * \\\n func_caller.fidel_cost_func(func_caller.fidel_to_opt)\n else:\n prob.max_capital = prob.max_capital\n # Store everything in prob\n prob.func_caller = func_caller\n prob.worker_manager = SyntheticWorkerManager(prob.num_workers,\n time_distro='caller_eval_cost')\n prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')\n prob.methods = METHODS\n prob.save_results_dir = SAVE_RESULTS_DIR\n prob.reporter = get_reporter('default')\n # evaluation options\n prob.evaluation_options = Namespace(prev_eval_points='none',\n initial_pool_size=_initial_pool_size)\n return prob", "def get_params(self, deep=False):\n return {\"alpha\": self.alpha, \"beta\": self.beta, \"gamma\": self.gamma, \"W\": self.W, \"bias\": self.bias, \"add_bias\": self.add_bias, \"opts\": self.opts}", "def initialize(self):\n params = {}\n for i in range(1, len(self.layer_dimensions)):\n params['b_' + str(i)] = np.ones((self.layer_dimensions[i], 1))\n if self.he_initialization:\n params['W_' + str(i)] = np.random.randn(self.layer_dimensions[i],\n self.layer_dimensions[i - 1]) * np.sqrt(\n 2 / self.layer_dimensions[i - 1])\n else:\n params['W_' + str(i)] = np.random.rand(self.layer_dimensions[i], self.layer_dimensions[i - 1]) - 0.5\n return params", "def _assign_model_params(self, sess):\n with self.graph.as_default():\n for nn in range(self.num_networks):\n self.networks[nn].assign_model_params(sess)", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_kvhh', 'g_cav', 'g_kca', 'g_nap']\n gX_log: np.ndarray = 4 * np.random.rand(5) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(5)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)" ]
[ "0.7226558", "0.7029073", "0.690551", "0.66989124", "0.66788185", "0.66468537", "0.6645865", "0.6633763", "0.66186553", "0.6609584", "0.6594055", "0.65576833", "0.652973", "0.6528494", "0.6527197", "0.6527197", "0.6520805", "0.65189373", "0.65100914", "0.6491519", "0.64554095", "0.6421126", "0.6412299", "0.64094824", "0.64001703", "0.6379222", "0.6374772", "0.63692063", "0.63652074", "0.63505644" ]
0.72256935
1
Generate all the resources
def gen_resources(self): print "\t* Adding resources to compute template" # add all the nets and subnets self.gen_net_resources() # add all routers self.gen_router_resources() # add all servers/instances self.gen_server_resources()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resources(self):", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )", "def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))", "def load_all_resources():\n\n # Load the fonts\n ResourcesManager._load_font(\"Munro.ttf\")\n\n # Load images\n ResourcesManager.HIBER_NATION_IMG = ResourcesManager._load_image(\"hiber_nation.png\")\n ResourcesManager.SHIP_IMG = ResourcesManager._load_image(\"ship.png\")\n ResourcesManager.MISSILE_IMG = ResourcesManager._load_image(\"missile.png\")\n\n # Load sounds\n # ResourcesManager.MENU_MUSIC = ResourcesManager._load_sound(\"menu.ogg\")", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)", "def _build_resources_template(self, output_filename=\"{}_r.json\"):\n\n template = self._base_troposphere_template()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_resources_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_resources_template(template)\n\n template = utils.fix_troposphere_references(template)\n\n if template and template.resources:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json())", "def create_assets():\n assets = {}\n\n # Load all static files\n for root, dirs, files in os.walk(STATIC_DIR):\n for fname in files:\n filename = os.path.join(root, fname)\n with open(filename, \"rb\") as f:\n assets[os.path.relpath(filename, STATIC_DIR)] = f.read()\n\n # Collect pages\n pages = {}\n for fname in os.listdir(PAGES_DIR):\n if fname.lower().endswith(\".md\"):\n name = fname.split(\".\")[0].lower()\n with open(os.path.join(PAGES_DIR, fname), \"rb\") as f:\n md = f.read().decode()\n pages[name] = Page(name, md)\n\n # todo: Collect blog posts\n\n # Get template\n with open(os.path.join(THIS_DIR, 
\"template.html\"), \"rb\") as f:\n html_template = f.read().decode()\n\n with open(os.path.join(THIS_DIR, \"style.css\"), \"rb\") as f:\n css = f.read().decode()\n css += \"/* Pygments CSS */\\n\" + HtmlFormatter(style=\"vs\").get_style_defs(\n \".highlight\"\n )\n\n # Generate pages\n year = datetime.now().year\n for page in pages.values():\n page.prepare(pages.keys())\n title = TITLE if page.name == \"index\" else TITLE + \" - \" + page.name\n menu = create_menu(page)\n html = html_template.format(\n title=title, style=css, body=page.to_html(), menu=menu, year=year\n )\n print(\"generating\", page.name + \".html\")\n assets[page.name + \".html\"] = html.encode()\n\n # Fix backslashes on Windows\n for key in list(assets.keys()):\n if \"\\\\\" in key:\n assets[key.replace(\"\\\\\", \"/\")] = assets.pop(key)\n\n return assets", "def assets():", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def resources():\n check_resources()", "def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')", "def _generate_objects_file(self):\n xmls = glob(f'{ROOT}/Annotations/**/*.xml', recursive=True)", "def resources(self):\n return self.__resources", "def resources(self) -> Generator[Tuple[str, str, Dict[str, Any]], None, None]:\n for path, methods in self.data[\"paths\"].items():\n for method in methods:\n if method.lower() not in HTTP_VERBS:\n continue\n\n yield method, path, methods", "def get_resources():\n return Response(f\"{Resource.get_all_resources()}\", 200, mimetype='text/plain')", "def resources(self):\n return [self]", "def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))", "def gen_static(self, output_folder):\n files = []\n for l in self.file_listers:\n files += l()\n for f in files:\n _logger.info(\"generating %s\" % f)\n content = self.get(f)\n loc = os.path.join(output_folder, f)\n d = os.path.dirname(loc)\n if not os.path.exists(d):\n os.makedirs(d)\n with open(loc, \"wb\") as file_:\n file_.write(content)", "def get_resources(minify=False):\n all_resources = dict()\n subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()\n for resource in subclasses:\n obj = resource(minify)\n all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))\n return all_resources", "def _create_assets(self):\n\n assets = Environment(self.app)\n # jQuery is served as a standalone file\n jquery = Bundle('js/jquery-*.min.js', output='gen/jquery.min.js')\n # JavaScript is combined into one file and minified\n js_all = Bundle('js/js_all/*.js',\n filters='jsmin',\n output='gen/app.min.js')\n # SCSS (Sassy CSS) is compiled to CSS\n scss_all = Bundle('scss/app.scss',\n filters='libsass',\n output='gen/app.css')\n assets.register('jquery', jquery)\n assets.register('js_all', js_all)\n assets.register('scss_all', 
scss_all)\n return assets", "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def generate():", "def build_all(self):\n self.android_build()\n self.generate_patch_build('')\n self.generate_specs_build()\n self.generate_interfaces()", "def get_resources(self):\n return []", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)", "def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = 
self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data" ]
[ "0.7696419", "0.6876106", "0.6563454", "0.6495365", "0.643579", "0.64304537", "0.64114857", "0.6384996", "0.6342217", "0.63224685", "0.62869084", "0.62854594", "0.6255035", "0.62494004", "0.62340724", "0.6227272", "0.62082005", "0.62045", "0.61928815", "0.6183419", "0.61670965", "0.61634964", "0.611422", "0.61099297", "0.61054826", "0.607997", "0.60752994", "0.6067303", "0.6061453", "0.6061371" ]
0.8030165
0
Generate all net and subnet resources
def gen_net_resources(self): print "\t* Adding net and subnet resources to compute template" networks = self.neutronclient.list_networks()["networks"] # filter all networks that match filtered_networks = [net for net in networks if (net["tenant_id"] == self.tenant_id or (net["shared"] == True) and net['router:external'] == False)] # obtain subnet information for network in filtered_networks: if network["shared"] is not True: for subnet in network["subnets"]: subnet_info = self.neutronclient.show_subnet(subnet)["subnet"] # save this information for router interfaces self.all_nets.append((subnet_info, "%s" % network["name"], "%s" % subnet_info["name"])) # generate private net data = {"type": "OS::Neutron::Net", "properties": {"name": {"get_param": "%s_%s_name" % (network["name"], "net")} } } start_ = {"get_param": "%s_%s_pool_start" % (network["name"], subnet_info["name"])} data2 = {"type": "OS::Neutron::Subnet", "properties": { "name": subnet_info["name"], "network_id": {"get_resource": "%s" % network["name"]}, "cidr": {"get_param": "%s_%s_cidr" % (network["name"], subnet_info["name"])}, "gateway_ip": {"get_param": "%s_%s_gateway" % (network["name"], subnet_info["name"])}, "allocation_pools": [ {"start": start_, "end": {"get_param": "%s_%s_pool_end" % (network["name"], subnet_info["name"])}} ] } } self.compute_data["resources"]["%s" % network["name"]] = data self.compute_data["resources"]["%s" % subnet_info["name"]] = data2 else: # add shared network to the full list of networks for subnet in network["subnets"]: subnet_info = self.neutronclient.show_subnet(subnet)["subnet"] self.all_nets.append((subnet_info, "%s" % network["name"], "%s" % subnet_info["name"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_network_resources(self, tenant_id):\n logger.info(\"Creating network resources...\")\n net_name = \"ostf-autoscaling-test-service-net\"\n net_body = {\n \"network\": {\n \"name\": net_name,\n \"tenant_id\": tenant_id\n }\n }\n ext_net = None\n net = None\n for network in self.neutron_cli.list_networks()[\"networks\"]:\n if not net and network[\"name\"] == net_name:\n net = network\n if not ext_net and network[\"router:external\"]:\n ext_net = network\n if not net:\n net = self.neutron_cli.create_network(net_body)[\"network\"]\n subnet = self.helpers.os_conn.create_subnet(\n \"sub\" + net_name, net[\"id\"], \"10.1.7.0/24\", tenant_id=tenant_id\n )\n router_name = 'ostf-autoscaling-test-service-router'\n router = self.helpers.os_conn.create_router(\n router_name, self.helpers.os_conn.get_tenant(\"admin\"))\n self.neutron_cli.add_interface_router(\n router[\"id\"], {\"subnet_id\": subnet[\"id\"]})\n return net[\"id\"]", "def add_subnets(self, router_name, netname):\n for subnet in self.router_data['properties']['networks'].keys():\n resource = str(router_name + '_' + subnet)\n subnet_resource = OrderedDict({ \n resource: {\n 'type': 'OS::Neutron::Subnet',\n 'properties': {\n 'name': resource,\n 'network_id': { \n 'get_resource': netname, \n },\n 'cidr': { \n 'get_param': resource + '_net_cidr'\n },\n 'gateway_ip': { \n 'get_param': resource + '_net_gateway'\n },\n 'allocation_pools': [{\n 'start': { 'get_param': resource + '_net_pool_start' },\n 'end': { 'get_param': resource + '_net_pool_end' }\n }],\n }\n }\n })\n self.template['resources'].update(subnet_resource)\n cidr = self.set_cidr(subnet)\n gw = self.set_gatewayIP(subnet, cidr)\n self.template['parameters'].update(OrderedDict({\n resource + '_net_cidr': {\n 'type': 'string',\n 'default': cidr\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_gateway': {\n 'type': 'string',\n 'default': gw\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_pool_start': {\n 'type': 'string',\n 'default': self.set_dhcp_pools(cidr)[0]\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_pool_end': {\n 'type': 'string',\n 'default': self.set_dhcp_pools(cidr)[1]\n }}))", "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def gen_network_parameters(self):\n\n print \"\\t* Adding net and subnet parameters to compute template\"\n\n # add all the routers\n all_routers = self.neutronclient.list_routers()[\"routers\"]\n self.all_ports = self.neutronclient.list_ports()[\"ports\"]\n\n self.tenant_routers = filter(lambda router: router['tenant_id'] == self.tenant_id , all_routers)\n\n for idx, router in enumerate(self.tenant_routers):\n\n router_gateway = router[\"external_gateway_info\"]\n try:\n data = {\"type\": \"string\",\n \"description\": \"ID of public network\",\n \"default\": router_gateway[\"network_id\"]\n }\n self.compute_data[\"parameters\"][\"public_net_%s\" % str(idx)] = data\n except:\n print \"\\t! 
Could not add external_gateway_info for %s\" % router[\"name\"]\n\n networks = self.neutronclient.list_networks()[\"networks\"]\n # filter all networks that match\n filtered_networks = [net for net in networks if (net[\"tenant_id\"] == self.tenant_id or\n (net[\"shared\"] is True) and net['router:external'] is False) and (net[\"name\"] != \"public\")]\n\n # obtain subnet information\n shared_net_id = 0\n for network in filtered_networks:\n for subnet in network[\"subnets\"]:\n if network[\"shared\"] != True:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n\n # generate private net\n # private name\n data = {\"type\": \"string\",\n \"description\": \"Name of network\",\n \"default\": network[\"name\"]}\n self.compute_data[\"parameters\"][\"%s_net_name\" % (network[\"name\"])] = data\n\n # private cidr\n data = {\"type\": \"string\",\n \"description\": \"Network address (CIDR notation)\",\n \"default\": subnet_info[\"cidr\"]}\n self.compute_data[\"parameters\"][\"%s_%s_cidr\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private gateway\n data = {\"type\": \"string\",\n \"description\": \"Network gateway address\",\n \"default\": subnet_info[\"gateway_ip\"]}\n self.compute_data[\"parameters\"][\"%s_%s_gateway\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool start\n data = {\"type\": \"string\",\n \"description\": \"Start of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"start\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_start\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool end\n data = {\"type\": \"string\",\n \"description\": \"End of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"end\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_end\" % (network[\"name\"], subnet_info[\"name\"])] = data\n else:\n print \"\\t* Adding shared network: %s\" % network[\"name\"]\n data = {\"type\": \"string\",\n \"description\": \"ID of detected shared network\",\n \"default\": network[\"id\"]\n }\n self.compute_data[\"parameters\"][\"shared_net_%s\" % str(shared_net_id)] = data\n shared_net_id += 1", "def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n 
\"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()", "def networkx_resource_generator (func_name, seed=0, max_cpu=40, max_mem=16000,\n max_storage=30, max_link_bw=70,\n abc_nf_types_len=10,\n supported_nf_cnt=6, max_link_delay=2,\n sap_cnt=10,\n **kwargs):\n rnd = random.Random()\n rnd.seed(seed)\n nx_graph = get_networkx_func(func_name, seed=seed, **kwargs)\n\n nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]\n nffg = NFFG(id=\"net-\" + func_name + \"-seed\" + str(seed))\n gen = NameGenerator()\n\n for infra_id in nx_graph.nodes_iter():\n infra = nffg.add_infra(id=\"infra\" + str(infra_id),\n bandwidth=rnd.random() * max_link_bw * 1000,\n cpu=rnd.random() * max_cpu,\n mem=rnd.random() * max_mem,\n storage=rnd.random() * max_storage)\n infra.add_supported_type(rnd.sample(nf_types, supported_nf_cnt))\n\n for i, j in nx_graph.edges_iter():\n infra1 = nffg.network.node[\"infra\" + str(i)]\n infra2 = nffg.network.node[\"infra\" + str(j)]\n nffg.add_undirected_link(port1=infra1.add_port(id=gen.get_name(\"port\")),\n port2=infra2.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * max_link_delay,\n bandwidth=rnd.random() * max_link_bw)\n\n infra_ids = [i.id for i in nffg.infras]\n for s in xrange(0, sap_cnt):\n sap_obj = nffg.add_sap(id=gen.get_name(\"sap\"))\n sap_port = sap_obj.add_port(id=gen.get_name(\"port\"))\n infra_id = rnd.choice(infra_ids)\n infra = nffg.network.node[infra_id]\n nffg.add_undirected_link(port1=sap_port,\n port2=infra.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * 
max_link_delay,\n bandwidth=rnd.uniform(max_link_bw / 2.0,\n max_link_bw))\n\n return nffg", "def run(self, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.get_subnet(subnet[\"id\"])", "def pre_subnet_create(self, resource_dict):\n pass", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.delete_subnet(subnet[\"id\"])", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def add_router_interfaces(self):\n for subnet_name in self.router_data['properties']['networks'].keys():\n #print(subnet_name)\n interface = OrderedDict({\n str(self.router_name + '_interface_' + subnet_name): {\n 'type': 'OS::Neutron::RouterInterface',\n 'properties': {\n 'router_id': { 'get_resource': self.router_name },\n 'subnet_id': { 'get_resource': str(self.router_name + '_' + subnet_name) }\n } \n }\n })\n self.template['resources'].update(interface)", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def gen_server_resources(self):\n print \"\\t* Adding server resources to compute template\"\n # add all instances\n servers = self.novaclient.servers.list()\n\n # add all ports\n ports = []\n\n self.set_of_images = set(self.set_of_images)\n\n for server in servers:\n if self.using_snapshots:\n # get template image id\n images = [(idx, x[1]) for idx, x in enumerate(set(self.snapshot_ids)) if x[0] == server.id]\n else:\n # get template image id\n images = [(idx, x) for idx, x in enumerate(self.set_of_images) if x == server.image[\"id\"]]\n\n # continue to next iteration.\n if len(images) == 0:\n continue\n image_num = images[0][0] if images[0][0] > 0 else \"\"\n image_ = \"image%s\" % image_num\n\n # get template flavor id\n flavors = [(idx, x) for idx, x in enumerate(self.set_of_flavors) if x.id == server.flavor[\"id\"]]\n flavor_num = flavors[0][0] if flavors[0][0] > 0 else \"\"\n flavor_ = \"flavor%s\" % flavor_num\n\n # get template keys\n keys = [(idx, x) for idx, x in enumerate(self.set_of_keys) if x == server.key_name]\n key_num = keys[0][0] if keys[0][0] > 0 else \"\"\n key_ = \"key_name%s\" % key_num\n\n # get template network info\n # novaclient.servers.interface_list(servers[3])[1]._info\n # instead of server.interface_list(server.id)\n # bug : github 
#1280453\n networks_ = []\n with self.suppress():\n ports = self.novaclient.servers.interface_list(server)\n\n for idx, port in enumerate(ports):\n networks_.append({\n \"port\": {\n \"get_resource\": \"%s_port%s\" % (server.name, idx)}\n })\n\n # add server definition\n data = {\"type\": \"OS::Nova::Server\",\n \"properties\": {\n \"name\": server.name,\n \"image\": {\"get_param\": image_},\n \"flavor\": {\"get_param\": flavor_},\n \"key_name\": {\"get_param\": key_},\n \"networks\": networks_\n }}\n\n # add user_data\n # the following line should be proper syntax according to\n # OpenStack's documentation. However Heat did not seem to like\n # it. So, we are not using the get_file param.\n # Creating stack from command line works, but does not seem to work\n # in horizon\n # see: http://docs.openstack.org/developer/heat/template_guide/hot_spec.html\n # data[\"properties\"][\"user_data\"] = {\"get_file\": user_data}\n\n try:\n case, user_data = self.gen_userdata(server.id)\n except:\n user_data = None\n if user_data is not None:\n if \"case3\" in case:\n data[\"properties\"][\"user_data_format\"] = \"RAW\"\n data[\"properties\"][\"user_data\"] = user_data\n\n self.compute_data[\"resources\"][server.name] = data\n\n # add server port information\n self.gen_port_resources(server, ports)\n\n # add floating ip information\n self.gen_floating_ip_resources(server)", "def _send_all_data(self):\n admin_context = qcontext.get_admin_context()\n networks = []\n routers = []\n\n all_networks = super(NeutronRestProxyV2,\n self).get_networks(admin_context) or []\n for net in all_networks:\n mapped_network = self._get_mapped_network_with_subnets(net)\n net_fl_ips = self._get_network_with_floatingips(mapped_network)\n\n ports = []\n net_filter = {'network_id': [net.get('id')]}\n net_ports = super(NeutronRestProxyV2,\n self).get_ports(admin_context,\n filters=net_filter) or []\n for port in net_ports:\n mapped_port = self._map_state_and_status(port)\n mapped_port['attachment'] = {\n 'id': port.get('device_id'),\n 'mac': port.get('mac_address'),\n }\n ports.append(mapped_port)\n net_fl_ips['ports'] = ports\n\n networks.append(net_fl_ips)\n\n all_routers = super(NeutronRestProxyV2,\n self).get_routers(admin_context) or []\n for router in all_routers:\n interfaces = []\n mapped_router = self._map_state_and_status(router)\n router_filter = {\n 'device_owner': [\"network:router_interface\"],\n 'device_id': [router.get('id')]\n }\n router_ports = super(NeutronRestProxyV2,\n self).get_ports(admin_context,\n filters=router_filter) or []\n for port in router_ports:\n net_id = port.get('network_id')\n subnet_id = port['fixed_ips'][0]['subnet_id']\n intf_details = self._get_router_intf_details(admin_context,\n net_id,\n subnet_id)\n interfaces.append(intf_details)\n mapped_router['interfaces'] = interfaces\n\n routers.append(mapped_router)\n\n resource = '/topology'\n data = {\n 'networks': networks,\n 'routers': routers,\n }\n errstr = _(\"Unable to update remote topology: %s\")\n return self.servers.rest_action('PUT', resource, data, errstr)", "def createGRSAZ(gwtable, inputsubnets, Routetargets):\n ec2 = boto3.client(\"ec2\")\n elb = boto3.client('elb')\n\n #clean the inputsubnets\n vpcid = elb.describe_load_balancers(LoadBalancerNames=[elbname])['LoadBalancerDescriptions'][0]['VPCId']\n subnetsvpc = ec2.describe_subnets(Filters=[{'Name': \"vpc-id\", 'Values': [vpcid]}])\n notrealsubnets = set(inputsubnets)-set([s['SubnetId'] for s in subnetsvpc['Subnets']])\n if len(notrealsubnets) > 0:\n print('the following 
are not real subnets in your VPC: ', notrealsubnets)\n cleaninputsubnets = list(set(inputsubnets) - notrealsubnets)\n\n #find all the routing tables already associated with any healthy gws and their associated subnets \n rt2 = ec2.describe_route_tables(Filters=[{'Name': 'association.subnet-id', 'Values': cleaninputsubnets}])\n #disassociate subnets from RTs if used by gateway ...later\n\n M = []\n for r in rt2['RouteTables']:\n if set(Routetargets) <= set([rr['DestinationCidrBlock'] for rr in r['Routes'] if 'InstanceId' in rr.keys() and rr['InstanceId'] in [g[0] for g in gwtable if g[1] == 'InService']]):\n for s in [ass for ass in r['Associations'] if ass['SubnetId'] in cleaninputsubnets]:\n goodinstance = [rr['InstanceId'] for rr in r['Routes'] if 'InstanceId' in rr.keys() and rr['InstanceId'] in [g[0] for g in gwtable if g[1] == 'InService']].pop()\n M.append(tuple([goodinstance,\n r['RouteTableId'],\n s['SubnetId'],\n 1]))\n\n # add route tables that have the routes but no live GWs with index 2....we'll reuse these RTs and routes\n elif set(Routetargets) <= set([rr['DestinationCidrBlock'] for rr in r['Routes']]):\n for s in r['Associations']:\n M.append(tuple(['NoGW',\n r['RouteTableId'],\n s['SubnetId'],\n 2]))\n\n #add new RTs for any subnets that are not in the table. mark the GWs as NoGW and index at 3 so that we know that we need to add new routes\n subnets1 = ec2.describe_subnets(Filters=[{'Name': \"subnet-id\", 'Values': list(set([m[2] for m in M]) | set(cleaninputsubnets))}])\n subnets2 = {s['SubnetId']: s for s in subnets1['Subnets']}\n for sub in cleaninputsubnets:\n if not (sub in [m[2] for m in M]):\n if subnets2[sub]['VpcId'] == vpcid:\n rass = []\n for rt in rt2['RouteTables']:\n for ass in rt['Associations']:\n if ass['SubnetId'] == sub:\n rass.append(ass['RouteTableAssociationId'])\n if len(rass) > 0:\n ec2.disassociate_route_table(AssociationId=rass.pop())\n print('removed RT association from subnet ', sub)\n RTforS = ec2.create_route_table(VpcId=vpcid)['RouteTable']['RouteTableId']\n ec2.associate_route_table(SubnetId=sub, RouteTableId=RTforS)\n print('created route table ', RTforS, ' and associated it with subnet ', sub)\n M.append(tuple(['NoGW', RTforS, sub, 3]))\n else:\n print('Subnet ', sub, ' is in VPC ', subnets2[sub]['VpcId'], ' which is not in the same vpc as your gateways: (', vpcid, '). 
Ignoring!')\n \n # Convert to a list and add AZ info into table\n MM = [list(n) for n in set(M)]\n for r in MM:\n r.insert(3, subnets2[r[2]]['AvailabilityZone'])\n\n return MM", "def _generate_ribs(self):\n for fw in self._fw_rules:\n source_tag = fw['source_tag']\n dest_tag = fw['dest_tag']\n\n for source_vm_index in self._tag_owners[source_tag]:\n for dest_vm_index in self._tag_owners[dest_tag]:\n # Add to each vertex access ability nodes\n self._graph[source_vm_index].add(dest_vm_index)", "def create_t_resources(context, objects, ext_net):\n LOG.info(\">>>>Creating all the objects of the project in NSX-T.\")\n prepare = replay_utils.PrepareObjectForMigration()\n with v3_utils.NsxV3PluginWrapper() as plugin:\n # create the resource in the order opposite to the deletion\n # (but start with routers)\n ordered_resources = migrated_resources[::-1]\n ordered_resources.remove('router')\n ordered_resources = ['router'] + ordered_resources\n dhcp_subnets = []\n for resource in ordered_resources:\n total_num = len(objects[resource])\n LOG.info(\">>>Creating %s %s%s.\", total_num,\n resource, 's' if total_num > 1 else '')\n get_object = getattr(plugin, \"get_%s\" % resource)\n create_object = getattr(plugin, \"create_%s\" % resource)\n # go over the objects of this resource\n for count, obj in enumerate(objects[resource], 1):\n # check if this object already exists\n try:\n get_object(context, obj['id'])\n except exceptions.NotFound:\n # prevent logger from logging this exception\n sys.exc_clear()\n else:\n # already exists (this will happen if we rerun from files,\n # or if the deletion failed)\n LOG.info(\">>Skipping %(resource)s %(name)s %(count)s/\"\n \"%(total)s as it was already created.\",\n {'resource': resource,\n 'name': obj.get('name') or obj['id'],\n 'count': count,\n 'total': total_num})\n continue\n\n # fix object before creation using the api replay code\n orig_id = obj['id']\n prepare_object = getattr(prepare, \"prepare_%s\" % resource)\n obj_data = prepare_object(obj, direct_call=True)\n enable_dhcp = False\n # special cases for different objects before create:\n if resource == 'subnet':\n if obj_data['enable_dhcp']:\n enable_dhcp = True\n # disable dhcp for now, to avoid ip collisions\n obj_data['enable_dhcp'] = False\n elif resource == 'security_group':\n # security group rules should be added separately\n sg_rules = obj_data.pop('security_group_rules')\n elif resource == 'floatingip':\n # Create the floating IP on the T external network\n obj_data['floating_network_id'] = ext_net\n del obj_data['floating_ip_address']\n elif resource == 'port':\n # remove the old subnet id field from ports fixed_ips dict\n # since the subnet ids are changed\n for fixed_ips in obj_data['fixed_ips']:\n del fixed_ips['subnet_id']\n\n if obj_data['device_owner'] == 'network:dhcp':\n continue\n if obj_data['device_owner'] == 'network:floatingip':\n continue\n if obj_data['device_owner'] == 'network:router_gateway':\n # add a gateway on the new ext network for this router\n router_id = obj_data['device_id']\n # keep the original enable-snat value\n router_data = get_router_by_id(objects, router_id)\n enable_snat = router_data['external_gateway_info'].get(\n 'enable_snat', True)\n rtr_body = {\n \"external_gateway_info\":\n {\"network_id\": ext_net,\n \"enable_snat\": enable_snat}}\n try:\n plugin.update_router(\n context, router_id, {'router': rtr_body})\n LOG.info(\">>Uplinked router %(rtr)s to new \"\n \"external network %(net)s\",\n {'rtr': router_id,\n 'net': ext_net})\n\n except Exception as e:\n 
LOG.error(\">>Failed to add router %(rtr)s \"\n \"gateway: %(e)s\",\n {'rtr': router_id, 'e': e})\n continue\n if obj_data['device_owner'] == 'network:router_interface':\n try:\n # uplink router_interface ports by creating the\n # port, and attaching it to the router\n router_id = obj_data['device_id']\n obj_data['device_owner'] = \"\"\n obj_data['device_id'] = \"\"\n created_port = plugin.create_port(\n context,\n {'port': obj_data})\n LOG.info(\">>Created interface port %(port)s, ip \"\n \"%(ip)s, mac %(mac)s)\",\n {'port': created_port['id'],\n 'ip': created_port['fixed_ips'][0][\n 'ip_address'],\n 'mac': created_port['mac_address']})\n plugin.add_router_interface(\n context,\n router_id,\n {'port_id': created_port['id']})\n LOG.info(\">>Uplinked router %(rtr)s to network \"\n \"%(net)s\",\n {'rtr': router_id,\n 'net': obj_data['network_id']})\n except Exception as e:\n LOG.error(\">>Failed to add router %(rtr)s \"\n \"interface port: %(e)s\",\n {'rtr': router_id, 'e': e})\n continue\n\n # create the object on the NSX-T plugin\n try:\n created_obj = create_object(context, {resource: obj_data})\n LOG.info(\">>Created %(resource)s %(name)s %(count)s/\"\n \"%(total)s\",\n {'resource': resource, 'count': count,\n 'name': obj_data.get('name') or orig_id,\n 'total': total_num})\n except Exception as e:\n # TODO(asarfaty): subnets ids are changed, so recreating a\n # subnet will fail on overlapping ips.\n LOG.error(\">>Failed to create %(resource)s %(name)s: \"\n \"%(e)s\",\n {'resource': resource, 'e': e,\n 'name': obj_data.get('name') or orig_id})\n continue\n\n # special cases for different objects after create:\n if resource == 'security_group':\n sg_id = obj_data.get('name') or obj_data['id']\n for rule in sg_rules:\n rule_data = prepare.prepare_security_group_rule(rule)\n try:\n plugin.create_security_group_rule(\n context, {'security_group_rule': rule_data})\n except ext_sg.SecurityGroupRuleExists:\n # default rules were already created.\n # prevent logger from logging this exception\n sys.exc_clear()\n except Exception as e:\n LOG.error(\n \">>Failed to create security group %(name)s \"\n \"rules: %(e)s\",\n {'name': sg_id, 'e': e})\n elif resource == 'subnet':\n if enable_dhcp:\n dhcp_subnets.append(created_obj['id'])\n\n # Enable dhcp on all the relevant subnets (after creating all ports,\n # to maintain original IPs):\n if dhcp_subnets:\n for subnet_id in dhcp_subnets:\n try:\n plugin.update_subnet(\n context, subnet_id,\n {'subnet': {'enable_dhcp': True}})\n\n except Exception as e:\n LOG.error(\"Failed to enable DHCP on subnet %(subnet)s:\"\n \" %(e)s\",\n {'subnet': subnet_id, 'e': e})\n\n # Add static routes (after all router interfaces and gateways are set)\n for obj_data in objects['router']:\n if 'routes' in obj_data:\n try:\n plugin.update_router(\n context, obj_data['id'],\n {'router': {'routes': obj_data['routes']}})\n except Exception as e:\n LOG.error(\"Failed to add routes to router %(rtr)s: \"\n \"%(e)s\",\n {'rtr': obj_data['id'], 'e': e})\n\n LOG.info(\">>>Done Creating all objects in NSX-T.\")", "def gen_port_resources(self, server, ports):\n if (self.SuppressServerStatuses is False):\n print \"\\t* Adding all the port interface resources\"\n data = {}\n port_idx = \"0\"\n for idx, port in enumerate(ports):\n\n # get fixedips\n fixed_ip = port._info[\"fixed_ips\"]\n fixed_ip_address = fixed_ip[0][\"ip_address\"]\n\n # filter all_nets by subnet_id\n net_data = []\n fip = None\n for x in self.all_nets:\n for fip in fixed_ip:\n if x[0][\"id\"] in fip[\"subnet_id\"]:\n 
net_data.append(x)\n\n if len(net_data) > 0:\n net = net_data[0][1]\n subnet = net_data[0][2]\n\n networkID = [netw['id'] for netw in self.neutronclient.list_networks()['networks'] if netw['name'] == net][0]\n networkIsShared = self.neutronclient.show_network(networkID)['network']['shared']\n\n if networkIsShared is True:\n port_properties_ = {\n \"network_id\": networkID,\n \"fixed_ips\": [\n {\"subnet_id\": fip[\"subnet_id\"]}\n ]\n }\n else:\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": [\n {\"subnet_id\": {\"get_resource\": subnet}}\n ]\n }\n if self.staticips:\n fixed_ips = []\n for address in server.addresses:\n server_ip_address = server.addresses[address][0]['addr']\n if server_ip_address == fixed_ip_address:\n fixed_ips.append({\"ip_address\": server_ip_address})\n\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": fixed_ips\n }\n data = {\"type\": \"OS::Neutron::Port\",\"properties\": port_properties_}\n else:\n print \"!!Probable error grabbing port information for server %s!!\" % (server.name)\n data = {\"type\": \"OS::Neutron::Port\"}\n\n self.compute_data[\"resources\"][\"%s_port%s\" % (server.name, port_idx)] = data\n if len(ports) >= 1:\n port_idx = str(1 + idx)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.get_router(router[\"id\"])", "def post_subnet_create(self, resource_dict):\n pass", "def dvs_vcenter_networks(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n subnets = []\n networks = []\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n for net in self.net_data:\n logger.info('Create network {}'.format(net.keys()[0]))\n netw = os_conn.create_network(network_name=net.keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(net.keys()[0]))\n subnet = os_conn.create_subnet(subnet_name=net.keys()[0],\n network_id=netw['id'],\n cidr=net[net.keys()[0]],\n ip_version=4)\n\n subnets.append(subnet)\n networks.append(netw)\n\n self.show_step(3)\n for net in networks:\n assert_true(os_conn.get_network(net['name'])['id'] == net['id'])\n\n self.show_step(4)\n logger.info('Delete network net_1')\n os_conn.neutron.delete_subnet(subnets[0]['id'])\n os_conn.neutron.delete_network(networks[0]['id'])\n\n self.show_step(5)\n assert_true(os_conn.get_network(networks[0]) is None)\n\n self.show_step(6)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))\n # subnet\n os_conn.create_subnet(\n subnet_name=self.net_data[0].keys()[0],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] 
== net_1['id'])\n logger.info('Networks net_1 and net_2 are present.')", "def dvs_vcenter_multiple_nics(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n networks = []\n router = os_conn.get_router(os_conn.get_network(self.ext_net_name))\n\n self.show_step(2)\n self.show_step(3)\n for net in self.net_data:\n network = os_conn.create_network(network_name=net.keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(net.keys()[0]))\n subnet = os_conn.create_subnet(subnet_name=net.keys()[0],\n network_id=network['id'],\n cidr=net[net.keys()[0]],\n ip_version=4)\n\n # Check that network is created.\n assert_true(\n os_conn.get_network(network['name'])['id'] == network['id'])\n os_conn.add_router_interface(\n router_id=router[\"id\"],\n subnet_id=subnet[\"id\"])\n networks.append(network)\n\n nics = [{'net-id': network['id']} for network in networks]\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n\n _s_groups = os_conn.neutron.list_security_groups()\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups['security_groups']\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n self.show_step(4)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=nics,\n security_groups=[default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(5)\n for instance in instances:\n for net in networks:\n assert_true(os_conn.get_nova_instance_ip(\n instance, net_name=net['name']) is not None)\n\n net_1_name = self.net_data[0].keys()[0]\n net_2_name = self.net_data[1].keys()[0]\n ips = {\n net_1_name: {'ips': [], 'access_point_ip': ''},\n net_2_name: {'ips': [], 'access_point_ip': ''}\n }\n\n for net in networks:\n ips[net['name']]['ips'] = map(\n (lambda x:\n os_conn.get_nova_instance_ip(x, net_name=net['name'])),\n instances)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net['id']}],\n security_groups=[default_sg['name']])\n ips[net['name']]['access_point_ip'] = access_point_ip\n\n logger.info(pretty_log(ips))\n\n self.show_step(6)\n cmds = [\"sudo /bin/ip link set up dev eth1\",\n \"sudo /sbin/cirros-dhcpc up eth1\"]\n access_point_ip = ips[net_1_name]['access_point_ip']\n for ip in ips[net_1_name]['ips']:\n openstack.remote_execute_command(access_point_ip, ip, cmds[0])\n openstack.remote_execute_command(access_point_ip, ip, cmds[1])\n\n self.show_step(7)\n for net in networks:\n inst_ips = ips[net['name']]['ips']\n access_point_ip = ips[net['name']]['access_point_ip']\n ip_pair = {ip: [v for v in inst_ips if v != ip] for ip in inst_ips}\n openstack.check_connection_through_host(access_point_ip,\n ip_pair,\n timeout=60 * 5,\n interval=10)", "def __init__(self):\n self.networks = [\n ipaddress.ip_network(address)\n for address in self.addresses\n ]", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n 
router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for e in range(subnets_per_network):\n router = net_topo[\"routers\"][e]\n subnet = net_topo[\"subnets\"][e]\n self.neutron.remove_interface_from_router(subnet_id=subnet[\"id\"],\n router_id=router[\"id\"])\n self.neutron.delete_router(router[\"id\"])", "def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' + static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' 
+ target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }", "def setup_net(self):\n pass", "def _ensure_net_and_subnets(self, port):\n\n # Gather the subnet IDs that we need for this port, and get the\n # NetModel if we already have it in the cache.\n needed_subnet_ids = set()\n net = None\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip.get('subnet_id')\n if subnet_id:\n needed_subnet_ids.add(subnet_id)\n if not net:\n net = self.agent.cache.get_network_by_subnet_id(subnet_id)\n LOG.debug(\"Needed subnet IDs: %s\", needed_subnet_ids)\n LOG.debug(\"Existing network model by subnet ID: %s\", net)\n\n # For each subnet that we need, get its data from SubnetWatcher and\n # hold for adding into the cache.\n new_subnets = {}\n for subnet_id in needed_subnet_ids:\n # Get data for this subnet from the SubnetWatchers.\n subnet = (self.subnet_watcher.get_subnet(subnet_id) or\n self.v1_subnet_watcher.get_subnet(subnet_id))\n if subnet is None:\n LOG.warning(\"No data for subnet %s\", subnet_id)\n raise SubnetIDNotFound()\n new_subnets[subnet_id] = subnet\n\n if not net:\n # We don't already have a NetModel, so look for a cached NetModel\n # with the right network ID. 
(In this case we must have new\n # subnets to add into the cache, and the cached NetModel must have\n # subnets other than the ones that we're adding in this iteration;\n # otherwise we would have already found it when searching by\n # subnet_id above.)\n assert new_subnets\n network_id = list(new_subnets.values())[0]['network_id']\n net = self.agent.cache.get_network_by_id(network_id)\n LOG.debug(\"Existing network model by network ID: %s\", net)\n\n if not net:\n # We still have no NetModel for the relevant network ID, so create\n # a new one. In this case we _must_ be adding new subnets.\n assert new_subnets\n net = empty_network(network_id)\n LOG.debug(\"New network %s\", net)\n elif new_subnets:\n # We have a NetModel that was already in the cache and are about to\n # modify it. Cache replacement only works if the new NetModel is a\n # distinct object from the existing one, so make a copy here.\n net = copy_network(net)\n LOG.debug(\"Copied network %s\", net)\n\n if new_subnets:\n # Add the new subnets into the NetModel.\n assert net\n net.subnets = [s for s in net.subnets\n if s.id not in new_subnets]\n net.subnets += list(new_subnets.values())\n\n # Add (or update) the NetModel in the cache.\n LOG.debug(\"Net: %s\", net)\n _fix_network_cache_port_lookup(self.agent, net.id)\n self.agent.cache.put(net)\n\n return net.id", "def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)" ]
[ "0.6704986", "0.66477597", "0.6631073", "0.64082897", "0.6147564", "0.6098752", "0.5986638", "0.59644693", "0.58866364", "0.5883728", "0.5876759", "0.582784", "0.5800922", "0.5764724", "0.5711894", "0.5677105", "0.5655168", "0.55968636", "0.55950785", "0.5588146", "0.55799073", "0.5540796", "0.5503078", "0.54916376", "0.5473418", "0.54597425", "0.54298925", "0.54145104", "0.54134005", "0.54090333" ]
0.7728604
0
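The cleanup fragment near the top of this record detaches each subnet interface before deleting its router, which is the required order (Neutron rejects deletion of a router that still has attached interfaces). As a hedged, standalone sketch of the same teardown using the python-neutronclient API directly rather than the scenario wrapper shown above — the `neutron` client object and the `net_topo` structure are assumptions, not values from the dataset:

```python
def teardown_routers(neutron, net_topo):
    """Detach one subnet interface per router, then delete the router.

    `neutron` is assumed to be a neutronclient.v2_0.client.Client instance;
    `net_topo` is assumed to hold parallel "routers" and "subnets" lists,
    mirroring the fragment above.
    """
    for router, subnet in zip(net_topo["routers"], net_topo["subnets"]):
        # Interfaces must be removed first; deleting a router that still has
        # attached interfaces returns a conflict error.
        neutron.remove_interface_router(router["id"], {"subnet_id": subnet["id"]})
        neutron.delete_router(router["id"])
```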
Generate all the router resources
def gen_router_resources(self): print "\t* Adding router resources to compute template" from nova import version year = version.version_string() for idx, router in enumerate(self.tenant_routers): router_ports = [] for port in self.all_ports: if router["id"] == port["device_id"]: router_ports.append(port) # add the router definition if "2013" in year: # Havana Format data = {"type": "OS::Neutron::Router"} self.compute_data["resources"]["router%s" % str(idx)] = data # routers without external gateway if router["external_gateway_info"] is not None: name = {"get_resource": "router%s" % str(idx)} netid = {"get_param": "public_net_%s" % str(idx)} # add the router gateway data = {"type": "OS::Neutron::RouterGateway", "properties": { "router_id": name, "network_id": netid }} self.compute_data["resources"]["router_gateway%s" % str(idx)] = data else: # Icehouse Format rtrName = router["name"] # routers without external gateway if router["external_gateway_info"] is not None: data = {"type": "OS::Neutron::Router", "properties": { "name": rtrName, "external_gateway_info": { "network": { "get_param": "public_net_%s" % str(idx) } } }} else: data = {"type": "OS::Neutron::Router", "properties": { "name": rtrName } } self.compute_data["resources"]["router%s" % str(idx)] = data # internal port information needed internal_interfaces = filter(lambda port: port["device_owner"] == "network:router_interface", router_ports) for idxs, interface in enumerate(internal_interfaces): # add the router interface for fixedip in interface["fixed_ips"]: # create router interface data = {"type": "OS::Neutron::RouterInterface", "properties": { "router_id": {"get_resource": "router%s" % str(idx)}, "port_id": {"get_resource": "port_%s_%s" % (str(idx), str(idxs))} }} self.compute_data["resources"]["router_interface%s_%s" % (str(idx), str(idxs))] = data # create router port network = self.neutronclient.show_subnet(fixedip["subnet_id"])["subnet"]["network_id"] net_name = "%s" % str(self.neutronclient.show_network(network)["network"]["name"]) net_id = self.neutronclient.show_network(network)["network"]["id"] fixed_ips = [{"ip_address": fixedip["ip_address"]}] net = self.neutronclient.show_network(network)["network"] if net["shared"] is True: data = {"type": "OS::Neutron::Port", "properties": { "fixed_ips": fixed_ips, "network_id": net_id }} else: data = {"type": "OS::Neutron::Port", "properties": { "fixed_ips": fixed_ips, "network_id": {"get_resource": net_name} }} self.compute_data["resources"]["port_%s_%s" % (str(idx), str(idxs))] = data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def resources(self):", "def get_routers(self):", "def resources(self) -> Generator[Tuple[str, str, Dict[str, Any]], None, None]:\n for path, methods in self.data[\"paths\"].items():\n for method in methods:\n if method.lower() not in HTTP_VERBS:\n continue\n\n yield method, path, methods", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def _get_resources():\n return {\n 'searchPageUrl': flask.url_for('search_page'),\n 'searchJsonUrl': flask.url_for('search'),\n 'userAddIconUrl': flask.url_for('static', filename='img/add-users.svg'),\n 'logoutUrl': flask.url_for('logout'),\n 'settingsUrl': flask.url_for('setup') + '#settingsDisplayTemplate',\n 'listAdminUrl': flask.url_for('admin_list'),\n 'addAdminUrl': flask.url_for('add_admin'),\n 'changeAdminPasswordUrl': flask.url_for('change_admin_password'),\n 'removeAdminUrl': flask.url_for('delete_admin'),\n 'loginUrl': flask.url_for('login'),\n 'recaptchaKey': ufo.app.config.get('RECAPTCHA_SITE_KEY', ''),\n 'setupUrl': flask.url_for('setup'),\n 'setupAdminUrl': flask.url_for('setup_admin'),\n 'setupOauthUrl': flask.url_for('setup_oauth'),\n 'download_chrome_policy': flask.url_for('download_chrome_policy'),\n 'policy_filename': 'chrome_policy.json',\n 'proxyServerAddUrl': flask.url_for('proxyserver_add'),\n 'proxyServerAddIconUrl': flask.url_for('static',\n filename='img/add-servers.svg'),\n 'proxyServerInverseAddIconUrl': flask.url_for(\n 'static', filename='img/add-servers-inverse.svg'),\n 'proxyServerListId': 'proxyList',\n 'proxyServerListUrl': flask.url_for('proxyserver_list'),\n 'listLimit': 10,\n 'proxyServerDetailsButtonId': 'serverDetailsButton',\n 'editButtonId': 'serverEditButton',\n 'proxyServerDetailsOverlayId': 'serverDetailsOverlay',\n 'proxyServerEditUrl': flask.url_for('proxyserver_edit'),\n 'proxyServerDeleteUrl': flask.url_for('proxyserver_delete'),\n 'proxyServerIconUrl': flask.url_for('static', filename='img/server.svg'),\n 'proxyServerAddButtonId': 'addServerButton',\n 'proxyServerModalId': 'serverModal',\n 'textAreaMaxRows': 10,\n 'ipInput': 'ipInput',\n 'nameInput': 'nameInput',\n 'sshPrivateKeyInput': 'sshPrivateKeyInput',\n 'hostPublicKeyInput': 'hostPublicKeyInput',\n 'getSettingsUrl': flask.url_for('get_settings'),\n 'settingsEditUrl': flask.url_for('edit_settings'),\n 'userAddUrl': flask.url_for('add_user'),\n 'userInverseAddIconUrl': flask.url_for(\n 'static', filename='img/add-users-inverse.svg'),\n 'userListId': 
'userList',\n 'userListUrl': flask.url_for('user_list'),\n 'revokeToggleUrl': flask.url_for('user_toggle_revoked'),\n 'rotateKeysUrl': flask.url_for('user_get_new_key_pair'),\n 'inviteCodeUrl': flask.url_for('user_get_invite_code'),\n 'userDeleteUrl': flask.url_for('delete_user'),\n 'userDetailsButtonId': 'userDetailsButton',\n 'userDetailsOverlayId': 'userDetailsOverlay',\n 'userIconUrl': flask.url_for('static', filename='img/user.svg'),\n 'userAddButtonId': 'addUserButton',\n 'userModalId': 'userModal',\n 'groupAddTabId': 'groupAddTab',\n 'groupAddFormId': 'groupAdd',\n 'groupAddInputName': 'group_key',\n 'userAddTabId': 'userAddTab',\n 'userAddFormId': 'userAdd',\n 'userAddInputName': 'user_key',\n 'domainAddTabId': 'domainAddTab',\n 'domainAddFormId': 'domainAdd',\n 'manualAddTabId': 'manualAddTab',\n 'manualAddFormId': 'manualAdd',\n 'regexes': regex.REGEXES_AND_ERRORS_DICTIONARY,\n 'jsonPrefix': ufo.XSSI_PREFIX,\n 'maxFailedLoginsBeforeRecaptcha': ufo.MAX_FAILED_LOGINS_BEFORE_RECAPTCHA,\n 'userAddListFlipperId': 'userAddListFlipper',\n 'proxyServerAddListFlipperId': 'proxyServerAddListFlipper',\n 'userAddTabsId': 'userAddTabs',\n 'proxyServerAddFormId': 'serverAddFormHolder',\n }", "def getRoutes(self):\n pass", "def initialize_routes(app):\n # Authentification \n app.add_resource(auth.LoginApi, '/auth/login')\n app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Intialisation et activation d'un parking\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Gestion de Clients\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # statistiques financéres\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')", "def add_routes(self):\n pass", "def initialize_routes(api):\n api.add_resource(WatchlistsApi, '/api/watchlists')\n api.add_resource(WatchlistApi, '/api/watchlist/<id>')\n api.add_resource(RegisterUserApi, '/api/auth/register')\n api.add_resource(LoginUserApi, '/api/auth/login')\n api.add_resource(ResetPassword, '/api/auth/reset')\n api.add_resource(ResetFogottenPassword, '/api/auth/reset/password')\n api.add_resource(ForgotPassword, '/api/auth/forgot')\n api.add_resource(ForgotPasswordReset, '/reset/password/<token>')\n api.add_resource(Home, '/')\n api.add_resource(Logout, '/logout')\n api.add_resource(Dashboard, '/dashboard')\n api.add_resource(DashboardSearch, '/dashboard/search')\n api.add_resource(SearchMovies, '/search/movies/<title>')\n api.add_resource(SearchMovieDetails, '/search/movie/details/<id>')\n api.add_resource(SearchTvShows, '/search/shows/<title>')\n api.add_resource(SearchShowDetails, '/search/show/details/<id>')\n api.add_resource(SearchTrendingMovies, '/search/trending/movies')\n api.add_resource(Recommend, '/recommend')", "def create_routes(api: Api):\n api.add_resource(SignUpApi, '/user/signup/')\n api.add_resource(LoginApi, '/user/login/')\n\n api.add_resource(UsersApi, '/users/')\n\n api.add_resource(CafeteriasCreationAPI, '/createcafeteria/')\n api.add_resource(CreateItemsAPI, '/createcafeteriaitems/')", "def gen_server_resources(self):\n print \"\\t* Adding server resources to compute template\"\n 
# add all instances\n servers = self.novaclient.servers.list()\n\n # add all ports\n ports = []\n\n self.set_of_images = set(self.set_of_images)\n\n for server in servers:\n if self.using_snapshots:\n # get template image id\n images = [(idx, x[1]) for idx, x in enumerate(set(self.snapshot_ids)) if x[0] == server.id]\n else:\n # get template image id\n images = [(idx, x) for idx, x in enumerate(self.set_of_images) if x == server.image[\"id\"]]\n\n # continue to next iteration.\n if len(images) == 0:\n continue\n image_num = images[0][0] if images[0][0] > 0 else \"\"\n image_ = \"image%s\" % image_num\n\n # get template flavor id\n flavors = [(idx, x) for idx, x in enumerate(self.set_of_flavors) if x.id == server.flavor[\"id\"]]\n flavor_num = flavors[0][0] if flavors[0][0] > 0 else \"\"\n flavor_ = \"flavor%s\" % flavor_num\n\n # get template keys\n keys = [(idx, x) for idx, x in enumerate(self.set_of_keys) if x == server.key_name]\n key_num = keys[0][0] if keys[0][0] > 0 else \"\"\n key_ = \"key_name%s\" % key_num\n\n # get template network info\n # novaclient.servers.interface_list(servers[3])[1]._info\n # instead of server.interface_list(server.id)\n # bug : github #1280453\n networks_ = []\n with self.suppress():\n ports = self.novaclient.servers.interface_list(server)\n\n for idx, port in enumerate(ports):\n networks_.append({\n \"port\": {\n \"get_resource\": \"%s_port%s\" % (server.name, idx)}\n })\n\n # add server definition\n data = {\"type\": \"OS::Nova::Server\",\n \"properties\": {\n \"name\": server.name,\n \"image\": {\"get_param\": image_},\n \"flavor\": {\"get_param\": flavor_},\n \"key_name\": {\"get_param\": key_},\n \"networks\": networks_\n }}\n\n # add user_data\n # the following line should be proper syntax according to\n # OpenStack's documentation. However Heat did not seem to like\n # it. 
So, we are not using the get_file param.\n # Creating stack from command line works, but does not seem to work\n # in horizon\n # see: http://docs.openstack.org/developer/heat/template_guide/hot_spec.html\n # data[\"properties\"][\"user_data\"] = {\"get_file\": user_data}\n\n try:\n case, user_data = self.gen_userdata(server.id)\n except:\n user_data = None\n if user_data is not None:\n if \"case3\" in case:\n data[\"properties\"][\"user_data_format\"] = \"RAW\"\n data[\"properties\"][\"user_data\"] = user_data\n\n self.compute_data[\"resources\"][server.name] = data\n\n # add server port information\n self.gen_port_resources(server, ports)\n\n # add floating ip information\n self.gen_floating_ip_resources(server)", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def resources(self):\n return [self]", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def setup_routes():\n root = CherryTarball()\n d = cherrypy.dispatch.RoutesDispatcher()\n d.connect('main', '/', controller=root)\n # This enumerates the tarball and connects each file within to a URL in the dispatcher\n tar = tarfile.open(tarball_path)\n for tarobj in tar.getmembers():\n if tarobj.isdir():\n pass # Skip directories\n else:\n d.connect(tarobj.name, tarobj.name, controller=root, action='static', filepath=tarobj.name)\n dispatcher = d\n return dispatcher", "def build_routes(app):\n app.register_blueprint(workflow_plans_blueprint)\n app.register_blueprint(cache_blueprint)\n app.register_blueprint(config_blueprint)\n app.register_blueprint(dataset_blueprint)\n app.register_blueprint(graph_blueprint)\n app.register_blueprint(jobs_blueprint)\n app.register_blueprint(project_blueprint)\n app.register_blueprint(templates_blueprint)\n app.register_blueprint(version_blueprint)\n app.register_blueprint(apispec_blueprint)\n app.register_blueprint(versions_list_blueprint)", "def resource_map(self):", "def genlist(self):\n out = []\n def responder():\n \"\"\"empty responder object used to find the template name\"\"\"\n pass\n responder.view = static_view_finder\n for path, route in self.routes:\n if route['generate']:\n mako_template = route['function'](responder)+'.mako'\n filename = relpath(self.mylookup.get_template(mako_template).filename)\n out.append((path, filename))\n return out", "def create_router_contexts(self, args):\n try:\n router_context_data = self.config_dict[\"RouterContextData\"]\n except KeyError:\n router_context_data = {}\n\n if \"PrimaryRegex\" in self.config_dict:\n args.primary_regex = self.config_dict[\"PrimaryRegex\"]\n if \"SecondaryRegex\" in self.config_dict:\n args.secondary_regex = self.config_dict[\"SecondaryRegex\"]\n if \"SiteNumberRegex\" in self.config_dict:\n args.site_regex = self.config_dict[\"SiteNumberRegex\"]\n if \"PodNumberRegex\" in self.config_dict:\n args.pod_regex = self.config_dict[\"PodNumberRegex\"]\n if len(self.router_patterns) > 0:\n if self.local_info.get_node_type() == 'conductor':\n self.get_json_assets()\n for asset in self.json_assets:\n try:\n 
router_name = asset['routerName']\n except KeyError:\n continue\n matched_router = False\n for pattern in self.router_patterns:\n if args.regex_patterns:\n if re.search(pattern, router_name):\n matched_router = True\n break\n else:\n if pattern in router_name:\n matched_router = True\n break\n if matched_router:\n op_string='Add'\n if router_name not in self.router_dict:\n rc = self.router_module.create_instance(\n self.local_info, \n router_name, \n args,\n asset, \n router_context_data\n )\n self.router_dict[router_name] = rc\n if pattern in self.routers_by_pattern:\n self.routers_by_pattern[pattern].append(rc)\n else:\n self.routers_by_pattern[pattern] = [ rc ]\n else:\n op_string='Update'\n self.router_dict[router_name].set_node(asset_json=asset)\n if args.debug or args.context:\n print(f'{op_string} router: {router_name}')\n rc.display()\n else:\n print('\\n*** Only conductors can match router patterns! ***\\n')\n sys.exit(1)\n\n if args.router is not None and \\\n len(self.router_dict) == 0:\n rc = self.router_module.create_instance(\n self.local_info, \n args.router, \n args, \n config=router_context_data\n )\n self.router_dict[rc.get_router()] = rc\n self.router_patterns = [ 'placeholder' ]\n self.routers_by_pattern['placeholder'] = [ rc ]\n if args.debug or args.context:\n print(f'Adding default Router')\n rc.display()", "def urls(self):\r\n urls = []\r\n\r\n for url_name in sorted(self.resources.keys()):\r\n\r\n resource = self.resources[url_name]\r\n urls.append(resource.as_url(\r\n api=self,\r\n name_prefix='-'.join(\r\n (self.prefix, self.str_version)).strip('-'),\r\n url_prefix=self.str_version\r\n ))\r\n\r\n return patterns(self.prefix, *urls)", "def get(self):\n resources = []\n starting_path = self.request.get('start')\n if starting_path:\n resources = Resource.query(Resource.path >= starting_path).order(\n Resource.path).fetch(11)\n else:\n resources = Resource.query().order(Resource.path).fetch(11)\n\n self.response.headers['Content-Type'] = 'text/html'\n\n self.response.write('<!doctype><html><head>' +\n '<title>Content Lister</title></head><body>Resources:<br>')\n for i in xrange(10):\n if i < len(resources):\n # TODO: constructing the path this way makes the resource\n # path a possible vector for XSS.\n self.response.write('%s ' % (resources[i].path,) +\n '<a href=\"/content_manager%s\">' % (\n resources[i].path,) +\n 'Edit</a> <a href=\"%s\">View</a><br>' % (\n resources[i].path,))\n\n if len(resources) > 10:\n self.response.write(\n '<a href=\"/content_lister?start=%s\">Next</a>' % (\n resources[10].path,))\n \n self.response.write('</body></html>')", "def register_resources(self):\n raise NotImplementedError", "def resources(self):\n return self.__resources", "def get_resources(self):\n return []", "def test_create_route_for_all_namespaces(self):\n pass", "def create_routes(name):\n if '/' in name:\n blueprint_name, model_name = name.split('/')\n output_file = 'blueprints/%s/urls.py' % blueprint_name\n else:\n model_name = name\n output_file = 'urls.py'\n file_exists = os.path.exists(output_file)\n routes = create_routes.routes_scaffold % dict(model_name=model_name.lower())\n if file_exists:\n routes = create_routes.append_routes % dict(routes=routes)\n else:\n routes = create_routes.new_routes % dict(routes=routes)\n with open(output_file, 'a') as out_file:\n if not file_exists:\n routes = '''%(imports)s\\n%(rest)s''' % dict(imports=create_routes.imports, rest=routes)\n out_file.write(routes)", "def get_resources():\n return 
Response(f\"{Resource.get_all_resources()}\", 200, mimetype='text/plain')", "def run(self, api, media_type, no_of_routers):\n\n # header for router API\n header = {\"Content-Type\": media_type, \"X-Auth-Token\": \"%s\" %\n AUTH_TOKEN}\n for _ in range(no_of_routers):\n # generate payload for creating router\n # creating router requires router name\n # router name is generated randomly\n router_name = ''.join(random.choice(string.ascii_lowercase)\n for x in range(10))\n data = {\"name\": router_name, \"tenantId\": \"\"}\n # create router\n self._create_router(\"POST\", api, header, data)", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app" ]
[ "0.73711663", "0.70035046", "0.6320128", "0.6223917", "0.6177516", "0.6145895", "0.61361414", "0.60855603", "0.6077239", "0.606435", "0.60615534", "0.6048417", "0.6031009", "0.6005149", "0.59731054", "0.5948869", "0.5940405", "0.5921544", "0.5920081", "0.590232", "0.5882855", "0.5870052", "0.585126", "0.58215773", "0.5820865", "0.5762186", "0.574635", "0.57446796", "0.57371545", "0.5668872" ]
0.7725163
0
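For orientation on the record above: the positive document builds plain Python dicts under `compute_data["resources"]`, and a companion method in the same dataset serializes the structure with `yaml.safe_dump`. A minimal sketch of the Icehouse-style router entry it emits when the router has an external gateway — the router and parameter names below are made-up placeholders, not values taken from the dataset:

```python
import yaml  # PyYAML

# Placeholder names; in the code above these come from the Neutron API.
resources = {
    "router0": {
        "type": "OS::Neutron::Router",
        "properties": {
            "name": "my-router",
            # Present only when external_gateway_info is set on the router.
            "external_gateway_info": {"network": {"get_param": "public_net_0"}},
        },
    }
}
print(yaml.safe_dump({"resources": resources}, default_flow_style=False))
```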
Generate all the port interface resources
def gen_port_resources(self, server, ports): if (self.SuppressServerStatuses is False): print "\t* Adding all the port interface resources" data = {} port_idx = "0" for idx, port in enumerate(ports): # get fixedips fixed_ip = port._info["fixed_ips"] fixed_ip_address = fixed_ip[0]["ip_address"] # filter all_nets by subnet_id net_data = [] fip = None for x in self.all_nets: for fip in fixed_ip: if x[0]["id"] in fip["subnet_id"]: net_data.append(x) if len(net_data) > 0: net = net_data[0][1] subnet = net_data[0][2] networkID = [netw['id'] for netw in self.neutronclient.list_networks()['networks'] if netw['name'] == net][0] networkIsShared = self.neutronclient.show_network(networkID)['network']['shared'] if networkIsShared is True: port_properties_ = { "network_id": networkID, "fixed_ips": [ {"subnet_id": fip["subnet_id"]} ] } else: port_properties_ = { "network_id": {"get_resource": net}, "fixed_ips": [ {"subnet_id": {"get_resource": subnet}} ] } if self.staticips: fixed_ips = [] for address in server.addresses: server_ip_address = server.addresses[address][0]['addr'] if server_ip_address == fixed_ip_address: fixed_ips.append({"ip_address": server_ip_address}) port_properties_ = { "network_id": {"get_resource": net}, "fixed_ips": fixed_ips } data = {"type": "OS::Neutron::Port","properties": port_properties_} else: print "!!Probable error grabbing port information for server %s!!" % (server.name) data = {"type": "OS::Neutron::Port"} self.compute_data["resources"]["%s_port%s" % (server.name, port_idx)] = data if len(ports) >= 1: port_idx = str(1 + idx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def gen_server_resources(self):\n print \"\\t* Adding server resources to compute template\"\n # add all instances\n servers = self.novaclient.servers.list()\n\n # add all ports\n ports = []\n\n self.set_of_images = set(self.set_of_images)\n\n for server in servers:\n if self.using_snapshots:\n # get template image id\n images = [(idx, x[1]) for idx, x in enumerate(set(self.snapshot_ids)) if x[0] == server.id]\n else:\n # get template image id\n images = [(idx, x) for idx, x in enumerate(self.set_of_images) if x == server.image[\"id\"]]\n\n # continue to next iteration.\n if len(images) == 0:\n continue\n image_num = images[0][0] if images[0][0] > 0 else \"\"\n image_ = \"image%s\" % image_num\n\n # get template flavor id\n flavors = [(idx, x) for idx, x in enumerate(self.set_of_flavors) if x.id == server.flavor[\"id\"]]\n flavor_num = flavors[0][0] if flavors[0][0] > 0 else \"\"\n flavor_ = \"flavor%s\" % flavor_num\n\n # get template keys\n keys = [(idx, x) for idx, x in enumerate(self.set_of_keys) if x == server.key_name]\n key_num = keys[0][0] if keys[0][0] > 0 else \"\"\n key_ = \"key_name%s\" % key_num\n\n # get template network info\n # novaclient.servers.interface_list(servers[3])[1]._info\n # instead of server.interface_list(server.id)\n # bug : github #1280453\n networks_ = []\n with self.suppress():\n ports = self.novaclient.servers.interface_list(server)\n\n for idx, port in enumerate(ports):\n networks_.append({\n \"port\": {\n \"get_resource\": \"%s_port%s\" % (server.name, idx)}\n })\n\n # add server definition\n data = {\"type\": \"OS::Nova::Server\",\n \"properties\": {\n \"name\": server.name,\n \"image\": {\"get_param\": image_},\n \"flavor\": {\"get_param\": flavor_},\n \"key_name\": {\"get_param\": key_},\n \"networks\": networks_\n }}\n\n # add user_data\n # the following line should be proper syntax according to\n # OpenStack's documentation. However Heat did not seem to like\n # it. 
So, we are not using the get_file param.\n # Creating stack from command line works, but does not seem to work\n # in horizon\n # see: http://docs.openstack.org/developer/heat/template_guide/hot_spec.html\n # data[\"properties\"][\"user_data\"] = {\"get_file\": user_data}\n\n try:\n case, user_data = self.gen_userdata(server.id)\n except:\n user_data = None\n if user_data is not None:\n if \"case3\" in case:\n data[\"properties\"][\"user_data_format\"] = \"RAW\"\n data[\"properties\"][\"user_data\"] = user_data\n\n self.compute_data[\"resources\"][server.name] = data\n\n # add server port information\n self.gen_port_resources(server, ports)\n\n # add floating ip information\n self.gen_floating_ip_resources(server)", "def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n 
self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data", "def _create_vports(self):\n vports = self._api.select_vports()\n imports = []\n for port in self._api.config.ports:\n if port.name not in vports.keys():\n index = len(vports) + len(imports) + 1\n imports.append({\n 'xpath': '/vport[%i]' % index,\n 'name': port.name,\n 'rxMode': 'captureAndMeasure',\n 'txMode': 'interleaved'\n })\n self._import(imports)\n for name, vport in self._api.select_vports().items():\n self._api.ixn_objects[name] = vport['href']", "def resources(self):", "def register_resources(self):\n raise NotImplementedError", "def port_factory_method(self):\n pass", "def interfaces(self):", "def interfaces(self):", "def port_nic():", "def target_interfaces(self):", "def target_interfaces(self):", "def source_interfaces(self):", "def source_interfaces(self):", "def gen_net_resources(self):\n\n print \"\\t* Adding net and subnet resources to compute template\"\n\n networks = self.neutronclient.list_networks()[\"networks\"]\n\n # filter all networks that match\n filtered_networks = [net for net in networks if (net[\"tenant_id\"] == self.tenant_id or\n (net[\"shared\"] == True) and net['router:external'] == False)]\n\n # obtain subnet information\n for network in filtered_networks:\n if network[\"shared\"] is not True:\n\n for subnet in network[\"subnets\"]:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n\n # save this information for router interfaces\n self.all_nets.append((subnet_info, \"%s\" % network[\"name\"], \"%s\" % subnet_info[\"name\"]))\n\n # generate private net\n data = {\"type\": \"OS::Neutron::Net\",\n \"properties\":\n {\"name\":\n {\"get_param\": \"%s_%s_name\" % (network[\"name\"], \"net\")}\n }\n }\n\n start_ = {\"get_param\": \"%s_%s_pool_start\" % (network[\"name\"], subnet_info[\"name\"])}\n\n data2 = {\"type\": \"OS::Neutron::Subnet\",\n \"properties\": {\n \"name\": subnet_info[\"name\"],\n \"network_id\": {\"get_resource\": \"%s\" % network[\"name\"]},\n \"cidr\": {\"get_param\": \"%s_%s_cidr\" % (network[\"name\"], subnet_info[\"name\"])},\n \"gateway_ip\": {\"get_param\": \"%s_%s_gateway\" % (network[\"name\"], subnet_info[\"name\"])},\n \"allocation_pools\": [\n {\"start\": start_, \"end\": {\"get_param\": \"%s_%s_pool_end\" % (network[\"name\"], subnet_info[\"name\"])}}\n ]\n }\n }\n self.compute_data[\"resources\"][\"%s\" % network[\"name\"]] = data\n self.compute_data[\"resources\"][\"%s\" % subnet_info[\"name\"]] = data2\n else:\n # add shared network to the full list of networks\n for subnet in network[\"subnets\"]:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n self.all_nets.append((subnet_info, \"%s\" % network[\"name\"], \"%s\" % subnet_info[\"name\"]))", "def add_router_interfaces(self):\n for subnet_name in self.router_data['properties']['networks'].keys():\n #print(subnet_name)\n interface = OrderedDict({\n str(self.router_name + '_interface_' + subnet_name): {\n 'type': 'OS::Neutron::RouterInterface',\n 'properties': {\n 'router_id': { 'get_resource': self.router_name },\n 'subnet_id': { 'get_resource': str(self.router_name + '_' + subnet_name) }\n } \n }\n })\n self.template['resources'].update(interface)", "def rr1_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_rr1_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = 
net_connect.send_config_set(cloud_rr1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n for device in ci_addrs.switches_rr2_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr') \n assign_ports_n5k34()", "def rr2_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_rr1_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n for device in ci_addrs.switches_rr2_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n assign_ports_n5k34()", "def get_output_hot(self):\n hot_cont = hot.HOT()\n prop = dict()\n\n # MARK: CAN be better... 
relative straight forward\n if self.sep_access_port:\n port_suffix = ('pt', 'pt_in', 'pt_out')\n else:\n port_suffix = ('pt_in', 'pt_out')\n for srv_grp in self.srv_grp_lst:\n for srv in srv_grp:\n networks = list()\n # Remote access, ingress and egress ports\n for suffix in port_suffix:\n port_name = '_'.join((srv['name'], suffix))\n prop = {\n 'name': port_name,\n 'network_id': self.network_id['net'],\n # A list of subnet IDs\n 'fixed_ips': [{'subnet_id': self.network_id['subnet']}],\n # TODO: Add support for security group\n # 'security_groups': [self.network_id['sec_grp']]\n }\n networks.append(\n {'port': '{ get_resource: %s }' % port_name})\n hot_cont.resource_lst.append(\n hot.Resource(port_name, 'port', prop))\n\n if self.fip_port:\n prop = {\n 'floating_network': self.network_id['public'],\n }\n if self.fip_port == 'pt':\n prop['port_id'] = '{ get_resource: %s }' % (\n srv['name'] + '_pt')\n elif self.fip_port == 'pt_in':\n prop['port_id'] = '{ get_resource: %s }' % (\n srv['name'] + '_pt_in')\n elif self.fip_port == 'pt_out':\n prop['port_id'] = '{ get_resource: %s }' % (\n srv['name'] + '_pt_out')\n else:\n raise ServerChainError('Invalid floating IP port!')\n\n hot_cont.resource_lst.append(\n hot.Resource(srv['name'] + '_fip', 'fip', prop))\n\n prop = {\n 'name': srv['name'],\n 'image': srv['image'],\n 'flavor': srv['flavor'],\n 'networks': networks\n }\n\n if srv.get('ssh', None):\n prop['key_name'] = srv['ssh']['pub_key_name']\n\n # MARK: Only test RAW bash script\n if srv.get('init_script', None):\n logger.debug('Read the init bash script: %s'\n % srv['init_script'])\n with open(srv['init_script'], 'r') as f:\n # MARK: | is needed after user_data\n prop.update(\n {'user_data': '|\\n' + f.read()}\n )\n\n hot_cont.resource_lst.append(\n hot.Resource(srv['name'], 'server', prop))\n\n return hot_cont.output_yaml_str()", "def config(self):\n self._resource_manager = self._api._ixnetwork.ResourceManager\n self._ixn_vport = self._api._vport\n self._delete_vports()\n self._create_vports()\n self._create_capture()\n self._set_location()\n self._set_layer1()", "def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def get_all_port(self, conf, dpid):\n\t\tpass", "def port_maker(self, platform):\n raise NotImplementedError()", "def main():\n\n # endpdoint = \"restconf/data/ietf-interfaces:interfaces\"\n # endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface={name}\"\n\n if len(argv) > 1:\n try:\n inventory = load_inventory(argv[1])\n except FileExistsError as err:\n print(\"FileExistsError: \", err)\n else:\n print(\"You must provide a path to your inventory file.\")\n sys.exit()\n\n r1 = inventory['dev-r1']\n loop = [interface for interface in r1[\"interface\"] if interface[\"name\"] == \"Loopback0\"][0]\n\n payload = render_payload(\n loop,\n \"interface.j2\"\n )\n\n session = create_session(r1[\"username\"], r1[\"password\"])\n endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface=Loopback0\"\n results = put_request(r1[\"host\"],session, endpoint, payload)\n print(results)\n\n save_endpoint = 
\"restconf/operations/cisco-ia:save-config/\"\n saved = save_config(r1[\"host\"], session, save_endpoint)\n\n # target_routers = [\"dev-r1\"]\n\n # for host_key, attribs in inventory.items():\n\n # if host_key in target_routers:\n # print(f\"configuring interfaces on {host_key}\")\n\n # # create a session imported from restconf_api\n # session = create_session(attribs)\n\n # # get all interfaces\n # results = get_interface(attribs, session, \"Loopback0\")\n\n # interface = results[\"ietf-interfaces:interface\"]\n\n # print(json.dumps(interface))\n # # convert to yaml\n # # yaml_output = yaml.safe_dump(results)\n # # with open(\"vars/interfaces.yml\", \"w\") as file:\n # # file.write(yaml_output)\n\n # # results = update_interfaces(attribs, session)\n # # print(results.text, results.status_code)\n\n # # print(get_interfaces(attribs, session))", "def create_and_bridge_virtual_interfaces(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current bridge and interface configuration\n print(\"\\nThis the current bridge configuration:\")\n VPPUtil.show_bridge(node)\n question = \"\\nWould you like to keep this configuration [Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n\n # Create a script that builds a bridge configuration with\n # physical interfaces and virtual interfaces\n ints_with_vints = self._create_vints_questions(node)\n content = \"\"\n for intf in ints_with_vints:\n vhoststr = \"\\n\".join(\n [\n \"comment { The following command creates the socket }\",\n \"comment { and returns a virtual interface }\",\n \"comment {{ create vhost-user socket \"\n \"/var/run/vpp/sock{}.sock server }}\\n\".format(intf[\"bridge\"]),\n ]\n )\n\n setintdnstr = \"set interface state {} down\\n\".format(intf[\"name\"])\n\n setintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"name\"], intf[\"bridge\"]\n )\n setvintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"virtualinterface\"], intf[\"bridge\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 up\n setintvststr = \"set interface state {} up\\n\".format(\n intf[\"virtualinterface\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 down\n setintupstr = \"set interface state {} up\\n\".format(intf[\"name\"])\n\n content += (\n vhoststr\n + setintdnstr\n + setintbrstr\n + setvintbrstr\n + setintvststr\n + setintupstr\n )\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))", "def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n 
net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' + static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' 
+ target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }", "def ibns_intf(task):\n # init lists of interfaces\n access_interfaces = []\n uplink_interfaces = []\n # iterate over all interfaces\n for intf in task.host[\"intfs\"]:\n\n # uplink interfaces\n if intf[\"interface\"] in task.host[\"uplinks\"]:\n uplink_interfaces.append(intf)\n\n # other non-excluded access ports\n elif intf[\"interface\"] not in task.host[\"excluded_intf\"]:\n if intf[\"access_vlan\"] in task.host[\"vlans\"]:\n access_interfaces.append(intf)\n\n # assign uplink interface list to task.host\n task.host[\"uplink_interfaces\"] = uplink_interfaces\n # render uplink interface configs\n uplink_intf_cfg = task.run(\n task=text.template_file,\n template=\"IBNS_uplink_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n # assign access interface list to task.host\n task.host[\"access_interfaces\"] = access_interfaces\n # render access interface configs\n access_intf_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS{task.host['ibns_ver']}_access_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n\n # init list of L3 vlan interfaces\n l3_vlan_int = [\"Vlan777\"]\n # list of vlan interfaces that will not relay\n no_relay_ints = [\"1\", \"666\", \"667\"]\n # iterate over active L3 interfaces\n for intf in task.host[\"ip_int_br\"]:\n # accept only those that are active vlan 
interfaces\n if intf[\"intf\"].startswith(\"Vlan\") == True and intf[\"status\"] == \"up\":\n # strip vlan id from interface name\n vlan_id = intf[\"intf\"].strip(\"Vlan\")\n # compare with list of no relay ints\n if vlan_id not in no_relay_ints:\n # add to list of interfaces for ISE DHPC relay\n l3_vlan_int.append(intf[\"intf\"])\n\n # save L3 vlan interfaces to task.host\n task.host[\"l3_vlan_int\"] = l3_vlan_int\n\n if \"emea\" in task.host['region']:\n L3VLAN_template = \"IBNS_EMEA_L3VLAN_intf.j2\"\n else:\n L3VLAN_template = \"IBNS_L3VLAN_intf.j2\"\n\n # render L3 vlan interface configs\n l3_vlan_int_cfg = task.run(\n task=text.template_file,\n template=L3VLAN_template,\n path=\"templates/\",\n **task.host,\n )\n\n # return configuration\n return uplink_intf_cfg.result + access_intf_cfg.result + l3_vlan_int_cfg.result", "def create_and_bridge_iperf_virtual_interface(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current bridge and interface configuration\n print(\"\\nThis the current bridge configuration:\")\n ifaces = VPPUtil.show_bridge(node)\n question = \"\\nWould you like to keep this configuration [Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n self._sockfilename = \"/var/run/vpp/{}.sock\".format(\n ifaces[0][\"name\"].replace(\"/\", \"_\")\n )\n if os.path.exists(self._sockfilename):\n continue\n\n # Create a script that builds a bridge configuration with\n # physical interfaces and virtual interfaces\n ints_with_vints = self._iperf_vm_questions(node)\n content = \"\"\n for intf in ints_with_vints:\n vhoststr = \"\\n\".join(\n [\n \"comment { The following command creates the socket }\",\n \"comment { and returns a virtual interface }\",\n \"comment {{ create vhost-user socket \"\n \"/var/run/vpp/sock{}.sock server }}\\n\".format(intf[\"bridge\"]),\n ]\n )\n\n setintdnstr = \"set interface state {} down\\n\".format(intf[\"name\"])\n\n setintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"name\"], intf[\"bridge\"]\n )\n setvintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"virtualinterface\"], intf[\"bridge\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 up\n setintvststr = \"set interface state {} up\\n\".format(\n intf[\"virtualinterface\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 down\n setintupstr = \"set interface state {} up\\n\".format(intf[\"name\"])\n\n content += (\n vhoststr\n + setintdnstr\n + setintbrstr\n + setvintbrstr\n + setintvststr\n + setintupstr\n )\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/create_iperf_vm\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))", "def ipv4_interface_setup(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current interfaces with IP addresses\n current_ints = VPPUtil.get_int_ip(node)\n if current_ints != {}:\n print(\"\\nThese are the current interfaces with IP addresses:\")\n for items in sorted(current_ints.items()):\n name = items[0]\n value = items[1]\n if \"address\" not in value:\n address = \"Not Set\"\n else:\n address = value[\"address\"]\n print(\"{:30} {:20} {:10}\".format(name, address, 
value[\"state\"]))\n question = \"\\nWould you like to keep this configuration \" \"[Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n else:\n print(\"\\nThere are currently no interfaces with IP \" \"addresses.\")\n\n # Create a script that add the ip addresses to the interfaces\n # and brings the interfaces up\n ints_with_addrs = self._ipv4_interface_setup_questions(node)\n content = \"\"\n for ints in ints_with_addrs:\n name = ints[\"name\"]\n addr = ints[\"addr\"]\n setipstr = \"set int ip address {} {}\\n\".format(name, addr)\n setintupstr = \"set int state {} up\\n\".format(name)\n content += setipstr + setintupstr\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/set_int_ipv4_and_up\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))" ]
[ "0.6734918", "0.61334604", "0.5956791", "0.59239084", "0.5892507", "0.57652533", "0.5649685", "0.56255925", "0.56255925", "0.5609952", "0.5608133", "0.5608133", "0.5590049", "0.5590049", "0.55450815", "0.5500373", "0.5474965", "0.54544276", "0.54491496", "0.5437252", "0.5414565", "0.5406117", "0.53953046", "0.5378229", "0.53773266", "0.5302074", "0.5294071", "0.52770096", "0.52720475", "0.5270354" ]
0.71936
0
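Similarly for this record: `gen_port_resources` emits one `OS::Neutron::Port` entry per interface, and the property shape depends on whether the network is shared (literal IDs) or tenant-owned (`get_resource` references). A minimal sketch of the non-shared branch, with placeholder server, network, and subnet names that are assumptions for illustration only:

```python
import yaml  # PyYAML

server_name, net, subnet = "server1", "private_net", "private_subnet"  # placeholders
resources = {
    "%s_port0" % server_name: {
        "type": "OS::Neutron::Port",
        "properties": {
            # Tenant-owned network: reference resources defined in the same template.
            "network_id": {"get_resource": net},
            "fixed_ips": [{"subnet_id": {"get_resource": subnet}}],
        },
    }
}
print(yaml.safe_dump({"resources": resources}, default_flow_style=False))
```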
Generate a yaml file of the heat data
def gen_heat_template(self): print "\t* Generating heat template in file: %s" % self.heat_filename if self.cmdline: with open(self.heat_filename, 'w') as f: f.write(yaml.safe_dump(self.heat_template)) try: self.heatclient.stacks.validate(template=yaml.safe_dump(self.heat_template)) except Exception as e: print "Unfortunately your file is malformed. Received error: (%s)" % str(e) print "Exiting ..." sys.exit(1) return self.heat_template
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_heatmap(data, labels_dict, file_title, plot_title):\n\n fig = plt.figure()\n ax = sn.heatmap(data,\n linewidths=0.3)\n figure = ax.get_figure()\n\n if labels_dict:\n ax.set_xlabel(labels_dict[\"x\"])\n ax.set_ylabel(labels_dict[\"y\"])\n if plot_title:\n ax.set_title(plot_title)\n\n figure.savefig(file_title)", "def user_create_yaml(self):\n pass", "def generateYaml(self):\n master = self.system.master\n master_out = {'sent':master.sent, 'dropped':master.dropped, 'late':master.late}\n\n devices_out = {}\n for dev in self.system.devies:\n dev_out = {'valid':dev.valid, 'position':dev.ring_position }\n dev_out['epu_errors']=dev.epu_errors\n dev_otu['pdi_errors']=dev.pdi_errors\n for port in dev.ports:\n port_out = {'rx_errors':port.rx_errors}\n port_out['forwarded_rx_errors'] = port.forwarded_rx_errors\n port_out['frame_errors'] = port.frame_errors\n port_out['lost_links'] = port.lost_links\n devices_out[dev.name] = dev_out\n out['devices'] = devices_out\n\n out['master':master_out, 'devices':devices_out]\n out['devices'] = device_out\n \n timestamp = self.getTimestamp()\n if self.timestamp_old is not None:\n duration = timestamp - self.timestamp_old\n out['duration'] = prettyDuration(duration)\n else:\n out['date'] = prettyTimestamp(timestamp)\n out['ros_time'] = {'secs':timestamp.secs, 'nsecs':timestamp.nsecs}\n\n return out", "def generate(self, data) -> str:\n yaml_dump_params: Dict[Any, Any] = {'default_flow_style': None, 'sort_keys': False}\n if isinstance(data, list):\n return yaml.dump_all(data, Dumper=YamlDumper(self.kg), **yaml_dump_params)\n return yaml.dump(data, Dumper=YamlDumper(self.kg), **yaml_dump_params)", "def gen_heat_data(self):\n\n print \"\\t* Generating heat data\"\n self.gen_heat_client()\n stacks = self.heatclient.stacks\n\n print \"\\t? 
Please select the stack to generate a template from\"\n # list stacks and prompt user to select apropriate stack template\n stack_list = []\n for idx, stack in enumerate(stacks.list()):\n print \"\\t - [%d] Stack: %s \\n\" % (idx, stack.stack_name)\n stack_list.append(stack)\n\n stack_num = int(raw_input(\"\\t - \"))\n\n print \"\\t* You have selected: %s\" % stack_list[stack_num].stack_name\n\n # stack id\n self.heat_template = stacks.template(stack_list[stack_num].id)", "def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)", "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n plt.figure()\n ax = sns.heatmap(\n data,\n cmap='RdBu',\n xticklabels=2,\n yticklabels=2)\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n ax.invert_yaxis()\n ax.axhline(linewidth=4, color='black')\n ax.axvline(linewidth=4, color='black')\n ax.collections[0].colorbar.set_label('Fitness')\n plt.savefig('figures/Voltage Clamp Figure/Single VC Optimization/'\n 'heatmap.svg')", "def FCCoutputYAML(a0):\n FCC = crystal.Crystal.FCC(a0)\n FCCinter = FCC.addbasis(FCC.Wyckoffpos(np.array([0.5,0.5,0.5])) +\n FCC.Wyckoffpos(np.array([0.25,0.25,0.25])))\n # this cutoffs is for: o->t\n cutoff = 0.5*a0\n return HeaderString + \\\n FCCinter.simpleYAML(a0) + \\\n OnsagerCalc.Interstitial.sitelistYAML(FCCinter.sitelist(1)) + \\\n OnsagerCalc.Interstitial.jumpnetworkYAML(FCCinter.jumpnetwork(1, cutoff))", "def make_template(filenames):\n result = {}\n for fn in filenames:\n with open(fn) as f:\n conf = yaml.load(f)\n expand_horizons(result, conf)\n return result", "def dump(filename: Path) -> None:\n import yaml\n\n dumped_str = yaml.dump_all(\n [data_dict],\n Dumper=RegressionYamlDumper,\n default_flow_style=False,\n allow_unicode=True,\n indent=2,\n encoding=\"utf-8\",\n )\n with filename.open(\"wb\") as f:\n f.write(dumped_str)", "def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF", "def yaml(self):\n return str(self.data)", "def create_yaml_files(target_directory, data):\n import os\n import yaml\n\n for k, v in data.items():\n file = os.path.join(target_directory, '.'.join((k, \"yml\")))\n\n with open(file, 'w') as yaml_file:\n yaml.dump(v, yaml_file, default_flow_style=False)", "def HCPoutputYAML(a0, c_a, z=1./8.):\n # Note: alternatively, we could construct our HCP crystal *then* generate Wyckoff positions\n hexlatt = a0*np.array([[0.5, 0.5, 0],\n [-np.sqrt(0.75), np.sqrt(0.75), 0],\n [0, 0, c_a]])\n hcpbasis = [[np.array([1./3.,2./3.,1./4.]),np.array([2./3.,1./3.,3./4.])],\n 
[np.array([0.,0.,0.]), np.array([0.,0.,0.5]),\n np.array([1./3.,2./3.,3./4.-z]), np.array([1./3.,2./3.,3./4.+z]),\n np.array([2./3.,1./3.,1./4.-z]), np.array([2./3.,1./3.,1./4.+z])]]\n HCP = crystal.Crystal(hexlatt, hcpbasis)\n # these cutoffs are for: o->t, t->t, o->o along c\n # (preferably all below a). The t->t should be shortest, and o->o the longest\n cutoff = 1.01*a0*max(np.sqrt(1./3.+c_a**2/64), 2*z*c_a, 0.5*c_a)\n if __debug__:\n if cutoff > a0: raise AssertionError('Geometry such that we will include basal jumps')\n if np.abs(z) > 0.25: raise AssertionError('Tetrahedral parameter out of range (>1/4)')\n if np.abs(z) < 1e-2: raise AssertionError('Tetrahedral parameter out of range (approx. 0)')\n return HeaderString + \\\n HCP.simpleYAML(a0) + \\\n OnsagerCalc.Interstitial.sitelistYAML(HCP.sitelist(1)) + \\\n OnsagerCalc.Interstitial.jumpnetworkYAML(HCP.jumpnetwork(1, cutoff))", "def store_as_yaml(dataset, dataset_file):\n\n with open(dataset_file, 'w') as outfile:\n yaml.safe_dump(dataset, outfile, default_flow_style=False)", "def DumpYaml(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n text = yaml.safe_dump(data)\n \n return text", "def heatmap(filename, data):\n\n fig, ax = ppl.subplots(1)\n ppl.pcolormesh(fig, ax, data, vmin=-0.0016, vmax=0.0016)\n fig.savefig(filename + \".png\")", "def toyaml(self,ymlfile,Kname=\"rgb_intrinsics\",dname=\"rgb_distortion\",sname=(\"image_width\",\"image_height\")):\n q = dict()\n q[Kname] = self.K\n q[dname] = self.dist\n if type(sname) is tuple:\n q[sname[0]] = self.size[0]\n q[sname[1]] = self.size[1]\n else:\n q[sname] = self.size\n\n if ymlfile == \"\" or ymlfile is None:\n return yaml.dumps(q)\n elif type(ymlfile) is str:\n ymlfile = open(ymlfile,\"wb\")\n yaml.dump(q,ymlfile)", "def create_data(self) -> str:\r\n s = self.scale\r\n mini, maxi = self.get_min_max()\r\n diff = maxi - mini\r\n\r\n output = \"const data = {\\n\"\r\n\r\n # Create the data for the scatters\r\n # TODO: If it's not interactive, labels shouldn't be exported.\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n colormaps = self.scatters[name][\"colormap\"]\r\n cmaps = [None] * len(colormaps)\r\n\r\n for i, colormap in enumerate(colormaps):\r\n if isinstance(colormap, str):\r\n cmaps[i] = plt.cm.get_cmap(colormap)\r\n else:\r\n cmaps[i] = colormap\r\n\r\n output += name + \": {\\n\"\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"labels\"] in data:\r\n fmt_labels = [\"'{0}'\".format(s) for s in data[mapping[\"labels\"]]]\r\n output += \"labels: [\" + \",\".join(fmt_labels) + \"],\\n\"\r\n\r\n if mapping[\"s\"] in data:\r\n output += \"s: [\"\r\n\r\n for series in range(len(data[mapping[\"s\"]])):\r\n output += (\r\n \"[\"\r\n + \",\".join(map(str, np.round(data[mapping[\"s\"]][series], 3)))\r\n + \"],\\n\"\r\n )\r\n\r\n output += \"],\\n\"\r\n\r\n output += \"colors: [\\n\"\r\n for series in range(len(data[mapping[\"c\"]])):\r\n output += \"{\\n\"\r\n if mapping[\"cs\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in 
data[mapping[\"c\"]][series]]\r\n )\r\n\r\n for i, c in enumerate(colors):\r\n hsl = np.array(colour.rgb2hsl(c[:3]))\r\n hsl[1] = hsl[1] - hsl[1] * data[mapping[\"cs\"]][series][i]\r\n colors[i] = np.append(np.array(colour.hsl2rgb(hsl)), 1.0)\r\n\r\n colors = np.round(colors * 255.0)\r\n\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n elif mapping[\"c\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in data[mapping[\"c\"]][series]]\r\n )\r\n colors = np.round(colors * 255.0)\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n output += \"},\\n\"\r\n\r\n output += \"]\"\r\n output += \"},\\n\"\r\n\r\n for name, data in self.trees_data.items():\r\n mapping = self.trees[name][\"mapping\"]\r\n point_helper = self.trees[name][\"point_helper\"]\r\n\r\n output += name + \": {\\n\"\r\n\r\n if point_helper is not None and point_helper in self.scatters_data:\r\n scatter = self.scatters_data[point_helper]\r\n scatter_mapping = self.scatters[point_helper][\"mapping\"]\r\n\r\n x_t = []\r\n y_t = []\r\n z_t = []\r\n\r\n for i in range(len(data[mapping[\"from\"]])):\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"from\"]][i]])\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"to\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"from\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"to\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"from\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"to\"]][i]])\r\n\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in x_t]\r\n output += f\"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in y_t]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in z_t]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n else:\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"c\"] in data:\r\n colormap = self.trees[name][\"colormap\"]\r\n cmap = None\r\n if isinstance(colormap, str):\r\n cmap = plt.cm.get_cmap(colormap)\r\n else:\r\n cmap = colormap\r\n\r\n colors = np.array([cmap(x) for x in data[mapping[\"c\"]]])\r\n colors = np.round(colors * 255.0)\r\n output += \"r: [\" + \",\".join(map(str, colors[:, 0])) + \"],\\n\"\r\n output += \"g: [\" + \",\".join(map(str, colors[:, 1])) + \"],\\n\"\r\n output += \"b: [\" + \",\".join(map(str, colors[:, 2])) + \"],\\n\"\r\n\r\n output += \"},\\n\"\r\n\r\n output += \"};\\n\"\r\n\r\n return output", "def to_yaml(cls,dumper,self):\n #self.__modelData['ids'] = self.__mapObj.ids\n self.__modelData['ids'] = 
','.join(map(str,self.__mapObj.ids))\n\n ##GENERATE Overview\n old_size = self.__size\n self.__mapObj.size = PREVIEW_SIZE\n typ,dat,width,height = processOverview(self.__mapObj.png)\n self.__modelData['overview_typ'] = typ\n self.__modelData['overview_dat'] = dat\n self.__modelData['overview_width'] = width\n self.__modelData['overview_height'] = height\n self.__mapObj.size = old_size\n #END Overview\n\n node = dumper.represent_mapping(cls.yaml_tag,self.__modelData)\n self.SetModified(False)\n return node", "def to_content(cls, data: Mapping) -> str:\n cls._check_yaml()\n s = yaml.safe_dump(data, default_flow_style=False)\n s = '---\\n' + s\n return s", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "def yaml(self):\n raise NotImplementedError", "def to_yaml(cls, dumper, data):\n\t\tdict_rep = {'location':data._location, 'startFrame':data._startFrame,\n\t\t\t\t\t'endFrame':data._endFrame, 'camera':data._camera}\n\n\t\tprint(dict_rep)\n\n\t\tnode = dumper.represent_mapping(cls.yaml_tag, dict_rep)\n\t\treturn node", "def render(self, data_dict, template=None, **kw):\n LOG.debug(\"rendering output as yaml via %s\" % self.__module__)\n return yaml.dump(data_dict, **kw)", "def write_init_file(name, data, path=\"\"):\n\n # find the resource and exclude it from the file\n data = data.copy()\n\n # Removes the Visa resource if needed\n try:\n data.remove(\"Visa_Resource\")\n except:\n pass\n\n if os.path.isfile(os.path.abspath(str(path) + str(name.split(\".\")[0]) + \".yaml\")):\n\n os.remove(os.path.abspath(path + str(name.split(\".\")[0]) + \".yaml\"))\n filename, version = create_new_file(\n str(name.split(\".\")[0]), path, os_file=False, suffix=\".yaml\"\n )\n yaml.dump(data, filename, indent=4)\n close_file(filename)\n\n elif not os.path.isfile(os.path.abspath(path + str(name.split(\".\")[0]) + \".yaml\")):\n\n # directory = path[:len(path) - len(path.split(\"/\")[-1])]\n\n filename, version = create_new_file(\n str(name.split(\".\")[0]), path, os_file=False, suffix=\".yaml\"\n )\n\n yaml.dump(data, filename, indent=4)\n\n close_file(filename)\n\n # Debricated\n # for items in data.items():\n # if type(items[1]) != type([]):\n # string = str(items[0]) + \" = \\\"\" + str(items[1]) + \"\\\"\\n\"\n # os.write(filename, str(string))\n # else:\n # string = str(items[0]) + \" = \\\"\"\n # for i in items[1]:\n # string += str(i).strip(\"'\").strip(\"[\").strip(\"]\") + \",\"\n # string = string[:-1]\n # string += \"\\\"\\n\"\n # print string\n # os.write(filename, string)\n\n else:\n return -1", "def create_yaml_output(trace_data, yaml_output_file):\n import yaml\n scripts = sorted(set([td[1] for td in trace_data]))\n yml_data = {} # script -> inputs, outputs\n for direction, script, locator, path, file in trace_data:\n yml_data[script] = yml_data.get(script, {'input': [], 'output': []})\n yml_data[script][direction].append((locator, path, file))\n for script in scripts:\n yml_data[script]['input'] = sorted(yml_data[script]['input'])\n yml_data[script]['output'] = sorted(yml_data[script]['output'])\n\n if os.path.exists(yaml_output_file):\n # merge existing data\n with open(yaml_output_file, 'r') as f:\n old_yml_data = yaml.load(f)\n for script in old_yml_data.keys():\n if not script in scripts:\n # make sure not to overwrite newer data!\n yml_data[script] = 
old_yml_data[script]\n\n with open(yaml_output_file, 'w') as f:\n yaml.dump(yml_data, f, default_flow_style=False)", "def report(name):\n # conf = None\n # with open('./readset.yaml','r') as f:\n # conf = yaml.load(f,Loader=yaml.FullLoader)\n print(\"start\")\n data = dataset(name)\n data.load_anatations()\n data.error_calulation(0.3)\n data.error_t_test()\n data.create_latex_img_table(3)\n print(\"end\")", "def make_data(type):\n data[\"skytap_\" + type + \"_status\"] = \"good\"\n if is_valid_yaml(data[\"skytap_\" + type], type):\n if len(data[\"skytap_\" + type]) == 0:\n data[\"skytap_\" + type + \"_status\"] = \"empty\"\n else:\n yamlUserData = yaml.load(data[\"skytap_\" + type])\n for n in yamlUserData:\n data[\"skytap_\" + type + \"_\" + n] = yamlUserData[n]" ]
[ "0.6449608", "0.628593", "0.61868024", "0.61316335", "0.5987216", "0.584769", "0.5845132", "0.57526094", "0.57081187", "0.56917256", "0.56857705", "0.56769764", "0.5676154", "0.56494623", "0.56241596", "0.56075615", "0.56025314", "0.55984604", "0.55886304", "0.55846953", "0.5582934", "0.55731004", "0.5565688", "0.5530643", "0.54782647", "0.545518", "0.544623", "0.54291785", "0.5412058", "0.5399926" ]
0.65655005
0
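The positive document for "Generate a yaml file of the heat data" dumps an already-fetched stack template to a file with yaml.safe_dump and then asks Heat to validate the same YAML before returning the template. A Python 3 sketch of that flow is shown below; the heatclient object, heat_template dict, and filename are taken as given, exactly as in the snippet, and the validate call simply mirrors the snippet's heatclient.stacks.validate(template=...) usage.

import sys
import yaml

def gen_heat_template(heatclient, heat_template, heat_filename):
    # Sketch of the retrieved snippet: render the template as YAML, write it
    # out, and have Heat validate it before returning the template dict.
    print("\t* Generating heat template in file: %s" % heat_filename)
    rendered = yaml.safe_dump(heat_template)
    with open(heat_filename, "w") as f:
        f.write(rendered)
    try:
        # Mirrors the snippet's heatclient.stacks.validate(template=...) call.
        heatclient.stacks.validate(template=rendered)
    except Exception as e:
        print("Unfortunately your file is malformed. Received error: (%s)" % str(e))
        print("Exiting ...")
        sys.exit(1)
    return heat_template

The only intentional change from the snippet is the print function syntax, since the original is Python 2.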
Load the queries from the given file and tokenize them
def load_queries(self, file): queries = [] with open(file, 'r') as f: for line in f: reg_match = re.match(r'^(\d+).(.*)', line) tokens = self.es_helper.get_tokens(reg_match.group(2).strip()) queries.append(Query(reg_match.group(1).strip(), self.es_helper, tokens)) self.queries = queries
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queries_from(self, sql_file, delimiter=\";\"):\n with open(sql_file, \"r\") as file:\n # Split by delimiter\n queries = file.read().split(delimiter)\n # pop the empty line at the end of the file\n queries.pop()\n return queries", "def execute_queries_from_file(self, file_name, file_path=test_data_path):\n if file_path:\n with open(file_path + file_name, 'rb') as file:\n query = sqlalchemy.sql.text(file)\n else:\n with open(file_name, 'rb') as file:\n query = sqlalchemy.sql.text(file)\n self.execute_query(query)\n return self", "def run_sql_from_file(conn, path, replace={}):\n with open(path, 'r') as f:\n query = [s.strip() + ';' for s in f.read().split(';')[:-1]]\n for s in query:\n for k, v in replace.items():\n s = s.replace(k, v)\n run_sql_from_string(conn, s)", "def init_query():\n file_path = Config.config['init_files']['query_file']\n with open(file_path) as file:\n for line in file:\n # ignore empty line\n if line == '\\n':\n continue\n yield Query(line)", "def get_sqls_from_file(self, sql_file):\n sql_list = []\n with open(sql_file, 'r') as f:\n for line in f.readlines():\n sql_list.append(line)\n return sql_list", "def load_query(query_filename):\n with open(query_filename) as f:\n return f.read()", "def _load_file(file_path: str) -> List[str]:\n with open(file_path, 'r') as f:\n file_content = f.read()\n\n # Removes all comments\n str_without_comments = re.sub('\\/\\*[\\s\\S]*?\\*\\/|([^\\\\:]|^)\\/\\/.*$', '', file_content, flags=re.MULTILINE)\n str_without_new_lines = str_without_comments.lstrip('\\n').replace('\\n', ' ').replace('\\t', ' ') # Remove new lines and tabs\n return JackTokenizer._split_keep_seperators(str_without_new_lines) # Splits the string by symbols and spaces", "def find_queries(file_contents):\n # Remove comments -- and /* */\n cleaned_contents = re.sub(SINGLE_COMMENT, \" \", file_contents)\n cleaned_contents = re.sub(BLOCK_COMMENT, \" \", cleaned_contents, flags=re.DOTALL)\n\n # Split according to ; and remove unnecessary whitespace\n split_queries = cleaned_contents.split(\";\")\n return [re.sub(\"\\s+\", \" \", query).strip() + \";\" for query in split_queries]", "def tokenize(self, path, build_dict=False, thd=0):\n\n assert os.path.exists(path)\n\n if build_dict:\n # Add words to the dictionary\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\n if thd > 1:\n self.dictionary.rebuild_by_freq(thd)\n\n # Tokenize file content\n ids_list = []\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n ids_list.append(ids)\n\n return ids_list", "def read_sql_from_file(self, filename):\n tmpLines = ''\n logger.info(\"Reading from {}\".format(filename))\n\n with open(filename, 'r') as fh:\n tmpLines = fh.readlines()\n \n sqlquery = \"\".join(tmpLines)\n cursor = self.conn.cursor()\n\n try:\n cursor.execute(sqlquery)\n except Exception as e:\n logger.info(e)\n sys.exit(1)\n return", "def train(self, filename):\n with open(filename, 'r') as f:\n phrases_and_words = []\n\n for index, line in enumerate(f):\n # decoding, since input is not unicode\n cleaned_line = self.get_cleaned_line(line.decode('utf-8', 'ignore'))\n\n if cleaned_line:\n phrases_and_words.extend(self.get_phrase_and_words_from_line(cleaned_line))\n\n if index % 10000 == 0:\n self.db_storage.store_phrases_and_words(phrases_and_words)\n phrases_and_words = []\n\n self.db_storage.store_phrases_and_words(phrases_and_words)", "def 
_load_tokens(self, filename):\n self.tokens = dict()\n with open(filename, 'r') as f:\n for line in f.readlines():\n raw = line.strip().split('\\t')\n self.tokens[raw[0]] = int(raw[1])", "def tokenize(self, path):\n assert os.path.exists(path)\n tokens = 0\n maxLen = 0\n # Find code path and create dictionary\n with open(path, 'r') as f:\n for i, line in enumerate(f):\n filename = line.strip()\n code_path = RAW_DATA_PATH + filename\n assert os.path.exists(code_path)\n try:\n with open(code_path, 'r') as code_f:\n code = code_f.read()\n if len(code) > 100000:\n continue\n kwargs = {'vocab':self.vocab}\n words = tokenizer.tokenize_wrapper(code, **kwargs)\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n except:\n pass\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n filename = line.strip()\n code_path = RAW_DATA_PATH + filename\n assert os.path.exists(code_path)\n try:\n with open(code_path, 'r') as code_f:\n code = code_f.read()\n if len(code) > 100000:\n continue\n kwargs = {'vocab':self.vocab}\n words = tokenizer.tokenize_wrapper(code, **kwargs)\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n self.tic_marks.append(len(words))\n except Exception as e:\n #raise e\n pass\n return ids", "def process_query(query_file):\r\n query_data = query_file.readlines()\r\n query_dict = {}\r\n x = 1 \r\n search_dict = {}\r\n search_dict['username'] = query_data[x].strip('\\n')\r\n x += 1\r\n operation_list = []\r\n \r\n while query_data[x] != 'FILTER\\n': \r\n operation_list.append(query_data[x].strip('\\n'))\r\n x += 1\r\n \r\n search_dict['operations'] = operation_list \r\n query_dict['search'] = search_dict \r\n x += 1\r\n \r\n filter_dict = {}\r\n filter_format(filter_dict, query_data, 'name-includes', x)\r\n filter_format(filter_dict, query_data, 'location-includes', x)\r\n filter_format(filter_dict, query_data, 'follower', x)\r\n filter_format(filter_dict, query_data, 'following', x)\r\n query_dict['filter'] = filter_dict\r\n \r\n present_dict = {}\r\n sort_by = query_data[-2].strip('sort-by ')\r\n present_dict['sort-by'] = sort_by.strip('\\n')\r\n \r\n format_type = query_data[-1].lstrip('format ')\r\n present_dict['format'] = format_type\r\n query_dict['present'] = present_dict\r\n \r\n return query_dict", "def tokenize(self, path):\n assert os.path.exists(path), path\n # Add words to the dictionary\n with open(path, 'r') as f:\n # TODO: joblib\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in tqdm(f):\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids", "def parse(self):\n self.statements = []\n with open(self.filename, \"r\", encoding=\"cp1250\", newline='') as f:\n for line in csv.DictReader(fix_quotes(f), escapechar='\\\\'):\n self._parse_line(line)\n\n return self.statements", "def _read_statements(filename):\n with open(filename, 'r') as f:\n lines = f.readlines()\n # on unix, no windows compatibility not implemented\n statements = ''.join(lines).split('\\n\\n')\n return statements", "def process_query (file):\n\n # initialize all the dictionaries and lists we will be using\n query_data = {}\n query_data ['search'] = {'operations':[]}\n query_data ['filter'] = {}\n query_data 
['present'] = {}\n\n temp = ''\n\n file.readline() # for when the file says SEARCH\n\n query_data ['search']['username'] = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'FILTER': # go until the the filter section\n query_data ['search']['operations'].append (temp)\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'PRESENT': # go until the present section\n # we make the key everything from the beginning to the first space\n # then the value is everything after the first space\n query_data ['filter'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != '': # go until the end of the file\n # same process as the previous while loop\n query_data ['present'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n return query_data", "def tokenize(self, path):\n assert os.path.exists(path)\n with open(path, 'r') as f:\n sentences = []\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences.append(sentence.split())\n self.data = sentences", "def _tokenize(self, fileName):\n debug.show(\"Tokenizing...\")\n try:\n f = open(fileName, 'r')\n code = f.read()\n except IOError:\n sys.exit('Error: Bad input file')\n f.close()\n pattern = '/?[a-zA-Z][a-zA-Z0-9_]*|[-]?[0-9]+|[}{]|%.*|[^\\t\\n ]'\n return re.findall(pattern, code)", "def get_data_query(file_name):\n with open(file_name, 'r') as graphql_query:\n return graphql_query.read()", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r', encoding=\"utf8\") as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r', encoding=\"utf8\") as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r', encoding=\"utf8\") as f:\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r', encoding=\"utf8\") as f:\n idss = []\n for line in f:\n words = line.split() + ['<eos>']\n ids = []\n for word in words:\n 
ids.append(self.dictionary.word2idx[word])\n idss.append(torch.tensor(ids).type(torch.int64))\n ids = torch.cat(idss)\n\n return ids", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split()\n tokens += len(words)\n for word in words:\n if word in self.vocabulary:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split()\n for word in words:\n if word in self.vocabulary:\n ids[token] = self.dictionary.word2idx[word]\n else:\n ids[token] = self.dictionary.word2idx['<unk>']\n token += 1\n return ids", "def tokenize(self, path, fname):\n assert fname in {\"train.txt\", \"valid.txt\", \"test.txt\"}\n fh = os.path.join(path, fname)\n # Add words to the dictionary\n with open(fh, \"r\", encoding=\"utf8\") as f:\n for line in f:\n words = line.split() + [\"<eos>\"]\n for word in words:\n self.dictionary.add_word(word)\n # Tokenize file content\n with open(fh, \"r\", encoding=\"utf8\") as f:\n idss = []\n for line in f:\n words = line.split() + [\"<eos>\"]\n ids = []\n for word in words:\n ids.append(self.dictionary.word2idx[word])\n idss.append(torch.tensor(ids).type(torch.int64))\n ids = torch.cat(idss)\n return ids", "def parse_results_from_file(fname):\n for l in open(fname,\"r\"):\n fields=l.split()\n query_name=fields[0]\n ranks=[int(rank) for rank in fields[1::2]]\n yield (query_name, list(zip(ranks,fields[2::2])) )", "def open (self, sql_file):\n fd = open(sql_file, 'r')\n sql = fd.read()\n fd.close()\n self.sql = sql.replace(UTF_8_STR, \"\")", "def parse_data_file(\n data_file: Path,\n data_params: DatasetParams,\n query_tokenizer: TokenizerRecordable,\n per_code_language_tokenizers: Dict[str, TokenizerRecordable],\n) -> Tuple[str, int, Samples]:\n\n logger.info(f\"Reading samples from {data_file}\")\n filename = os.path.basename(data_file)\n file_language = filename.split(\"_\")[0]\n\n samples = list(read_file_samples(data_file))\n\n ds: List[Dict[str, Union[str, int, np.ndarray]]] = []\n for raw_sample in samples:\n language = raw_sample[\"language\"]\n if language.startswith(\"python\"): # In some datasets, we use 'python-2.7' and 'python-3'\n language = \"python\"\n\n if language != file_language:\n logger.error(f\"file with different language {language} from filename {file_language}\")\n sys.exit(f\"file with multiple language {language} from filename {file_language}\")\n\n # the load_data_from_sample method call places processed data into sample, and\n # returns a boolean flag indicating if sample should be used\n function_name = raw_sample.get(\"func_name\")\n data_code = load_data_from_sample(\n language=language,\n encoder_label=\"code\",\n data_to_load=raw_sample[\"code_tokens\"],\n function_name=function_name,\n tokenizer=per_code_language_tokenizers[language],\n fraction_using_func_name=data_params.fraction_using_func_name,\n min_len_func_name_for_query=data_params.min_len_func_name_for_query,\n use_subtokens=data_params.use_subtokens,\n mark_subtoken_end=data_params.mark_subtoken_end,\n max_num_tokens=data_params.code_max_num_tokens,\n )\n\n data_query = load_data_from_sample(\n language=language,\n encoder_label=\"query\",\n data_to_load=[d.lower() for d in raw_sample[\"docstring_tokens\"]],\n function_name=function_name,\n tokenizer=query_tokenizer,\n fraction_using_func_name=data_params.fraction_using_func_name,\n 
min_len_func_name_for_query=data_params.min_len_func_name_for_query,\n use_subtokens=data_params.use_subtokens,\n mark_subtoken_end=data_params.mark_subtoken_end,\n max_num_tokens=data_params.query_max_num_tokens,\n )\n\n if data_code is not None and data_query is not None:\n d = {\"language\": language, \"similarity\": 1, **data_code, **data_query}\n ds.append(d)\n\n logger.debug(f\"Parsed file {data_file}: language {file_language} [{len(ds)} samples]\")\n\n return (file_language, len(ds), ds)" ]
[ "0.6716595", "0.62873846", "0.61769223", "0.61609787", "0.61478555", "0.61353004", "0.6041882", "0.5998857", "0.5988838", "0.5981553", "0.5966084", "0.5951582", "0.59339494", "0.5924149", "0.5920559", "0.59034365", "0.5903293", "0.588091", "0.58698344", "0.5858286", "0.5839429", "0.5838354", "0.58382636", "0.58382636", "0.58338565", "0.58065414", "0.5805663", "0.5779358", "0.5769778", "0.5749392" ]
0.79963386
0
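The matched document for "Load the queries from the given file and tokenize them" parses lines of the form "<id>. <query text>" with a regex and hands the text to an Elasticsearch helper for analysis. The sketch below keeps the same parsing step but substitutes a plain lowercase whitespace split for es_helper.get_tokens and returns (id, tokens) pairs instead of Query objects; it also escapes the dot after the id, which the original's pattern leaves as a wildcard.

import re

def load_queries(path):
    # Sketch: read "<id>. <text>" lines and return (query_id, tokens) pairs.
    # A simple lowercase split stands in for the ES analyzer used above.
    queries = []
    with open(path, "r") as f:
        for line in f:
            match = re.match(r"^(\d+)\.\s*(.*)", line)
            if not match:
                continue  # skip lines that do not carry a numbered query
            query_id = match.group(1).strip()
            tokens = match.group(2).strip().lower().split()
            queries.append((query_id, tokens))
    return queries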