Dataset columns:
- query: string (lengths 9 to 9.05k)
- document: string (lengths 10 to 222k)
- metadata: dict
- negatives: sequence (length 30)
- negative_scores: sequence (length 30)
- document_score: string (lengths 4 to 10)
- document_rank: string (2 classes)
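The rows below follow this schema. As a rough illustration only (the dataset path in the call is a placeholder, not something stated here), a layout like this can be iterated with the `datasets` library:

from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")  # placeholder path, substitute the real one
row = ds[0]
print(row["query"])             # natural-language description of an operator mapping
print(row["document"][:80])     # the matching (positive) code snippet
print(len(row["negatives"]))    # 30 hard-negative code snippets
print(row["negative_scores"][:3], row["document_score"], row["document_rank"])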
Map MXNet's multinomial operator attributes to onnx's Multinomial operator and return the created node.
def convert_multinomial(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get("dtype", 'int32'))]
    sample_size = convert_string_to_list(attrs.get("shape", '1'))
    if len(sample_size) < 2:
        sample_size = sample_size[-1]
    else:
        raise AttributeError("ONNX currently supports integer sample_size only")
    node = onnx.helper.make_node(
        "Multinomial",
        input_nodes,
        [name],
        dtype=dtype,
        sample_size=sample_size,
        name=name,
    )
    return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node", "def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node", "def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node", "def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\n new_node = Op.__call__(self)\n new_node.matmul_attr_trans_A = trans_A\n new_node.matmul_attr_trans_B = trans_B\n new_node.inputs = [node_A, node_B]\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\n return new_node", "def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node", "def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def multinomial(self, size=None, n=1, pvals=[0.5, 0.5], ndim=None,\r\n dtype='int64'):\r\n return self.gen(multinomial, size, n, pvals, ndim=ndim, dtype=dtype)", "def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node", "def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]", "def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node", "def 
convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def node_mp(tup):\n return node_sp(*tup)", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.multinomial((20,20), 1, [0.1]*10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def multinomial(random_state, size=None, n=1, pvals=[0.5, 0.5],\r\n ndim=None, dtype='int64'):\r\n n = tensor.as_tensor_variable(n)\r\n pvals = tensor.as_tensor_variable(pvals)\r\n # until ellipsis is implemented (argh)\r\n tmp = pvals.T[0].T\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, tmp)\r\n bcast = bcast + (pvals.type.broadcastable[-1],)\r\n op = RandomFunction(multinomial_helper,\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast),\r\n ndim_added=1)\r\n return op(random_state, size, n, pvals)", "def parseNodeUsingClass(cls, multElement, xPath, linkData, **kwargs):\n\n xPath.append( multElement.tag )\n\n multiplicityComponent = cls()\n\n formClasses = {}\n for formClass in [ Unspecified, Constant1d, XYs1d, Regions1d, Reference, Polynomial1d, PartialProduction, Gridded1d, Branching1d ] :\n formClasses[formClass.moniker] = formClasses\n for form in multElement :\n formClass = formClasses.get( form.tag )\n if( formClass is None ) : raise Exception( \"encountered unknown multiplicity form: %s\" % form.tag )\n newForm = formClass.parseNodeUsingClass(form, xPath, linkData, **kwargs)\n multiplicityComponent.add( newForm )\n\n xPath.pop( )\n\n return( multiplicityComponent )", "def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = 
onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def _create_tile(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.input.append(op.name + \":repeats\")\n return node", "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def multinomial(validate = True):\n return _softmax_class.multinomial(\n PATH_SENTIMENTS,\n S3_PATH_SENTIMENTS,\n 'sentiment',\n ['negative', 'positive'],\n validate = validate,\n )", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n post_r, out = multinomial(rng_R, (7, 3), 6, [0.2] * 5)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0, = f()\r\n val1, = f()\r\n numpy_val0 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))\r\n numpy_val1 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))\r\n\r\n self.assertTrue(val0.shape == (7, 3, 5))\r\n self.assertTrue(val1.shape == (7, 3, 5))", "def convert_linalg_gemm2(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Getting the attributes and assigning default values.\n alpha = float(attrs.get(\"alpha\", 1.0))\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if alpha == 1.0 and trans_a == 0 and trans_b == 0:\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n return [matmul_node]\n elif trans_a == 1 and trans_b == 0:\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n node_name = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[node_name, input_nodes[1]],\n outputs=[name],\n name=name\n )\n return [trans_a_node, matmul_node]\n\n elif trans_a == 0 and trans_b == 1:\n node_name = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_nodes[0], node_name],\n outputs=[name],\n name=name\n )\n\n return [trans_b_node, matmul_node]\n else:\n node_name_a = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name_a\n )\n\n node_name_b = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name_b\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n\n return [trans_a_node, trans_b_node, matmul_node]", "def convert_broadcast_greater(node, **kwargs):\n return 
create_basic_op_node('Greater', node, kwargs)" ]
[ "0.58884233", "0.5768025", "0.54848015", "0.5398994", "0.5315125", "0.52907187", "0.5282063", "0.5165217", "0.51454043", "0.51417667", "0.5104819", "0.50990736", "0.50983584", "0.5056548", "0.5052555", "0.5041364", "0.50398886", "0.5031336", "0.5016045", "0.5006198", "0.5003284", "0.500088", "0.49928898", "0.49640277", "0.49332666", "0.49211028", "0.48989147", "0.48986727", "0.48939607", "0.48883414" ]
0.71358234
0
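The metadata field above declares a triplet objective over (query, document, negatives). As an illustrative sketch only (the embeddings below are random stand-ins, not produced by any model referenced here), one common way to consume such a row is an InfoNCE-style loss that treats the document as the positive among the 30 negatives:

import numpy as np

def info_nce(query_vec, doc_vec, neg_vecs, temperature=0.05):
    # Similarity of the query to the positive document (index 0) and to each negative.
    sims = np.array([query_vec @ doc_vec] + [query_vec @ n for n in neg_vecs])
    logits = sims / temperature
    # Cross-entropy with the positive at index 0: -log softmax(logits)[0].
    return -(logits[0] - np.log(np.exp(logits).sum()))

rng = np.random.default_rng(0)
q = rng.normal(size=8)
d = rng.normal(size=8)
negs = rng.normal(size=(30, 8))
# L2-normalise so the dot products behave like cosine similarities.
q /= np.linalg.norm(q)
d /= np.linalg.norm(d)
negs /= np.linalg.norm(negs, axis=1, keepdims=True)
print(info_nce(q, d, negs))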
Map MXNet's random_uniform operator attributes to onnx's RandomUniform operator and return the created node.
def convert_random_uniform(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    low = float(attrs.get("low", 0))
    high = float(attrs.get("high", 1.0))
    shape = convert_string_to_list(attrs.get('shape', '[]'))
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]

    node = onnx.helper.make_node(
        'RandomUniform',
        input_nodes,
        [name],
        low=low,
        high=high,
        dtype=dtype,
        shape=shape,
        name=name
    )
    return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen(self, op, *args, **kwargs):\r\n random_state_variable = raw_random.random_state_type()\r\n new_r, out = op(random_state_variable, *args, **kwargs)\r\n out.rng = random_state_variable\r\n self.random_state_variables.append((random_state_variable, new_r))\r\n return out", "def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n low = tensor.as_tensor_variable(low)\r\n high = tensor.as_tensor_variable(high)\r\n if dtype is None:\r\n dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)\r\n op = RandomFunction('uniform',\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast))\r\n return op(random_state, size, low, high)", "def glorot_uniform(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='uniform', seed=seed)", "def convert_random_normal(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to float32\n mean = float(attrs.get(\"loc\", 0))\n scale = float(attrs.get(\"scale\", 1.0))\n shape = convert_string_to_list(attrs.get('shape', '[]'))\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]\n\n node = onnx.helper.make_node(\n 'RandomNormal',\n input_nodes,\n [name],\n mean=mean,\n scale=scale,\n dtype=dtype,\n shape=shape,\n name=name\n )\n return [node]", "def variable(self, Dist, *args, **kwargs):\n name = kwargs.pop('name', None)\n value = kwargs.pop('value', None)\n dist = Dist(*args, **kwargs)\n if value is None:\n value = dist.sample()\n observed = False\n else:\n observed = True\n if isinstance(value, RandomVariable):\n value = value.value\n node = RandomVariable(dist, value, observed, mask=self._mask)\n if name is None:\n self.append(node)\n else:\n self[name] = node\n return value", "def generateUnaryRel(graph, dist=None):\n if dist is None:\n dist = lambda: random.randint(1, len(graph.nodes()))\n\n count = dist()\n return random.sample(graph.nodes(), count)", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def test_uniform(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.uniform((2,2), -1, 1))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n print fn_val0\r\n print fn_val1\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n numpy_val0 = rng.uniform(-1, 1, size=(2,2))\r\n numpy_val1 = rng.uniform(-1, 1, size=(2,2))\r\n print numpy_val0\r\n print numpy_val1\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def gen(self, op, *args, **kwargs):\r\n seed = int(self.gen_seedgen.randint(2 ** 30))\r\n random_state_variable = shared(numpy.random.RandomState(seed))\r\n new_r, out = op(random_state_variable, *args, **kwargs)\r\n out.rng = random_state_variable\r\n out.update = (random_state_variable, new_r)\r\n self.state_updates.append(out.update)\r\n random_state_variable.default_update = new_r\r\n return out", "def get_random_uniform(m,n):\n\n return 2*np.random.random(size=(m,n)) - 1", "def random(cls):\n return cls(np.random.randn(3)).normalized()", "def get_random_node(self):\n if random.randint(0, 100) > self.goal_sample_rate:\n 
random_node = self.Node(\n random.uniform(self.min_rand, self.max_rand),\n random.uniform(self.min_rand, self.max_rand),\n )\n else: # goal point sampling\n random_node = self.Node(self.end.x, self.end.y)\n return random_node", "def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node", "def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node", "def RandomVar(shape, name=None):\n return variable_v1.VariableV1(\n random_ops.random_uniform(shape), dtype=dtypes.float32, name=name)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def glorot(self, shape, name=None):\n init_range = np.sqrt(6.0 / (shape[0] + shape[1]))\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)", "def _hardware_uniform(\n rng_key: Array,\n shape: Shape,\n dtype: jnp.dtype = np.float32,\n minval: Array = np.float32(0),\n maxval: Array = np.float32(1)\n) -> Array:\n del rng_key # non-deterministic prng.\n minval = jax.lax.convert_element_type(minval, dtype)\n maxval = jax.lax.convert_element_type(maxval, dtype)\n return jax.lax.rng_uniform(minval, maxval, shape)", "def _random_replace_nodes_attribute(graph, residues, weights, attribute, seed=None):\n random.seed(seed)\n for node in graph.nodes:\n resname = random.choices(residues, weights=weights)\n graph.nodes[node][attribute] = resname[0]\n\n return graph", "def uniform(lower, upper):\n\n return lower + random.random() * (upper - lower)", "def mutate_nonstructural(self):\n # TODO consider clamping weights and biases?\n for link in self.gene_links:\n # Disable/Enable links\n if event(link_toggle_prob): # Chance of toggling link\n link.enabled = True if link.enabled is False else False\n if link.enabled is False and event(link_enable_prob): # Chance of enabling a disabled link\n link.enabled = True\n # Mutate weights\n if event(weight_mutate_rate):\n if event(weight_replace_rate): # replace with random weight\n link.weight = random.uniform(weight_init_min, weight_init_max)\n else: # adjust weight\n link.weight += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n for node in self.gene_nodes:\n # Mutate bias\n if event(bias_mutate_rate):\n if event(bias_replace_rate): # replace with random bias\n node.bias = random.uniform(bias_init_min, bias_init_max)\n else: # adjust bias\n node.bias += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n # Mutate activation func\n if node.can_modify:\n if event(change_act_prob):\n node.act_func = self.act_set.get_random_activation_func()\n # reinit freq amp and vshift when act func changes\n if node.act_func.__name__[0] == \"g\":\n node.freq = random.uniform(-gauss_freq_range, gauss_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-gauss_vshift_range, gauss_vshift_range)\n elif node.act_func.__name__[0] == \"s\":\n node.freq = random.uniform(-sin_freq_range, sin_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-sin_vshift_range, sin_vshift_range)\n # Adjust freq amp and vshift of activation function\n if 
event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)\n # Mutate substrate width/height rectangles\n if event(width_mutate_prob):\n if event(0.5):\n self.substrate_width += 1\n elif self.substrate_width > 1:\n self.substrate_width -= 1\n if event(height_mutate_prob):\n if event(0.5):\n self.substrate_height += 1\n elif self.substrate_height > 1:\n self.substrate_height -= 1\n \"\"\" ES-HyperNeat - no longer used\n # Mutate QuadTree variance\n if event(var_mutate_prob):\n self.var_thresh += np.random.normal(scale=gauss_var_scale)\n self.var_thresh = self.var_thresh if self.var_thresh > 0 else 0\n # Mutate QuadTree band thresh\n if event(band_mutate_prob):\n self.band_thresh += np.random.normal(scale=gauss_band_scale)\n self.band_thresh = self.band_thresh if self.band_thresh > 0 else 0\n \"\"\"", "def sample_bernoulli(self, probabilities):\n return tf.nn.relu(tf.sign(probabilities - tf.random.uniform(probabilities.shape)))", "def test_uniform(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters\r\n post_r, out = uniform(rng_R, (4,), -2.0, 2.0)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.uniform(-2.0, 2.0, size=(4,))\r\n numpy_val1 = numpy_rng.uniform(-2.0, 2.0, size=(4,))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.allclose(val0, numpy_val0))\r\n self.assertTrue(numpy.allclose(val1, numpy_val1))", "def add_uniform_random_negatives(\n ds,\n num_nodes,\n num_negs_per_pos,\n):\n negative_sampler = RandomUniformNegativeSampler(num_nodes, num_negs_per_pos)\n return ds.map(\n negative_sampler, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE\n )", "def uniform(m):\n if type(m) is nn.Linear or type(m) is nn.Conv2d:\n nn.init.uniform_(m.weight)", "def sample_uniform(instance, params):\n subpop = np.random.randint(params['N'])\n return sample_from_subpop(instance, params, subpop)", "def glorot(shape, name=None, scale=1.):\n init_range = np.sqrt(6.0/(shape[-1]+shape[-2])) * scale\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)", "def rand(self):\n return np.random.rand(self.nx)", "def create_random(self):\n for key in self.nn_param_choices:\n self.network[key] = random.choice(self.nn_param_choices[key])" ]
[ "0.60839343", "0.5966107", "0.5965341", "0.59418684", "0.5821455", "0.55140215", "0.5499431", "0.5494168", "0.5481163", "0.5478966", "0.5458399", "0.5442405", "0.54335564", "0.5417929", "0.53986716", "0.539796", "0.539796", "0.5386957", "0.53401625", "0.5309333", "0.5306019", "0.53059185", "0.5293725", "0.52917767", "0.5259761", "0.5248503", "0.5241024", "0.5237806", "0.5230762", "0.523015" ]
0.7275685
0
Map MXNet's random_normal operator attributes to onnx's RandomNormal operator and return the created node.
def convert_random_normal(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    mean = float(attrs.get("loc", 0))
    scale = float(attrs.get("scale", 1.0))
    shape = convert_string_to_list(attrs.get('shape', '[]'))
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]

    node = onnx.helper.make_node(
        'RandomNormal',
        input_nodes,
        [name],
        mean=mean,
        scale=scale,
        dtype=dtype,
        shape=shape,
        name=name
    )
    return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_random_uniform(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to float32\n low = float(attrs.get(\"low\", 0))\n high = float(attrs.get(\"high\", 1.0))\n shape = convert_string_to_list(attrs.get('shape', '[]'))\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]\n\n node = onnx.helper.make_node(\n 'RandomUniform',\n input_nodes,\n [name],\n low=low,\n high=high,\n dtype=dtype,\n shape=shape,\n name=name\n )\n return [node]", "def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):\r\n avg = tensor.as_tensor_variable(avg)\r\n std = tensor.as_tensor_variable(std)\r\n if dtype is None:\r\n dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype, std.dtype)\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)\r\n op = RandomFunction('normal',\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast))\r\n return op(random_state, size, avg, std)", "def _from_distribution(cls, new_normal):\n new = cls(torch.zeros(1), torch.zeros(1))\n new._normal = new_normal\n return new", "def glorot_normal(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='truncated_normal', seed=seed)", "def random_normal():\r\n return inverse_normal_cdf(random.random())", "def random_normal():\n return inverse_normal_cdf(random.random())", "def convert_gaussian_random(g, op, block):\n\n mean = op.attr(\"mean\")\n std = op.attr(\"std\")\n shape = op.attr(\"shape\")\n seed = op.attr(\"seed\")\n dtype = op.attr(\"dtype\")\n dtype = _convert_dtype_value(dtype)\n out = _op.random.normal(key=seed, shape=shape, dtype=dtype, mean=mean, scale=std)\n g.add_node(op.output(\"Out\")[0], out)", "def random(cls):\n return cls(np.random.randn(3)).normalized()", "def standard_normal(weight_shape):\n return np.random.normal(size=weight_shape)", "def draw_normal(self):\n means, scale = self.get_means_and_scales()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T", "def standard_normal(random_state, size=None, chunk_size=None, gpu=None, dtype=None):\n if dtype is None:\n dtype = np.random.RandomState().standard_normal(size=(0,)).dtype\n size = random_state._handle_size(size)\n seed = gen_random_seeds(1, random_state.to_numpy())[0]\n op = TensorStandardNormal(size=size, seed=seed, gpu=gpu, dtype=dtype)\n return op(chunk_size=chunk_size)", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.normal((2,2), -1, 2))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def normal(self, position):\n return self._normal", "def random_normal2(shape,\n mean=0.0,\n stddev=1.0,\n dtype=dtypes.float32,\n seed=None,\n name=None):\n with ops.name_scope(name, \"random_normal2\", [shape, mean, stddev]) as name:\n shape_tensor = _ShapeTensor(shape)\n mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name=\"mean\")\n stddev_tensor = ops.convert_to_tensor(stddev, 
dtype=dtype, name=\"stddev\")\n seed1, seed2 = seed[0],seed[1] #random_seed.get_seed(seed)\n rnd = gen_random_ops._random_standard_normal(\n shape_tensor, dtype, seed=seed1, seed2=seed2)\n mul = rnd * stddev_tensor\n value = math_ops.add(mul, mean_tensor, name=name)\n return value", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters\r\n post_r, out = normal(rng_R, (2, 3), 4.0, 2.0)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\r\n numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.allclose(val0, numpy_val0))\r\n self.assertTrue(numpy.allclose(val1, numpy_val1))", "def GetNormal(self):\n ...", "def test_random_normal():\n tf.reset_default_graph()\n tf.random.set_random_seed(0)\n rnormal_class = INITIALIZER_REGISTRY['normal']\n rnormal_obj = rnormal_class({\"mean\":0.5, \"std\":0.1})\n tf_init = rnormal_obj.get_entity_initializer(init_type='tf')\n var1 = tf.get_variable(shape=(1000, 100), initializer=tf_init, name=\"var1\")\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n tf_var = sess.run(var1)\n np_var = rnormal_obj.get_entity_initializer(1000, 100, init_type='np')\n # print(np.mean(np_var), np.std(np_var))\n # print(np.mean(tf_var), np.std(tf_var))\n assert(np.round(np.mean(np_var),1)==np.round(np.mean(tf_var),1))\n assert(np.round(np.std(np_var),1)==np.round(np.std(tf_var),1))", "def normal_sample(mu, sigma):\n return mu + sigma * torch.randn_like(sigma)", "def get_random_node(self):\n if random.randint(0, 100) > self.goal_sample_rate:\n random_node = self.Node(\n random.uniform(self.min_rand, self.max_rand),\n random.uniform(self.min_rand, self.max_rand),\n )\n else: # goal point sampling\n random_node = self.Node(self.end.x, self.end.y)\n return random_node", "def normal_init(module, mean=0, std=1, bias=0):\n nn.init.normal_(module.weight, mean, std)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)", "def normal_init(m, std=0.02):\n nn.init.normal_(m.weight, 0, std)\n if hasattr(m, 'biais') and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n return m", "def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01", "def create_normal_distribution_connection(name, from_group_amount, to_group_amount, stddev=0.02, scale = 1.0):\n return conn_utils.weight_variable([to_group_amount, from_group_amount],\n stddev=stddev, scale=scale, name=name)", "def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_", "def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_", "def _random_replace_nodes_attribute(graph, residues, weights, attribute, seed=None):\n random.seed(seed)\n for node in graph.nodes:\n resname = random.choices(residues, weights=weights)\n graph.nodes[node][attribute] = resname[0]\n\n return graph", "def draw_normal_initial(self):\n means, scale = self.get_means_and_scales_from_q()\n return 
np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T", "def normal(mean, std):\n\n return random.gauss(mean, std)", "def normal(self, point):\n return self._normal.dup()", "def randn(self, *args, **kwargs):\n # TODO: Put this in torch.cuda.randn\n return torch.empty(*args, **kwargs).normal_()" ]
[ "0.62901884", "0.60207254", "0.5817178", "0.57946956", "0.5718311", "0.5678145", "0.56305474", "0.5630399", "0.5541356", "0.55390555", "0.5524819", "0.55016184", "0.5497462", "0.54788977", "0.54603964", "0.54529095", "0.5433405", "0.5368286", "0.5364437", "0.534533", "0.53327334", "0.53167576", "0.53137606", "0.52807957", "0.52807957", "0.5271491", "0.5255847", "0.523525", "0.52178526", "0.5208219" ]
0.7576377
0
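The converter documents above all build ONNX NodeProto objects with onnx.helper.make_node. As a standalone illustration (not part of the dataset; the node, tensor, and graph names are invented), a RandomNormal node like the one produced by convert_random_normal can be wrapped in a minimal graph and validated with the onnx checker:

import onnx
from onnx import TensorProto, helper

# A RandomNormal node analogous to what convert_random_normal returns.
rn = helper.make_node(
    "RandomNormal",
    inputs=[],
    outputs=["sample"],
    mean=0.0,
    scale=1.0,
    dtype=TensorProto.FLOAT,
    shape=[2, 3],
    name="demo_random_normal",
)
graph = helper.make_graph(
    [rn],
    "random_normal_demo",
    inputs=[],
    outputs=[helper.make_tensor_value_info("sample", TensorProto.FLOAT, [2, 3])],
)
model = helper.make_model(graph)
onnx.checker.check_model(model)  # raises if the node or graph is malformed
print(helper.printable_graph(model.graph))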
Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool operator and return the created node.
def convert_roipooling(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    pooled_shape = convert_string_to_list(attrs.get('pooled_size'))
    scale = float(attrs.get("spatial_scale"))

    node = onnx.helper.make_node(
        'MaxRoiPool',
        input_nodes,
        [name],
        pooled_shape=pooled_shape,
        spatial_scale=scale,
        name=name
    )
    return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_pooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel = eval(attrs[\"kernel\"])\n pool_type = attrs[\"pool_type\"] if attrs.get(\"pool_type\") else \"max\"\n stride = eval(attrs[\"stride\"]) if attrs.get(\"stride\") else (1, 1)\n global_pool = get_boolean_attribute_value(attrs, \"global_pool\")\n p_value = attrs.get('p_value', 'None')\n\n pooling_convention = attrs.get('pooling_convention', 'valid')\n ceil_mode = False\n if pooling_convention == 'full':\n if onnx.__version__ < \"1.5.0\":\n pooling_warning = \"Pooling: ONNX lower than 1.5.0 doesn't support pooling_convention. \" \\\n \"This might lead to shape or accuracy issues. \" \\\n \"https://github.com/onnx/onnx/issues/549\"\n ceil_mode = True\n logging.warning(pooling_warning)\n\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n pad_dims = pad_dims + pad_dims\n pool_types = {\"max\": \"MaxPool\", \"avg\": \"AveragePool\", \"lp\": \"LpPool\"}\n global_pool_types = {\"max\": \"GlobalMaxPool\", \"avg\": \"GlobalAveragePool\",\n \"lp\": \"GlobalLpPool\"}\n\n if pool_type == 'lp' and p_value == 'None':\n raise AttributeError('ONNX requires a p value for LpPool and GlobalLpPool')\n\n if global_pool:\n if pool_type == 'lp':\n node = onnx.helper.make_node(\n global_pool_types[pool_type],\n input_nodes, # input\n [name],\n p=int(p_value),\n name=name\n )\n else:\n node = onnx.helper.make_node(\n global_pool_types[pool_type],\n input_nodes, # input\n [name],\n name=name\n )\n else:\n if pool_type == 'lp':\n node = onnx.helper.make_node(\n pool_types[pool_type],\n input_nodes, # input\n [name],\n p=int(p_value),\n kernel_shape=kernel,\n pads=pad_dims,\n strides=stride,\n name=name\n )\n else:\n if onnx.__version__ >= \"1.5.0\":\n node = onnx.helper.make_node(\n pool_types[pool_type],\n input_nodes, # input\n [name],\n kernel_shape=kernel,\n pads=pad_dims,\n strides=stride,\n name=name,\n ceil_mode=ceil_mode\n )\n else:\n node = onnx.helper.make_node(\n pool_types[pool_type],\n input_nodes, # input\n [name],\n kernel_shape=kernel,\n pads=pad_dims,\n strides=stride,\n name=name\n )\n\n return [node]", "def max_pool(self, x, name=\"\"):\n return tf.nn.max_pool(x, ksize=self.mp_size, strides=self.mp_stride,\n padding=self.mp_padding, name=name)", "def max_pooling(self, filter_):\n return self.add_layer(max_pooling, filter_)", "def test_pool_consistency(self) -> None:\n x = Constant(\n 'const1',\n Float32(),\n np.zeros([1, 3, 3, 3])\n )\n input_ops = {'X': cast(Operator, x)}\n\n MaxPool(\n 'max_pool1',\n [1, 2, 2, 3],\n Float32(),\n input_ops,\n kernel_shape=[3, 3],\n pads=[1, 1, 1, 1],\n strides=[2, 2]\n )\n\n print(\"Consistency test for pooling operator passed!\")", "def max_pool_2x2(self, x,name=\"\"):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], \n padding=\"VALID\",\n name=name\n )", "def _max_pool(x):\n return tf.nn.max_pool(value=x,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')", "def pool(self) -> NodePool:\n\n return self._pool", "def _create_max_avg_pool(cls, onnx_node, inputs, opset_version):\n kernel = tuple(onnx_node.attrs[\"kernel_shape\"])\n padding = tuple(\n onnx_node.attrs[\"pads\"]) if \"pads\" in onnx_node.attrs else (0, 0)\n stride = tuple(onnx_node.getattr('strides', (1, 1)))\n # default the odd_padding is 0, once there are same pad mode, we modify it\n # for odd_padding, please refer the autegrade.py\n odd_padding = (0, 0, 0, 0)\n if \"auto_pad\" in onnx_node.attrs:\n auto_pad = 
utils.force_unicode(onnx_node.attrs['auto_pad'])\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n padding, odd_padding = utils.get_padding_shape(\n auto_pad, inputs[0].shape[2:], kernel, stride)\n\n # not support count_include_pad and auto_pad\n if \"count_include_pad\" in onnx_node.attrs or \"ceil_mode\" in onnx_node.attrs:\n raise ValueError(\n \"Not implemented yet for count_include_pad or ceil_mode\")\n\n # only support 2d\n if len(kernel) != 2:\n raise ValueError(\"Not implemented yet\")\n\n is_max = onnx_node.op_type == 'MaxPool'\n x = inputs[0]\n if x.device.id() == -1:\n handle = singa.PoolingHandle(x.data, kernel, stride, padding,\n is_max)\n else:\n handle = singa.CudnnPoolingHandle(x.data, kernel, stride, padding,\n is_max)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(handle, odd_padding)", "def create_max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "def pool_layer( x, wpool, padding, name ):\n top = tf.layers.max_pooling2d( x, \n 2, \n [2, wpool], \n padding=padding, \n name=name )\n return top", "def _max_pool(self, bottom, name='max_pool'):\n return tf.nn.max_pool(\n bottom,\n ksize=[1, 3, 1, 1],\n strides=[1, 3, 1, 1],\n padding='SAME', name=name)", "def pooler_layer(self):\n return self._pooler_layer", "def createRotoPaintNodeMI():\n return gr()", "def createRotoNodeMI():\n return gs()", "def PSROIPooling(data=None, rois=None, spatial_scale=_Null, output_dim=_Null, pooled_size=_Null, group_size=_Null, out=None, name=None, **kwargs):\n return (0,)", "def _fix_pooling(self, op_name, inputs, new_attr):\n pool_type = 'avg' if op_name == 'AveragePool' else 'max'\n stride = new_attr.get('strides')\n kernel = new_attr.get('kernel_shape')\n padding = new_attr.get('pads')\n pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding)\n new_pad_op = mx.sym.pad(inputs[0], mode='constant', pad_width=pad_width)\n new_pooling_op = mx.sym.Pooling(new_pad_op, pool_type=pool_type,\n stride=stride, kernel=kernel)\n return new_pooling_op", "def max_pooling_layer(X, kernel_size=2, stride=2, padding='SAME', name=None):\n with tf.variable_scope(name) as scope:\n pool = tf.nn.max_pool(X, ksize=[1, kernel_size, kernel_size, 1],\n strides=[1, stride, stride, 1],\n padding=padding, name=name)\n return pool", "def max_pool_2x2(self, x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "def pool(self):\n return self._properties.get('pool')", "def createGridWarpNodeMI():\n return gy()", "def get_device_pool(arn=None):\n pass", "def node_pools(self) -> Sequence['outputs.NodePoolResponse']:\n return pulumi.get(self, \"node_pools\")", "def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = 
cls._rename_operators.get('AvgPool2d')\n return node", "def _pool_op(self, in_obj, pool_axes):\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(pool_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n output_axes = self._output_axes(in_obj,\n pad_int)\n poolparams = make_poolparams(self.pool_type,\n self.pool_shape,\n self.strides,\n pad_int)\n return ng.pooling(poolparams,\n in_obj,\n axes=output_axes)", "def maxpool(input, filter_h, filter_w, stride_h, stride_w, padding, name):\n with tf.name_scope(name):\n mp = tf.nn.max_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1],\n padding=padding)\n # print(name + \" : \", str(mp.shape))\n return mp", "def get_max_independent_set_operator(num_nodes):\n pauli_list = []\n for i in range(num_nodes):\n x_p = np.zeros(num_nodes, dtype=np.bool)\n z_p = np.zeros(num_nodes, dtype=np.bool)\n z_p[i] = True\n pauli_list.append([0.5, Pauli(z_p, x_p)])\n shift = -num_nodes/2\n return WeightedPauliOperator(paulis=pauli_list), shift", "def max_pool(self, inputs, kernel_size, strides, padding='VALID', name='maxpool_layer'):\n pool = tf.nn.max_pool2d(inputs, ksize=[1, ksize, ksize, 1],\n strides=[1, strides, strides, 1], \n padding=padding, name=name)\n return pool", "def prop_max_pool(self, activation, relevance, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1]):\n act = tf.expand_dims(activation, 3) # N x M x F x 1\n z = tf.nn.max_pool(act, ksize, strides, padding='SAME') + self.epsilon\n with self.model.graph.as_default():\n rel = tf.expand_dims(relevance, 3)\n s = rel / z\n c = gen_nn_ops.max_pool_grad_v2(act, z, s, ksize, strides, padding='SAME')\n tmp = c * act\n return tf.squeeze(tmp, [3])", "def collect_physpool_element(cfg):\n physpool_el = cfg.find_children(\"physical_pool\")\n cfg['IMAGEPOOLINSERT'] = '{}'\n\n if not physpool_el:\n cfg['DOPOOLRELOC'] = '0'\n return \n \n policydict = dict([(el.name, el.value) for el in cfg.find_child('machine').find_children('cache_policy')])\n policydict[None] = '-1'\n\n # Make a new list with DEFAULT_PHYSPOOL at the front\n # and with everything else behind\n physpool_el = [el for el in physpool_el if el.name == 'DEFAULT_PHYSPOOL'] + \\\n [el for el in physpool_el if el.name != 'DEFAULT_PHYSPOOL']\n\n handle_image_pools(cfg, physpool_el)\n handle_island_pools(cfg, physpool_el)\n\n #pool_configs\n doreloc = '0'\n for el in physpool_el:\n verify_name(cfg, el.name)\n #\n # for each region:\n # [0] = base address\n # [1] = size in bytes\n # [2] = number of bytes to be added to the pool descriptor\n # in the memory image and then subtracted during bootup;\n # this can either be from the 'padding' attribute or it\n # can represent memory \"stolen\" from the end of the\n # pool for other purposes, such as the image build utility.\n # [3] = name of the region, or None if no name exists\n # [4] = cache policy for the region (-1 if none specified)\n #\n r = [[x.base, x.size, getattr(x,'padding',0), getattr(x,'name',None),\n int(policydict[getattr(x,'cache_policy',None)],0)] for x in el.find_children('region')]\n add_physpool(cfg, el.name, r)\n\n add_physpool(cfg, '')\n cfg['DOPOOLRELOC'] = doreloc\n cfg['PHYSPOOLS'] = ' \\\\\\n'.join(cfg.physpools)", "def _pool_layer(tensor):\n return tf.nn.max_pool(tensor, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),\n padding='SAME')" ]
[ "0.641093", "0.58088446", "0.56600386", "0.5579279", "0.55671954", "0.55426383", "0.5540293", "0.54378605", "0.5431979", "0.54058385", "0.5381407", "0.5371967", "0.5365166", "0.53410465", "0.5257729", "0.52336967", "0.5228986", "0.5204838", "0.51902246", "0.5179581", "0.516139", "0.5146488", "0.5134461", "0.5126844", "0.51161414", "0.5101897", "0.5077927", "0.5062055", "0.50509435", "0.50369006" ]
0.7807513
0
Map MXNet's Tile operator attributes to onnx's Tile operator and return the created node.
def convert_tile(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    reps_list = convert_string_to_list(attrs["reps"])
    initializer = kwargs["initializer"]

    reps_shape_np = np.array(reps_list, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[reps_shape_np.dtype]
    dims = np.shape(reps_shape_np)

    output_shape_name = "reps_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=reps_list,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)
    tile_node = onnx.helper.make_node(
        "Tile",
        input_nodes,
        [name],
        name=name
    )
    return [tensor_node, tile_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_tile(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.input.append(op.name + \":repeats\")\n return node", "def _create_tile(cls, onnx_node, inputs, opset_version):\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n repeats = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(repeats)", "def createGridWarpNodeMI():\n return gy()", "def convert_tile(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n if op.input(\"RepeatTimes\"):\n reps = g.get_node(op.input(\"RepeatTimes\")[0])\n reps, infered = try_infer_value(reps, g.get_params())\n if infered:\n reps = reps.tolist()\n elif op.input(\"repeat_times_tensor\"):\n reps = []\n for rep_value in op.input(\"repeat_times_tensor\"):\n rep_value = g.get_node(rep_value).astype(\"int32\")\n reps.append(rep_value)\n reps = _op.concatenate(reps, axis=0)\n reps, infered = try_infer_value(reps, g.get_params())\n if infered:\n reps = reps.tolist()\n else:\n reps = op.attr(\"repeat_times\")\n infered = True\n\n if not infered:\n msg = f'Value {reps} in attribute \"repeat_times\" of operator Tile is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg)\n\n op_func = get_relay_op(op.type)\n out = op_func(x, reps=reps)\n g.add_node(op.output(\"Out\")[0], out)", "def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node", "def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node", "def createSplineWarpNodeMI():\n return gt()", "def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node", "def getTile(self, x, y):#TODO Make this a dictionary, and make a pprint function\n o = {}\n for layer in self.layers.keys():\n o[layer] = str(self.layers[layer][x, y])\n return o", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def get_tile(self):\n return Tile.get_tile(self.get_number())", "def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n 
operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor", "def createRotoPaintNodeMI():\n return gr()", "def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)", "def getTile(self):\n return self.tile", "def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )", "def create_tile(self, name):\n return self.subgrids[name[0:2]].tilesys.create_tile(name)", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def createRotoNodeMI():\n return gs()", "def icon_maker(self, n, icon_x, icon_y):\n sprite = displayio.TileGrid(self.sprite_sheet, pixel_shader=self.palette, width=1,\n height=1, tile_width=40, tile_height=40, default_tile=n,\n x=icon_x, y=icon_y)\n return sprite", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node", "def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]" ]
[ "0.6590867", "0.5910745", "0.58112407", "0.5749959", "0.5646954", "0.5629699", "0.5605283", "0.5601611", "0.55009604", "0.54751515", "0.5461425", "0.5446524", "0.5431833", "0.542485", "0.5410572", "0.53914165", "0.53725374", "0.5355028", "0.53471506", "0.5346676", "0.53449744", "0.5342683", "0.5320038", "0.5310877", "0.52969396", "0.52706575", "0.5256223", "0.5243276", "0.52368927", "0.5184577" ]
0.67022306
0
Map MXNet's broadcast_to operator attributes to onnx's Expand operator and return the created node.
def convert_broadcast_to(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    shape_list = convert_string_to_list(attrs["shape"])
    initializer = kwargs["initializer"]

    output_shape_np = np.array(shape_list, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
    dims = np.shape(output_shape_np)

    output_shape_name = "expand_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=shape_list,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)
    expand_node = onnx.helper.make_node(
        "Expand",
        input_nodes,
        [name],
        name=name
    )
    return [tensor_node, expand_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_expand_as(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n target_shape = op.attr(\"target_shape\")\n out = _op.broadcast_to(x, target_shape)\n g.add_node(op.output(\"Out\")[0], out)", "def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def convert_expand(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n if op.input(\"Shape\"):\n sizes = g.get_node(op.input(\"Shape\")[0])\n else:\n sizes = op.attr(\"shape\")\n\n if isinstance(sizes, _expr.Expr):\n sizes = try_infer_value(sizes, parameters=g.get_params())[0]\n\n if isinstance(sizes, np.ndarray):\n sizes = sizes.tolist()\n\n out = _op.broadcast_to(x, sizes)\n g.add_node(op.output(\"Out\")[0], out)", "def create_helper_expand_node(input_name, output_name, expand_shape):\n expand_node = onnx.helper.make_node(\n \"Expand\",\n inputs=[input_name, expand_shape],\n outputs=[output_name],\n name=output_name,\n )\n return [expand_node]", "def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node", "def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node", "def convert_broadcast_equal(node, **kwargs):\n return create_basic_op_node('Equal', node, kwargs)", "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def", "def convert_expand_dims(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n\n node = onnx.helper.make_node(\n \"Unsqueeze\",\n input_nodes,\n [name],\n axes=[axis],\n name=name,\n )\n return [node]", "def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n 
op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op", "def convert_broadcast_lesser(node, **kwargs):\n return create_basic_op_node('Less', node, kwargs)", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def covert_broadcast_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def broadcast() -> BroadcastDistribute:\n return _broadcast", "def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)", "def conv2d_broadcastto_op(node_A, node_B):\r\n return Conv2d_BroadcastToOp()(node_A, node_B)", "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n 
\"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors", "def to_op(self):\n raise NotImplementedError", "def tohost(x):\n\n def single_tohost(x):\n n_device, n_batch, *remaining_dims = x.shape\n return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))\n\n return jax.tree_map(single_tohost, x)" ]
[ "0.6505846", "0.604694", "0.60059506", "0.59943974", "0.5957492", "0.5825268", "0.5748968", "0.57150435", "0.5620855", "0.5597498", "0.55732626", "0.5556347", "0.5554119", "0.5502286", "0.54921335", "0.5468958", "0.5464709", "0.5461847", "0.5448856", "0.54340446", "0.53191674", "0.53179365", "0.52966875", "0.5289854", "0.5278222", "0.52600425", "0.51931936", "0.51930314", "0.5188909", "0.51772946" ]
0.74339557
0
Map MXNet's topk operator attributes to onnx's TopK operator and return the created node.
def convert_topk(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = int(attrs.get('axis', '-1'))
    k = int(attrs.get('k', '1'))
    ret_type = attrs.get('ret_typ')
    dtype = attrs.get('dtype')
    outputs = [name + '_output0']

    if ret_type and ret_type == 'both':
        if dtype and dtype == 'int64':
            outputs.append(name + '_output1')
        else:
            raise NotImplementedError("ONNX expects indices to be of type int64")
    else:
        raise NotImplementedError("ONNX expects both value and indices as output")

    export_nodes = []

    k = np.asarray([k], dtype=np.int)
    k_node = create_helper_tensor_node(k, name + '__k', kwargs)
    export_nodes.extend(k_node)
    k_node = k_node[-1].name

    input_node = input_nodes[0]
    topk_node = onnx.helper.make_node(
        "TopK",
        [input_node, k_node],
        outputs,
        axis=axis,
        name=name
    )
    export_nodes.extend([topk_node])

    return [topk_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_topk(g, op, block):\n\n data = g.get_node(op.input(\"X\")[0])\n if op.input(\"K\"):\n k = g.get_node(op.input(\"K\")[0])\n else:\n k = op.attr(\"k\")\n\n largest = True\n axis = -1\n if op.has_attr(\"axis\"):\n axis = op.attr(\"axis\")\n if op.has_attr(\"largest\"):\n largest = op.attr(\"largest\")\n is_ascend = not largest\n\n value_names = op.output(\"Out\")\n indice_names = op.output(\"Indices\")\n\n out = None\n indice = None\n if value_names and indice_names:\n out, indice = _op.topk(data=data, k=k, axis=axis, ret_type=\"both\", is_ascend=is_ascend)\n elif value_names:\n out = _op.topk(data=data, k=k, axis=axis, ret_type=\"values\", is_ascend=is_ascend)\n elif indice_names:\n indice = _op.topk(data=data, k=k, axis=axis, ret_type=\"indices\", is_ascend=is_ascend)\n\n if out is not None:\n g.add_node(value_names[0], out)\n if indice is not None:\n g.add_node(indice_names[0], indice)", "def add_topk(self, input_name, k, name=None):\n return self._build_op('TopKV2', [input_name, k], name=name)", "def in_top_k(predictions, targets, k, name=None):\n with ops.name_scope(name, \"in_top_k\"):\n return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)", "def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node", "def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))", "def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def", "def top_k(input, k=1, sorted=True, index_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin\n return gen_nn_ops.top_kv2(\n input, k=k, sorted=sorted, index_type=index_type, name=name\n )", "def build_topk_prediction(self, n_output, t_output, define_k=3):\n n_topk_possibility, n_topk_prediction = tf.nn.top_k(n_output, k=define_k)\n t_topk_possibility, t_topk_prediction = tf.nn.top_k(t_output, k=define_k)\n return n_topk_prediction, n_topk_possibility, t_topk_prediction, t_topk_possibility", "def setTopK(self, value):\n return self._set(topK=value)", "def setTopK(self, value):\n return self._set(topK=value)", "def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node", "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n 
\"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def node_topology(self) -> \"LabelSelector\":\n return typing.cast(\n \"LabelSelector\",\n self._properties.get(\"nodeTopology\"),\n )", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 
'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor", "def approx_min_k(operand,\n k,\n reduction_dimension=-1,\n recall_target=0.95,\n reduction_input_size_override=-1,\n aggregate_to_topk=True,\n name=None):\n return gen_nn_ops.approx_top_k(\n operand,\n k=k,\n reduction_dimension=reduction_dimension,\n recall_target=recall_target,\n is_max_k=False,\n reduction_input_size_override=reduction_input_size_override,\n aggregate_to_topk=aggregate_to_topk,\n name=name)", "def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]", "def in_top_k_v2(targets, predictions, k, name=None):\n return in_top_k(predictions, targets, k, name)", "def __init__(self, top_k):\n super(StreamTopkAccuracy, self).__init__(\n reset_at=\"stream\", emit_at=\"stream\", mode=\"eval\", top_k=top_k\n )\n self.top_k = top_k", "def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node", "def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node" ]
[ "0.6419405", "0.6140541", "0.5742067", "0.54392713", "0.5406331", "0.5386023", "0.5383082", "0.5365803", "0.5237811", "0.5237811", "0.51466364", "0.51442844", "0.5112016", "0.509088", "0.5069989", "0.50558615", "0.50066954", "0.49857953", "0.49676558", "0.49675217", "0.49654332", "0.49523515", "0.49473682", "0.49461466", "0.49328336", "0.49113595", "0.48592785", "0.48412699", "0.48165798", "0.48107317" ]
0.68704766
0
Map MXNet's Take operator attributes to onnx's Gather operator.
def convert_take(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = int(attrs.get('axis', 0))

    node = onnx.helper.make_node(
        "Gather",
        input_nodes,
        [name],
        axis=axis,
        name=name,
    )
    return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_gather(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"axis\")\n out = _op.take(x, index, axis)\n g.add_node(op.output(\"Out\")[0], out)", "def _create_gather(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", 0)\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n indices = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, indices)", "def gather(input: torch.Tensor) -> Tuple[torch.Tensor]:\n return GatherLayer.apply(input)", "def _special_handle_gather(cls, op, X, W):\n tensor_list = []\n append_inputs = {\n \"indices\": op.indices,\n }\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n tensor_list.append(\n numpy_helper.from_array(np.array(append_input), node_name))\n return tensor_list", "def convert_take_along_axis(g, op, block):\n\n x = g.get_node(op.input(\"Input\")[0])\n idx = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"Axis\")\n out = _op.gather(x, axis, idx)\n g.add_node(op.output(\"Result\")[0], out)", "def _get_mean_and_samples_attribute(self, attr, *args, **kwargs):\n return (self._get_mean_attribute(attr, *args, **kwargs),\n self._get_samples_attribute(attr, *args, **kwargs))", "def gather_res(outputs, target_device, dim=0):\n out = outputs[0]\n args = {field: Gather.apply(target_device, dim, *[getattr(o, field) for o in outputs]) for field, v in out.__dict__.items() if v is not None}\n return type(out)(**args)", "def _create_gather(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n node.input.append(op.name + \":indices\")\n return node", "def gather(tensor):\n if PartialState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor)\n elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:\n return _gpu_gather(tensor)\n else:\n return tensor", "def get_over(self, filter_dict, percentage):\n pass", "def my_featurize(apartment):\n return x, y", "def predict_collect(self, src, collector): # real signature unknown; restored from __doc__\n pass", "def __init__(self, **kwargs):\n super(Gather, self).__init__(**kwargs)", "def _gather_metric(method, attribute=None, transform=None):\n\n doc = \"Extract the {} attribute from the given process object\".format\n if attribute:\n doc = ( # NOQA\n \"Extract the {}().{} attribute from the given process object\".format\n )\n\n def gather_metric(process):\n \"\"\"Dynamically Generated\"\"\"\n errmsg = (\n \"Only the 'psutil.Process' interface is supported currently; not {}\".format\n )\n proc_type = type(process)\n assert proc_type is psutil.Process, errmsg(proc_type)\n metric = methodcaller(method) # pylint: disable=redefined-outer-name\n if attribute is not None:\n value = attrgetter(attribute)(metric(process))\n else:\n value = metric(process)\n\n if transform is not None:\n value = transform(value)\n\n return value\n\n # XXX: For some reason this was causing trouble for the documentation build process\n # gather_metric.__doc__ = doc(method, attribute)\n return gather_metric", "def GetGatheringFunctions(self, G, res):\n for node in G:\n if Strings.attrGather in G.node[node] and 
G.node[node][Strings.attrGather]:\n gf = BLNlpClipsGatheringFunction()\n gf.DeftemplateName = node\n res.AddGatheringFunction(gf)", "def collect(op_sum, tensor_names, verbose=True):\n if verbose:\n sys.stdout.write(\"Collecting...\")\n sys.stdout.flush()\n op_sum = collect_numbers_and_powers(op_sum)\n collection, rest = collect_by_tensors(op_sum, tensor_names)\n if verbose:\n sys.stdout.write(\"done.\\n\")\n return collection, rest", "def take(self):\n print(f\"You take the {self.item}.\")\n collect(self.item)", "def take(self):\n print(f\"You take the {self.item}.\")\n collect(self.item)", "def take(self):\n print(f\"You take the {self.item}.\")\n collect(self.item)", "def items():\n for point in boolfunc.iter_points(inputs):\n gpnt = {v: val for v, val in point.items()\n if v not in unmapped}\n gval = gfunc.restrict(gpnt)\n # mapped function must be completely specified\n assert isinstance(gval, TTConstant)\n fpnt = {v: val for v, val in point.items()\n if v in unmapped}\n fpnt[gvar] = int(gval)\n yield func.restrict(fpnt).pcdata[0]", "def extract_summary(\n self, mean=True, max=True, min=True, ignore_sessions=False, *args, **kwargs\n ):\n out = self.__class__().__finalize__(self)\n if ignore_sessions == False:\n out.sessions = np.unique(self.sessions)\n if mean:\n new = self.extract_mean(ignore_sessions=ignore_sessions, *args, **kwargs)\n out = out.append(new, axis=1)\n # for attr_name in ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns']:\n # if new.__getattr__(attr_name):\n # new_attr = new.__getattr__(attr_name)\n # out.__setattr__(attr_name, new_attr)\n if max:\n new = self.extract_max(ignore_sessions=ignore_sessions, *args, **kwargs)\n out = out.append(new, axis=1)\n # for attr_name in ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns']:\n # if out.__getattr__(attr_name) and new.__getattr__(attr_name):\n # new_attr = out.__getattr__(attr_name) + new.__getattr__(attr_name)\n # out.__setattr__(attr_name, new_attr)\n if min:\n new = self.extract_min(ignore_sessions=ignore_sessions, *args, **kwargs)\n out = out.append(new, axis=1)\n for attr_name in [\n \"au_columns\",\n \"emotion_columns\",\n \"facebox_columns\",\n \"landmark_columns\",\n \"facepose_columns\",\n \"gaze_columns\",\n \"time_columns\",\n ]:\n if self.__getattr__(attr_name):\n new_attr = []\n if mean:\n new_attr.extend(\n [\"mean_\" + attr for attr in self.__getattr__(attr_name)]\n )\n if max:\n new_attr.extend(\n [\"max_\" + attr for attr in self.__getattr__(attr_name)]\n )\n if min:\n new_attr.extend(\n [\"min_\" + attr for attr in self.__getattr__(attr_name)]\n )\n out.__setattr__(attr_name, new_attr)\n return out", "def collect(t, collection='activation'):\n tf.add_to_collection(collection, t)\n return t", "def extract_feat(self, batch_inputs: Tensor) -> dict:\n pass", "def gather(self,\n action=None,\n method=None,\n timeout=None,\n finish_on_key=None,\n num_digits=None,\n **kwargs):\n return self.append(Gather(\n action=action,\n method=method,\n timeout=timeout,\n finish_on_key=finish_on_key,\n num_digits=num_digits,\n ))", "def __reduce__(self):\n return ImageNetDownsample, (self.cutout,)", "def make_returnn_audio_features_func():\n return _extract", "def create_helper_gather_node(\n input_name, output_name,\n indices, kwargs,\n axis=None\n ):\n attrs = {}\n if axis is not None:\n attrs['axis'] = axis\n gather_tensor_node, = create_helper_tensor_node(\n 
np.asarray(indices, np.int64), output_name + \"__indices\", kwargs\n )\n gather_node = onnx.helper.make_node(\n \"Gather\",\n inputs=[input_name, gather_tensor_node.name],\n outputs=[output_name],\n name=output_name,\n **attrs\n )\n return [gather_tensor_node, gather_node]", "def apply_filter_metrics(self, pack_nr, filter_metrics):\n current_pack_metrics = ast.literal_eval(self.list_pack[pack_nr]['metrics'])\n\n for i in filter_metrics:\n if i in current_pack_metrics:\n filter_metrics[i] = current_pack_metrics[i]\n\n self.list_pack[pack_nr]['metrics'] = filter_metrics", "def get_representation(x):\n global model\n with torch.no_grad():\n x = tuple(i.to(device) for i in x)\n x_output = model(input_ids=x[0], attention_mask=x[1], token_type_ids=x[2])\n averaged_hidden_states = torch.stack(x_output[2][-hidden_layer:]).mean(0)\n pooled = averaged_hidden_states[:, :x[1].sum(), :].mean(1) \n return pooled.clone().detach()", "def all(self, tmin=None, tmax=None):\n stats = pd.DataFrame(columns=['Value'])\n for k in self.ops.keys():\n stats.loc[k] = (getattr(self, k)(tmin=tmin, tmax=tmax))\n\n return stats" ]
[ "0.5603602", "0.5088366", "0.4996854", "0.49716508", "0.49634594", "0.4908585", "0.48814094", "0.4826999", "0.46374422", "0.46165276", "0.46148214", "0.46006873", "0.4598241", "0.45922422", "0.45850176", "0.4572346", "0.45681974", "0.45681974", "0.45681974", "0.45255744", "0.4513917", "0.4512472", "0.45110577", "0.4497771", "0.44845036", "0.4428613", "0.44086584", "0.43996516", "0.4387309", "0.4381393" ]
0.6138117
0
raise from_none(ValueError('a')) == raise ValueError('a') from None
def from_none(exc):
    exc.__cause__ = None
    exc.__suppress_context__ = True
    return exc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fail(raise_):\n if raise_:\n raise _UnexpectedForm()\n return None", "def test_invalid_argument_type(self):\n t = TruthTable('A or B')\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(float())\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(None)", "def test_none_type(self):\n\n expected = TypeError\n input_ = None\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_construction_with_negative_a_raises_error(value):\n with pytest.raises(ValueError):\n MyClass(a=value, b={})", "def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})", "def test_validate_on_invalid_value(self):\n args = (0, enums.OpaqueDataType.NONE)\n self.assertRaises(TypeError, objects.OpaqueObject, *args)", "def test_reraise_single_trigger() -> None:\n with pytest.raises(TypeError):\n with reraise(RuntimeError(\"got value error\"), when=ValueError):\n raise TypeError(\"boom\")", "def test_raise_():\n with pytest.raises(Exception):\n raise_(ValueError)", "def _raise_none_model(self):\n raise ValueError(\"Model is of type None! Was it not initialized?\")", "def test_State_basic_instance_none(self):\n with self.assertRaises(TypeError):\n State(None)", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def test_11_None_input(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(None)\n self.assertEqual(\"__init__() missing 1 required positional argument:\\\n 'height'\", str(x.exception))", "def test_multiple_with_none(self):\n with self.assertRaises(Exception) as context:\n Multiple.check_number(None)\n\n self.assertTrue('Invalid parameter value: ' in str(context.exception))", "def test_field_none_nullable(self):\n node_dict = {\n 'host_name': 'abc'\n }\n try:\n Node(**node_dict)\n except Exception as e:\n self.assertEqual(type(e), ValueError)", "def test_creation_notallow_none():\n with pytest.raises(ValueError) as __:\n value = None\n __ = param.Integer(value=value, allow_None=False)", "def check_no_raise(func, msg=None):\n # type: (Callable[[], Any], Optional[Str]) -> Any\n try:\n return func()\n except Exception as exc: # pylint: disable=W0703\n msg = \": {}\".format(msg) if msg else \".\"\n raise AssertionError(\"Exception [{!r}] was raised when none is expected{}\".format(type(exc).__name__, msg))", "def test_value_error(self):\n self._error_test(ValueError)", "def negative_test_value(self):\n self.assertRaises(ValueError, MyClass().my_func, None, [], \"a\")\n self.assertRaises(ValueError, MyClass().my_func, 1, None, \"a\")\n self.assertRaises(ValueError, MyClass().my_func, 1, [], None)\n self.assertRaises(ValueError, MyClass().my_func, a=None, b=[], c=\"a\")\n self.assertRaises(ValueError, MyClass().my_func, a=1, b=None, c=\"a\")\n self.assertRaises(ValueError, MyClass().my_func, a=1, b=[], c=None)", "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def test_null_required_argument(self):\n @converters.wrap\n def inner_test(param: str):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(lambda: inner_test(param=None), 3101)", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n) -> None:\r\n ...", "def test_NaNy(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(10, 10, float('nan'), 7)\n 
self.assertEqual(str(e.exception), 'y must be an integer')", "def _check_none(self) -> PossibleResult[T]:\n if self.constructor == type(None):\n if not self.obj is None:\n raise DeserializeError(\n type(None), self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def test_retNone1(self):\r\n class retNone(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.vector()]\r\n outputs = [theano.tensor.vector()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz, = grads\r\n pass\r\n a = retNone().make_node()\r\n self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)", "def test_reraise_no_trigger() -> None:\n with pytest.raises(RuntimeError):\n with reraise(RuntimeError(\"got exception\")):\n raise ValueError(\"boom\")", "def test_empty_source_constructor_exception():\n with pytest.raises(robox.RDJParameterErr):\n test01 = Source()", "def test_value_init12(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(10, 1, 17, -9)\n msg = \"y must be >= 0\"\n self.assertEqual(str(err.exception), msg)", "def test_badyvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, float(1), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def test_NoTransition(self):\n # NoTransition requires two arguments\n with self.assertRaises(TypeError):\n NoTransition()\n\n state = \"current-state\"\n symbol = \"transitionless-symbol\"\n noTransitionException = NoTransition(state=state, symbol=symbol)\n\n self.assertIs(noTransitionException.symbol, symbol)\n\n self.assertIn(state, str(noTransitionException))\n self.assertIn(symbol, str(noTransitionException))", "def test_initalizing_with_non_iterable_or_not_numbers_raises_ValueError():\n from bbst import Bst\n with pytest.raises(TypeError):\n Bst(\"dfsdfadgasdg\")" ]
[ "0.65491164", "0.6507355", "0.63295954", "0.61693627", "0.6143416", "0.60782343", "0.59815896", "0.5925763", "0.5909486", "0.5908904", "0.5904242", "0.58995235", "0.5848055", "0.5834902", "0.5818783", "0.57958984", "0.5760411", "0.5742847", "0.57380986", "0.5723444", "0.5700411", "0.569043", "0.56784964", "0.56672585", "0.5666538", "0.5660108", "0.56356496", "0.5624237", "0.5620601", "0.56190044" ]
0.68841505
0
Calculates the average price we would pay / receive per unit of `symbol` if we wanted to trade `quantity` of that `symbol`, based on its order book
def getOrderBookPrice(exchange, symbol, side, quantity, order_book=None):
    # TODO test it
    # print("obap1")
    order_book_side = order_book['asks'] \
        if side == exchange.SIDE_SELL else order_book['bids']

    quantity = Decimal(quantity)

    i, orders, price = 0, [], Decimal(0)
    accounted_for_quantity = Decimal(0)
    qtdif = Decimal(1)
    # print("obap2")
    while accounted_for_quantity < quantity or qtdif > Decimal(0.0001):
        try:
            order = order_book_side[i]
        except IndexError:
            raise Exception("There are not enough orders in the Order Book.")
            # return False
        qty = min(Decimal(order[1]), quantity - accounted_for_quantity)
        price += Decimal(order[0]) * qty
        accounted_for_quantity += qty
        qtdif = abs(Decimal(1) - accounted_for_quantity / quantity)
        i += 1
    # print("obap3")

    return price / quantity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cost(self, symbol) -> float:\n if len(symbol) <= 6:\n search = self.trader.stock_positions + self.trader.crypto_positions\n for p in search:\n if p['symbol'] == symbol:\n return p['avg_price']\n return None\n else:\n for p in self.trader.option_positions:\n if p['occ_symbol'] == symbol:\n return p['avg_price']", "def get_mean_volume(symbol):\n df = pd.read_csv(\"data/{}.csv\".format(symbol)) # read in data\n # Quiz: Compute and return the mean volume for this stock\n return df['Volume'].mean()", "def mid_market_price(orders: pandas.DataFrame):\n return numpy.mean((best_bid_price(orders), best_ask_price(orders)))", "def create_get_average_price_request(self, symbol: str) -> Request:", "def market_avg_price(**params):\n endpoint = 'calc/trade/avg'\n return request(authenticate=False, version=2, endpoint=endpoint, method='POST', query_params=params)", "def calculate_price(self, order: Order, price: float, quantity: float, bar: Bar, volume_used: float):\n raise NotImplementedError()", "def get_avg_price(cls, instrument: Instrument) -> float:\n try:\n avg_price = cls.objects.filter(\n instrument=instrument,\n # status=OrderStatus.COMPLETED.value\n ).annotate(price_t_volume=models.F('price') *\n models.F('total_sum')).aggregate(\n avg_price=models.Sum('price_t_volume') /\n models.Sum('total_sum'))\n except DataError: # handle division by zero\n return 0\n return float(avg_price.get('avg_price', 0) or 0)", "def get_avg(all_stock_data):\n try:\n sum_close_vol = 0.0\n sum_vol = 0.0\n for item in all_stock_data:\n adj_close = item[1]\n volume = item[2]\n sum_close_vol += adj_close * volume\n sum_vol += item[2]\n return sum_close_vol / sum_vol\n\n except Exception as e:\n print(e)\n exit()", "def stock_average(stock):\n closing_price=stock['Close']\n average=stats.mean(closing_price)\n return average", "def _get_piecewise_mean_price_vs_size_from_orderbook_entry(orders):\n cm = [0] + [x['cm'] for x in orders]\n # integral (price times qty) d_qty / qty\n # represent this as integral of piecewise polynomial with coeff [0, price]\n price = np.zeros((2, len(cm)-1))\n price[1,:] = [x['price'] for x in orders]\n f = PPoly(price, cm, extrapolate=False)\n F = f.antiderivative()\n return lambda x: F(x) / x", "def averagePrice(self, onlyUnconsumed):\n\n\t\tif onlyUnconsumed:\n\t\t\treturn self.unconsumedValue / (len(self.bottles) - self.numberConsumed)\n\n\t\treturn self.totalValue / len(self.bottles)", "def get_quantity(self, symbol: str) -> float:\n search = self.trader.stock_positions + self.trader.crypto_positions\n if not any([p['symbol'] == symbol for p in search]):\n return None\n for p in search:\n if p['symbol'] == symbol:\n return p['quantity']", "async def on_symbol_price_updated(self, price: MetatraderSymbolPrice):\n self._pricesBySymbol[price['symbol']] = price\n positions = list(filter(lambda p: p['symbol'] == price['symbol'], self._positions))\n orders = list(filter(lambda o: o['symbol'] == price['symbol'], self._orders))\n specification = self.specification(price['symbol'])\n if specification:\n for position in positions:\n if 'unrealizedProfit' not in position or 'realizedProfit' not in position:\n position['unrealizedProfit'] = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (position['currentPrice'] - position['openPrice']) * \\\n position['currentTickValue'] * position['volume'] / specification['tickSize']\n position['realizedProfit'] = position['profit'] - position['unrealizedProfit']\n new_position_price = price['bid'] if (position['type'] == 'POSITION_TYPE_BUY') 
else price['ask']\n is_profitable = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * (new_position_price -\n position['openPrice'])\n current_tick_value = price['profitTickValue'] if (is_profitable > 0) else price['lossTickValue']\n unrealized_profit = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (new_position_price - position['openPrice']) * current_tick_value * position['volume'] / \\\n specification['tickSize']\n position['unrealizedProfit'] = unrealized_profit\n position['profit'] = position['unrealizedProfit'] + position['realizedProfit']\n position['currentPrice'] = new_position_price\n position['currentTickValue'] = current_tick_value\n for order in orders:\n order['currentPrice'] = price['ask'] if (order['type'] == 'ORDER_TYPE_BUY_LIMIT' or\n order['type'] == 'ORDER_TYPE_BUY_STOP' or\n order['type'] == 'ORDER_TYPE_BUY_STOP_LIMIT') else price['bid']\n if self._accountInformation:\n self._accountInformation['equity'] = self._accountInformation['balance'] + \\\n functools.reduce(lambda a, b: a + b['profit'], self._positions, 0)", "def get_returns(self, symbol) -> float:\n cost = self.get_cost(symbol)\n price = self.get_price(symbol)\n ret = (price - cost) / cost\n return ret", "def price_average(lst):\n\n return sum(lst) / len(lst)", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def calc_market_order_buy(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < sell_volume: # 1.5 means enough margin\n return sell_min\n else:\n return sell_min + PRICE_UNIT", "def get_position_avg_price(self):\n self.__init_client()\n return float(self.get_position()['entryPrice'])", "def get_stock_price(stock):\n pass", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def calculate_total_price(total_prices):\n total = 0.0\n for symbol,individual_cost in total_prices.items():\n total += individual_cost\n return total", "def net_position(self):\n average_price = 0\n sum = 0\n for transaction in self.transactions:\n average_price += abs(transaction[0]/transaction[1])\n sum += transaction[1]\n\n average_price /= len(self.transactions) \n 
average_price *= sum\n \n return average_price", "def compute_total(price):\n\n quantity = 20\n return price * quantity", "def avg_pay(input: list) -> float:\n pay = 0\n for emp in input:\n pay += emp.get_salary()\n return pay / len(input)", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def getAvgMarketCosts(self):\n try:\n avgAL = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldAL']\n avgEC = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldEC']\n avgIA = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldIA']\n except:\n avgAL = 0.0\n avgEC = 0.0\n avgIA = 0.0\n \n return (avgAL, avgEC, avgIA)", "def avg_tx_value_USD(df):\n\n tx_vol_USD = df['Tx Volume (USD)']\n daily_txs = df['Txs']\n result = tx_vol_USD.div(daily_txs)\n result.name = 'Avg Tx Value (USD)'\n return out(SETTINGS, df, result)", "def get_average_sales(data):\n print(\"Calculating stock data...\\n\")\n avg_sales = []\n for list in data:\n int_list_avg = sum(int(item) for item in list) / len(list)\n avg_plus_extra = round(int_list_avg * 1.1)\n avg_sales.append(avg_plus_extra)\n\n return avg_sales", "def avg_tx_fees_USD(df):\n result = df['Tx fees (USD)'].div(df['Txs'])\n result.name = 'Avg Tx Fees (USD)'\n return out(SETTINGS, df, result)", "def aggregate_bid_qty(self, trade_price):\n qty = 0\n for i in range(len(self.bid)):\n if self.bid[-i].price >= trade_price:\n qty += self.bid[-i].qty\n return qty" ]
[ "0.64651424", "0.6410142", "0.63740027", "0.63389856", "0.6299657", "0.6286352", "0.6263074", "0.6189815", "0.6114266", "0.6032858", "0.60058963", "0.5941486", "0.5917772", "0.5853512", "0.5828471", "0.5826779", "0.580022", "0.57563347", "0.57001877", "0.5657362", "0.5588201", "0.55870765", "0.55741495", "0.5560555", "0.55431503", "0.55321586", "0.5510306", "0.54881364", "0.54695636", "0.54348165" ]
0.7126667
0
Inserts multiple new asks in the order book (assumes that the order book AND the new_asks list are sorted)
def insertAsks(previous_asks, received_asks):

    new_asks = []

    if len(received_asks) < 1:
        return previous_asks
    if len(previous_asks) < 1:
        return received_asks

    # print("Prev")
    # pprint(previous_asks)
    # print("Recv")
    # pprint(received_asks)

    # Uses the merge-sort idea of popping the first element in the lists
    # (which should also be the lowest)
    while len(previous_asks) > 0 and len(received_asks) > 0:
        ask = None
        if Decimal(previous_asks[0][0]) < Decimal(received_asks[0][0]):
            ask = previous_asks.pop(0)
            # print('popped from prev')
        elif Decimal(previous_asks[0][0]) > Decimal(received_asks[0][0]):
            # print('popped from recv')
            ask = received_asks.pop(0)
        else:
            # print('equal, popped from both')
            previous_asks.pop(0)
            ask = received_asks.pop(0)

        # print(ask)

        if Decimal(ask[1]) > Decimal(0):
            # print("appended")
            new_asks.append(ask)

    # print("After Merge")
    # pprint(new_asks)

    if len(previous_asks) > 0:
        new_asks.extend(previous_asks)
    elif len(received_asks) > 0:
        new_asks.extend(received_asks)

    # print("Complete")
    # pprint(new_asks)

    return new_asks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_answers(conn, cur, answers):\n \n print 'Adding answers...',\n \n for i, answer in enumerate(answers):\n cur.execute('INSERT INTO answers VALUES (\"{_id}\", \"{task_id}\", \"{text}\")'.format(\n _id = i+1,\n task_id = answer['task_id'],\n text = answer['text']\n )\n )\n \n conn.commit()\n \n print 'done.'", "def _append_pairs(new_pairs):\n desired_pairs = restore_pairs() or []\n desired_pairs += new_pairs\n print(\"Adding {} new pairs, queue has {} pairs\".format(len(new_pairs), len(desired_pairs)))\n save_pairs(desired_pairs)", "def create_entries(order, user, comp, tickets, new_order):\n for item in order.items.all():\n tickets_per_order = item.quantity\n while tickets_per_order > 0:\n create = True\n while create:\n ticket_number = randint(1, comp.tickets)\n entry, created = Entries.objects.get_or_create(\n defaults={\n 'user': user,\n 'orderItem': item\n },\n order=new_order,\n competition_entry=comp,\n ticket_number=ticket_number\n )\n if created:\n tickets_per_order -= 1\n create = False\n\n tickets_left = comp.tickets_left\n comp.tickets_left = tickets_left - tickets\n comp.save()", "def insertBids(previous_bids, received_bids):\n\n new_bids = []\n\n while len(previous_bids) > 0 and len(received_bids) > 0:\n bid = None\n if Decimal(previous_bids[0][0]) > Decimal(received_bids[0][0]):\n bid = previous_bids.pop(0)\n elif Decimal(previous_bids[0][0]) < Decimal(received_bids[0][0]):\n bid = received_bids.pop(0)\n else:\n previous_bids.pop(0)\n bid = received_bids.pop(0)\n \n if Decimal(bid[1]) > Decimal(0):\n new_bids.append(bid)\n\n if len(previous_bids) > 0:\n new_bids.extend(previous_bids)\n elif len(received_bids) > 0:\n new_bids.extend(received_bids)\n\n return new_bids", "def updateOrderbookFull(self, asks, bids):\n self.asks = asks\n self.bids = bids", "def add_car_to_order(car_to_add, cars_on_order):\r\n add_cars = int(input(\"How many of these cars would you like to add? 
\"))\r\n\r\n for i in range(1, add_cars + 1):\r\n cars_on_order.append(car_to_add)\r\n\r\n return cars_on_order", "def update_item_orders(begin_order, t_task, projects, api, cmd_count):\n for task in t_tasks.values():\n if is_in_the_same_proj(task, projects) and task['item_order'] >= begin_order:\n api.items.get_by_id(task['id']).update(item_order=task['item_order']+1)\n update_cmd_count(api)", "def update_order():", "def update_order():", "def _repair_crossed_asks(self, ask):\r\n while len(self.asks) and self.asks[0].price < ask:\r\n volume = self.asks[0].volume\r\n self._update_total_ask(-volume)\r\n self.asks.pop(0)\r\n self._valid_ask_cache = -1\r\n #self.debug(\"### repaired ask\")\r", "def add_questions(self, questions):\n for question in questions:\n self.questions.append(question)", "def put(self,items,priority=0):\n with self.pdq:\n self.pdq.cursor().executemany('insert into pdq values (?,?)',[(item,priority) for item in self._toiter(items)])", "def add_orders_after(user, after_date, items):\n order_dates = get_order_dates(after_date)\n for order_date in order_dates:\n if not order_date.is_editable:\n continue\n\n for item in items:\n order = BreadOrder(\n user=user,\n date=order_date,\n type=item\n )\n db.session.add(order)\n db.session.commit()", "def do_insert(self,args):\n if len(args) != 0:\n for w in args.split():\n sl.insertList(int(w.rstrip()))", "def add_addressbook_entries(\n self,\n write_cursor: 'DBCursor',\n entries: list[AddressbookEntry],\n ) -> None:\n # We iterate here with for loop instead of executemany in order to catch\n # which identifier is duplicated\n for entry in entries:\n try:\n # in the case of given blockchain being None delete any other entry for that\n # address since they are rendered redundant\n if entry.blockchain is None:\n write_cursor.execute(\n 'DELETE FROM address_book where address=? AND blockchain IS NOT NULL',\n (entry.address,),\n )\n\n write_cursor.execute(\n 'INSERT INTO address_book (address, name, blockchain) VALUES (?, ?, ?)',\n entry.serialize_for_db(),\n )\n # Handling both private db (pysqlcipher) and global db (raw sqlite3)\n except (dbapi2.IntegrityError, sqlite3.IntegrityError) as e: # pylint: disable=no-member # noqa: E501\n raise InputError(\n f'{entry} already exists in the address book. 
Identifier must be unique.',\n ) from e", "def _place_orders_onto_queue(self, order_list: List[OrderEvent]):\n for order_event in order_list:\n self._events.add_event(order_event)", "def put_new_questions_to_answers_table(questionList, user_id):\n\n # Put questions to answers_table\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n answer_table = dynamodb.Table(\"Answers\")\n\n now = datetime.datetime.utcnow().isoformat()\n with answer_table.batch_writer() as batch:\n for question in questionList:\n answer_to_add = {\n \"UserId\": user_id,\n \"AnswerId\": \"{}_{}\".format(user_id, question[\"QuestionId\"]),\n \"QuestionId\": question[\"QuestionId\"],\n \"Time\": now,\n \"do_again\": datetime.datetime.utcnow().isoformat(),\n \"KnowledgeList\": [],\n }\n batch.put_item(Item=answer_to_add)", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def create_order():", "def order_book(self, order_details):\n order_date = datetime.date.today()\n self.cursor.execute(\"INSERT INTO orderlog (loginID, orderDate) VALUES (%s, %s)\",\n (order_details['loginID'], order_date))\n order_id = self.cursor.lastrowid\n for i in range(len(order_details['ISBN'])):\n self.cursor.execute(\"INSERT INTO productof Values (%s, %s, %s)\",\n (order_details['ISBN'][i], order_id, order_details['quantity'][i]))\n self.cursor.execute(\"UPDATE book SET stock=stock-%s WHERE ISBN=%s\",\n (order_details['quantity'][i], order_details['ISBN'][i]))\n self.db.commit()\n return order_id", "def add_new_book(self, new_book):\n self.books_all.append(new_book)", "def insert_tasks(note, exer_tasks):\n for i in range(len(exer_tasks)):\n if 'graphml' in exer_tasks[i]:\n insert_single_graph_task(note,exer_tasks[i],i)\n else:\n insert_single_task(note,exer_tasks[i],i)\n return", "def new_task(self, appid):\r\n tasks = []\r\n for i in range(0, 10):\r\n tasks.append(Task(app_id=appid, state='0', info={}))\r\n db.session.add_all(tasks)\r\n db.session.commit()", "def fixOrderBeforeInsert(cur,vID,orderNum):\n cur.execute(\"\"\"UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? 
and orderNum >= ?\"\"\",[vID, orderNum])", "def generate_matched_orders(self, new_action, matched_queries):\n if self.sell_list and self.buy_list:\n break_flag = False\n if new_action == \"buy\":\n # for a new buy order, multipleq ueries from sell list are\n # matched as long as formula holds good\n max_buy_order = self.buy_list[-1]\n completed_sell_orders = 0\n for sell_order in self.sell_list:\n buy_qty = max_buy_order.order_qty\n if sell_order.stock_value <= max_buy_order.stock_value:\n sell_qty = sell_order.order_qty\n if buy_qty > sell_qty:\n completed_sell_orders += 1\n max_buy_order.order_qty = buy_qty - sell_qty\n matched_qty = sell_qty\n elif sell_qty == buy_qty:\n self.buy_list.pop()\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n else:\n self.buy_list.pop()\n sell_order.order_qty = sell_qty - buy_qty\n matched_qty = buy_qty\n break_flag = True\n matched_queries.append(\n \"%s %s %s %s\" % (sell_order.order_id,\n matched_qty,\n sell_order.stock_value,\n max_buy_order.order_id))\n else:\n break_flag = True\n if break_flag:\n break\n if completed_sell_orders:\n self.sell_list = self.sell_list[completed_sell_orders:]\n else:\n min_sell_order = self.sell_list[0]\n completed_buy_orders = 0\n # for a new sell order, multiple queries from buy list are\n # matched as long as formula holds good\n for index in range(len(self.buy_list)-1, -1, -1):\n break_flag = False\n buy_order = self.buy_list[index]\n sell_qty = min_sell_order.order_qty\n if min_sell_order.stock_value <= buy_order.stock_value:\n buy_qty = buy_order.order_qty\n if buy_qty > sell_qty:\n buy_order.order_qty = buy_qty - sell_qty\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n elif buy_qty == sell_qty:\n self.buy_list.pop()\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n else:\n completed_buy_orders -= 1\n min_sell_order.order_qty = sell_qty - buy_qty\n matched_qty = buy_qty\n matched_queries.append(\n \"%s %s %s %s\" % (min_sell_order.order_id,\n matched_qty,\n min_sell_order.stock_value,\n buy_order.order_id))\n else:\n break_flag = True\n if break_flag:\n break\n if completed_buy_orders:\n self.buy_list = self.buy_list[:completed_buy_orders]", "def add_order(self, orders):\n if isinstance(orders, list):\n for order in orders:\n self._add_order(order)\n else:\n self._add_order(orders)", "def test_add_many_implicit_commit(self):\n\n # That one fails in r5 (<commit/> must be made on its own)\n\n doc_count = 10\n user_ids = [get_rand_string() for x in range(doc_count)]\n data = [get_rand_string() for x in range(doc_count)]\n ids = [get_rand_string() for x in range(doc_count)]\n documents = [dict(user_id=user_ids[x], data=data[x], id=ids[x])\n for x in range(doc_count)]\n\n # Pass in the commit flag.\n self.conn.add(documents, True)\n\n results = []\n for id in ids:\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])", "def test_ordering_bulk_create(self):\n test_questionnaire = Questionnaire.objects.create(title='test_ordering_2')\n Question.objects.bulk_create([\n Question(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 1',\n ),\n Question(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 2',\n ),\n ])\n\n self.assertEqual(test_questionnaire.questions.get(question_text='question 1').order, 0)\n 
self.assertEqual(test_questionnaire.questions.get(question_text='question 2').order, 1)", "def add_orders_on(user, order_date, items):\n for item in items:\n order = BreadOrder(\n user=user,\n date=order_date,\n type=item\n )\n db.session.add(order)\n db.session.commit()", "def update_artist_set():\n conn = psycopg2.connect(\"dbname=artistqdb host=localhost user=postgres\")\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n cur.execute(\"\"\"insert into confirmed_artists (artist)\n select artist\n from scrobbles\n group by artist\n having count(distinct song) > 2\"\"\")\n # TODO: Figure out how to not insert duplicates (like, \"where not exists\")\n\n # Remove any duplicates\n cur.execute(\"\"\"delete from confirmed_artists as l\n using confirmed_artists as r\n where l.artist = r.artist\n and l.id > r.id\"\"\")" ]
[ "0.5846932", "0.5842225", "0.5782144", "0.5523276", "0.5449554", "0.53335917", "0.5256855", "0.5231915", "0.5231915", "0.51975715", "0.5142276", "0.51192087", "0.50733495", "0.5046175", "0.5029141", "0.49793863", "0.4966205", "0.49656373", "0.49622992", "0.49538177", "0.4896863", "0.48767522", "0.48631817", "0.48621136", "0.48524198", "0.48482648", "0.4845951", "0.4843412", "0.48421302", "0.48192734" ]
0.7092049
0
Inserts multiple new bids in the order book (assumes that the order book AND the new_bids list are sorted)
def insertBids(previous_bids, received_bids):

    new_bids = []

    while len(previous_bids) > 0 and len(received_bids) > 0:
        bid = None
        if Decimal(previous_bids[0][0]) > Decimal(received_bids[0][0]):
            bid = previous_bids.pop(0)
        elif Decimal(previous_bids[0][0]) < Decimal(received_bids[0][0]):
            bid = received_bids.pop(0)
        else:
            previous_bids.pop(0)
            bid = received_bids.pop(0)

        if Decimal(bid[1]) > Decimal(0):
            new_bids.append(bid)

    if len(previous_bids) > 0:
        new_bids.extend(previous_bids)
    elif len(received_bids) > 0:
        new_bids.extend(received_bids)

    return new_bids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_new_bids(self):\n for bidder in self._bidders:\n if bidder != self._highest_current_bidder:\n bid_price = bidder(self)\n if bid_price > self.current_bid:\n self.update_bid(bid_price, bidder)", "def add_boid(self, new_boid):\r\n self.collection.append(new_boid)", "def send_bids_to_db(ora_con, tdate):\n bid_init_insert = []\n bid_init_hour_insert = []\n bid_init_pair_insert = []\n dpgs = []\n for bid in Bid:\n if bid.is_new:\n dpgs.append((bid.dpg_id,))\n bid_init_insert.append((\n bid.dpg_code, bid.dpg_id, bid.bid_id, tdate\n ))\n for bih in bid:\n bid_init_hour_insert.append((\n bih.bid_id, bih.bid_hour_id, bih.hour, bid.dpg_id\n ))\n for bip in bih.interval_data:\n bid_init_pair_insert.append((\n bip.bid_hour_id, bip.interval_number, bip.price,\n bip.volume, bid.dpg_id, bip.volume_init\n ))\n\n with ora_con.cursor() as curs:\n curs.executemany('DELETE from bid_init_pair where dpg_id = :1', dpgs)\n curs.executemany('DELETE from bid_init_hour where dpg_id = :1', dpgs)\n curs.executemany('DELETE from bid_init where dpg_id = :1', dpgs)\n curs.executemany('''INSERT into bid_init (dpg_code, dpg_id, bid_id, target_date)\n values (:1, :2, :3, :4)''', bid_init_insert)\n curs.executemany('''INSERT into bid_init_hour (bid_id, bid_hour_id, hour, dpg_id)\n values (:1, :2, :3, :4)''', bid_init_hour_insert)\n curs.executemany('''INSERT into bid_init_pair (bid_hour_id, interval_num,\n price, volume, dpg_id, volume_src0)\n values (:1, :2, :3, :4, :5, :6)''', bid_init_pair_insert)", "def add_bids_vertica(scenario):\n con = DB.VerticaConnection()\n\n for new_row in con.script_cursor(bis_v, scenario=scenario):\n Bid(new_row, is_new=True)\n\n\n for new_row in con.script_cursor(bhs_v, scenario=scenario):\n bid = Bid[new_row.dpg_id]\n if bid:\n bid.add_hour_data(new_row)\n\n\n # h_re = re.compile(r'(?<=_)\\d+')\n for new_row in con.script_cursor(bps_v, scenario=scenario):\n bid = Bid[new_row.dpg_id]\n if bid:\n bid.add_intervals_data(new_row)", "def _repair_crossed_bids(self, bid):\r\n while len(self.bids) and self.bids[0].price > bid:\r\n price = self.bids[0].price\r\n volume = self.bids[0].volume\r\n self._update_total_bid(-volume, price)\r\n self.bids.pop(0)\r\n self._valid_bid_cache = -1\r\n #self.debug(\"### repaired bid\")\r", "def add_new_book(self, new_book):\n self.books_all.append(new_book)", "def insertAsks(previous_asks, received_asks):\n\n new_asks = []\n\n if len(received_asks) < 1:\n return previous_asks\n if len(previous_asks) < 1:\n return received_asks\n \n # print(\"Prev\")\n # pprint(previous_asks)\n # print(\"Recv\")\n # pprint(received_asks)\n\n # Uses the merge-sort idea of popping the first element in the lists\n # (which should also be the lowest)\n while len(previous_asks) > 0 and len(received_asks) > 0:\n ask = None\n if Decimal(previous_asks[0][0]) < Decimal(received_asks[0][0]):\n ask = previous_asks.pop(0)\n # print('popped from prev')\n elif Decimal(previous_asks[0][0]) > Decimal(received_asks[0][0]):\n # print('popped from recv')\n ask = received_asks.pop(0)\n else:\n # print('equal, popped from both')\n previous_asks.pop(0)\n ask = received_asks.pop(0)\n \n # print(ask)\n\n if Decimal(ask[1]) > Decimal(0):\n # print(\"appended\")\n new_asks.append(ask)\n\n # print(\"After Merge\")\n # pprint(new_asks)\n\n if len(previous_asks) > 0:\n new_asks.extend(previous_asks)\n elif len(received_asks) > 0:\n new_asks.extend(received_asks)\n \n # print(\"Complete\")\n # pprint(new_asks)\n\n return new_asks", "def add_book(book):\n\n global book_list\n book.id = generate_id()\n 
book_list.append(book)", "def add_bid(self, bid, player_id):\n\t\tglobal_id = self.globalize_id(player_id)\n\t\tassert len(self.bids) < self.data_size and global_id not in self.bids\n\t\tif bid == 0:\n\t\t\tbid = \"N\"\n\t\tself.bids[global_id] = bid", "def updateOrderbookFull(self, asks, bids):\n self.asks = asks\n self.bids = bids", "def add_shelf_books(edition_ids, shelf):\n\n for ed_id in edition_ids:\n try:\n shelfbook_match = db.session.query(ShelfBook).filter(ShelfBook.ed_id == ed_id, ShelfBook.shelf_id == shelf.shelf_id).one()\n print \"This shelfbook already exists!\"\n except:\n new_shelfbook = ShelfBook(ed_id=ed_id, shelf_id=shelf.shelf_id)\n db.session.add(new_shelfbook)\n\n db.session.commit()", "def put_fake_data():\r\n\r\n \"\"\"\r\n BOOKINGS:\r\n - 1: FUTURE BOOKING \r\n - USER 3 \r\n - REST 4 \r\n - TABLE 3\r\n - 2: FUTURE BOOKING \r\n - USER 4\r\n - REST 3\r\n - TABLE 4\r\n - 3: OLD BOOKING \r\n - USER 2\r\n - REST 2\r\n - TABLE 2\r\n - 4: OLD BOOKING \r\n - USER 2\r\n - REST 2\r\n - TABLE 2\r\n - 5: FUTURE BOOKING \r\n - USER 4\r\n - REST 3\r\n - TABLE 5\r\n - 6: OLD BOOKING \r\n - USER 3\r\n - REST 3\r\n - TABLE 4\r\n USERS:\r\n - 1: NO BOOKINGS \r\n - 2: 3 OLD BOOKINGS \r\n - 3: 1 NEW AND 2 OLD \r\n - 4: 2 NEW \r\n \r\n RESTAURANTS:\r\n - 1: NO BOOKINGS \r\n - 2: 2 OLD BOOKINGS \r\n - 3: 2 NEW AND 3 OLD \r\n - 4: 1 NEW\r\n\r\n TABLES:\r\n - 1: NO BOOKINGS \r\n - CAPACITY: 4\r\n - REST: 1\r\n - BOOKINGS: []\r\n - 2: 2 OLD BOOKINGS \r\n - CAPACITY: 3\r\n - REST: 2\r\n - BOOKINGS: [3, 4]\r\n - 3: TABLE WITH A NEW BOOKING \r\n - CAPACITY: 2\r\n - REST: 4\r\n - BOOKINGS: [1]\r\n - 4: TABLE WITH TWO OLD AND A NEW BOOKING\r\n - CAPACITY: 5\r\n - REST: 3\r\n - BOOKINGS: [2, 6, 8]\r\n - 5: TABLE WITH A NEW BOOKING AND AN OLD\r\n - CAPACITY: 4\r\n - REST: 3\r\n - BOOKINGS: [5, 7]\r\n - 6: NO BOOKINGS\r\n - CAPACITY: 2\r\n - REST: 3\r\n - BOOKINGS: []\r\n \"\"\"\r\n\r\n # add_booking(user_id, rest_id, number_of_people, booking_datetime, table_id)\r\n \r\n # 1: FUTURE BOOKING (USER 3, REST 4, TABLE 3)\r\n add_booking(3, 4, 2, (datetime.datetime.now().replace(hour=10) + datetime.timedelta(days=2)), 3) \r\n \r\n # 2: FUTURE BOOKING (USER 4, REST 3, TABLE 4)\r\n add_booking(4, 3, 1, (datetime.datetime.now().replace(hour=13) + datetime.timedelta(days=1)), 4)\r\n \r\n # 3: OLD BOOKING (USER 2, REST 2, TABLE 2)\r\n add_booking(2, 2, 3, (datetime.datetime.now().replace(hour=13) - datetime.timedelta(days=3)), 2)\r\n \r\n # 4: OLD BOOKING (USER 2, REST 2, TABLE 2)\r\n add_booking(2, 2, 3, (datetime.datetime.now().replace(hour=13) - datetime.timedelta(days=1)), 2)\r\n \r\n # 5: FUTURE BOOKING (USER 4, REST 3, TABLE 5)\r\n add_booking(4, 3, 1, (datetime.datetime.now().replace(hour=13) + datetime.timedelta(days=2)), 5)\r\n \r\n # 6: OLD BOOKING (USER 3, REST 3, TABLE 4)\r\n add_booking(3, 3, 1, (datetime.datetime.now().replace(hour=13) - datetime.timedelta(days=2)), 4)\r\n\r\n time = datetime.datetime.now().replace(hour=13) - datetime.timedelta(days=10)\r\n # 7: OLD BOOKING (USER 4, REST 3, TABLE 5)\r\n add_booking(4, 3, 2, (time), 5, entrance_datetime=(time + datetime.timedelta(minutes=1)))\r\n \r\n # 8: OLD BOOKING (USER 3, REST 3, TABLE 4)\r\n add_booking(3, 3, 1, (time), 4, entrance_datetime=(time + datetime.timedelta(minutes=1)))", "def add_orders_after(user, after_date, items):\n order_dates = get_order_dates(after_date)\n for order_date in order_dates:\n if not order_date.is_editable:\n continue\n\n for item in items:\n order = BreadOrder(\n user=user,\n date=order_date,\n type=item\n 
)\n db.session.add(order)\n db.session.commit()", "def add_book(self, data):\n exists = self.check_if_exists(data['isbn'])\n\n if exists:\n query = f\"\"\"UPDATE {TABLE} SET quantity = quantity + 10 WHERE bookID = '{data[\"isbn\"]}'\"\"\"\n else:\n query = f\"\"\"INSERT INTO {TABLE}(bookID, title, authors, avg_rating, ratings_count,\n lang_code, num_pages, text_reviews, pub_date, publisher) values(\n \"{data['isbn']}\",\n \"{data['title']}\",\n \"{data['authors']}\",\n {float(data['average_rating'])},\n {int(data['ratings_count'])},\n \"{data['language_code']}\",\n {int(data[' num_pages'])},\n {int(data['text_reviews_count'])},\n \"{data['publication_date']}\",\n \"{data['publisher']}\"\n );\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)", "def add_book_readlist(user_id,book):\n try: \n user_id = int(user_id)\n data = collection.find({\"user_id\": user_id})\n\n if data.count() > 0: \n bk = data[0]['books']\n if int(book) in bk:\n return jsonify({\"Error\":\"Book alredy in the list\"}),409\n bk.append(int(book))\n try:\n collection.update_one({\"user_id\": user_id},{\"$set\":{\"books\":bk}})\n except: \n # Bad request\n return jsonify({\"Error\":\"Not Acceptable (Invalid Params)\"}), 406\n return \"\",200\n\n else:\n\n data = {\"user_id\" : user_id,\"books\": [book]}\n record_created = collection.insert(data)\n\n return \"\", 200\n\n except: \n return \"\", 500", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def issue_book(self, book_id, issue_date):\r\n for book in LibraryDB.book_list:\r\n if book.book_id == book_id:\r\n book.availability = False\r\n book.issue_date = issue_date\r\n self.issued_books += [book]\r\n # Transaction(book, self, \"Book Issued\")\r\n date = book.issue_date.strftime('%b %d, %Y')\r\n LibraryDB.transaction_history.append(Transaction(book, self, \"Book Issued\", str(date)))\r\n print(\"Book Issued!\")\r\n break\r\n else:\r\n print(\"Book with this id is not available!\")", "def generate_orderbooks(self):\n logger.DLOG(\"Generating orderbooks...\")\n # Create marketplace in db if not exist\n market_place = self.db_ops.insert_market_place(self.market_place, self.amas_location, self.amas_port, self.commit_orderbook)\n # Create market segment in db if not exist\n market_segment = self.db_ops.insert_market_segment(self.market_place, self.market_segment, self.commit_orderbook) # no creation of new market segment if update\n \n \n if market_place and market_segment:\n # Get instruments to generate orderbooks for\n instruments = self.get_instruments_to_generate_orderbooks(self.stored_query)\n \n if not instruments:\n logger.ELOG(\"no instrument selected in query'%s'\"%(self.stored_query))\n\n for each_instrument in instruments:\n \n orderbook_currency = each_instrument.Currency().Name() \n orderbook_name = self.define_orderbook_name(each_instrument, self.external_id_type)\n if not orderbook_name:\n logger.ELOG(\"**Cannot** generate Orderbook, as no ExternalId found to map\")\n continue\n\n # Check orderbook exist for instrument in db\n existing_orderbook = self.db_ops.get_orderbook_from_marketplace(each_instrument, market_place, orderbook_currency)\n if existing_orderbook: \n \n if 
self.update_orderbook: \n # update existing orderbook in database with new values or/and new leaf (market segment)\n self.db_ops.update_orderbook(existing_orderbook, each_instrument, market_place, market_segment, self.market_capability, self.tick_size_list, \\\n self.round_lot, self.day_count, orderbook_name, self.tiering_level, orderbook_currency) \n \n if self.commit_orderbook:\n #this is for creating the a new leaf, if customer wants an orderbook to be listed in another leaf\n group_map = self.db_ops.get_list_leaf(existing_orderbook, market_segment) \n if group_map and existing_orderbook.GroupMaps().IndexOf(group_map) <0 :\n existing_orderbook.GroupMaps().Add(group_map) \n existing_orderbook.GroupMaps().Commit() \n \n else:\n #This parts doesnt allow an orderbook to exist in in two different market segments on the same market. while for an organisational pupose\n #traders needs to add it on two different segments. but the same orderbook same physicalMarketSegment but another leaf\n # Check if same orderbook name is used for any other instrument orderbook\n #orderbook_name_in_use = self.db_ops.check_orderbook_name_already_in_use(orderbook_name, market_place)\n #if orderbook_name_in_use:\n # logger.LOG(\"**Cannot** create OrderBook. Orderbook ExternalID <%s> is already used for instrument <%s> in MarketPlace <%s>\"%(orderbook_name, orderbook_name_in_use.Instrument().Name(), market_place.Name()))\n # continue\n \n if self.commit_orderbook or (not self.commit_orderbook and not self.update_orderbook):\n logger.DLOG(\"Order book **does not exist** for instrument <%s>, MarketPlace <%s>.Creating it...\"%(each_instrument.Name(), market_place.Name()))\n # Get tick size, round lot and day count from another existing orderbook for same instrument\n tick_size_list, round_lot, day_count = self.get_orderbook_data(each_instrument)\n \n self.db_ops.insert_orderbook(each_instrument, market_place, market_segment, self.market_capability, tick_size_list, \\\n round_lot, day_count, orderbook_name, self.commit_orderbook, self.tiering_level, orderbook_currency)\n \n if self.update_orderbook and not self.commit_orderbook:\n logger.WLOG(\"**Cannot** update orderbook for <%s> as it does not exist in database.\"%each_instrument.Name()) \n \n \n else:\n if not market_place:logger.WLOG(\"Market place doesnt exist\") \n if not market_segment:logger.WLOG(\"Market segment doesnt exist\")", "def order_book(self, order_details):\n order_date = datetime.date.today()\n self.cursor.execute(\"INSERT INTO orderlog (loginID, orderDate) VALUES (%s, %s)\",\n (order_details['loginID'], order_date))\n order_id = self.cursor.lastrowid\n for i in range(len(order_details['ISBN'])):\n self.cursor.execute(\"INSERT INTO productof Values (%s, %s, %s)\",\n (order_details['ISBN'][i], order_id, order_details['quantity'][i]))\n self.cursor.execute(\"UPDATE book SET stock=stock-%s WHERE ISBN=%s\",\n (order_details['quantity'][i], order_details['ISBN'][i]))\n self.db.commit()\n return order_id", "def add_id(demand_array, old_iter, new_iter):\r\n #функция для первоначального добавления айдишника\r\n #используется в тех случаях, когда зафиксирована продажа,\r\n #но конкретно такого предмета еще нет в demand\r\n #adding item ID\r\n demand_array.append({\"item_id\": old_iter['item_id']})\r\n #ярлык для наполнения массива дополнительными свойствами, если они есть\r\n def search(value):\r\n nonlocal old_iter\r\n nonlocal demand_array\r\n if value in old_iter:\r\n demand_array[len(demand_array)-1].update({value: old_iter[value]})\r\n 
search('refine')\r\n search('cards')\r\n search('star_crumbs')\r\n search('element')\r\n search('beloved')\r\n #adding price:sold amount info\r\n if isinstance(new_iter, bool):\r\n _position = demand_array[len(demand_array)-1]\r\n _position[old_iter['price']] = old_iter['amount']\r\n else:\r\n _position = demand_array[len(demand_array)-1]\r\n _position[old_iter['price']] = old_iter['amount'] - new_iter['amount']", "def test_add_many_implicit_commit(self):\n\n # That one fails in r5 (<commit/> must be made on its own)\n\n doc_count = 10\n user_ids = [get_rand_string() for x in range(doc_count)]\n data = [get_rand_string() for x in range(doc_count)]\n ids = [get_rand_string() for x in range(doc_count)]\n documents = [dict(user_id=user_ids[x], data=data[x], id=ids[x])\n for x in range(doc_count)]\n\n # Pass in the commit flag.\n self.conn.add(documents, True)\n\n results = []\n for id in ids:\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])", "def create_new_tickers(tick_scrape):\n #Check if ticker exists, if not add it to the ticker table\n tick_db = sqlaq_to_df(ticker.fetch())\n #add the id to the tick_ftse table\n new_tick = pd.merge(\n tick_scrape,\n tick_db[[\"id\",\"ticker\"]],\n on=[\"ticker\"],\n how=\"left\"\n )\n #find tickers which don't exist\n new_tick = new_tick[new_tick.id.isnull()]\n logger.info(f\"{new_tick.shape[0]} items to add to ticker\")\n #add to db\n ticker.add_df(new_tick)\n #fetch updated table\n tick_db = sqlaq_to_df(ticker.fetch())\n return tick_db", "def update_bonds(existing_bonds, new_bonds):\n\n # make a shallow copy\n bonds = list(existing_bonds)\n\n for b_i in new_bonds:\n\n idxes = b_i[0:2] if b_i[0] < b_i[1] else (b_i[1], b_i[0])\n\n try:\n old_idx = next(i for i, e_b in enumerate(existing_bonds)\n if e_b[0:2] == idxes)\n except StopIteration:\n bonds.append(\n b_i if b_i[0] < b_i[1] else (b_i[1], b_i[0], b_i[2])\n )\n continue\n\n if abs(b_i[2] - 0.0) < 0.1:\n del bonds[old_idx]\n else:\n bonds[old_idx] = b_i\n\n return bonds", "def move_all_boids_to_new_positions(self):\r\n #print(\"move boids to new positions\")\r\n for boid in self.boids:\r\n closeBoids = self.get_neighbouring_boids(boid, self.boids)\r\n \r\n # apply the boid algorithm\r\n boid.moveCloser(closeBoids)\r\n boid.moveWith(closeBoids) \r\n boid.moveAway(closeBoids, self.minDistance)\r\n \r\n # check for border so the boid deosn't fly into oblivion or migrate\r\n # North out of season\r\n if boid.x < (self.border - (self.ballrect.width/2)) and boid.velocity_x < 0:\r\n boid.velocity_x = -boid.velocity_x * random.random()\r\n if boid.x > (self.width - (self.ballrect.width/2)) and boid.velocity_x > 0:\r\n boid.velocity_x = -boid.velocity_x * random.random()\r\n if boid.y < (self.border - (self.ballrect.height/2)) and boid.velocity_y < 0:\r\n boid.velocity_y = -boid.velocity_y * random.random()\r\n if boid.y > (self.height - (self.border + (self.ballrect.height/2))) and boid.velocity_y > 0:\r\n boid.velocity_y = -boid.velocity_y * random.random()\r\n \r\n # velocity and position tweaked, let's move!\r\n boid.move(self.maxVelocity)", "def place_bid():\n if not request.get_json():\n abort(400)\n data = request.get_json(force=True)\n\n if not data.get('userID'):\n abort(400)\n if not data.get('amount'):\n abort(400)\n if not data.get('petID'):\n abort(400)\n\n #new_uuid = str(uuid.uuid4())\n mod.place_a_bid(data['petID'], data['amount'], data['userID'])\n # HTTP 200 Created\n # return jsonify({\"id\": 
new_uuid}), 200\n resp = {\"status\": \"OK\"}\n return jsonify(resp)", "def attach_item_to_limb(body,id,limb):\n\tbody[limb]['holding'].append(id)\n\tlogging.debug('%s attached to %s' % (id,limb))\n\t\n\treturn True", "def _append_pairs(new_pairs):\n desired_pairs = restore_pairs() or []\n desired_pairs += new_pairs\n print(\"Adding {} new pairs, queue has {} pairs\".format(len(new_pairs), len(desired_pairs)))\n save_pairs(desired_pairs)", "def update(self, book_info, destroy):\n self.connect()\n is_issue = len(book_info) == 2\n\n bid = book_info[0].get()\n if is_issue:\n issue_to = book_info[1].get()\n\n if is_issue:\n extract_bid = f\"select bid from {self.book_table}\"\n else:\n extract_bid = f\"select bid from {self.issued_table}\"\n\n status = False\n try:\n self.cur.execute(extract_bid)\n self.con.commit()\n for i in self.cur:\n self.all_bid.append(i[0])\n\n if bid in self.all_bid:\n check_avail = f\"select status from {self.book_table} where \" \\\n f\"bid = '{bid}'\"\n self.cur.execute(check_avail)\n self.con.commit()\n check = None\n for i in self.cur:\n check = i[0]\n\n if (is_issue and check == 'avail'\n or not is_issue and check == 'issued'):\n status = True\n else:\n status = False\n else:\n messagebox.showinfo(\"Error\", \"Book ID not present\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't fetch Book IDs\")\n print(err)\n\n if is_issue:\n issue_sql = f\"insert into {self.issued_table} values ('{bid}',\" \\\n f\"'{issue_to}')\"\n up_status = f\"update {self.book_table} set status = 'issued' \" \\\n f\"where bid = '{bid}'\"\n else:\n issue_sql = f\"delete from {self.issued_table} where bid = '{bid}'\"\n up_status = f\"update {self.book_table} set status = 'avail' \" \\\n f\"where bid = '{bid}'\"\n\n try:\n if bid in self.all_bid and status:\n self.cur.execute(issue_sql)\n self.con.commit()\n self.cur.execute(up_status)\n self.con.commit()\n if is_issue:\n msg = \"Book Issued Successfully\"\n else:\n msg = \"Book Returned Successfully\"\n state = 'Success'\n else:\n if is_issue:\n msg = \"Book Already Issued\"\n else:\n msg = \"Please check the book ID\"\n state = \"Message\"\n messagebox.showinfo(state, msg)\n except MySQLError as err:\n messagebox.showinfo(\n \"Search Error\", \"The value entered is wrong, Try again\"\n )\n print(err)\n self.all_bid.clear()\n destroy()", "def add_boids(\n self,\n n: int,\n look_up_distance: float,\n look_up_angle: float,\n species_name: Union[str, int] = 1\n ):\n while n > 0:\n _x = random.random() * self.board_size\n _y = random.random() * self.board_size\n if self.is_in_obstacle(_x, _y):\n continue\n vel = random.random() * look_up_distance\n direction = random.random() * math.pi * 2\n self.boids.append(b.Boid(_x, _y, look_up_distance, look_up_angle, vel, direction, species_name))\n n -= 1", "def test_many_insertions():\n # Connect to the database\n mongodb = get_database()\n\n expected_ids = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 76950,\n 43380, 26717, 70, 47561, 32800, 37021, 2449, 63555, 72987}\n try:\n ids = add_candidates(mongodb)\n print(\"received ids: \", ids)\n assert all(index in expected_ids for index in ids)\n finally:\n collection = mongodb[COLLECTION_NAME]\n collection.drop()" ]
[ "0.6216382", "0.6018596", "0.5861003", "0.57200617", "0.5687406", "0.5627169", "0.5626909", "0.5589654", "0.5553824", "0.5513555", "0.54484546", "0.544297", "0.5307633", "0.52903914", "0.5283958", "0.5282783", "0.5269406", "0.52541256", "0.5221003", "0.51885104", "0.51821554", "0.5137006", "0.5134006", "0.51240104", "0.51239705", "0.5122961", "0.50914955", "0.50672644", "0.5058053", "0.5047053" ]
0.72524697
0
Populate paths with lengths in database
def postPathLengths(map_area): paths = Path.query.filter(Path.map_area==map_area).all() for path in paths: start = Node.query.filter_by(id=path.start).first() end = Node.query.filter_by(id=path.end).first() # get the length of the two side of the paths avg_length = calculatePathLength(start, end) path.length = avg_length print(avg_length) # save the changes to the database db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateWireLenght(path_list):\n\n total_length = 0\n for path in path_list:\n total_length += len(path)\n return total_length", "def path_length(self,path,num_repeats=10):\n begin_time=datetime.datetime.now()\n #num_repeats=100\n for i in range(num_repeats):\n self.virtual_move_to(path)\n end_time=datetime.datetime.now()\n delta_t=end_time-begin_time\n path_length=delta_t.total_seconds()/float(num_repeats)\n if path_length ==0.0:\n print(\"Warning the path length is less than 1 microsecond, make sure num_repeats is high enough to measure it.\")\n return path_length", "def path_lengths(self):\n trip_id2length = defaultdict(float)\n prev_id = 0\n cur_id = 0\n prev_lat = 0\n prev_lon = 0\n num_big_hops = 0\n big_hops = {}\n print \"Bad Distances\"\n for line in self.lines:\n #normalized = dg.normalize(line)\n normalized = normalize_simple(line)\n cur_id = normalized[0]\n lat = normalized[1]\n lon = normalized[2]\n if cur_id == prev_id:\n distance = gps_dist_miles(prev_lat,prev_lon,lat,lon)\n if distance > 1:\n big_hops[cur_id] = 1\n num_big_hops += 1\n print cur_id\n trip_id2length[cur_id] += distance \n prev_lat = lat\n prev_lon = lon\n prev_id = cur_id\n\n print len(trip_id2length.keys())\n #for bad_id in big_hops.keys():\n # del trip_id2length[bad_id]\n\n for i in (15,18,333,24,12345):\n print \"%d: %f\" % (i,trip_id2length[i])\n\n #for i in range(1,25001):\n # if i not in trip_id2length.keys():\n # print i\n num_trips = len(trip_id2length.keys())\n print num_trips\n total_len = 0.0\n for i in trip_id2length.keys():\n if trip_id2length[i] > 50:\n print \"Big trip: %d\" % i\n #del trip_id2length[i]\n total_len += trip_id2length[i]\n heap = []\n for i in trip_id2length.keys():\n heapq.heappush(heap,trip_id2length[i])\n quarter_len = num_trips/4\n for i in range(quarter_len):\n heapq.heappop(heap)\n print \"25th percentile: %f\" % heapq.heappop(heap)\n for i in range(quarter_len):\n heapq.heappop(heap)\n print \"median: %f\" % heapq.heappop(heap)\n for i in range(quarter_len):\n heapq.heappop(heap)\n print \"75th percentile: %f\" % heapq.heappop(heap)\n\n num_trips = len(trip_id2length.keys())\n print num_trips\n avg_len = total_len/num_trips\n print \"average length: %f\" % avg_len \n print \"total length %f\" % total_len\n print \"number of big hops: %d\" % num_big_hops\n return trip_id2length,avg_len", "def get_path_length(self) :\n return self.path_length", "def loadPaths(self):\n for ij in self.link:\n self.link[ij].flow = 0\n for p in self.path:\n for ij in self.path[p].links:\n self.link[ij].flow += self.path[p].flow\n for ij in self.link:\n self.link[ij].updateCost()\n for p in self.path:\n self.path[p].updateCost()", "def internal_path_lengths(self, node_list, link_attribute=None):\n return self.\\\n path_lengths(link_attribute)[node_list, :][:, node_list]", "def dir_creation():\n for i in range(dir_count):\n dir_name = str(i * nums)\n max_len = len(str(nums * dir_count))\n # if the len is equal, add stright forward\n if len(dir_name) == max_len:\n container.append(dir_name)\n while len(dir_name) < max_len:\n # tambahkan angka 0 sampai len(dir_name) == max_len\n dir_name = \"0\" + dir_name\n if len(dir_name) == max_len:\n container.append(dir_name)\n break\n return container", "def __len__(self):\n return len(self.paths)", "def __len__(self):\n return len(self.paths)", "def size(path):", "def update_usage_sizes():\n\n if not table_exists(USAGE_TABLE['name']):\n create_table()\n\n user_paths = select_user_paths()\n user_sizes = []\n for x in user_paths:\n curr_dir = 
os.path.join(ROOT_DIR, x[1])\n user_sizes.append((x[0], round(get_directory_size_in_megabytes(curr_dir), 8)))\n\n for tup in user_sizes:\n if not record_exists(tup[0]):\n insert_usage(tup)\n else:\n update_usage(tup)", "def initialize_lengths():\n global length\n for id in document_filenames:\n l = 0\n for term in dictionary:\n l += imp(term,id)**2\n length[id] = math.sqrt(l)", "def path_cost(path):\n return len(path)", "def path_entries(self):", "def write_longdir_table(self):\n\n start_of_longdirs = self.db_file.tell()\n self.longdirs = {}\n for miEntry in self.mainIndex:\n if miEntry.longdir not in self.longdirs:\n self.longdirs[miEntry.longdir] = \\\n self.db_file.tell() - start_of_longdirs\n self.db_file.write(miEntry.encodedLongdir)\n miEntry.set_longdir_offset(self.longdirs[miEntry.longdir])", "def path_length(G, path, weight=\"weight\"):\n length = 0\n u = path[0]\n for v in path[1:]:\n length += G[u][v][weight]\n u = v\n return length", "def test_pathlength_verbs(id1, id2, pathlength):\n synset1 = germanet_data.get_synset_by_id(id1)\n synset2 = germanet_data.get_synset_by_id(id2)\n dist = synset1.shortest_path_distance(synset2)\n np.testing.assert_equal(dist, pathlength)", "def test_pathlength_adj(id1, id2, pathlength):\n synset1 = germanet_data.get_synset_by_id(id1)\n synset2 = germanet_data.get_synset_by_id(id2)\n dist = synset1.shortest_path_distance(synset2)\n np.testing.assert_equal(dist, pathlength)", "def test_pathlength_nouns(id1, id2, pathlength):\n synset1 = germanet_data.get_synset_by_id(id1)\n synset2 = germanet_data.get_synset_by_id(id2)\n dist = synset1.shortest_path_distance(synset2)\n np.testing.assert_equal(dist, pathlength)", "def longify (self, path):\r\n pass", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.path)", "def __len__(self):\n if self.path_is_string:\n if self.path:\n return 1\n else:\n return 0\n else:\n if self.path_type in (list, tuple):\n if not any(item for item in self.path):\n return 0\n return len(self.path)", "def do_generate(self, args):\n\t\t[lang.hierarchyLengths for lang in self.languages]", "def initialize_paths():\n\t_check_env()\n\t\n\tdata_dir = os.getenv(_DATA_DIRECTORY_ENV_KEY)\n\t_paths[_DATA_DIRECTORY_KEY] = data_dir\n\t_paths[_DATABASE_PATH_KEY] = os.path.join(data_dir, 'everything.db')\n\t_paths[_MEDIA_DIRECTORY_KEY] = os.path.join(data_dir, 'media')\n\t_paths[_THUMBNAILS_DIRECTORY_KEY] = os.path.join(data_dir, '.thumbnails')\n\n\t# Create directories\n\t_try_make_dir(_paths[_MEDIA_DIRECTORY_KEY])\n\t_try_make_dir(_paths[_THUMBNAILS_DIRECTORY_KEY])", "def create_length_genome():\n bam = glob.glob(os.path.join(os.getcwd(), '*.bam'))[0]\n command = r\"\"\"samtools view -H %s | perl -ne 'if ($_ =~ m/^\\@SQ/) { print $_ }' | perl -ne 'if ($_ =~ m/SN:(.+)\\s+LN:(\\d+)/) { print $1, \"\\t\", $2, \"\\n\"}' > lengths.genome\"\"\"\n sp.check_call(command % bam, shell=True)", "def change_len_kpi(self):\n self.ID_SOURCE_1DES_1URL = kpi_from_db_config.ID_SOURCE_1DES_1URL\n self.ID_SOURCE_1DES_0URL = kpi_from_db_config.ID_SOURCE_1DES_0URL\n self.ID_SOURCE_0DES_1URL = kpi_from_db_config.ID_SOURCE_0DES_1URL\n self.ID_SOURCE_0DES_0URL = kpi_from_db_config.ID_SOURCE_0DES_0URL\n self.ID_SOURCE_0DES_GT200 = kpi_from_db_config.ID_SOURCE_0DES_GT200\n self.ID_SOURCE_0DES_LT100 = kpi_from_db_config.ID_SOURCE_0DES_LT100\n self.ID_SOURCE_0DES_GTE100_LT200 = 
kpi_from_db_config.ID_SOURCE_0DES_GTE100_LT200\n\n list_id = [self.ID_SOURCE_1DES_1URL, \n self.ID_SOURCE_1DES_0URL, \n self.ID_SOURCE_0DES_1URL, \n self.ID_SOURCE_0DES_0URL, \n self.ID_SOURCE_0DES_GT200, \n self.ID_SOURCE_0DES_GTE100_LT200, \n self.ID_SOURCE_0DES_LT100]\n old_len_need_list = [4, 4, 4, 4, 5, 5, 5]\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT %s\n ''', [list_id[i], 2*old_len_need_list[i]])\n\n list_result = []\n for doc in self.cursor:\n list_result.append(doc[0])\n\n new_list_result = list_result\n new_list_result[3:3] = [0, 0]\n if i < 4:\n new_list_result[9:9] = [0, 0]\n else:\n new_list_result[10:10] = [0, 0]\n\n for new_value in new_list_result[::-1]:\n self.cursor.execute('''\n INSERT INTO public.kpi_report(id, created_at, value)\n VALUES(%s, now(), %s)\n ''', [list_id[i], new_value])\n self.con_dev.commit()" ]
[ "0.58111256", "0.58005226", "0.5792587", "0.54623866", "0.5386073", "0.5357159", "0.5356317", "0.5353135", "0.5353135", "0.53270024", "0.53158593", "0.5290157", "0.52883023", "0.52871025", "0.5284866", "0.5262351", "0.52564347", "0.5213889", "0.5204303", "0.5164193", "0.51576155", "0.51576155", "0.51576155", "0.51576155", "0.51090676", "0.50876504", "0.5081543", "0.50608844", "0.5053257", "0.5025534" ]
0.6926739
0
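The positive example above leans on a calculatePathLength(start, end) helper that the record never defines. A minimal, self-contained sketch of such a helper is shown below, assuming each node exposes x and y coordinates and that a straight-line (Euclidean) length is what is wanted; the Node stand-in and its field names are illustrative assumptions, not part of the source project.

import math
from collections import namedtuple

# Hypothetical stand-in for the Node model used above; only the coordinate
# fields matter for the distance computation.
Node = namedtuple("Node", ["id", "x", "y"])

def calculatePathLength(start, end):
    """Straight-line (Euclidean) distance between two nodes with x/y coordinates."""
    return math.hypot(end.x - start.x, end.y - start.y)

if __name__ == "__main__":
    a = Node(id=1, x=0.0, y=0.0)
    b = Node(id=2, x=3.0, y=4.0)
    print(calculatePathLength(a, b))  # 5.0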
Create a database according to schema in JSON format.
def create_db(db, schema_json): with open(schema_json) as of: schema = json.load(of, object_pairs_hook=OrderedDict) # OrderedDict so that tables are created in the order specified, # allowing foreign keys to reference previously defined tables for table_name, columns in schema.items(): col_types = columns.items() # dict -> tuple make_table(db, table_name, col_types)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(db_path, schema_json):\n create_db(db_path, schema_json)", "def create_db():\n db.create_all()\n print('Database structure created successfully')", "def create_db():\n db.create_all()\n print(\"DB Created\")", "def create_schema(db_name, schema_name):\n # 1. Connect to database\n conn = connect()\n cur = conn.cursor()\n conn.autocommit = True\n\n command_drop = \"\"\"DROP SCHEMA IF EXISTS {} CASCADE\"\"\".format(schema_name)\n command_create = \"\"\"\n CREATE SCHEMA {}\n\n CREATE TABLE cityjson (\n id serial PRIMARY KEY,\n name text,\n referenceSystem int,\n bbox geometry(POLYGON),\n datasetTitle text,\n metadata jsonb,\n meta_attr jsonb,\n transform jsonb\n )\n\n CREATE TABLE cityobject (\n id serial PRIMARY KEY,\n obj_id text,\n parents text[],\n children text[],\n bbox geometry(POLYGON),\n attributes jsonb,\n vertices jsonb,\n object jsonb,\n cityjson_id int REFERENCES cityjson (id) on delete cascade on update cascade\n )\n \"\"\".format(schema_name)\n\n commands = [command_drop, command_create]\n\n for command in commands:\n cur.execute(command)\n conn.commit()\n\n conn.close()\n print(\"\"\"The creation of schema \"{}\" in database \"{}\" is done\"\"\".format(schema_name, db_name))", "def db_init():\r\n result = odbserver.create_db()\r\n return jsonify({\r\n \"status\": 200,\r\n \"message\": result\r\n })", "def init_db():\n data = json.dumps({}, indent=4)\n with open(db_file, 'w') as f:\n f.write(data)", "def create_database(conn, schema=None):\n cur = conn.cursor()\n if schema is None:\n schema = 'leadmachine'\n\n # create sparkify database with UTF8 encoding\n cur.execute(f\"DROP DATABASE IF EXISTS {schema}\")\n cur.execute(f\"CREATE DATABASE {schema} WITH ENCODING 'utf8' TEMPLATE template0\")\n cur.close()", "def create_database():\n create_db(app)", "def create():\n\tcreate_db()", "def create_db(self):", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def create_schema(schema): \n\n query = \"CREATE SCHEMA IF NOT EXISTS {}\".format(schema)\n qdb.execute(query)", "def create_db():\n _init_db()\n db.create_all()", "def make_db():\n\n db.create_all()", "def create_db():\n db.create_all()\n click.echo(\"DB criado com sucesso!\")", "def create(self):\n c = self.cursor()\n byte_schema = pkgutil.get_data(__package__, 'schema.sql')\n c.executescript(byte_schema.decode('utf-8'))\n self.commit()", "def createSchema(schema):\n return \"CREATE SCHEMA \\\"{name}\\\";\\n\".format(name = schema.name)", "def create_db():\n db.create_all()\n click.echo(\"Banco de dados criado\")", "def _meta_json_to_database(self):\n\n sqlalchemy_metadata = MetaData() # this is unrelated to our meta.json\n meta_table = Table('meta', sqlalchemy_metadata,\n Column('meta', String))\n\n sqlalchemy_metadata.create_all(self.engine)\n json_string = json.dumps(self.meta)\n ins = meta_table.insert().values(meta=json_string)\n conn = self.engine.connect()\n conn.execute(\"DELETE FROM meta;\")\n conn.execute(ins)" ]
[ "0.82075125", "0.7394558", "0.7111433", "0.7067049", "0.70018756", "0.7000087", "0.6994893", "0.6987741", "0.696631", "0.6925411", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69090027", "0.6908591", "0.68710834", "0.6856689", "0.681587", "0.67954403", "0.6778088", "0.67777896", "0.6764038" ]
0.81120163
1
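create_db above hands each table off to a make_table(db, table_name, col_types) helper that is not included in the record. The sketch below shows one plausible shape for that helper when the target is SQLite; the choice of sqlite3, the (name, type) tuple convention, and the lack of identifier sanitisation are assumptions made purely for illustration.

import sqlite3

def make_table(db, table_name, col_types):
    """Create one table from (column name, SQL type) pairs.

    Assumes db is an open sqlite3 connection and col_types is an iterable of
    (name, type) tuples, matching how create_db iterates over schema.items().
    Illustration only -- identifiers are not sanitised.
    """
    columns = ", ".join(f"{name} {col_type}" for name, col_type in col_types)
    db.execute(f"CREATE TABLE IF NOT EXISTS {table_name} ({columns})")
    db.commit()

if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    make_table(conn, "users", [("id", "INTEGER PRIMARY KEY"), ("name", "TEXT")])
    print(conn.execute("PRAGMA table_info(users)").fetchall())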
Create a database from a schema and populate it with CSV/JSON data.
def main(db_path, schema_json): create_db(db_path, schema_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_db(db, schema_json):\n with open(schema_json) as of:\n schema = json.load(of, object_pairs_hook=OrderedDict)\n # OrderedDict so that tables are created in the order specified,\n # allowing foreign keys to reference previously defined tables\n\n for table_name, columns in schema.items():\n col_types = columns.items() # dict -> tuple\n make_table(db, table_name, col_types)", "def _initialize_db():\n conn, c = _get_db_connection()\n\n with open(str(SCHEMA_PATH)) as f:\n c.executescript(f.read())\n\n conn.close()", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def init_db():\n with LoggerApi.app_context():\n db = get_db()\n with LoggerApi.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n db = get_db()\n\n with current_app.open_resource(\"schema.sql\") as f:\n db.executescript(f.read().decode(\"utf8\"))", "def init_db():\n db = get_db()\n with current_app.open_resource('schema.sql') as f:\n db.executescript(f.read().decode('utf8'))", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as fobj:\n db.cursor().executescript(fobj.read())\n db.commit()", "def init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def create_schema(self, schema, *, debug=False):\n c = self.conn.cursor()\n for line in schema.split(\";\"):\n line = line.strip()\n if len(line)>0:\n if self.debug or debug:\n print(f\"{line};\", file=sys.stderr)\n try:\n c.execute(line)\n except (sqlite3.Error, pymysql.MySQLError) as e:\n print(\"SQL:\", line, file=sys.stderr)\n print(\"Error:\", e, file=sys.stderr)\n exit(1)", "def main(csvfile, dbfile, verbose=False):\n CONN = sqlite3.connect(dbfile)\n cursor = CONN.cursor()\n create_schema(cursor)\n process_data(cursor, csvfile, verbose=verbose)\n CONN.commit()\n CONN.close()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with 
app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def create(self):\n c = self.cursor()\n byte_schema = pkgutil.get_data(__package__, 'schema.sql')\n c.executescript(byte_schema.decode('utf-8'))\n self.commit()", "def init_db():\n\twith closing(connect_db()) as db:\n\t\twith app.open_resource('schema.sql', mode='r') as f:\n\t\t\tdb.cursor().executescript(f.read())\n\t\tdb.commit()", "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def connect_db_and_load_data(cls):\n db.connect()\n db.create_tables([Product], safe=True)\n load_data(transform_data('./inventory.csv'))", "def init_db():\n # Open connection to the database\n conn = sqlite3.connect(DB_PATH)\n cursor = conn.cursor()\n\n # Open the schema file and execute its SQL code\n with current_app.open_resource('schema.sql') as db_schema:\n cursor.executescript(db_schema.read().decode('utf8'))\n\n # Save (commit) the changes\n conn.commit()\n\n # We can also close the connection if we are done with it.\n conn.close()", "def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()", "def init_database(db: sa.engine.Connectable):\n\n # setup the Postgres extensions and schema\n db.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\" WITH SCHEMA public;\n \"\"\")\n db.execute(\n ';\\n'.join(\n 'CREATE SCHEMA IF NOT EXISTS {}'.format(s) for s in SCHEMAS.values()\n )\n )\n\n # create the schema from the models\n METADATA.create_all(bind=db)", "def create_database(conn, schema=None):\n cur = conn.cursor()\n if schema is None:\n schema = 'leadmachine'\n\n # create sparkify database with UTF8 encoding\n cur.execute(f\"DROP DATABASE IF EXISTS {schema}\")\n cur.execute(f\"CREATE DATABASE {schema} WITH ENCODING 'utf8' TEMPLATE template0\")\n cur.close()", "def _create_database(self):\n self._connect()\n cursor = self._connection.cursor()\n cursor.execute(make_table_creation_command(\"reviews\", FIELD_DESCRIPTIONS))\n self._connection.commit()" ]
[ "0.7358652", "0.6983746", "0.6980258", "0.6947382", "0.68650365", "0.6854215", "0.68521404", "0.68436974", "0.6828379", "0.678138", "0.67692417", "0.67509025", "0.67440456", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.66689503", "0.6652739", "0.66367894", "0.6621548", "0.6614253", "0.6597855", "0.65837014", "0.65186507", "0.64568985", "0.64290756" ]
0.7520764
0
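The main(db_path, schema_json) wrapper above only performs the schema-creation half of its summary; no CSV/JSON loading step appears in the record. The snippet below sketches one way such a loading step could look for CSV input with sqlite3, assuming each file begins with a header row whose names match the target table's columns; the function name and those conventions are assumptions, not the source project's API.

import csv
import sqlite3

def load_csv(db, table_name, csv_path):
    """Bulk-insert rows from a CSV file whose header row names the target columns."""
    with open(csv_path, newline="") as f:
        reader = csv.reader(f)
        header = next(reader)                     # first row supplies the column names
        columns = ", ".join(header)
        placeholders = ", ".join("?" for _ in header)
        db.executemany(
            f"INSERT INTO {table_name} ({columns}) VALUES ({placeholders})",
            reader,                               # remaining rows become parameter tuples
        )
    # Note: values are inserted as text; cast beforehand if the schema needs numbers.
    db.commit()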
verify that, once send() is called, a tenant has been set up
def test_tenant_setup_celery(self): class interceptor(mock.Mock): tenant = None def send(self, *kw, **args): self.tenant = properties.tenant msg = interceptor() tenant = mock.Mock() tenant.client_name = 'mock-tenant' _send_celery_mail(msg, tenant, send=True) self.assertTrue(msg.tenant is tenant)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_tenant_setup_celery_reset(self):\n msg = mock.Mock()\n tenant = mock.Mock()\n tenant.client_name = 'mock-tenant'\n\n _send_celery_mail(msg, tenant, send=False)\n\n self.assertFalse(hasattr(properties, 'tenant'))\n self.assertEqual(properties.tenant_properties, {})", "def test_advertiser_recipient(self):\n self.prep_advertiser()\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n self.common_asserts()", "def test_incompatible_subscription_and_tenant():\n pass", "def test_client_verification_create(self):\n pass", "def validateTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_specify_non_default_tenant():\n pass", "def test_must_be_subbed_to_send(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n # Create Saxony as an invite-only stream.\n self.assert_json_success(\n self.common_subscribe_to_streams(user, [\"Saxony\"], invite_only=True)\n )\n\n cordelia = self.example_user(\"cordelia\")\n with self.assertRaises(JsonableError):\n self.send_stream_message(cordelia, \"Saxony\")", "def send_verification(self):\n pass", "def setup_test_tenant(self):\n self.test_tenant = rand_name('test_tenant_')\n self.test_description = rand_name('desc_')\n resp, self.tenant = self.client.create_tenant(\n name=self.test_tenant,\n description=self.test_description)\n self.tenants.append(self.tenant)", "async def test_initialization(self):\n assert self.webhook_connection.address == self.identity.address", "def test_subscribe_offer(self):\n pass", "def check(self):\n if self.connection is None\\\n or self.target is None\\\n or self.invocation_id is None:\n raise ValueError(\n \"subject must be passed as an agument to a send function. 
\"\n + \"hub_connection.send([method],[subject]\")", "def test_approve_agreement(self):\n pass", "def test_tenants_tenant_id_notifications_get(self):\n pass", "async def test_send(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n\n http_message = HttpMessage(\n dialogue_reference=(\"\", \"\"),\n target=0,\n message_id=1,\n performative=HttpMessage.Performative.REQUEST,\n method=\"get\",\n url=\"/\",\n headers=\"\",\n body=\"\",\n version=\"\",\n )\n envelope = Envelope(\n to=\"addr\",\n sender=\"my_id\",\n message=http_message,\n )\n with patch.object(self.webhook_connection.logger, \"warning\") as mock_logger:\n await self.webhook_connection.send(envelope)\n await asyncio.sleep(0.01)\n mock_logger.assert_any_call(\n RegexComparator(\n \"Dropping envelope=.* as sending via the webhook is not possible!\"\n )\n )", "def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)", "def test_client_verification_retrieve(self):\n pass", "def createTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_setup(self):\n assert self.transaction_behaviour.setup() is None\n self.assert_quantity_in_outbox(0)", "def test_send(self):\n msg_flag = self.instance.send(self.msg_short)\n assert(msg_flag)\n msg_flag, msg_recv = self.driver.recv(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_short)", "def test_api_user_resend_confirmation_post(self):\n pass", "def test_post_activate_marketplace_vendor_v3(self):\n pass", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def send(self):\n if self.check_account_validity:\n self.builder = Builder(secret=self.sender, horizon='https://horizon-testnet.stellar.org')\n self.builder.append_payment_op(self.receiver, self.amount)\n self.builder.sign()\n self.builder.submit()\n return True\n return False", "def test_10_9_4_1_1_1(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertEqual(response['response']['responseCode'], 0)\n cbsd_id = response['cbsdId']\n del request, response\n\n # Request grant\n grant_0 = json.load(\n open(os.path.join('testcases', 'testdata', 'grant_0.json')))\n grant_0['cbsdId'] = cbsd_id\n request = {'grantRequest': [grant_0]}\n # Check grant response\n response = self._sas.Grant(request)['grantResponse'][0]\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertTrue(response['grantId'])\n self.assertEqual(response['response']['responseCode'], 0)\n grant_id = response['grantId']\n del request, response\n\n # Heartbeat\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertEqual(response['grantId'], grant_id)\n self.assertLess(datetime.utcnow(),\n datetime.strptime(response['transmitExpireTime'],\n '%Y-%m-%dT%H:%M:%SZ'))\n self.assertEqual(response['response']['responseCode'], 0)", "def 
test_attend_check_has_the_registration(self):\n self.user = User.objects.create_user('[email protected]')\n self.venue = Venue.objects.create(name='Seoul City Hall', latitude=37.566676, longitude=126.978397)\n self.meet_up = MeetUp.objects.create(title='Python User Group Bimonthly Seminar', venue=self.venue)\n self.ticket = Ticket.objects.create(title='Normal Ticket', meet_up=self.meet_up, charge=10000)\n self.registration = Registration.objects.create(user=self.user, ticket=self.ticket)\n\n self.attend_check = AttendCheck.objects.create(registration=self.registration)\n\n self.assertIsNotNone(self.attend_check.registration)", "def test_activate_form(self, mock_sendmail):\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=200)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'message' in success,\r\n \"Should be successful with admin email address: \" + str(res))\r\n self.assertTrue(mock_sendmail.called)", "def test_subscribe_already_subscribed(self):\n self.braintree_customer.active = True\n self.braintree_customer.save()\n with self.assertRaises(BraintreeError):\n SubscriptionManager.subscribe(self.braintree_customer)\n\n # Check state not altered\n self.assertTrue(self.braintree_customer.active)\n self.assertFalse(self.braintree_customer.pending_cancel)\n self.assertIsNone(self.braintree_customer.expiry_date)", "def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))", "def test_setup_is_ledger_tx(self):\n # operation\n self.search_behaviour.setup()\n\n # after\n self.assert_quantity_in_outbox(1)\n has_attributes, error_str = self.message_has_attributes(\n actual_message=self.get_message_from_outbox(),\n message_type=LedgerApiMessage,\n performative=LedgerApiMessage.Performative.GET_BALANCE,\n to=str(LEDGER_PUBLIC_ID),\n sender=str(self.skill.public_id),\n ledger_id=FETCHAI,\n address=self.skill.skill_context.agent_address,\n )\n assert has_attributes, error_str" ]
[ "0.6928082", "0.60256696", "0.5895501", "0.5891009", "0.5843694", "0.57787097", "0.577437", "0.56721795", "0.56186384", "0.55741477", "0.5572975", "0.557291", "0.55664927", "0.5555025", "0.54957616", "0.54935724", "0.5484652", "0.5469827", "0.54646283", "0.5463046", "0.54408354", "0.54238206", "0.5417404", "0.53897953", "0.53850585", "0.53847855", "0.5374362", "0.5364117", "0.5352754", "0.53451633" ]
0.7089597
0
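The test above works by subclassing mock.Mock so that send() can record module-level state at the moment it is invoked, rather than merely asserting that it was called. The sketch below reproduces that interception pattern in a self-contained form; the properties object and the send_with_tenant task are simplified stand-ins for the project's real properties and _send_celery_mail, which the record does not show.

import types
from unittest import mock

# Simplified stand-in for the request-local properties object used by the tests.
properties = types.SimpleNamespace(tenant=None)

def send_with_tenant(msg, tenant):
    properties.tenant = tenant        # bind the tenant for the duration of the send
    try:
        msg.send()
    finally:
        properties.tenant = None      # unbind once the send has finished

class Interceptor(mock.Mock):
    """Mock whose send() records which tenant was bound when it ran."""
    seen_tenant = None

    def send(self, *args, **kwargs):
        self.seen_tenant = properties.tenant

if __name__ == "__main__":
    msg, tenant = Interceptor(), object()
    send_with_tenant(msg, tenant)
    assert msg.seen_tenant is tenant  # the tenant was visible while sending
    assert properties.tenant is None  # and cleared again after the call returned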
after _send_celery_mail finishes, the tenant should be cleared again
def test_tenant_setup_celery_reset(self): msg = mock.Mock() tenant = mock.Mock() tenant.client_name = 'mock-tenant' _send_celery_mail(msg, tenant, send=False) self.assertFalse(hasattr(properties, 'tenant')) self.assertEqual(properties.tenant_properties, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_tenant_setup_celery(self):\n\n class interceptor(mock.Mock):\n tenant = None\n\n def send(self, *kw, **args):\n self.tenant = properties.tenant\n\n msg = interceptor()\n tenant = mock.Mock()\n tenant.client_name = 'mock-tenant'\n\n _send_celery_mail(msg, tenant, send=True)\n\n self.assertTrue(msg.tenant is tenant)", "def test_email_after_contest_end(self):\n self.prep_consumer()\n temp_date = settings.CONTEST_END_DATE\n settings.CONTEST_END_DATE = str(\n datetime.today().date() - timedelta(days=1))\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n log = get_last_db_log(\n 'email_gateway.tasks.send_unqualified_emails', 'EMAIL')\n if log:\n self.fail('Performed task even though contest ended.')\n settings.CONTEST_END_DATE = temp_date", "def tearDown(self):\n delete_company_tasks([self._id], **self._test_data)", "async def afterHoursAutoPurge(self, ctx: Context):", "def refresh():\n buffer = io.StringIO()\n with mail.CaptureLogs(manage.logger, buffer):\n try:\n manage.update_aggregates()\n manage.retry_itemized()\n manage.refresh_itemized()\n manage.update_schemas()\n download.clear_bucket()\n except Exception as error:\n manage.logger.exception(error)\n try:\n mail.send_mail(buffer)\n except Exception as error:\n logger.exception(error)", "def finalize_task(self):\n pass", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)", "def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)", "def app_fin_done(self):\n if self.task_queue.empty() and self.task_completed_queue.qsize() > 0:\n self.task_completed_queue.get()\n send_str = MSG_wrapper(wid=self.wid)\n self.client.send_string(send_str, len(send_str), 0, Tags.LOGOUT)", "def task_done(self):\n if self.message is None:\n raise Exception('no message to acknowledge')\n self.handle.delete_message(self.message)\n self.message = None", "def purge(self):\n pass", "def test_cleanup_mail(self):\n self.assertEqual(Email.objects.count(), 0)\n\n # The command shouldn't delete today's email\n email = Email.objects.create(from_email='[email protected]',\n to=['[email protected]'])\n call_command('cleanup_mail', days=30)\n self.assertEqual(Email.objects.count(), 1)\n\n # Email older than 30 days should be deleted\n email.created = now() - datetime.timedelta(31)\n email.save()\n call_command('cleanup_mail', days=30)\n self.assertEqual(Email.objects.count(), 0)", "def task_done(self,server_id):\n self.__lock.acquire()\n if server_id in self.__migrating_tasks.keys():\n del self.__migrating_tasks[server_id]\n self.logger.debug('Deleted migrate task ID %s' % server_id)\n self.__lock.release()", "def send_reset_email(user):\n msg = emails.reset_email(user)\n try:\n mail.send(msg)\n except Exception as e:\n traceback.print_exc()", "def tearDown(self):\n self.cleanup_tenants()", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "async def cleanup(self):\n if self.preparing_task:\n self.preparing_task.cancel()", "def trial_clean_up(self):\n pass", "def clear_tasks(self):\n self.last_task = None\n self.tasks = []", "async def _clean_up_cache_invalidation_wrapper(self) -> None:\n delete_up_to: int = (\n self.hs.get_clock().time_msec() - 
RETENTION_PERIOD_OF_CACHE_INVALIDATIONS_MS\n )\n\n in_backlog = await self._clean_up_batch_of_old_cache_invalidations(delete_up_to)\n\n # Vary how long we wait before calling again depending on whether we\n # are still sifting through backlog or we have caught up.\n if in_backlog:\n next_interval = CATCH_UP_CLEANUP_INTERVAL_MS\n else:\n next_interval = REGULAR_CLEANUP_INTERVAL_MS\n\n self.hs.get_clock().call_later(\n next_interval / 1000, self._clean_up_cache_invalidation_wrapper\n )", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()", "def terminate(self):\n self.mailQueue.put(None)\n self.mailQueue.join()\n self.join()\n self.logger.info(\"Mailer terminated\")", "def async_reset(self) -> None:", "def send_reminder(self):\n pass", "def setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(60, clean_empty_entity_attrs, name='Clean Entity Attributes')", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def on_reset_after_execution(self):\n pass", "def purge_mailbox(self):\n self._mailbox.clear()", "def confirm(self, task, log):\n self._tasks_in_process.remove(task)\n log.confirm(self._name, task.get_name(), task.get_payment())" ]
[ "0.60660076", "0.5897273", "0.5827525", "0.58136255", "0.57590765", "0.57500565", "0.57095784", "0.56320375", "0.5629329", "0.56141365", "0.560764", "0.55990505", "0.55975515", "0.5582091", "0.5547078", "0.5544799", "0.55407256", "0.5533396", "0.5488413", "0.54429895", "0.540696", "0.5406713", "0.5402604", "0.5400224", "0.5363803", "0.53566074", "0.53507614", "0.5346462", "0.5343897", "0.5335408" ]
0.7174306
0
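The record shows the assertion but not _send_celery_mail itself, so the following is only one way the cleared-afterwards contract could be guaranteed: binding the tenant inside a context manager whose finally block removes the attribute and resets tenant_properties. The properties stand-in and the contents of tenant_properties here are assumptions for illustration, not the project's actual implementation.

import contextlib
import types

# Simplified stand-in for the request-local properties object checked by the test.
properties = types.SimpleNamespace(tenant_properties={})

@contextlib.contextmanager
def bound_tenant(tenant):
    """Bind a tenant for the duration of a block and always clean up afterwards."""
    properties.tenant = tenant
    properties.tenant_properties = {"client_name": getattr(tenant, "client_name", None)}
    try:
        yield properties
    finally:
        # Mirror what the test asserts: no leftover tenant attribute and an
        # empty tenant_properties mapping once the work is done.
        if hasattr(properties, "tenant"):
            del properties.tenant
        properties.tenant_properties = {}

if __name__ == "__main__":
    with bound_tenant(types.SimpleNamespace(client_name="mock-tenant")):
        assert properties.tenant is not None
    assert not hasattr(properties, "tenant")
    assert properties.tenant_properties == {}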
Handles a success in payment. If the order is paid off, sends success; otherwise returns a page to pay the remaining balance.
def _onSuccess(self, controller): if controller.order.paid_in_full: controller.cart.empty() for item in controller.order.orderitem_set.all(): if item.product.is_subscription: item.completed = True item.save() try: curr_status = controller.order.orderstatus_set.latest() except OrderStatus.DoesNotExist: curr_status = None if (curr_status is None) or (curr_status.notes and curr_status.status == "New"): controller.order.add_status(status='New', notes = "Order successfully submitted") else: # otherwise just update and save if not curr_status.notes: curr_status.notes = _("Order successfully submitted") curr_status.save() #Redirect to the success page url = controller.lookup_url('satchmo_checkout-success') return HttpResponseRedirect(url) else: log.debug('Order #%i not paid in full, sending to pay rest of balance', controller.order.id) #url = controller.order.get_balance_remaining_url() url = reverse('satchmo_balance_remaining') return HttpResponseRedirect(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ui_redirect_success(self, order: Order = None) -> HttpResponse:\n ui_return_url = self.extract_ui_return_url()\n if ui_return_url:\n return self._redirect_to_ui(\n ui_return_url, \"success\", order, path=\"/payment-result\"\n )\n else:\n return HttpResponse(\n content=\"Payment successful, but failed redirecting back to UI\"\n )", "def ui_redirect_success(self, order: Order = None) -> HttpResponse:\n ui_return_url = self.extract_ui_return_url()\n if ui_return_url:\n return self._redirect_to_ui(ui_return_url, \"success\", order)\n else:\n return HttpResponse(\n content=\"Payment successful, but failed redirecting back to UI\"\n )", "def order_success(request, job_id, token):\n if request.user.is_active:\n job_obj = Job.objects.get(job_id= job_id)\n if job_obj.employeer == request.user:\n if verifiers.payment_token_is_valid(token, job_obj):\n if job_obj.accepted == False:\n job_obj.status = Status.objects.create(status_name=\"PN\", comment=\"Awaiting payment confirmation from Stripe, this might take a few minutes.\")\n job_obj.accepted = True\n job_obj.save()\n \n messages.add_message(request, messages.INFO, 'Your order has been created.')\n return redirect('/') \n else:\n #someone tries to manually enter the URL with wrong payment_token\n messages.add_message(request, messages.INFO, 'Something went wrong, try again later.') \n return redirect(\"/\") \n else:\n #the person making the request is not the same as the person who created the job\n messages.add_message(request, messages.INFO, 'Something went wrong, try again later.') \n return redirect(\"/\") \n else:\n return redirect(\"/login\")", "def order_success(self, request):\n order = self.order_from_request(request)\n\n if not order:\n return self.order_new(request)\n\n if not order.balance_remaining:\n self.set_order_on_request(request, order=None)\n\n\n order_data = OrderData.objects.get(order=order)\n o_data = simplejson.loads(order_data.data)\n\n paymentData = {}\n paymentData['delivery_address2'] = o_data['delivery_address2']\n paymentData['billing_address2'] = o_data['billing_address2']\n paymentData['delivery_date'] = o_data['delivery_date']\n paymentData['delivery_state'] = o_data['delivery_state']\n paymentData['billing_state'] = o_data['billing_state']\n paymentData['salutation'] = o_data['salutation']\n paymentData['contact_number'] = o_data['billing_contact_number']\n\n #try:\n oPayment = OrderPayment.objects.get(order=order)\n oPayment.payment_method = o_data['order_payment_method']\n oPayment.data = simplejson.dumps(paymentData)\n oPayment.save()\n #except:\n # pass\n\n \"\"\"\n order update note\n \"\"\"\n notes = o_data['order_notes']\n order.notes = notes\n order.save()\n\n # st_save_helper(request, order)\n\n \"\"\"\n sbid = None\n\n if 'customer_styleboard' in request.session:\n sbid = request.session.get('customer_styleboard').id\n\n if 'personalize_id' in request.session:\n print \"There's a personalize_id\"\n \"\"\"\n\n current_user = User.objects.get(id=int(request.user.id))\n\n if 'ipn_emailed' in o_data and o_data['ipn_emailed']:\n\n pass\n \n else:\n\n emailed = send_email_order(order, current_user, notes, paymentData['contact_number'], self)\n\n logr.info('emailed order confirmation to : %s from order success' % current_user.email)\n\n\n order_data.delete() # not needed after saving to order payment\\\n \n clear_styleboard_session(request)\n\n try:\n del request.session['customer_styleboard']\n del request.session['personalize_id']\n except:\n pass\n\n return self.render(request, 
'plata/shop_order_success.html',\n self.get_context(request, {\n 'order': order,\n 'progress': 'success',\n }))", "def handle_success_request(self) -> HttpResponse: # noqa: C901\n request = self.request\n logger.debug(\n \"Handling Bambora user return request, params: {}.\".format(request.GET)\n )\n\n order_number, _timestamp = request.GET.get(\"ORDER_NUMBER\", \"-\").split(\"-\")\n try:\n order = Order.objects.get(order_number=order_number)\n except Order.DoesNotExist:\n logger.warning(\"Order does not exist.\")\n return self.ui_redirect_failure()\n\n order.invalidate_tokens()\n\n if not self.check_new_payment_authcode(request):\n return self.ui_redirect_failure()\n\n return_code = request.GET[\"RETURN_CODE\"]\n if return_code == \"0\":\n logger.debug(\"Payment completed successfully.\")\n try:\n order.set_status(\n OrderStatus.PAID,\n \"Code 0 (payment succeeded) in Bambora Payform success request.\",\n )\n return self.ui_redirect_success()\n except OrderStatusTransitionError as oste:\n logger.warning(oste)\n order.create_log_entry(\n \"Code 0 (payment succeeded) in Bambora Payform success request.\"\n )\n return self.ui_redirect_failure()\n elif return_code == \"1\":\n logger.debug(\"Payment failed.\")\n return self.ui_redirect_failure()\n elif return_code == \"4\":\n logger.debug(\"Transaction status could not be updated.\")\n order.create_log_entry(\n \"Code 4: Transaction status could not be updated. Use the merchant UI to resolve.\"\n )\n return self.ui_redirect_failure()\n elif return_code == \"10\":\n logger.debug(\"Maintenance break.\")\n order.create_log_entry(\"Code 10: Bambora Payform maintenance break\")\n return self.ui_redirect_failure()\n else:\n logger.warning('Incorrect RETURN_CODE \"{}\".'.format(return_code))\n order.create_log_entry(\n 'Bambora Payform incorrect return code \"{}\".'.format(return_code)\n )\n return self.ui_redirect_failure()", "def payment_success(request):\r\n\tsecret_key = settings.SELLER_KEY\r\n\tpid = request.GET['pid']\r\n\tref = request.GET['ref']\r\n\tresult = request.GET['result']\r\n\t# Retrieve the cheksum value and validate it\r\n\tchecksumstr = \"pid={}&ref={}&result={}&token={}\".format(pid, ref, result, secret_key)\r\n\tm = md5(checksumstr.encode(\"ascii\"))\r\n\tchecksum = m.hexdigest()\r\n\tmalformed = False\r\n\tprint(\"calculated: \" + checksum)\r\n\tprint(\"received: \" + request.GET['checksum'] )\r\n\tif (checksum == request.GET['checksum'] ):\r\n\t\ttransaction = Transaction.objects.get(pk=pid)\r\n\t\ttransaction.state = Transaction.CONFIRMED\r\n\t\ttransaction.reference = ref\r\n\t\tgame = Game.objects.get(id = transaction.game.id)\r\n\t\ttransaction.save()\r\n\t\tinc_purchase = game.purchase_number + 1\r\n\t\tgame.purchase_number = inc_purchase\r\n\t\tgame.save()\r\n\t\tprint(\"about to call success\")\r\n\t\treturn render(request, 'success.html', {'game': game, 'MEDIA_URL': settings.MEDIA_URL, 'malformed': malformed})\r\n\telse:\r\n\t\ttransaction = Transaction.objects.get(pk=pid)\r\n\t\ttransaction.delete()\r\n\t\tmalformed = True\r\n\t\treturn render(request, 'success.html', {\"malformed\": malformed})", "def notify_payment_success(self, **kwargs):\n return self.notify(\"notify_payment_success\", **kwargs)", "def webhook_payment_successful(self, event):\n\n intent = event.data.object\n p_id = intent.id\n pack = intent.metadata.pack\n save_detail = intent.metadata.save_detail\n\n billing_details = intent.charges.data[0].billing_details\n shipping_details = intent.shipping\n grand_cost = round(intent.charges.data[0].amount / 
100, 2)\n\n for field, value in shipping_details.address.items():\n if value == \"\":\n shipping_details.address[field] = None\n\n profile = None\n username = intent.metadata.username\n if username != 'AnonymousUser':\n profile = UserProfile.objects.get(user__username=username)\n if save_detail:\n profile.default_phone_number = shipping_details.phone,\n profile.default_home_Address = shipping_details.address.line1,\n profile.default_home_Address_continued = \\\n shipping_details.address.line2,\n profile.default_postcode = \\\n shipping_details.address.postal_code,\n profile.default_county = \\\n shipping_details.address.city,\n profile.default_country = \\\n shipping_details.address.country,\n profile.save()\n\n order_present = False\n seek = 1\n while seek <= 6:\n try:\n order = Order.objects.get(\n Name__iexact=shipping_details.name,\n user_account=profile,\n email__iexact=billing_details.email,\n phone_number__iexact=shipping_details.phone,\n home_Address__iexact=shipping_details.address.line1,\n home_Address_continued__iexact =(\n shipping_details.address.line2\n ),\n postcode__iexact=shipping_details.address.postal_code,\n county__iexact=shipping_details.address.city,\n country__iexact=shipping_details.address.country,\n grand_cost=grand_cost,\n original_pack=pack,\n stripe_p_id=p_id,\n )\n order_present = True\n break\n except Order.DoesNotExist:\n seek += 1\n time.sleep(1)\n if order_present:\n self._send_email_details(order)\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | Good news. \\\n This is now in the database',\n status=200)\n else:\n order = None\n try:\n order = Order.objects.create(\n Name=shipping_details.name,\n email=billing_details.email,\n phone_number=shipping_details.phone,\n home_Address=shipping_details.address.line1,\n home_Address_continued=shipping_details.address.line2,\n postcode=shipping_details.address.postal_code,\n county=shipping_details.address.city,\n country=shipping_details.address.country,\n original_pack=pack,\n stripe_p_id=p_id,\n )\n for item_id, item_data in json.load(pack).items():\n product = Product.objects.get(id=item_id)\n if isinstance(item_data, int):\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n quantity=item_data,\n )\n order_line_item.save()\n else:\n for size, quantity in item_data['items_by_size'].items():\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n quantity=quantity,\n product_size=size,\n )\n order_line_item.save()\n except Exception as e:\n if order:\n order.delete()\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | \\\n There is an error: {e}',\n status=500)\n self._send_email_details(order)\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | \\\n Goodnews: webhook order created',\n status=200)", "def payment_return(request, campaign_id, inv_id, success_code, payment_mode):\r\n campaign = get_object_or_404(Campaign, pk=campaign_id)\r\n if int(success_code) == 1:\r\n request.user.message_set.create(message=_('Thank you for your contribution.'))\r\n else:\r\n # User cancelled payment.\r\n request.user.message_set.create(message=_('Your payment has been cancelled.'))\r\n try:\r\n # Find ``PendingContribution`` and delete it.\r\n pc = PendingContribution.objects.get(pk=inv_id, campaign=campaign_id, contributor=request.user, payment_mode=payment_mode)\r\n pc.delete()\r\n _log.debug('Payment by %s was cancelled for %s', request.user.username, campaign)\r\n except PendingContribution.DoesNotExist:\r\n pass\r\n return 
HttpResponseRedirect(reverse('view_campaign', kwargs={'campaign_id':campaign.pk}))", "def __capture_payment(self, response):\n order_cls = get_order_class()\n self.order = order_cls.get_by_payment_details(\n {'token': response['TOKEN']}\n )\n if self.order is None or self.order.state is not OrderStates.created:\n return redirect(url_for('payment.error_payment',\n payment_method=self.method_name))\n\n request_params = {\n 'METHOD': DO_PAYMENT,\n 'TOKEN': response['TOKEN'],\n 'PAYERID': response['PAYERID'],\n 'PAYMENTREQUEST_0_AMT': self.order.total_price,\n 'PAYMENTREQUEST_0_PAYMENTACTION': ACTION,\n 'PAYMENTREQUEST_0_CURRENCYCODE': CURRENCY,\n }\n\n response = self.__do_request(request_params)\n if response['ACK'] == RESPONSE_OK:\n self.order.set_payment_details(token=unicode(response))\n self.order.mark_paid()\n\n return redirect(url_for('payment.success_payment',\n payment_method=self.method_name))\n\n return redirect(url_for('payment.error_payment',\n payment_method=self.method_name,\n order_id=self.order.id))", "def post(self, request, *args, **kwargs):\n try:\n form = self.get_form()\n except RedirectNeeded as exc:\n messages.add_message(request, messages.SUCCESS, \"Payment redirects to %s\" % exc.args[0])\n return HttpResponseRedirect(exc.args[0])\n #except Exception as exc:\n # return HttpResponseBadRequest(exc, content_type=\"text/plain\")\n\n if form.validate():\n messages.add_message(request, messages.SUCCESS, \"Payment succeeded\")\n return self.form_valid(form)\n else:\n messages.add_message(request, messages.ERROR, \"Payment failed\")\n return self.form_invalid(form)", "def postpay_callback(request):\r\n params = request.POST.dict()\r\n result = process_postpay_callback(params)\r\n if result['success']:\r\n return HttpResponseRedirect(reverse('shoppingcart.views.show_receipt', args=[result['order'].id]))\r\n else:\r\n return render_to_response('shoppingcart/error.html', {'order': result['order'],\r\n 'error_html': result['error_html']})", "def submit_order(request, orderid):\n if request.user.is_staff:\n order = WorkingOrder.objects.get(pk=orderid)\n else:\n order = request.user.workingorder_set.get(id=orderid) \n\n if order.status != BaseOrder.Const.DEALER_EDIT:\n return HttpResponseServerError()\n \n # always submit orders in the context of proper account\n account = order.owner.get_profile().account\n \n if request.method == 'GET': \n form = SubmitForm(instance=order)\n else:\n form = SubmitForm(request.POST, instance=order)\n if form.is_valid():\n order = form.save(commit=False)\n cost = order.cost or decimal.Decimal() \n if cost > account.credit_balance:\n ## users account doesn't have enough juice.. 
send then to the ecom engine \n ## to pay, then get them back here ...\n order = form.save()\n products = [form.cleaned_data['design_product']]\n option = form.cleaned_data.get('processing_option', None)\n if option:\n products.append(option) \n new_cart(request, products)\n request.method = 'GET' \n return paypal_checkout(request, success_url=reverse('submit-order', args=[orderid]))\n else: \n register_design_order(order.owner, order.owner.get_profile().account, order, cost)\n order = form.save(commit=False)\n order.status = BaseOrder.Const.SUBMITTED\n order.submitted = datetime.now()\n order.save()\n # return HttpResponseRedirect('completed_order_summary', args=[orderid]) # TODO\n return HttpResponseRedirect(reverse('submit-order-completed', args=[order.id])) \n return dict(order=order, form=form)", "def paynow_return(request, payment_id):\r\n # Get payment object\r\n payment = get_object_or_404(PaynowPayment, reference=payment_id)\r\n # Init Paynow oject. The urls can now be blank\r\n paynow = Paynow(settings.PAYNOW_INTEGRATION_ID, settings.PAYNOW_INTEGRATION_KEY, '', '')\r\n\r\n # Check the status of the payment with the paynow server\r\n payment_result = paynow.check_transaction_status(payment.poll_url)\r\n\r\n save_changes = False\r\n\r\n # check if status has changed\r\n if payment.status != payment_result.status:\r\n payment.status = payment_result.status\r\n save_changes = True\r\n\r\n # Check if paynow reference has changed\r\n if payment.paynow_reference != payment_result.paynow_reference:\r\n payment.paynow_reference = payment_result.paynow_reference\r\n save_changes = True\r\n\r\n # Check if payment is now paid\r\n print(payment_result.paid)\r\n if payment_result.paid:\r\n if not payment.paid:\r\n payment.paid = True\r\n payment.confirmed_at = timezone.now()\r\n\r\n if save_changes:\r\n payment.save()\r\n\r\n msg = \"Payment for Transaction \" + payment.reference + ' confirmed'\r\n msg += \" Paynow Reference: \" + payment.paynow_reference\r\n messages.success(request, msg)\r\n msg = \"Paynow Payment status => \" + payment.status\r\n messages.success(request, msg)\r\n\r\n\r\n\r\n\r\n return redirect(reverse('index'))", "def checkout_success(request, order_number):\n\n save_info = request.session.get('save_info')\n order = get_object_or_404(Order, order_number=order_number)\n\n if request.user.is_authenticated:\n profile = UserProfile.objects.get(user=request.user)\n # Attach the user's profile to the order\n order.user_profile = profile\n order.save()\n\n # Save the user's info\n if save_info:\n profile_data = {\n 'default_phone_number': order.phone_number,\n 'default_town_or_city': order.town_or_city,\n 'default_street_address1': order.street_address1,\n 'default_street_address2': order.street_address2,\n # 'default_country': order.country,\n }\n user_profile_form = UserProfileForm(profile_data,\n instance=profile)\n if user_profile_form.is_valid():\n user_profile_form.save()\n\n messages.success(request, f'Order successfully processed! \\\n Your order number is {order_number}. 
A confirmation \\\n email will be sent to {order.email}.')\n\n # if 'bag' in request.session:\n # del request.session['bag']\n\n \"\"\"Send the user a confirmation email\"\"\"\n cust_email = order.email\n subject = render_to_string(\n 'checkout/confirmation_emails/confirmation_email_subject.txt',\n {'order': order})\n body = render_to_string(\n 'checkout/confirmation_emails/confirmation_email_body.txt',\n {'order': order, 'contact_email': settings.DEFAULT_FROM_EMAIL})\n\n send_mail(\n subject,\n body,\n settings.DEFAULT_FROM_EMAIL,\n [cust_email]\n )\n\n template = 'checkout/checkout_success.html'\n context = {\n 'order': order,\n }\n\n return render(request, template, context)", "def checkout_success(request, order_number):\n save_info = request.session.get('save_info')\n order = get_object_or_404(Order, order_number=order_number)\n short_order = order_number[0:11]\n if request.user.is_authenticated:\n profile = UserProfile.objects.get(user=request.user)\n # Attach user profile to order\n order.user_profile = profile\n order.save()\n\n # Save the user info\n if save_info:\n profile_data = {\n 'profile_phone_number': order.phone_number,\n 'profile_street_address1': order.street_address1,\n 'profile_street_address2': order.street_address2,\n 'profile_town_or_city': order.town_or_city,\n 'profile_county': order.county_or_state,\n 'profile_postcode': order.postcode,\n 'profile_country': order.country,\n }\n user_profile_form = UserProfileForm(profile_data, instance=profile)\n if user_profile_form.is_valid():\n user_profile_form.save()\n\n messages.success(request, f'Order succesfully processed! \\\n Your transaction number is {short_order}. A confirmation \\\n email will be sent to {order.email}.')\n\n if 'bag' in request.session:\n del request.session['bag']\n\n template = 'checkout/checkout_success.html'\n context = {\n 'order': order,\n 'short_order': short_order,\n }\n\n return render(request, template, context)", "def onCheckout(self, controller):\n \n if askokcancel(\"Proceed\", \"Pay the order?\"):\n c = controller.customer\n package = {'customer_id':c.id, 'order_price':c.my_order.GetTotalPrice}\n msg = controller.transmit(package)\n \n if msg['order_received']:\n c.CheckOut(c.my_order.GetTotalPrice)\n c.Clear()\n controller.show_frame(PageThree)", "def payReturn(request, *args, **kwargs):\n initParam = {}\n pay_key = request.session.get('pay_key', None)\n gateway = request.session.get('gateway', None)\n if pay_key and gateway:\n del request.session['pay_key']\n del request.session['gateway']\n #Check and get Transaction information\n checkMethod = kwargs.pop('checkMethod', None)\n if checkMethod:\n initParam['pay_key'] = pay_key\n initParam['gateway'] = gateway\n transaction = checkMethod(request, initParam=initParam)\n if transaction:\n p = driver.PayPal()\n #Check whether use has paid successfully.\n result = p.check_ap_payment_status(transaction.pay_key)\n if result['status'][0] == 'COMPLETED':\n #Do something after user payed successfully.\n executeMethod = kwargs.pop('executeMethod', None)\n if executeMethod:\n initParam['transaction_id'] = transaction.id\n initParam['buyer_account'] = result['senderEmail'][0]\n if executeMethod(initParam=initParam):\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if back_page:\n del request.session['back_page']\n if success_page:\n del request.session['success_page']\n initParam['success_page'] = success_page\n initParam['success_page_msg'] = request.session['success_page_msg']\n 
#For the value in paypal_success.html\n initParam['app'] = transaction.app\n initParam['price'] = transaction.price\n initParam['type'] = 'Transaction'\n initParam['msg'] = _('You have successfully paid the money. We have already sent an email to the app seller. In the meanwhile you can send private message to seller as well.')\n log.info(_('User %(param1)s has paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n return render_to_response(\"payment/paypal_success.html\", initParam, context_instance=RequestContext(request))\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but execute method %(param3)s failed.')\n % {'param1': request.user.username, 'param2': transaction.id, 'param3': executeMethod.__name__})\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but ExecuteMethod does not exist.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('User %(param1)s has no paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, User: %(param3)s, Execute method %(param4)s failed.')\n % {'param1': pay_key, 'param2': gateway, 'param3': request.user.username, 'param4': checkMethod.__name__})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, CheckMethod does not exist.')\n % {'param1': pay_key, 'param2': gateway})\n else:\n log.error(_('Pay. PayKey or Gateway no exists.'))\n\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please transaction again.') % {'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))", "def post(self, request):\r\n if self._is_signature_valid(request.POST):\r\n return self._payment_page_response(request.POST, '/shoppingcart/postpay_callback/')\r\n\r\n else:\r\n return render_to_response('shoppingcart/test/fake_payment_error.html')", "def handle_payment_intent_succeeded(self, event):\n intent = event.data.object\n pid = intent.id\n bag = intent.metadata.bag\n\n billing_details = intent.charges.data[0].billing_details\n grand_total = round(intent.charges.data[0].amount / 100, 2)\n\n order_exists = False\n attempt = 1\n while attempt <= 5:\n try:\n order = Order.objects.get(\n full_name__iexact=billing_details.name,\n email__iexact=billing_details.email,\n phone_number__iexact=billing_details.phone,\n street_address1__iexact=(\n billing_details.address.line1),\n street_address2__iexact=(\n billing_details.address.line2),\n town_or_city__iexact=billing_details.address.city,\n county__iexact=billing_details.address.state,\n country__iexact=billing_details.address.country,\n grand_total=grand_total,\n original_bag=bag,\n stripe_pid=pid,\n )\n order_exists = True\n break\n except Order.DoesNotExist:\n attempt += 1\n time.sleep(1)\n\n if order_exists:\n return HttpResponse(\n content=f'Webhook received: ({event[\"type\"]}'\n '| SUCCESS: 
Verified order already in database',\n status=200)\n else:\n order = None\n try:\n order = Order.objects.create(\n full_name=billing_details.name,\n email=billing_details.email,\n phone_number=billing_details.phone,\n street_address1=billing_details.address.line1,\n street_address2=billing_details.address.line2,\n town_or_city=billing_details.address.city,\n county=billing_details.state,\n country=billing_details.country,\n original_bag=bag,\n stripe_pid=pid,\n )\n for workshop_id, quantity in json.loads(bag).items():\n workshop = Workshop.objects.get(id=workshop_id)\n if isinstance(quantity, int):\n order_line_item = OrderLineItem(\n order=order,\n workshop=workshop,\n quantity=quantity,\n )\n order_line_item.save()\n except Exception as e:\n if order:\n order.delete()\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]} | ERROR: {e}',\n status=500)\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]}'\n '| SUCCESS: Created order in webhook',\n status=200)", "def test_process_postpay_accepted(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n result = process_postpay_callback(params)\r\n self.assertTrue(result['success'])\r\n self.assertEqual(result['order'], order1)\r\n order1 = Order.objects.get(id=order1.id) # reload from DB to capture side-effect of process_postpay_callback\r\n self.assertEqual(order1.status, 'purchased')\r\n self.assertFalse(result['error_html'])", "def process_postpay_callback(params):\r\n try:\r\n verify_signatures(params)\r\n result = payment_accepted(params)\r\n if result['accepted']:\r\n # SUCCESS CASE first, rest are some sort of oddity\r\n record_purchase(params, result['order'])\r\n return {'success': True,\r\n 'order': result['order'],\r\n 'error_html': ''}\r\n else:\r\n return {'success': False,\r\n 'order': result['order'],\r\n 'error_html': get_processor_decline_html(params)}\r\n except CCProcessorException as error:\r\n return {'success': False,\r\n 'order': None, # due to exception we may not have the order\r\n 'error_html': get_processor_exception_html(error)}", "def handle_initiate_payment(\n self, order: Order, response, order_token: OrderToken = None\n ) -> str:\n result = response[\"result\"]\n if order.status == OrderStatus.EXPIRED:\n raise ExpiredOrderError(_(\"Order has already expired\"))\n if result == 0:\n # Create the URL where user is redirected to complete the payment\n # Append \"?minified\" to get a stripped version of the payment page\n token = response[\"token\"]\n\n if order_token:\n order_token.token = token\n order_token.save()\n\n return self.url_payment_token.format(token=token)\n elif result == 1:\n raise PayloadValidationError(\n f\"{_('Payment payload data validation failed: ')} {' '.join(response['errors'])}\"\n )\n elif result == 2:\n raise DuplicateOrderError(_(\"Order with the same ID already exists\"))\n elif result == 10:\n raise ServiceUnavailableError(_(\"Payment service is down for maintenance\"))\n else:\n raise UnknownReturnCodeError(\n f\"{_('Return code was not recognized: ')} {result}\"\n )", "def payment_completed(order_id):\n order = Order.objects.get(id=order_id)\n\n #create invoice email\n subject = f'My Shop - EE Invoice no. 
{order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject, message, '[email protected]', [order.email])\n\n #generate PDF\n html = render_to_string('admin/orders/order/pdf.html', {'order': order})\n out =BytesIO()\n stylesheets = [weasyprint.CSS(settings.STATIC_ROOT + 'pdf.css')]\n weasyprint.HTML(string=html).write_pdf(out, stylesheets=stylesheets)\n\n #attach PDf file\n email.attach(f'order_{order.id}.pdf', out.getvalue(), 'application/pdf')\n\n #send email\n email.send()", "def process(request, order):\n # Transaction results\n APPROVED = '1'\n DECLINED = '2'\n ERROR = '3'\n HELD_FOR_REVIEW = '4'\n print \"I am processing the request\"\n\n postdata = request.POST.copy()\n amount = cart.cart_subtotal(request)\n\n print amount\n\n charge = stripe.Charge.create(\n amount=int(amount*100),\n currency=\"ngn\", # I can Change to naira if needed\n card=postdata.get('stripeToken', ''),\n description=\"Example charge\"\n )\n #\n #charge.capture()\n\n\n if charge['card']['cvc_check']:\n transaction_id = charge.id[3:22]\n order = create_order(request, order, transaction_id)\n results = {'order_number': order.id, 'message': u''}\n elif charge.balance_transaction:\n results = {'order_number': 0, 'message': charge.failure_message, 'code': charge.failure_code,\n 'text': charge.description}\n else:\n results = {'order_number': 0, 'message':charge.failure_message, 'errors': charge.errors}\n return results", "def awaiting_payment(self):", "def payment_completed(order_id):\n\n order = Order.objects.get(id=order_id)\n subject = f'My Shop - EE Invoice no. {order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject, message, '[email protected]', [order.email])\n\n pdf = render_to_pdf('admin/orders/order/pdf.html', {'order': order})\n email.attach(f'order_{order.id}.pdf', pdf.getvalue(), 'application/pdf')\n email.send()", "def payment_completed(order_id):\n order = Order.objects.get(id=order_id)\n # create invoice e-mail\n subject = f'My Shop - EE Invoice no. 
{order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject,\n message,\n '[email protected]',\n [order.user.email])\n # generate PDF\n html = render_to_string('orders/order/pdf.html', {'order': order})\n out = BytesIO()\n stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')]\n weasyprint.HTML(string=html).write_pdf(out,\n stylesheets=stylesheets)\n # attach PDF file\n email.attach(f'order_{order.id}.pdf',\n out.getvalue(),\n 'application/pdf')\n # send e-mail\n email.send()", "def process_payment():\n\n url = 'https://api.worldpay.com/v1/orders'\n headers = {'Authorization': environ.get('WORLDPAY_API_KEY'),\n 'Content-type': 'application/json'}\n body = {\n \"paymentMethod\": {\n \"type\": \"Card\",\n \"name\": session['caller_name'],\n \"expiryMonth\": session['expiry'][:2],\n \"expiryYear\": f\"20{session['expiry'][2:]}\",\n \"cardNumber\": session['card_number'],\n \"cvc\": session['cvv'],\n \"issueNumber\": \"1\"\n },\n \"orderType\": \"ECOM\",\n \"orderDescription\": session['call_sid'],\n \"amount\": session['payment_amount'],\n \"currencyCode\": \"GBP\"}\n r = requests.post(url, headers=headers, data=json.dumps(body))\n requests.post(environ.get('END_OF_INTERACTION_URL'), r.text)\n response = VoiceResponse()\n response.say(\"Payment processed, goodbye\")\n # If your flow started in Twilio Studio, redirect back to it to complete the call\n # response.redirect(\n # 'https://webhooks.twilio.com/v1/Accounts/ACfd0573f9f976b99746c693XXXXXXXXXX/Flows/FWbfdeda0a21644267231d3dXXXXXXXXXX?FlowEvent=return')\n return str(response)", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n\n if form.validate():\n messages.add_message(request, messages.SUCCESS, \"Payment update successfull\")\n return self.form_valid(form)\n else:\n messages.add_message(request, messages.ERROR, \"Payment update failed\")\n return self.form_invalid(form)" ]
[ "0.7352169", "0.72179747", "0.7183236", "0.7149302", "0.70583147", "0.700066", "0.69343185", "0.68865967", "0.6824747", "0.68121576", "0.6804239", "0.6716359", "0.6692291", "0.661997", "0.6476802", "0.6473549", "0.6418588", "0.6369197", "0.63250667", "0.6308499", "0.6287491", "0.6212806", "0.61367404", "0.6112529", "0.61093175", "0.61078024", "0.61049247", "0.60185", "0.5982502", "0.5972522" ]
0.74831283
0
| Ground Truth | Forehand Backhand Serve Forehand num_FF num_BF num_SF Backhand num_FB num_BB num_SB Serve num_FS num_BS num_SS No_action num_FN num_BN num_SN
def show_eval_class_level(num_BB, num_BF, num_BS, num_BN, num_FB, num_FF, num_FS, num_FN, num_SB, num_SF, num_SS, num_SN): print("************************************************") print(" | Ground Truth | ") print(" Forehand Backhand Serve ") print("Forehand " + str(num_FF) + " " + str(num_BF) + " " + str(num_SF)) print("Backhand " + str(num_FB) + " " + str(num_BB) + " " + str(num_SB)) print("Serve " + str(num_FS) + " " + str(num_BS) + " " + str(num_SS)) print("Noaction " + str(num_FN) + " " + str(num_BN) + " " + str(num_SN)) print("************************************************")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handDecision(handIn):", "def part1b_2():\n xs = exampleInput\n z = 5.881\n forward = [\n Counter({'-FEAT-': 0.622, '-SIZE-': 0.377}), \n Counter({'-SIZE-': 0.761, '-FEAT-': 0.238}), \n Counter({'-SIZE-': 0.741, '-FEAT-': 0.258})]\n \n z_, forward_ = submission.computeForward(simpleCRF, xs)\n for vec, vec_ in zip( forward, forward_):\n grader.requireIsTrue( Counters.approximateEquals( vec, vec_ ) )\n grader.requireIsEqual( z, z_, 1e-2)", "def test():\n sf = \"6C 7C 8C 9C TC\".split() # Straight Flush\n sf1 = \"6C 7C 8C 9C TC\".split() # Straight Flush\n sf2 = \"6D 7D 8D 9D TD\".split() # Straight Flush\n fk = \"9D 9H 9S 9C 7D\".split() # Four of a Kind\n fk3 = \"TC TS TH 2C TD\".split() # Four of a Kind\n fh = \"TD TC TH 7C 7D\".split() # Full House\n fl = \"AH KH JH 6H TH\".split() # Flush\n st = \"AH KC QD JD TS\".split() # Straight\n tk = \"2H 2C 2D AC TD\".split() # Three of kind\n tp = \"TD 9H TH 7C 9S\".split() # Two Pair\n op = \"TD TC AD KD QD\".split() # One Pair\n hq = \"2D 3D 4C 5H 7H\".split() # High card\n al = \"AC 2D 4H 3D 5S\".split() # Ace-Low Straight\n tp1 = \"7H 7D 9C 3C 9S\".split() #Two Pair\n fkranks = card_ranks(fk)\n tpranks = card_ranks(tp)\n op1 = \"KH 7C 5S KS 2S\".split() # One pair\n tp2 = \"TH 3S 2H 3D TC\".split() # Two pair\n tk1 = \"TH JD JH 8C JC\".split() # Three of kind\n hq1 = \"TH 9D 5C 3H 2C\".split() # High card\n f3 = \"2C 4C 6C 7C TC\".split() # Flush\n s3 = \"3C 4D 5H 6D 7H\".split() # Straight\n assert poker([fk3, f3, s3]) == fk3 #gilje start\n assert poker([sf, 20*fk]) == sf\n assert poker([fk3, 5*f3]) == fk3\n assert card_ranks(fk3) == [10, 10, 10, 10, 2]\n assert card_ranks(f3) == [10, 7, 6, 4, 2]\n assert hand_rank(fk3) == (7, 10, 2)\n assert hand_rank(f3) == (5, [10, 7, 6, 4, 2])\n assert flush(f3) == True\n assert straight(card_ranks(s3)) == True\n assert straight(card_ranks(f3)) == False #gilje slutt\n assert poker([fh, tk, hq]) == fh #oistein start\n assert poker([fl, sf1, tk]) == sf1\n assert poker([op, al, fh]) == fh\n assert poker([st, fk, tp]) == fk\n assert poker([tk, tp, op]) == tk\n assert poker([hq, op, hq]) == op\n assert card_ranks(op1) == [13, 13, 7, 5, 2]\n assert card_ranks(tp2) == [10, 10, 3, 3, 2]\n assert card_ranks(tk1) == [11, 11, 11, 10, 8]\n assert card_ranks(hq1) == [10, 9, 5, 3, 2] #oistein slutt\n assert poker([hq, tp, op]) == tp#steffen start\n assert poker([al, st]) == st\n assert poker([al, st, fl]) == fl\n assert card_ranks(hq) == [7, 5, 4, 3, 2]\n assert card_ranks(fk) == [9, 9, 9, 9, 7]\n assert card_ranks(fh) == [10, 10, 10, 7, 7]#steffen slutt\n assert poker([sf2, tk, al]) == sf2#arild start\n assert poker([hq, st]) == st\n assert poker([al, st, fk]) == fk\n assert flush(fl) == True\n assert straight(card_ranks(tp)) == False\n assert card_ranks(fk) == [9, 9, 9, 9, 7]\n assert card_ranks(hq) == [7, 5, 4, 3, 2]\n assert hand_rank(tk) == (3, 2, [14, 10, 2, 2, 2])\n assert hand_rank(st) == (4, 14)\n assert kind(5, tpranks) == None#arild slutt\n assert poker([tp, op]) == tp #Even start\n assert poker([hq, tk]) == tk\n assert poker([sf1] + 50*[fl]) == sf1\n assert card_ranks(sf1) == [10, 9, 8, 7, 6]\n assert card_ranks(tk) == [14, 10, 2, 2, 2]\n assert card_ranks(st) == [14, 13, 12, 11, 10]\n assert kind(4, fkranks) == 9\n assert kind(3, fkranks) == None\n assert kind(2, tpranks) == 10\n assert kind(1, fkranks) == 7 #Even slutt\n assert poker([sf1, fk, fh]) == sf1\n assert poker([fk, fh]) == fk\n assert poker([fh, fh]) == [fh, fh]\n assert poker([sf1]) == sf1\n assert poker([sf1] + 99*[fh]) == sf1\n assert 
hand_rank(sf1) == (8, 10)\n assert hand_rank(fk) == (7, 9, 7)\n assert hand_rank(fh) == (6, 10, 7)\n assert straight(card_ranks(al)) == True\n assert poker([sf1, sf2, fk, fh]) == [sf1, sf2]\n assert kind(4, fkranks) == 9\n assert kind(3, fkranks) == None\n assert kind(2, fkranks) == None\n assert kind(1, fkranks) == 7\n return 'You did good, and you should feel good about yourself :)'", "def exercise_b2_95():\r\n pass", "def exercise_b2_52():\r\n pass", "def test_hand_is_straightflush():\n from poker_rankings import PokerHand\n heroes_hand = PokerHand(\"5H 4H 3H 2H AH\")\n assert heroes_hand._is_flush == True\n assert heroes_hand._is_straight == True\n assert heroes_hand._hand_value == 9", "def score_hand(hand, flip, verbose):\n if type(hand) == tuple:\n hand = list(hand)\n hand = hand + [flip]\n nums = [int(c.split('-')[0]) for c in hand]\n suits = [c.split('-')[1] for c in hand]\n\n # nobs\n jack = 0\n if 11 in nums:\n flip_suit = flip.split('-')[1]\n for card in hand:\n if card.split('-') == ['11', flip_suit]:\n jack = 1\n\n # pairs\n pairs = {i:nums.count(i) for i in nums}\n pair_score = sum([Cribbage.permu(n, 2) for n in pairs.values() if n>1])\n\n # flush\n if len(unique(suits[:4])) == 1:\n if flip.split('-')[1] == suits[0]:\n flush_score = 5\n else:\n flush_score = 4\n else:\n flush_score = 0\n\n #fifteens and runs\n fifteens = list()\n runs_raw=list()\n\n for comb in [combinations(hand, i) for i in list(range(6,1, -1))]:\n for c in (list(comb)):\n #fifteen\n c_adj = [10 if int(n.split('-')[0])>10 else int(n.split('-')[0]) for n in c] # deals with face cards\n if c not in fifteens and sum(c_adj) == 15:\n fifteens.append(c)\n\n # runs\n nums_a = [int(c_.split('-')[0]) for c_ in c]\n l = len(c_adj)\n c_sorted = sorted(c)\n if l>= 3 and len(unique(nums_a)) == l and (max(nums_a) - min(nums_a)) == (l-1):\n runs_raw.append(tuple(c_sorted))\n\n runs = [list(x) for x in Cribbage.get_unique_runs(runs_raw)] # helps in counting points\n\n fifteen_score = len(fifteens) * 2\n runs_score = len(ndarray.flatten(asarray(runs)))\n\n if verbose:\n pair_explain = [\"{} {}s\".format(v, k) for k,v in pairs.items() if v>1]\n s = \"\"\"Jack: {}\\npairs({}): {}\\nfifteens({}): {}\\nruns({}): {}\\nflush: {}\"\"\"\n print(s.format(jack, pair_score, pair_explain, fifteen_score,fifteens,\n runs_score, runs, flush_score))\n\n return int(jack + pair_score + flush_score + fifteen_score + runs_score)", "def HBNB():\n return 'HBNB'", "def back_test(self, turnover_frequency):", "def pre_flop_strength(hand):\n highs = {}\n highs[4] = [\n \"AA\", \"AKs\", \"AQs\", \"AJs\", \"ATs\", \"AKo\", \"KK\", \"KQs\", \"KJs\", \"AQo\",\n \"QQ\", \"QJs\", \"JJ\", \"TT\"\n ]\n highs[3] = [\n \"A5s\", \"A4s\", \"A3s\", \"KTs\", \"KQo\", \"QTs\", \"AJo\", \"JTs\", \"T9s\", \"99\",\n \"98s\", \"88\", \"87s\", \"77\", \"66\"\n ]\n highs[2] = [\n \"A9s\", \"A8s\", \"A7s\", \"A6s\", \"A2s\", \"K9s\", \"K8s\", \"Q9s\", \"KJo\", \"QJo\",\n \"J9s\", \"ATo\", \"KTo\", \"QTo\", \"JTo\", \"T8s\", \"A9o\", \"J9o\", \"T9o\", \"97s\",\n \"98o\", \"86s\", \"76s\", \"75s\", \"65s\", \"55\", \"44\", \"33\", \"22\"\n ]\n highs[1] = [\n \"K7s\", \"K6s\", \"K5s\", \"K4s\", \"K3s\", \"Q8s\", \"Q7s\", \"Q6s\", \"Q5s\", \"Q4s\",\n \"J8s\", \"J7s\", \"J6s\", \"J5s\", \"T7s\", \"T6s\", \"K9o\", \"Q9o\", \"96s\", \"A8o\",\n \"K8o\", \"Q8o\", \"J8o\", \"T8o\", \"85s\", \"A7o\", \"K7o\", \"Q7o\", \"T7o\", \"97o\",\n \"87o\", \"74s\", \"A6o\", \"K6o\", \"86o\", \"76o\", \"64s\", \"63s\", \"A5o\", \"75o\",\n \"65o\", \"54s\", \"53s\", \"A4o\", \"43s\", \"A3o\"\n 
]\n card0, card1 = hand\n if card0[0] == card1[0]:\n pair = \"\".join([card0[0], card1[0]])\n elif card0[1] == card1[1]:\n pair = \"\".join([card0[0], card1[0], \"s\"])\n else:\n pair = \"\".join([card0[0], card1[0], \"o\"])\n for strenght in highs:\n if pair in highs[strenght]:\n return strenght\n return 0", "def part1b_3():\n xs = exampleInput\n backward = [\n Counter({'-SIZE-': 0.564, '-FEAT-': 0.435}),\n Counter({'-SIZE-': 0.567, '-FEAT-': 0.432}),\n Counter({'-FEAT-': 0.5, '-SIZE-': 0.5})]\n backward_ = submission.computeBackward(simpleCRF, xs)\n for vec, vec_ in zip( backward, backward_):\n grader.requireIsTrue( Counters.approximateEquals( vec, vec_ ) )", "def hbnb():\n return \"HBNB\"", "def hbnb():\n return \"HBNB\"", "def vanilaScore(self,attended,state,W):", "def exercise_b2_70():\r\n pass", "def hbnb():\n return 'HBNB'", "def foxes_and_hens(strategy, foxes=7, hens=45):\n # A state is a tuple of (score-so-far, number-of-hens-in-yard, deck-of-cards)\n state = (score, yard, cards) = (0, 0, 'F'*foxes + 'H'*hens)\n while cards:\n action = strategy(state)\n state = (score, yard, cards) = do(action, state)\n return score + yard", "def exercise_b2_53():\r\n pass", "def score_int( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return 80000 + 100*order(hand[4])\n #straight_flush\n elif flush(hand) and straight(hand):\n return 80000 + 100*order(hand[4])\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return 70000 + 100*order(m[0].card)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return 60000 + 100*order(m[0].card) + order(m[1].card)\n #flush\n elif flush(hand):\n return 50000 + 100*order(hand[4])\n #straight\n elif straight(hand):\n return 40000 + 100*order(hand[4])\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return 30000 + 100*order(m[0].card)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return 20000 + 100*order(m[0].card) + order(m[1].card)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return 10000 + 100*order(m[0].card) + order(m[1].card)\n # Simple high card. Is this adequate? 
We'll know if we get ties.\n else:\n return 100*order(hand[4]) # or 100*order(m[0].card)", "def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)", "def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)", "def test_perfect_recall_hebbian(self):\n v_one = [1, -1, -1, -1, 1, -1, -1, -1, 1]\n v_two = [-1, -1, -1, 1, 1, 1, -1, -1, -1]\n network = HopfieldNetwork([v_one, v_two], learning_rule=\"Hebb\")\n\n # Check recall of exemplar 1 under async & sync\n results = network.synchronous_recall([1, -1, -1, -1, 1, -1, -1, -1, 1])\n p = results[-1]\n npt.assert_equal(p, v_one)\n results = network.asynchronous_recall([1, -1, -1, -1, 1, -1, -1, -1, 1])\n p = results[-1]\n npt.assert_equal(p, v_one)\n\n # Check recall of exemplar 2 under async & sync\n results = network.synchronous_recall([-1, -1, -1, 1, 1, 1, -1, -1, -1])\n p = results[-1]\n npt.assert_equal(p, v_two)\n results = network.synchronous_recall([-1, -1, -1, 1, 1, 1, -1, -1, -1])\n p = results[-1]\n npt.assert_equal(p, v_two)", "def event_m20_11_3020():\n \"\"\"State 0,2: [Preset] Beautiful frog singing voice_flag_SubState\"\"\"\n assert event_m20_11_x110(z24=211000081, z25=802, z26=211020082)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def event11515495():\n header(11515495)\n\n if_event_flag_on(0, EVENT.DarkSmoughIsSupport)\n\n wait_random_seconds(12, 17) # Time between intermittent one-off Smough attacks.\n\n end_if_event_flag_on(EVENT.DarkOrnsteinAndSmoughPhaseTwoStarted)\n\n flag.disable_chunk(11515470, 11515479)\n if_entity_health_less_than_or_equal(1, CHR.DarkOrnsteinGiant, 0.25)\n skip_if_condition_false(2, 1)\n flag.enable_random_in_chunk(11515470, 11515478) # Maybe butt slam (3008).\n skip(1)\n flag.enable_random_in_chunk(11515470, 11515477) # No butt slam.\n\n restart()", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def way_to_prewin(self, one):\r\n\t\tkind = [0, 0, 0, 0] \r\n\t\tuseful = []\r\n\t\tscore = 0\r\n\t\tfor part in one.split(\"\\t\"):\r\n\t\t\t#print \"part: {0}\".format(part)\r\n\t\t\ttmp = part.split()\r\n\t\t\tlength = len(tmp)\r\n\t\t\tif (length == 3):\t\t# triple\r\n\t\t\t\tkind[0] += 1\r\n\t\t\t\tscore += 3\r\n\t\t\telif (length == 1):\t\t# single\r\n\t\t\t\tkind[3] += 1\r\n\t\t\telif (tmp[0] == tmp[1]):# pair\r\n\t\t\t\tkind[1] += 1\r\n\t\t\t\tuseful.append(\"{0}\".format(tmp[0]))\r\n\t\t\t\tscore += 2\r\n\t\t\telse:\r\n\t\t\t\tkind[2] += 1\r\n\t\t\t\tn = GameBoard.NextCard(tmp[0])\r\n\t\t\t\tp = GameBoard.PrevCard(tmp[0])\r\n\t\t\t\tnn = GameBoard.NextCard(tmp[1])\r\n\t\t\t\tif (n == tmp[1]):\t# consecutive neighbor\r\n\t\t\t\t\tscore += 2\r\n\t\t\t\t\tif (p != None): useful.append(p)\r\n\t\t\t\t\tif (nn != None): useful.append(nn)\r\n\t\t\t\telse:\r\n\t\t\t\t\tuseful.append(n)\r\n\t\t\t\t\tscore += 1\r\n\r\n\t\t#print \"kind: {0}\".format(kind)\r\n\t\t\"\"\" how many cards can we probably get \"\"\"\r\n\t\tuseful_amount = 0\r\n\t\tfor card in set(useful):\r\n\t\t\tctype = GameBoard.CardType(card)\r\n\t\t\tif (ctype == 1): a = self.wang_list.count(card)\r\n\t\t\telif (ctype == 2): a = self.tube_list.count(card)\r\n\t\t\telif (ctype == 3): a = self.bamb_list.count(card)\r\n\t\t\telif (ctype == 4): a = self.word_list.count(card)\r\n\t\t\telse: a = self.wind_list.count(card)\r\n\t\t\tb = self.gb.drop_list.count(card)\r\n\t\t\tuseful_amount += (4 - a - b)\r\n\r\n\t\t\"\"\"\r\n\t\t1. enumerate, because goal state not too much\r\n\t\t2. 
counting pong_list so that we can focus on 16 cards\r\n\t\t3. check prewin first\t\r\n\t\t\"\"\"\r\n\t\tgoals = [[4, 1, 1, 0], [5, 0, 0, 1], [4, 2, 0, 0]]\r\n\t\tgpattern = [[\"***\", \"***\", \"***\", \"***\", \"##\", \"$$\"],[\"***\", \"***\", \"***\", \"***\", \"***\", \"/\"],\r\n\t\t\t\t\t[\"***\", \"***\", \"***\", \"***\", \"##\", \"##\"]]\r\n\t\tresult = [0, 0, 0]\r\n\t\tsize = len(self.pong_list) / 3\r\n\t\tif ((size + kind[0]) > 4): return [0, useful_amount, score]\r\n\t\tfor i in range(size): \r\n\t\t\tgpattern[0].remove(\"***\")\r\n\t\t\tgpattern[1].remove(\"***\")\r\n\t\t\tgpattern[2].remove(\"***\")\r\n\t\tfor number in range(3):\r\n\t\t\tk0 = kind[0]\r\n\t\t\tk1 = kind[1]\r\n\t\t\tk2 = kind[2]\r\n\t\t\tk3 = kind[3]\r\n\t\t\tp1 = gpattern[number]\r\n\t\t\tfor i in range(k0): p1.remove(\"***\")\r\n\t\t\tfor i in range(min(k1, goals[number][1])): \r\n\t\t\t\tk1 -= 1\r\n\t\t\t\tp1.remove(\"##\")\r\n\t\t\tfor i in range(min(k2, goals[number][2])):\r\n\t\t\t\tk2 -= 1\r\n\t\t\t\tp1.remove(\"$$\")\r\n\t\t\t#print \"p1: {0}, size: {1}\".format(p1, (k1+k2)*2+k3)\r\n\t\t\tstep = 0\r\n\t\t\ttwo = k1 + k2\r\n\t\t\tfor i in range(len(p1)):\r\n\t\t\t\tlength = len(p1[i])\r\n\t\t\t\tif (length == 3):\r\n\t\t\t\t\tif two:\r\n\t\t\t\t\t\ttwo -= 1\r\n\t\t\t\t\t\tstep += 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tstep += 2\r\n\t\t\t\telif (length == 2):\r\n\t\t\t\t\tstep += 1\r\n\t\t\tresult[number] = step\r\n\t\t#print \"steps to goal: {0}, useful: {1}\".format(result, len(useful))\r\n\t\t\r\n\t\treturn [min(result), useful_amount, score]", "def sugg(n):\n print (\"%s\\t\"*3)%(\"p\", \"m(bytes)\", \"ok\")\n for p in (0.1, 0.01, 0.001, 0.0001, 0.00001):\n m=BloomFilter.calBitLen(n,p)\n ok=BloomFilter.calHash(n,m)\n print (\"%.5f\\t\"+\"%d\\t\"*2)%(p, m/8, ok)\n for k in BloomFilter.KRange:\n rp=BloomFilter.calPFP(n,m,k)\n print (\"\\t\"*2+\"%d\\t%f\")%(k, rp)", "def exercise_b2_106():\r\n pass", "def event_m20_11_x69(z54=211000002, z56=_):\n \"\"\"State 0,1: Defeat flag ON\"\"\"\n SetEventFlag(z54, 1)\n \"\"\"State 2: Head flag judgment\"\"\"\n CompareEventFlag(0, 102750, 1)\n if ConditionGroup(0):\n \"\"\"State 4: Hand flag judgment\"\"\"\n CompareEventFlag(0, 102752, 1)\n if ConditionGroup(0):\n \"\"\"State 5: Foot flag judgment\"\"\"\n CompareEventFlag(0, 102753, 1)\n if ConditionGroup(0):\n \"\"\"State 6: Torso flag judgment\"\"\"\n CompareEventFlag(0, 102751, 1)\n if ConditionGroup(0):\n \"\"\"State 10: Weapon flag ON\"\"\"\n SetEventFlag(z56, 1)\n else:\n \"\"\"State 7: Torso flag ON\"\"\"\n SetEventFlag(102751, 1)\n else:\n \"\"\"State 9: Foot flag ON\"\"\"\n SetEventFlag(102753, 1)\n else:\n \"\"\"State 8: Hand flag ON\"\"\"\n SetEventFlag(102752, 1)\n else:\n \"\"\"State 3: Head flag ON\"\"\"\n SetEventFlag(102750, 1)\n \"\"\"State 11: End state\"\"\"\n return 0", "def exercise_b2_93():\r\n pass" ]
[ "0.58027256", "0.5786682", "0.5755825", "0.5679587", "0.56194", "0.56090933", "0.5567905", "0.5535476", "0.5521466", "0.54943043", "0.5431216", "0.5410346", "0.5410346", "0.54039884", "0.53975636", "0.53964627", "0.53595126", "0.5350653", "0.5346474", "0.53431875", "0.5331558", "0.5298031", "0.5278022", "0.5268218", "0.5254117", "0.5249863", "0.52471447", "0.5246645", "0.5237407", "0.52357864" ]
0.58535993
0
Remove Key from a Key Value pair. Can be performed on a Dictionary or JSON key value string
def remove(kv_data, key): if isinstance(kv_data, str): kv_data = loads(kv_data) # Turn into Dictionary try: del kv_data[key] except NameError: print(key, " does not exists in key value pair.") kv_data = dumps(kv_data) else: print("Provide a Json Key Value String") sys.exit(6) return kv_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_value(self, thing_key, dkey):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n return\n dic.pop(dkey, None)", "def remove_value(self, key: str) -> None:\n raise NotImplementedError", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n # Test of the case that a dict is not given\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def removeDic(dic, key):\n pass", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n actual = Dict(self.test_subject).remove_key([\"Py\", \"test\"])\n\n self.assertEqual(expected, actual)", "def remove(self, key):", "def remove_key(self, key: str):\r\n\r\n if key in self._inner_dict:\r\n del self._inner_dict[key]\r\n else:\r\n raise KeyError(f\"key '{key}' is invalid\")", "def delete_key_HELPER(data_dict, key_list, key_to_delete):\n data_dict = get_key_from_dict_HELPER(data_dict, key_list[:-1])\n data_dict.pop(key_to_delete)\n return data_dict", "def dict_pop(d, key):\n return d.pop(key)", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def __delitem__(self, key):\n try:\n kvp = self.keyvaluepair_set.get(key=key)\n except KeyValuePair.DoesNotExist:\n raise KeyError\n else:\n kvp.delete()", "def remove(enforcer_dict, key):\n del enforcer_dict['f']\n assert other.keystring == 'abcde'\n assert other.valuesum == 15\n\n enforcer_dict['a'] = 2\n assert other.keystring == 'bcdea'\n assert other.valuesum == 16\n\n enforcer_dict.clear()\n assert other.keystring == ''\n assert other.valuesum == 0", "def remove_element( self, dictionary, key):\n\n _dict = dictionary.copy()\n _dict.pop(key, None)\n return _dict", "def test_remove_key_not_found(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"Py\": \"Funceble\",\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key(\"xxx\")\n\n self.assertEqual(expected, actual)", "def remove(self, key, value):\n if key not in self: return\n elif self[key] == value: del(self[key])\n else:\n try:\n parts = self.list(key)\n parts.remove(value)\n self[key] = \"\\n\".join(parts)\n except ValueError:\n if key in self: del(self[key])", "def eliminate_key (self,key):\r\n\r\n if self.using_shelf:\r\n\r\n del self.key_dict[str(key)]", "def remove_key(self, key):\n del self.data[key]\n self.save_data()", "def remove(self, key):\n pass", "def remove(self, key):\n\t\tfrom collections import OrderedDict\n\t\tdic = self.__dict__\n\t\tif not dic.get(key):\n\t\t\traise ValueError(\"Inputted Key is not valid for removal.\")\n\t\tdel dic[key]\n\t\tnew_dict = {}\n\t\targs = dic.values()\n\t\tfor x, y in enumerate(args):\n\t\t\tnew_dict.update({x: y})\n\t\tnew_dict = OrderedDict(sorted(new_dict.items()))\n\t\tself.__dict__ = new_dict", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def remove(self, data, key, 
value):\n if key in data:\n if not value: # value is empty or false, just remove it\n data.pop(key, None) # delete\n elif isinstance(value, type(data[key])): # if same type\n if isinstance(value, list): # if it's a list, like modules\n data[key] = list(set(data[key]) - set(value))\n elif isinstance(\n value, dict\n ): # if it's a dict, difference of the keys and rebuild dict\n for k, v in value.items():\n data[key][k] = self.remove(data[key], k, v)\n else:\n raise TypeError(\n f\"Value of {key} is {type(value)} and\"\n f\" the imported {key} is {type(data[key])}. Type mismatch.\"\n )\n return data[key]", "def test_remove_multiple_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key([\"funilrys\", \"Py\"])\n\n self.assertEqual(expected, actual)", "def remove(self, key_name: str):\n pass", "def removeKey(self, timeOrHash) -> None:\n ...", "def delete(self, key):", "def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)", "def delete(self, key):\n self.map.pop(key, None)", "def remove_from_dictionary(self,dictionary,*keys):\r\n for key in keys:\r\n if key in dictionary:\r\n value = dictionary.pop(key)\r\n logger.info(\"removed item with key '%s' and value '%s'\" %(key,value))\r\n else:\r\n logger.info(\"Key '%s' not found\" %(key))", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def remove(self, key: int | str):\n self.__delitem__(key)" ]
[ "0.7145345", "0.7130893", "0.7018091", "0.6990667", "0.6903429", "0.6875397", "0.6862628", "0.6806682", "0.6771876", "0.6739055", "0.67074156", "0.6704598", "0.6693101", "0.6646352", "0.6637673", "0.66145486", "0.6510989", "0.6470834", "0.64574033", "0.64387095", "0.6428868", "0.6426329", "0.6420157", "0.63916564", "0.6390707", "0.638956", "0.6385038", "0.63835984", "0.63544047", "0.634082" ]
0.8426328
0
Check whether a JSON Key Value string contains this value
def contains_value(kv_json, value): if isinstance(kv_json, str): kv_dict = loads(kv_json) for key in kv_dict: if kv_dict[key] == value: # Found value in dictionary return True return False else: print("Provide A JSON Key Value String")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY", "def is_item_in_the_response(key, value, jsonResponse):\n flag = False\n for item in jsonResponse:\n if type(jsonResponse[item]) == int:\n if item == key and jsonResponse[item] == int(value):\n flag = True\n\n if type(jsonResponse[item]) == str:\n if item == key and jsonResponse[item] == str(value):\n flag = True\n\n if type(jsonResponse[item]) == bool:\n if item == key and jsonResponse[item] == bool(value):\n flag = True\n else:\n #log and error\n pass\n return flag", "def find_in_json(cls, key, value):\n data = JsonFile.read_file()\n new_json = []\n for i in range(len(data['todo'])):\n if data['todo'][i][key] == value:\n if key is 'id':\n return data['todo'][i] # because id is unique and when find it we don't need keep processing\n else:\n new_json.append(data['todo'][i])\n return new_json", "def hasValue(self, key):\n return self.has_key('__' + key)", "def contains_key(kv_json, key):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n try:\n res = kv_dict[key]\n return True\n except KeyError:\n return False\n else:\n print(\"Provide A JSON Key Value String\")", "def has(\n obj: Union[JSONArray, JSONObject], # pylint: disable=unsubscriptable-object\n key: Union[int, str, FlatKey], # pylint: disable=unsubscriptable-object\n):\n try:\n get(obj, key)\n return True\n except JSONKeyError:\n return False", "def has(self, key):", "def has_value(value):\n return IsDictContainingValue(wrap_matcher(value))", "def _is_key_value(data):\n if data is None:\n return False\n return all(x in data for x in ['key', 'value'])", "def extract_value(self, json_body):\n # Extract\n res = next(iter(jmespath.search(JMESPATH_BASE, json_body)))\n\n try:\n res = res[self.key]\n except (KeyError, TypeError):\n _LOGGER.warning(\"Sensor %s not found in %s\", self.key, res)\n self.value = None\n return False\n\n if self.path is None:\n # Try different methods until we can decode...\n _paths = [JMESPATH_VAL, JMESPATH_VAL_IDX.format(self.key_idx)]\n while _paths:\n _path = _paths.pop()\n _val = jmespath.search(_path, res)\n if _val:\n _LOGGER.debug(\"Extracting %s using %s\", self.name, _path)\n self.path = _path\n break\n\n # Extract new value\n if self.path is None:\n _LOGGER.debug(\"Null path %s\", res)\n res = None\n else:\n res = jmespath.search(self.path, res)\n\n if isinstance(res, int) and self.factor:\n res /= self.factor\n try:\n return res != self.value\n finally:\n self.value = res", "def can_serialize_json(key, value):\n\n try:\n json.dumps({key: value})\n return True\n except:\n return False", "def contains(self, value):\n for item in self.data:\n if item == value:\n return item\n return False", "def has(self, v):\n return v in self.values", "def contains(self, key):\n try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False", "async def contains(self, key: str) -> bool:", "def might_contain(self, value):\n h = self.hash_value(value)\n return self.values[h]", "def __contains__(self, value):\n\n try:\n # Just use __getitem__()\n self[value]\n except KeyError:\n return False\n else:\n return True", "def __contains__(self, key: str) -> bool:\n return key in self.raw", "def __contains__(self, key):\n return self._lookup(key).value is not None", "def __contains__(self, key):\n return self.__getitem__(key)", "def __contains__(self, item: object) -> bool:\n if 
isinstance(item, tuple) and len(item) == 2:\n var, value = item\n else:\n return False\n if isinstance(var, str):\n if var and var[0] == '$':\n var = var[1:]\n try:\n return self._mapping._fixup[var.casefold()].value == conv_kv(value)\n except KeyError:\n return False\n return False", "def contains(self, key):\n\n return self._get(\"contains\", key, rtype=Bool)", "def hasCustomData( self, key ):\n return str(key) in self._customData", "def contains(self, key):\n visitor = VisitorContains()\n self.visit(key, visitor)\n return visitor.result", "def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None", "def dictionary_value_grabber(self, value, dic):\r\n self.coder=\"Used to grab a value in a dictionary\"\r\n for v in dic.values():\r\n if v==value:\r\n return value\r\n else:\r\n pass", "def check_list(source, value):\n try:\n return value in json.loads(source)\n except:\n return False", "def has(self, key):\n return self.data.get(key, None) is not None", "def assert_contains(self, result, key):\n if type(result) == bytes:\n result = self.type_convert(result)\n content = result.get(key)\n if content:\n pass\n else:\n raise AssertionError(\"Unexpected response, missing param: \", key)", "def __contains__(self, key):\n return self.contains(key)" ]
[ "0.71973366", "0.68866235", "0.66816956", "0.64885634", "0.6401842", "0.6400266", "0.6142757", "0.61393195", "0.60865045", "0.60471123", "0.60368866", "0.60345954", "0.6024064", "0.6017378", "0.5997288", "0.59498996", "0.59224445", "0.58714145", "0.5839041", "0.58378595", "0.58166796", "0.5813549", "0.5786215", "0.5785954", "0.576785", "0.5763452", "0.575908", "0.57541525", "0.57430553", "0.5741245" ]
0.71492994
1
From all the information provided by the ONCat template, we are only interested in the following fields [name, path and units]. We isolate those into the template_information dictionary
def isolate_relevant_information(self): def get_formula(oncat_formula): """will need to go from something like "${value/10e11}`" to something more pythonic "{value/10e11}""" regular_expression = r'\$(?P<formula>.+)\`' m = re.search(regular_expression, oncat_formula) if m: return m.group('formula') else: return "" template_information = {} for _index, _element in enumerate(self._oncat_default_template): _title = _element["name"] _path = _element["path"] if "units" in _element: _units = _element["units"] else: _units = "" if "transform" in _element: _formula = get_formula(_element["transform"]) else: _formula = "" template_information[_index] = {'title': _title, 'path': _path, 'units': _units, 'formula': _formula} self.template_information = template_information
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }", "def local_metadata(paths):\n\n # Update template directory\n image_graph = network.load_graph(paths.image_network)\n\n template_paths = {}\n\n def template_selection(path_listing):\n for node in path_listing:\n if os.path.exists(paths.resource_pack + '\\\\' + os.path.join(*(node.split(os.path.sep)[1:]))):\n image_data = dict(image_graph.nodes(data=True))[node]\n print(image_data)\n template_paths[os.path.split(image_data['group_name'])[1]] = node\n return\n\n for bunch in connected_component_subgraphs(image_graph):\n sorted_bunch = network.connectivity_sort(bunch.nodes(), bunch)\n\n if len(sorted_bunch) == 1:\n continue\n\n template_selection(sorted_bunch)\n\n print(str(len(list(template_paths.values()))) + ' templates identified.')\n\n with open(paths.binding_identifiers, 'w') as json_binding_ids:\n json.dump(template_paths, json_binding_ids, sort_keys=True, indent=2)\n\n bindings.build(paths, template_paths.values())", "def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data", "def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' 
% tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()", "def parse_special(special):\n special_name = \"\"\n specials = {}\n for line in special:\n if check_template_start(line):\n special_name = line.split(\":\")[1]\n specials[special_name] = []\n elif check_template_end(line):\n special_name = \"\"\n elif special_name != \"\":\n specials[special_name].append(line)\n\n return specials", "def test_get_device_template(self):\n pass", "def get_template_names(self): \n product = self.get_object()\n names = ['%s/detail-for-upc-%s.html' % (self.template_folder, product.upc), \n '%s/detail-for-class-%s.html' % (self.template_folder, product.item_class.name.lower()),\n '%s/detail.html' % (self.template_folder)]\n return names", "def test_get_device_templates(self):\n pass", "def preprocess_template(template_file: str) -> None:\n LOGGER.info(\"Processing template %s\", template_file)\n\n with DFReader(open(template_file, \"rb\")) as reader:\n level = reader.read_level()\n\n doors = {}\n keys_needed: Dict[int, int] = collections.Counter()\n for eid, (_, _, entity) in level.entities.items():\n if not isinstance(entity, LevelDoor):\n continue\n\n doors[eid] = {\n \"level\": entity.file_name.decode(),\n \"door\": entity.door_set,\n }\n keys_needed[DOOR_INFO[entity.door_set][1]] += 1\n\n for door_data in doors.values():\n key_type = DOOR_INFO[door_data[\"door\"]][1]\n while key_type < 3 and keys_needed[key_type + 1] == 0:\n key_type += 1\n door_data[\"key_get\"] = key_type\n\n with open(template_file + \".json\", \"w\") as fout:\n json.dump(\n {\"doors\": doors},\n fout,\n )", "def test_ws_getItemInfosWithReusedPODTemplates(self):\n # in the PM test profile, some templates are only defined for the plonemeeting-assembly\n self.usedMeetingConfigId = \"plonegov-assembly\"\n self.changeUser('pmCreator1')\n item = self.create('MeetingItem')\n # first check that the only returned template is a template rusing another\n viewlet = self._get_viewlet(\n context=item,\n manager_name='plone.belowcontenttitle',\n viewlet_name='document-generation-link')\n templates = 
viewlet.get_generable_templates()\n self.assertEqual(len(templates), 1)\n self.assertTrue(templates[0].pod_template_to_use)\n self.assertIsNone(templates[0].odt_file)\n # get the reponse\n resp = self._getItemInfos(item.UID(), showTemplates=True, toBeDeserialized=False)\n # we have 1 template\n self.assertEqual(len(resp._itemInfo[0]._templates), 1)\n # templateFilename was taken from template to use\n self.assertEqual(resp._itemInfo[0]._templates[0]._templateFilename, u'Item.odt')\n self.assertEqual(resp._itemInfo[0]._templates[0]._templateFormat, 'odt')", "def calc_template(template_def, config):\n template = Template(**template_def)\n #print \"template_def:\", template_def, \"config:\", config\n try:\n retvals = process_template(template, config, target=(None, None))\n except Exception:\n print(\"==== template ====\"); pprint(template_def)\n print(\"==== config ====\"); pprint(config)\n #traceback.print_exc()\n raise\n output = {}\n for rkey, rv in retvals.items():\n module_id, terminal_id = rkey\n module_key = str(module_id)\n output.setdefault(module_key, {})\n output[module_key][terminal_id] = rv.todict()\n return output", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def get_layer_info_template(file, print_first_element = True):\n \n try:\n layer_info_template = json.load(open(file))\n if(print_first_element==True):\n print(\"/n----This is the layer info template ----\")\n print(layer_info_template)\n return layer_info_template\n except:\n print(\"Unexpected error:\", sys.exc_info()[0]) \n return None", "def template_data(self) -> Any:\n return pulumi.get(self, \"template_data\")", "def get_default_template(env):\n return env.from_string(\n \"\"\"\\\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }}|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.standard_information.accessed|unixtimestampformat }}|{{ record.standard_information.modified|unixtimestampformat }}|{{ record.standard_information.changed|unixtimestampformat }}|{{ record.standard_information.created|unixtimestampformat }}\n{% endif %}\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }} (filename)|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size 
}}|{{ record.filename_information.accessed|unixtimestampformat }}|{{ record.filename_information.modified|unixtimestampformat }}|{{ record.filename_information.changed|unixtimestampformat }}|{{ record.filename_information.created|unixtimestampformat }}\n{% endif %}\n{% for e in record.indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n{% for e in record.slack_indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (slack-INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n\"\"\"\n )", "def _generate_info_dict(meta_path, bands='ugrizy'):\n return DC2DMTractCatalog._generate_info_dict(meta_path, bands)", "def _usage_report_dict(self, root):\n details = {}\n selector = 'table > tbody > tr'\n for (resource, unit, used) in root.cssselect(selector):\n name = resource.findtext('strong').strip()\n details[name] = (used.text.strip(), unit.text.strip())\n return details", "def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')", "def fileInfo(tif: TiffFile):\n print(tif.flags)\n print(tif.geotiff_metadata)\n for page in tif.pages:\n print(page.tags)\n print(page.geotiff_tags)\n print(page.shape)\n print(page.dtype)\n print(page.flags)", "def _get_template_nics(self, template):\n\n try:\n backend_template = self.client.get_template_library_item(\n template.backend_id\n )\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n return [nic['key'] for nic in backend_template['nics']]", "def get_info(self) -> str:\n template_data = self.get_template_data()\n return self.get_template().render(\n resource=self.resource,\n markdown=markdown,\n data=template_data,\n base=RESOURCE_TEMPLATE\n )", "def __init__(self, template):\n\n self.template = template\n self.parsed_template = {}", "def get_templates(instrument=''):\n import os, json\n template_path = os.path.dirname(__file__)\n template_names = [fn\n for fn in os.listdir(template_path)\n if fn.endswith(\".json\") and fn.startswith(instrument)]\n templates = dict([(tn[len(instrument)+1:-5],\n json.loads(open(os.path.join(template_path, tn), 'r').read()))\n for tn in template_names])\n return templates", "def T(request):\n\treturn all_templates[request.param]", "def processTemplates(self, tk, templateFile = '', id = '', shotNum = '', inprogressBar = ''):\r\n ## Now fetch all the template paths from shotgun\r\n getTemplatePaths = tk.paths_from_template(templateFile, {'Step' : 'Light', 'id' : id, 'Shot' : shotNum})\r\n debug(app = self, method = 'processTemplates', message = 'getTemplatePaths: %s' % getTemplatePaths, verbose = False)\r\n \r\n ## Now look for each assets template path: \r\n xmlFile = max(getTemplatePaths) \r\n debug(app = self, method = 'processTemplates', message = 'Max Version xmlFile.... 
%s' % xmlFile, verbose = False)\r\n \r\n ## Now if versions has stuff in it..\r\n if not xmlFile:\r\n debug(app = self, method = 'processTemplates', message = 'Can not find any xml files for %s' % shotNum, verbose = False)\r\n pass\r\n else:\r\n \r\n debug(app = self, method = 'processTemplates', message = 'PathTo: %s' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n if os.path.isfile(xmlFile.replace(os.path.sep, \"/\")):## is this a valid xml file!?\r\n inprogressBar.updateProgress(percent = 10, doingWhat = 'createAll shaders...')\r\n self._createAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n \r\n inprogressBar.updateProgress(percent = 30, doingWhat = 'connectAll shaders...')\r\n self._connectAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n else:\r\n debug(app = self, method = 'processTemplates', message = 'FAILED Can not find a valid published xml file for %s ...' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n pass", "def __verify_details(self):\n if self.major[0] not in self.data[self.root]:\n self.data[self.root][self.major[0]] = {}\n for key, value in self.template_data[self.root][self.major[0]].items():\n key, value = self.__verified_details_key_value(key, value)\n self.data[self.root][self.major[0]][key] = self.__verify_values(key, value, self.data[self.root][self.major[0]])", "def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = record\n return templates" ]
[ "0.6129451", "0.5694503", "0.5597461", "0.5589702", "0.5520051", "0.5466003", "0.5434218", "0.5390082", "0.5387614", "0.53731686", "0.53682715", "0.53276026", "0.5297058", "0.5292366", "0.52901614", "0.52713674", "0.5267604", "0.5235461", "0.52188444", "0.5214309", "0.5210592", "0.5207537", "0.51937157", "0.5185019", "0.5181983", "0.51777726", "0.5173789", "0.5155357", "0.5155086", "0.51331997" ]
0.7443054
0
Using the ONCat template to create the projection used by ONCat to return the full information
def create_oncat_projection_from_template(with_location=False, template={}):
    projection = []
    if with_location:
        projection = ['location']

    nbr_columns = len(template)
    for _col in np.arange(nbr_columns):
        projection.append(template[_col]['path'])

    return projection
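A small usage sketch for the projection helper above; the template entries are made-up examples shaped like the template_information dict built by isolate_relevant_information (index -> title/path/units/formula), and only 'path' is read by the helper:

import numpy as np  # the helper above relies on np.arange

# Hypothetical template entries.
template = {0: {'title': 'Run #', 'path': 'indexed.run_number',
                'units': '', 'formula': ''},
            1: {'title': 'Proton charge', 'path': 'metadata.entry.proton_charge',
                'units': 'C', 'formula': ''}}

print(create_oncat_projection_from_template(with_location=True, template=template))
# -> ['location', 'indexed.run_number', 'metadata.entry.proton_charge']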
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projection(self):\n pass", "def describe(self, template='projection_default.txt', engine='default'):\n raise NotImplementedError", "def output_projection(self):\n return self.projection(what='output')", "def makeProjection(self, variable, token, typed_token, constituent_dict):\n model = self.model\n typed_token_list = model.typed_tokens[typed_token].instances\n thing_list = []\n for label, thing in constituent_dict.items():\n if thing.genre == 'node':\n if thing.__dict__['class'] == 'node_interface':\n thing_list += ['']\n elif token in thing.tokens.keys():\n thing_list += thing.tokens[token]\n else:\n thing_list += ['']\n elif thing.genre == 'arc':\n if token == thing.token:\n thing_list += thing.typed_tokens\n else:\n thing_list += ['']\n mat = np.zeros((len(typed_token_list), len(thing_list)))\n for i, typed_token_instance in enumerate(typed_token_list):\n for j, thing in enumerate(thing_list):\n if typed_token_instance == thing:\n mat[i, j] = 1.\n return mat", "def __str__(self):\n s = \"Projection info:\\n\"\n s += \" #instances: \" + str(self.data_ninstances) + \"\\n\"\n s += \" data dimension: \" + str(self.data_dim) + \"\\n\"\n s += \" projection dimension: \" + str(self.projection_dim) + \"\\n\"\n s += \" data: \" + str(self.data[0]) + \"\\n\"\n s += \" \" + str(self.data[1]) + \"...\\n\"\n s += \" projection: \" + str(self.projection[0]) + \"\\n\"\n s += \" \" + str(self.projection[1]) + \"...\"\n return s", "def isolate_relevant_information(self):\n\n def get_formula(oncat_formula):\n \"\"\"will need to go from something like\n \"${value/10e11}`\"\n to something more pythonic\n \"{value/10e11}\"\"\"\n regular_expression = r'\\$(?P<formula>.+)\\`'\n m = re.search(regular_expression, oncat_formula)\n if m:\n return m.group('formula')\n else:\n return \"\"\n\n template_information = {}\n for _index, _element in enumerate(self._oncat_default_template):\n _title = _element[\"name\"]\n _path = _element[\"path\"]\n if \"units\" in _element:\n _units = _element[\"units\"]\n else:\n _units = \"\"\n if \"transform\" in _element:\n _formula = get_formula(_element[\"transform\"])\n else:\n _formula = \"\"\n template_information[_index] = {'title': _title,\n 'path': _path,\n 'units': _units,\n 'formula': _formula}\n self.template_information = template_information", "def projectionContext(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def projection(self):\n return [row[2:-2] for row in self[2:-2]]", "def Build(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Build(self, *args)", "def input_projection(self):\n return self.projection(what='input')", "def new_police_report(self):\n\n d = {'category':'',\n 'original_text':'',\n 'line_num':0,\n 'address':'',\n 'map_scale':mapscale.UNKNOWN,\n 'date_month':0,\n 'date_day':0,\n 'date_year':0,\n 'lat':'',\n 'long':''}\n\n return d", "def projection(self):\n return {comp_field_label: 1\n for comp_field_label in self.agg_spec.keys()}", "def get_projection(attrs):\n df = load_df()\n\n X = get_all_vectors(df, attrs)\n logger.info('- Data shape original: {}'.format(X.shape))\n\n X = X if isinstance(X, np.ndarray) else X.toarray()\n X = dimension_reduction(X, attrs['decomposition'], attrs['distanceMetric'])\n return X, df", "def _prepare_proj(self, x):\n b, l, d = x.size()\n return x.view(b, l, self.num_heads, self.d_head).transpose(1, 
2).contiguous().view(b * self.num_heads, l,\n self.d_head)", "def projections_input() -> str:\r\n recipe_list = [\"Organic Red Helles\", \"Organic Pilsner\",\r\n \"Organic Dunkel\"]\r\n return render_template(\"projection_input.html\", recipes=recipe_list)", "def info(self):\n tline = \"\"\n for (ii, projection) in enumerate(self._ProjectionList):\n tiltAngle = projection._tiltAngle\n transX = -projection._alignmentTransX\n transY = -projection._alignmentTransY\n rot = -(projection._alignmentRotation + 90.)\n mag = projection._alignmentMagnification\n tline = tline + (\"%3d: \" % ii)\n tline = tline + (\"%15s; \" % projection._filename)\n tline = tline + (\"tiltAngle=%9.3f; \" % tiltAngle)\n tline = tline + (\"transX=%9.3f; \" % transX)\n tline = tline + (\"transY=%9.3f; \" % transY)\n tline = tline + (\"rot=%9.3f; \" % rot)\n tline = tline + (\"mag=%9.3f\\n\" % mag)\n print(tline)", "def Projection(W, TYPE_PROJ = proj_l11ball, ETA = 100, AXIS = 0, ETA_STAR = 100, device = \"cpu\" ): \n \n #global TYPE_PROJ, ETA, ETA_STAR, AXIS, device \n if TYPE_PROJ == 'No_proj':\n W_new = W\n if (TYPE_PROJ == proj_l1ball or TYPE_PROJ == proj_l11ball or TYPE_PROJ == proj_l11ball_line ):\n W_new = TYPE_PROJ(W, ETA, device)\n if TYPE_PROJ == proj_l21ball or TYPE_PROJ == proj_l12ball:\n W_new = TYPE_PROJ(W, ETA, AXIS, device = device)\n if TYPE_PROJ == proj_nuclear:\n W_new = TYPE_PROJ(W, ETA_STAR, device=device)\n return W_new", "def exportOrgs ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n o = sqlQuery ( c, \"select * from Organizations;\" )\n for i in o:\n oL = sqlQuery ( c, \"select * from OrganizationLocations where orgID = '\"+i[0]+\"';\" )\n oER = sqlQuery ( c, \"select * from OrganizationExternalResources where orgID = '\"+i[0]+\"';\" )\n oTC = sqlQuery ( c, \"select * from OrganizationsToCrises where orgID = '\"+i[0]+\"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where orgID = '\"+i[0]+\"';\" )\n xml += openTagAtt ( \"Organization\", \"organizationIdent\", i[0])\n xml += openCloseTag ( \"Name\", i[1])\n xml += closeTagAtt ( \"Kind\", \"organizationKindIdent\", i[2])\n for j in oL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openCloseTag (\"History\", i[3])\n xml += openTag ( \"ContactInfo\" )\n xml += openCloseTag (\"Telephone\", i[4])\n xml += openCloseTag (\"Fax\", i[5])\n xml += openCloseTag (\"Email\", i[6])\n xml += openTag (\"PostalAddress\")\n xml += openCloseTag (\"StreetAddress\", i[7])\n xml += openCloseTag ( \"Locality\", i[8])\n xml += openCloseTag ( \"Region\", i[9])\n xml += openCloseTag ( \"PostalCode\", i[10])\n xml += openCloseTag ( \"Country\", i[11])\n xml += closeTag ( \"PostalAddress\" )\n xml += closeTag ( \"ContactInfo\" )\n xml += openTag (\"ExternalResources\")\n for j in oER:\n xml += openCloseTag ( j[1], j[2])\n xml += closeTag (\"ExternalResources\")\n xml += openTag (\"RelatedCrises\")\n for j in oTC:\n xml += closeTagAtt (\"RelatedCrisis\", \"crisisIdent\", j[1])\n xml += closeTag (\"RelatedCrises\")\n xml += openTag (\"RelatedPersons\")\n for j in pTO:\n xml += closeTagAtt (\"RelatedPerson\", \"personIdent\", j[0])\n xml += closeTag (\"RelatedPersons\")\n xml += closeTag (\"Organization\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def expand(self, model_pcoll):\n return (\n model_pcoll\n | 'Transforming the NDB models 
into Apache Beam entities' >> (\n beam.Map(job_utils.get_beam_entity_from_ndb_model))\n | 'Writing the NDB models to the datastore' >> (\n self.datastoreio.WriteToDatastore(feconf.OPPIA_PROJECT_ID))\n )", "def get_projection(self):\n return self.projection", "def get_image_information(client):\n\n pipeline = [{\"$match\": {\"camera_views\": {\"$exists\": 1}}}, {\"$unwind\": {\"path\": \"$camera_views\"}}, {\"$addFields\": {\n \"camera_views.average_linear_distance\": {\n \"$divide\": [\n \"$camera_views.total_linear_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.average_angular_distance\": {\n \"$divide\": [\n \"$camera_views.total_angular_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.timestamp\": \"$timestamp\",\n \"camera_views._id\": \"$_id\",\n \"camera_views.database\": client.database.name,\n \"camera_views.collection\": client.name,\n 'camera_views.file_id':\"$camera_views.images.file_id\", #Add the Color image id for downloading and testing\n }}, {\"$replaceRoot\": {\"newRoot\": \"$camera_views\"}}, {\"$project\": {\n \"_id\": 1,\n \"num_entities\": 1,\n \"average_linear_distance\": 1,\n \"average_angular_distance\": 1,\n \"timestamp\": 1,\n \"duplicate\": 1,\n \"database\":1,\n \"collection\":1,\n \"file_id\":{\"$arrayElemAt\":[\"$images.file_id\",0]}, # Only keep the first file id (The Color image)\n }}]\n pprint.pprint(pipeline)\n result = list(client.aggregate(pipeline))\n return result", "def car2car(map_car, template):\n\n project = template.copy()\n project.data = enmap.project(map_car.data, template.data.shape, template.data.wcs)\n return project", "def car_template(ncomp, ra0, ra1, dec0, dec1, res):\n\n if ncomp == 3:\n pre = (3,)\n else:\n pre = ()\n\n box = get_box(ra0, ra1, dec0, dec1)\n res = res * np.pi / (180 * 60)\n temp = so_map()\n shape, wcs = enmap.geometry(box, res=res, pre=pre)\n temp.data = enmap.zeros(shape, wcs=wcs, dtype=None)\n temp.pixel = \"CAR\"\n temp.nside = None\n temp.ncomp = ncomp\n temp.geometry = temp.data.geometry[1:]\n temp.coordinate = \"equ\"\n return temp", "def build_catalog_info(self, catalog_info):\n cat = SourceFactory.build_catalog(**catalog_info)\n catalog_info['catalog'] = cat\n # catalog_info['catalog_table'] =\n # Table.read(catalog_info['catalog_file'])\n catalog_info['catalog_table'] = cat.table\n catalog_info['roi_model'] =\\\n SourceFactory.make_fermipy_roi_model_from_catalogs([cat])\n catalog_info['srcmdl_name'] =\\\n self._name_factory.srcmdl_xml(sourcekey=catalog_info['catalog_name'])\n return CatalogInfo(**catalog_info)", "def _gen_cat_query(self,query_fields=None):\n if query_fields is None:\n object_id_fields = ['decals_id','brick_primary','brickid','ra','dec','gaia_pointsource']\n mag_fields = ['mag_g','mag_r','mag_z','mag_w1','mag_w2','mag_w3','mag_w4']\n snr_fields = ['snr_g','snr_r','snr_z','snr_w1','snr_w2','snr_w3','snr_w4']\n query_fields = object_id_fields+mag_fields+snr_fields\n \n database = \"ls_dr7.tractor\"\n self.query = dlsurvey._default_query_str(query_fields, database, self.coord, self.radius)", "def projection(self):\n return self.dataset.GetProjection() if self.dataset else None", "def apply_projection(projection, dataset):\n out = DatasetType(name=dataset.name, attributes=dataset.attributes)\n\n for var in projection:\n target, template = out, dataset\n while var:\n name, slice_ = var.pop(0)\n candidate = template[name]\n \n # apply slice\n if slice_:\n if isinstance(candidate, BaseType):\n candidate.data = candidate[slice_]\n elif isinstance(candidate, 
SequenceType):\n candidate = candidate[slice_[0]]\n elif isinstance(candidate, GridType):\n candidate = candidate[slice_]\n\n # handle structures\n if isinstance(candidate, StructureType):\n # add variable to target\n if name not in target.keys():\n if var:\n # if there are more children to add we need to clear the\n # candidate so it has only explicitly added children; \n # also, Grids are degenerated into Structures\n if isinstance(candidate, GridType):\n candidate = StructureType(candidate.name, candidate.attributes)\n candidate._keys = []\n target[name] = candidate\n target, template = target[name], template[name]\n else:\n target[name] = candidate\n\n # fix sequence data, including only variables that are in the sequence\n for seq in walk(out, SequenceType):\n seq.data = get_var(dataset, seq.id)[tuple(seq.keys())].data\n\n return out", "def annotations(self):\n annotations = {\"date\": self.date_trunc(\"usage_start\")}\n # { query_param: database_field_name }\n fields = self._mapper.provider_map.get(\"annotations\")\n for q_param, db_field in fields.items():\n annotations[q_param] = F(db_field)\n if (\n \"project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"and:project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"or:project\" in self.parameters.parameters.get(\"group_by\", {})\n ):\n annotations[\"project\"] = F(\"namespace\")\n\n return annotations", "def makeProjectionConversion(self, variable, token, typed_token,\n constituent_dict):\n model = self.model\n con_list = model.typed_tokens[typed_token].conversions\n thing_list = []\n for label, thing in constituent_dict.items():\n if thing.genre == 'node': # Only conversion in nodes\n if token in thing.tokens.keys() and thing.type == 'dynamic':\n try:\n thing_list += thing.injected_conversions[token]\n except: # HACK: I should be shot for doing this\n thing_list += [''] # Empty thing list\n else:\n thing_list += ['']\n mat = np.zeros((len(con_list), len(thing_list)))\n for i, token_conversion in enumerate(con_list):\n for j, thing in enumerate(thing_list):\n if token_conversion['label'] == thing:\n mat[i, j] = 1.\n return mat", "def getProjections(self): \n x, y, z = self.XYZCoordinate\n origin = self.SkeletonPoints[0]\n self.coorOrigin = origin\n self.XYProjections = [GeometryToolBox.projected_point(p, origin, x, y) for p in self.SkeletonPoints]\n self.XZProjections = [GeometryToolBox.projected_point(p, origin, x, z) for p in self.SkeletonPoints]" ]
[ "0.6503911", "0.61083275", "0.58378226", "0.53846645", "0.52837193", "0.52689284", "0.5229088", "0.52271664", "0.518275", "0.5146785", "0.51318693", "0.51146895", "0.5093305", "0.5028556", "0.50255054", "0.49924508", "0.49750137", "0.4933599", "0.49226946", "0.49170607", "0.4916366", "0.48552844", "0.48495683", "0.4846168", "0.48341677", "0.48237008", "0.48129472", "0.48112375", "0.4804322", "0.47952932" ]
0.67057735
0
Applies selected activation function to intermediate output.
def apply_activation(intermediate_output, intermediate_activation):
    if intermediate_activation is None:
        return intermediate_output

    if intermediate_activation == 'gelu':
        intermediate_output = nn.gelu(intermediate_output)
    elif intermediate_activation == 'relu':
        intermediate_output = nn.relu(intermediate_output)
    elif intermediate_activation == 'sigmoid':
        intermediate_output = nn.sigmoid(intermediate_output)
    elif intermediate_activation == 'softmax':
        intermediate_output = nn.softmax(intermediate_output)
    elif intermediate_activation == 'celu':
        intermediate_output = nn.celu(intermediate_output)
    elif intermediate_activation == 'elu':
        intermediate_output = nn.elu(intermediate_output)
    elif intermediate_activation == 'log_sigmoid':
        intermediate_output = nn.log_sigmoid(intermediate_output)
    elif intermediate_activation == 'log_softmax':
        intermediate_output = nn.log_softmax(intermediate_output)
    elif intermediate_activation == 'soft_sign':
        intermediate_output = nn.soft_sign(intermediate_output)
    elif intermediate_activation == 'softplus':
        intermediate_output = nn.softplus(intermediate_output)
    elif intermediate_activation == 'swish':
        intermediate_output = nn.swish(intermediate_output)
    elif intermediate_activation == 'tanh':
        intermediate_output = jnp.tanh(intermediate_output)
    else:
        raise NotImplementedError('%s activation function is not yet supported.'
                                  % intermediate_activation)
    return intermediate_output
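A minimal usage sketch for apply_activation above, assuming the `nn` it references is `jax.nn` (and `jnp` is `jax.numpy`); the input values are arbitrary:

import jax.numpy as jnp
from jax import nn  # assumed to be the `nn` module used by apply_activation

x = jnp.array([-1.0, 0.0, 1.0])
print(apply_activation(x, 'relu'))   # -> [0. 0. 1.]
print(apply_activation(x, 'tanh'))
print(apply_activation(x, None))     # returned unchanged when no activation is selected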
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_layer_activation(x):\n return x", "def uf_activate(self, output_reg):\n if len(self.inputs) is 2:\n self.two_activation(output_reg)\n elif len(self.inputs) is 3:\n self.three_activation(output_reg)\n else:\n self.large_activation(output_reg)", "def __call__(self, inputs):\n return self._hidden_activation(inputs)", "def activate(self, input_layer, funcname=None):\n if isinstance(funcname, tuple):\n funcname = funcname[0]\n params = funcname[1:]\n if funcname is None:\n funcname = self.activation_func\n if funcname == 'LINEAR':\n return input_layer\n activation_map = {\n 'RELU': tf.nn.relu,\n 'RELU6': tf.nn.relu6,\n 'ELU': tf.nn.elu,\n 'SIGMOID': tf.nn.sigmoid,\n 'TANH': tf.nn.tanh,\n 'LRELU': lambda x, name: tf.maximum(params[0]*x, x, name=name)\n }\n return activation_map[funcname](input_layer, name=funcname.lower())", "def compute_activation(self):\r\n\r\n x=0\r\n edges=self.in_edges\r\n for edge in edges:\r\n x+= edge.source.activation*edge.weight\r\n self.activation=1/(1+exp(-x))", "def _return_activation(x, nl):\n if nl == 'HS':\n x = Activation(_hard_swish)(x)\n if nl == 'RE':\n x = Activation(_relu6)(x)\n return x", "def _forward(z: np.array, W: np.array, b: np.array,\n activation: str) -> np.array:\n a = np.dot(z, W) + b\n if activation == 'sigmoid':\n return sigmoid(a)\n elif activation == 'identity':\n return identity(a)", "def __forward(self, A, W, b, activation_fn, output_layer=False):\n Z = np.dot(W, A) + b\n A_new = activation_fn(Z)\n D = np.ones_like(A_new) # Mask\n\n # Implement the Inverted Dropout Regularization\n if self.regularization == \"dropout\" and not output_layer:\n D = np.random.rand(A_new.shape[0], A_new.shape[1]) < self.keep_prob\n A_new = np.multiply(A_new, D) / self.keep_prob\n\n assert (Z.shape == (W.shape[0], A.shape[1]))\n assert (A_new.shape == (W.shape[0], A.shape[1]))\n\n cache = (A, W, b, Z, D)\n\n return A_new, cache", "def _hidden_activation(self, inputs):\n if self.act_enc is None:\n act_enc = lambda x: x\n else:\n act_enc = self.act_enc\n return act_enc(self._mappings(inputs))", "def activation_function(self, X):\n return self.net_input(X)", "def activation_function(self, X):\n return self.net_input(X)", "def linear_activation_calculation(A, W, b, activation_function):\n\n # Your code here\n return activation_function(linear_forward_calculation(A, W, b))\n # raise NotImplementedError", "def two_activation(self, output_reg):\n self._q_neuron.ccx(self.inputs[0], self.inputs[1], self._output[output_reg])\n self._q_neuron.cx(self.inputs[0], self._output[output_reg])\n self._q_neuron.cx(self.inputs[1], self._output[output_reg])", "def activation_function(self, x: np.array) -> np.array:\r\n\t\treturn self._activation_function(x)", "def three_activation(self, output_reg):\n self._q_neuron.ccx(self.inputs[0], self.inputs[1], self._output[output_reg])\n self._q_neuron.ccx(self.inputs[1], self.inputs[2], self._output[output_reg])\n self._q_neuron.ccx(self.inputs[0], self.inputs[2], self._output[output_reg])", "def encoder_activation_func(num_layer):\n ec_funct = []\n for i in range(num_layer):\n ec_funct.append('relu')\n ec_funct.append('softmax')\n\n return ec_funct", "def forward(W,X):\n return activation_func(np.dot(add_bias(X),W))", "def activation_func(activation, inplace=False):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=inplace)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.2, inplace=inplace)],\n ['selu', nn.SELU(inplace=inplace)],\n ['none', nn.Identity()]\n ])[activation]", "def apply_activation(self, tens):\n 
if(self.activation == \"ReLU\"): # pylint: disable=no-else-return\n return tf.nn.relu(tens)\n elif(self.activation == \"Leaky_ReLU\"):\n return tf.nn.leaky_relu(tens)\n elif(self.activation == \"Tanh\"):\n return tf.nn.tanh(tens)\n elif(self.activation == \"Sigmoid\"):\n return tf.nn.sigmoid(tens)\n elif(self.activation == \"Linear\"):\n return tens\n else:\n raise InvalidActivationError(self.activation)", "def forward(self, output):\n \n hidden_states = self.extract_hidden_states(output)\n \n # Obtaining the attention weights\n weighted_states = self.w1(hidden_states)\n activated_states = self.tanh(weighted_states)\n score_weights = self.w2(activated_states)\n attention_weights = self.softmax(score_weights)\n \n # Applying attention to the matrix with hidden states\n attentional_vector = torch.bmm(torch.transpose(attention_weights,2,1),hidden_states) \n attentional_vector = self.fc(torch.transpose(attentional_vector,2,1)).squeeze(2)\n attentional_vector = self._activation_fn(attentional_vector)\n \n return attentional_vector", "def ensure_no_activation_applied(output_without_activation, output_with_activation, constant_connections,\n current_layer, node_map):\n # Need to keep the values where the\n for connection in constant_connections[current_layer]:\n # Need to convert to their position in the layer. Minus one because of python indexing\n output_position_within_layer = node_map[connection.output_node] - 1\n # The output node position is the node which shouldn't have any activations applied. So we use all the\n # values from before the activation was applied\n output_with_activation[:, output_position_within_layer] = \\\n output_without_activation[\n :, output_position_within_layer]\n\n return output_with_activation", "def large_activation(self, output_reg):\n inps = list(combinations(self.inputs, 1))\n for inp in inps:\n self._q_neuron.x(inp[0])\n self._q_neuron.mct(self.inputs, self._output[output_reg], self._ancillas)\n self._q_neuron.x(inp[0])\n self._q_neuron.mct(self.inputs, self._output[output_reg], self._ancillas)", "def _feedforward(self, sample):\n output = sample\n for weight in xrange(len(self.weights)):\n output = self.activation_function(\n np.dot(output, self.weights[weight]))\n return output", "def activation(x):\n # return np.tanh(x)\n return np.maximum(0,x)", "def _return_activation(x, nl):\n if nl == 'HS':\n x = KL.Activation(_hard_swish)(x)\n if nl == 'RE':\n x = KL.ReLU(6.)(x)\n\n return x", "def feed_forward(self):\n self.hidden_activation = self._sigmoid(np.dot(self.input_activation, self.w1))\n self.output_activation = self._sigmoid(np.dot(self.hidden_activation, self.w2))", "def activation_function(X):\n\tz = np.sum(w*x+b)\n\treturn z", "def activation_func(activation:str):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]", "def _activation(self,components,activation):\r\n \r\n if activation == \"ReLU\":\r\n components.append(nn.ReLU())\r\n elif activation == \"Sigmoid\":\r\n components.append(nn.Sigmoid())\r\n else:\r\n raise Exception(\"Invalid activation fn: \"+activation)", "def forward(inputs,weights,function=sigmoid,step=-1):\n if step == 0:\n return inputs\n elif step == -1:\n step = len(weights) #go to output layer \n output = np.append(1, inputs)\n for i in range(step):\n output = np.append(1, function(np.dot(weights[i], output))) #calculating activation\n return output[1:]" ]
[ "0.7604806", "0.7194517", "0.6661993", "0.6523873", "0.64537185", "0.6431659", "0.64278513", "0.6370167", "0.63165534", "0.6285312", "0.6285312", "0.6214801", "0.6194766", "0.6191587", "0.6181813", "0.614133", "0.6128814", "0.6084095", "0.607957", "0.6078586", "0.60711646", "0.60320675", "0.6007484", "0.60049736", "0.59845054", "0.59764105", "0.59698856", "0.5969566", "0.59673834", "0.5958121" ]
0.7784417
0
Returns TF BERT config.
def get_tf_config(config_path):
    return modeling.BertConfig.from_json_file(config_path).__dict__
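A usage sketch, assuming `modeling` is the TF BERT modeling module imported elsewhere in this file; the config path is a hypothetical stand-in for a standard bert_config.json:

config = get_tf_config('/tmp/uncased_L-12_H-768_A-12/bert_config.json')
print(config['hidden_size'], config['num_hidden_layers'], config['num_attention_heads'])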
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bert_config(config):\n if config.model_size == \"large\":\n args = {\"hidden_size\": 1024, \"num_hidden_layers\": 24}\n elif config.model_size == \"base\":\n args = {\"hidden_size\": 768, \"num_hidden_layers\": 12}\n elif config.model_size == \"small\":\n args = {\"hidden_size\": 256, \"num_hidden_layers\": 12}\n else:\n raise ValueError(\"Unknown model size\", config.model_size)\n args[\"vocab_size\"] = config.vocab_size\n args.update(**config.model_hparam_overrides)\n # by default the ff size and num attn heads are determined by the hidden size\n args[\"num_attention_heads\"] = max(1, args[\"hidden_size\"] // 64)\n args[\"intermediate_size\"] = 4 * args[\"hidden_size\"]\n args.update(**config.model_hparam_overrides)\n return modeling.BertConfig.from_dict(args)", "def create_config(config_dir: str) -> configs.BertConfig:\n with tf.io.gfile.GFile(config_dir) as config_file:\n bert_config = json.load(config_file)\n return configs.BertConfig(**bert_config)", "def get_config():\n return CONFIG", "def get_config_template(self) -> cconfig.Config:", "def get_config_template() -> dict:\n return {\n VENE_PAYMENTS_BAMBORA_API_URL: (str, \"https://payform.bambora.com/pbwapi\"),\n VENE_PAYMENTS_BAMBORA_API_KEY: str,\n VENE_PAYMENTS_BAMBORA_API_SECRET: str,\n VENE_PAYMENTS_BAMBORA_PAYMENT_METHODS: list,\n }", "def config():\n return _config", "def get_config():\n return _config", "def config():", "def config():", "def _get_MindtPy_config():\n CONFIG = ConfigBlock('MindtPy')\n\n _add_common_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_bound_configs(CONFIG)\n _add_roa_configs(CONFIG)\n return CONFIG", "def get_config(self):\n config = {\n 'membership_transform': self.membership_transform,\n 'predictions_transform': self.predictions_transform,\n 'membership_kernel': self.membership_kernel,\n 'predictions_kernel': self.predictions_kernel,\n 'name': self.name,\n }\n config = {k: v for k, v in config.items() if v is not None}\n return self._serialize_config(config)", "def get_config(self):\n if self.faucet is not None:\n return self.faucet.get_config()\n return None", "def getConfig(self):\n pass", "def get_base_config():\n return dict(\n dim=768,\n ff_dim=3072,\n num_heads=12,\n num_layers=12,\n attention_dropout_rate=0.0,\n dropout_rate=0.1,\n representation_size=768,\n classifier='token'\n )", "def get_config(self):\n return self.cat_feats_cfg", "def config():\n return {\n \"CLEAN_OUTBOX\": \"TRUE\",\n \"COMPONENT_NAME\": \"testing-unpacker\",\n \"DEST_SITE\": \"WIPAC\",\n \"FILE_CATALOG_REST_TOKEN\": \"fake-file-catalog-token\",\n \"FILE_CATALOG_REST_URL\": \"http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"unpacking\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": \"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"completed\",\n \"PATH_MAP_JSON\": \"/tmp/lta/testing/path_map.json\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"NERSC\",\n \"UNPACKER_OUTBOX_PATH\": \"/tmp/lta/testing/unpacker/outbox\",\n \"UNPACKER_WORKBOX_PATH\": \"/tmp/lta/testing/unpacker/workbox\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }", "def _GetChangesForBert(config_sed_input):\n config_sed = config_sed_input\n\n 
config_sed.append((r'.*config_DGXA100_common\\.sh',\n (r'export CONT=mlperf-nvidia:language_model\\n'\n r'export NEXP=1')))\n config_sed.append((\n r'DATADIR=.*',\n r'DATADIR=\\/data\\/bert_data\\/hdf5\\/training-4320\\/hdf5_4320_shards_varlength'\n ))\n config_sed.append((\n r'DATADIR_PHASE2=.*',\n r'DATADIR_PHASE2=\\/data\\/bert_data\\/hdf5\\/training-4320\\/hdf5_4320_shards_varlength'\n ))\n config_sed.append(\n (r'EVALDIR=.*', r'EVALDIR=\\/data\\/bert_data\\/hdf5\\/eval_varlength'))\n config_sed.append(\n (r'CHECKPOINTDIR=.*', r'CHECKPOINTDIR=\\/data\\/bert_data\\/phase1'))\n config_sed.append((r'CHECKPOINTDIR_PHASE1=.*',\n r'CHECKPOINTDIR_PHASE1=\\/data\\/bert_data\\/phase1'))\n if BERT_BATCH_SIZE.value:\n config_sed.append((r'BATCHSIZE=.*', fr'BATCHSIZE={BERT_BATCH_SIZE.value}'))\n\n return config_sed", "def configuration():", "def config(ctx):\n return", "def get_config():\n app = NbConvertApp()\n app.load_config_file()\n return app.config", "def config(self) -> Dict[str, Any]:", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def get_config():\n return _CONFIG", "def get_config():\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth=True\n return config", "def get_config():\n return {'address': ADDRESS, 'https': HTTPS == 'https',\n 'password': PASSWORD, 'username': USERNAME,\n 'port': PORT, 'version': VERSION}", "def get_config():\n\n return json.loads(CONFIG_FILE.read_text())", "def config():\n return {\n \"COMPONENT_NAME\": \"testing-deleter\",\n \"DEST_SITE\": \"NERSC\",\n \"DISK_BASE_PATH\": \"/path/to/rucio/rse/root\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"detached\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": \"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"source-deleted\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"WIPAC\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }", "def _get_config():\n resp = requests.get(TRAEFIK_API_URL)\n if not resp.ok:\n raise Exception(\n \"Bad traefik response: %s %s\" % (resp.status_code, resp.text)\n )\n return resp.json()", "def load_bert(config: Config) -> Tuple[AutoModel, AutoTokenizer]:\n print(f\"Loading {config.bert_model}...\")\n\n base_bert_name = config.bert_model.split(\"/\")[-1]\n tokenizer_kwargs = config.tokenizer_kwargs.get(base_bert_name, {})\n if tokenizer_kwargs:\n print(f\"Using tokenizer kwargs: {tokenizer_kwargs}\")\n tokenizer = AutoTokenizer.from_pretrained(config.bert_model,\n **tokenizer_kwargs)\n\n model = AutoModel.from_pretrained(config.bert_model).to(config.device)\n\n print(\"Bert successfully loaded.\")\n\n return model, tokenizer", "def _get_config(self):\n return self.__config" ]
[ "0.66096175", "0.6400916", "0.6374486", "0.63508105", "0.6265168", "0.61243945", "0.6115964", "0.61093426", "0.61093426", "0.60153896", "0.601387", "0.5967145", "0.59554666", "0.59397674", "0.5929531", "0.59017366", "0.5877108", "0.5862486", "0.5856384", "0.585002", "0.58490485", "0.58326685", "0.5815562", "0.5812247", "0.5803848", "0.5798963", "0.57988304", "0.57696337", "0.57634085", "0.5759947" ]
0.7430698
0
Return tf mlperf model parameters in a dictionary format. Use get_tf_model_variables if using kerasBERT checkpoint. This function works
def get_mlperf_model_variables(config_path, init_checkpoint):
    # Load saved model configuration
    bert_config = modeling.BertConfig.from_json_file(config_path)
    seq_length = bert_config.max_position_embeddings
    tf_variables = {}
    max_predictions_per_seq = 76

    # Generate BERT TF model and initiate variable update from checkpoint
    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    with graph.as_default():
        input_ids = tf.zeros((4, seq_length), dtype=tf.int32)
        input_mask = tf.zeros((4, seq_length), dtype=tf.int32)
        segment_ids = tf.zeros((4, seq_length), dtype=tf.int32)
        masked_lm_positions = tf.zeros((4, max_predictions_per_seq), dtype=tf.int32)
        masked_lm_ids = tf.zeros((4, max_predictions_per_seq), dtype=tf.int32)
        masked_lm_weights = tf.zeros((4, max_predictions_per_seq), dtype=tf.float32)
        next_sentence_labels = tf.zeros((4), dtype=tf.int32)

        tf_model = modeling.BertModel(
            config=bert_config,
            is_training=True,
            input_ids=input_ids,
            input_mask=input_mask,
            token_type_ids=segment_ids,
            use_one_hot_embeddings=True)

        (masked_lm_loss, _, _) = get_masked_lm_output(
            bert_config, tf_model.get_sequence_output(),
            tf_model.get_embedding_table(), masked_lm_positions,
            masked_lm_ids, masked_lm_weights)

        (next_sentence_loss, _, _) = get_next_sentence_output(
            bert_config, tf_model.get_pooled_output(), next_sentence_labels)

        _ = masked_lm_loss + next_sentence_loss

        tvars = tf.trainable_variables()
        (assignment_map, _) = modeling.get_assignment_map_from_checkpoint(
            tvars, init_checkpoint)
        tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)
        sess.run(tf.initializers.global_variables())
        tvars_vals = sess.run(tvars)

    for var, val in zip(tvars, tvars_vals):
        tf_variables[var.name[:-2]] = val

    tf_config = bert_config.__dict__
    return tf_config, tf_variables, tf_model
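A usage sketch for the converter above; both paths are hypothetical stand-ins for a real MLPerf BERT config file and TF1 checkpoint prefix:

tf_config, tf_variables, _ = get_mlperf_model_variables(
    '/tmp/mlperf_bert/bert_config.json',
    '/tmp/mlperf_bert/tf1_ckpt/model.ckpt-28252')

print(tf_config['hidden_size'], tf_config['num_attention_heads'])
print(len(tf_variables), 'checkpoint tensors keyed by variable name')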
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe_model():\n train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n msg = [\"\"]\n total = 0\n for v in train_vars:\n shape = v.get_shape()\n ele = shape.num_elements()\n total += ele\n msg.append(\"{}: shape={}, dim={}\".format(\n v.name, shape.as_list(), ele))\n size_mb = total * 4 / 1024.0**2\n msg.append(colored(\n \"Total param={} ({:01f} MB assuming all float32)\".format(total, size_mb), 'cyan'))\n logger.info(colored(\"Model Parameters: \", 'cyan') + '\\n'.join(msg))", "def convert_mlperf_param_dict_to_jax(tf_params, emb_dim, num_heads):\n jax_params = {}\n # mapping between mlperf model and JAX model\n # works for model in //third_party/tensorflow_models/mlperf/models/rough/bert\n tf_key_to_jax_key = [\n ('cls/seq_relationship/', 'classification/predictions_transform_logits/'),\n ('output_weights', 'kernel'),\n ('transform_logits/output_bias', 'transform_logits/bias'),\n ('cls/predictions/', 'masked_lm/cls_predictions_'),\n ('transform/dense', 'transform_dense'),\n ('transform/LayerNorm', 'transform_layernorm'),\n ('predictions_output_bias', 'predictions_output_bias/bias'),\n ('bert/embeddings/word_embeddings', 'word_embeddings/embedding'),\n ('bert/', 'transformer_encoder/'),\n ('embeddings/token_type_embeddings', 'type_embeddings/embedding'),\n ('embeddings/position_embeddings', 'position_embeddings/embedding'),\n ('attention/self', 'self_attention'),\n ('attention/output', 'self_attention_output'),\n ('layer_norm/layer_norm_', 'layer_norm/'),\n ('output/LayerNorm', 'output_layer_norm'),\n ('intermediate/dense', 'intermediate'),\n ('output/dense', 'output'),\n ('pooler/dense/', 'pooler_transform/'),\n ('self_attention_output_layer_norm', 'self_attention_layer_norm'),\n ('embeddings/LayerNorm', 'embeddings_layer_norm'),\n ('encoder/layer', 'encoder_layer'),\n (':0', ''),\n ('beta', 'bias'),\n ('gamma', 'scale')\n ]\n for tf_key, val in tf_params.items():\n jax_key = tf_key\n for tf_name, jax_name in tf_key_to_jax_key:\n jax_key = jax_key.replace(tf_name, jax_name)\n\n # Reshape kernels if necessary\n jax_params[jax_key] = tf_params[tf_key]\n if 'self_attention_output/kernel' in jax_key:\n param = tf_params[tf_key]\n jax_params[jax_key] = param.reshape(\n (num_heads, -1, emb_dim))\n\n # jax position embedding kernel has additional dimension\n pos_embedding = jax_params[\n 'transformer_encoder/position_embeddings/embedding']\n jax_params[\n 'transformer_encoder/position_embeddings/embedding'] = pos_embedding[\n np.newaxis, ...]\n\n # convert flat param dict into nested dict using `/` as delimeter\n outer_dict = {}\n for key, val in jax_params.items():\n tokens = key.split('/')\n inner_dict = outer_dict\n # each token except the very last should add a layer to the nested dict\n for token in tokens[:-1]:\n if token not in inner_dict:\n inner_dict[token] = {}\n inner_dict = inner_dict[token]\n inner_dict[tokens[-1]] = val\n\n return outer_dict", "def get_parameters():\n \n tf.set_random_seed(1) #so that your \"random\" numbers match ours\n sess = tf.Session() \n #First let's load meta graph and restore weights\n saver = tf.train.import_meta_graph(ckpt_dir + '/trained_model.meta')\n saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))\n\n #print all tensor name\n #print([n.name for n in graph.as_graph_def().node]) \n #extract parameters from saved session\n W1 = sess.run(\"W1:0\")\n b1 = sess.run(\"b1:0\")\n W2 = sess.run(\"W2:0\")\n b2 = sess.run(\"b2:0\")\n W3 = sess.run(\"W3:0\")\n b3 = sess.run(\"b3:0\") \n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n 
\"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n \n return parameters, sess", "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n keys._learning_rate:self.learningRate,\n keys._momentum:self.momentum}\n\n return parameters", "def get_lstm_parameters_dictionary():\n\n parameters = dict()\n\n parameters[\"window size\"] = lstm_hyper_parameters.get_window_size()\n parameters[\"encoding_dimension\"] = lstm_hyper_parameters.get_encoding_dimension()\n parameters[\"activation\"] = lstm_hyper_parameters.get_activation()\n parameters[\"loss\"] = lstm_hyper_parameters.get_loss()\n parameters[\"optimizer\"] = lstm_hyper_parameters.get_optimizer()\n parameters[\"threshold percent\"] = lstm_hyper_parameters.get_threshold()\n parameters[\"epochs\"] = lstm_hyper_parameters.get_epochs()\n\n return parameters", "def get_tf_model_variables(config_path, init_checkpoint):\n # Load saved model configuration\n config = configs.BertConfig.from_json_file(config_path)\n\n # Generate BERT TF model and initiate variable update from checkpoint\n seq_len = 20\n _, tf_model = bert_models.squad_model(config, seq_len)\n checkpoint = tf.train.Checkpoint(model=tf_model)\n checkpoint.restore(init_checkpoint).assert_existing_objects_matched()\n\n tf_config = config.__dict__\n tf_variables = {v.name: v.numpy() for v in tf_model.variables}\n\n return tf_config, tf_variables, tf_model", "def get_model_params(self):\n params_dict = vars(self).copy()\n exclude_params = ['input_size',\n 'model',\n 'train_generator',\n 'val_generator',\n 'callbacks',\n 'save_to_dir',\n 'keras_logs_folder',\n 'samples_seen',\n 'params_filepath',\n 'session_number',\n 'params_file_name',\n 'weights_file_name',\n 'checkpoint_filename',\n 'curr_folder'\n ]\n\n for key in exclude_params:\n params_dict.pop(key)\n return params_dict", "def model_dict(self):\n model_dict = {}\n model_dict[\"model_type\"] = self.model_type\n model_dict[\"num_features\"] = self.num_features\n model_dict[\"num_classes\"] = self.num_classes\n model_dict[\"normalize\"] = self.normalize\n model_dict[\"reparam_mode\"] = self.reparam_mode\n model_dict[\"prior_mode\"] = self.prior_mode\n model_dict[\"struct_dropout_mode\"] = self.struct_dropout_mode\n model_dict[\"dropout\"] = self.dropout\n model_dict[\"latent_size\"] = self.latent_size\n model_dict[\"sample_size\"] = self.sample_size\n model_dict[\"num_layers\"] = self.num_layers\n model_dict[\"with_relu\"] = self.with_relu\n model_dict[\"val_use_mean\"] = self.val_use_mean\n model_dict[\"reparam_all_layers\"] = self.reparam_all_layers\n model_dict[\"state_dict\"] = to_cpu_recur(self.state_dict())\n return model_dict", "def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}", "def params_for_checkpoint(self):\n # We need real int types, not numpy.int64, not recognized by json\n # dumps.\n params = {\n 'nb_features_in': int(self.nb_features_in),\n 'nb_features_out': int(self.nb_features_out),\n 'key': self.key\n }\n return params", "def get_model_kwargs(parsed_args):\n parsed_args.model_name = parsed_args.model_name.lower()\n if parsed_args.model_name not in SUPPORTED_MODELS:\n raise ValueError(\"Model name must be in the set: {}\".format(SUPPORTED_MODELS))\n res = 
{'learning_rate': parsed_args.learning_rate}\n restore_ckpt_dir = parsed_args.restore_efficient_net_weights_from\n res[\"restore_ckpt_dir\"] = restore_ckpt_dir\n if parsed_args.lsd:\n res[\"rsd\"] = parsed_args.lsd\n res[\"feature_extractor_name\"] = parsed_args.feature_extractor_name\n res[\"l2\"] = parsed_args.l2\n res[\"final_layer_dropout_rate\"] = parsed_args.final_layer_dropout_rate\n res[\"label_smoothing\"] = parsed_args.label_smoothing\n if \"dice\" not in parsed_args.loss_name:\n res[\"dice\"] = False\n if parsed_args.sgd:\n res['optimizer'] = tf.train.GradientDescentOptimizer\n else:\n res['optimizer'] = partial(tf.train.AdamOptimizer, beta1=0)\n res['loss_name'] = parsed_args.loss_name\n res[\"n_rows\"] = parsed_args.image_size\n res[\"n_cols\"] = parsed_args.image_size\n return res", "def _get_current_training_params(self):\n params = {}\n params[\"lyap_relu_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.lyapunov_relu.state_dict())\n if not self.R_options.fixed_R:\n params[\"R_params\"] = self.R_options._variables.clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n params[\"controller_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.system.controller_network.\n state_dict())\n return params", "def get_default_model_params(self):\n\n model_params = {\n 'dropout_rate': 0.3,\n 'hidden_layer_size': 160,\n 'learning_rate': 0.01,\n 'minibatch_size': 64,\n 'max_gradient_norm': 0.01,\n 'num_heads': 1,\n 'stack_size': 1\n }\n\n return model_params", "def create_params():\n\n params = {\n # Optimizer parameters (for Adam)\n \"beta1\": 0.9,\n \"beta2\": 0.999,\n \"epsilon\": 1e-7,\n \"learning_rate\": 0.001,\n\n # Input pipeline parameters\n \"parallel_reads\": 8, # Number of parallel file\n # readers per host.\n \"train_dataset_path\": FLAGS.train_dataset_path, # Glob specifing TFRecord\n # files with tf.examples.\n \"eval_dataset_path\": FLAGS.eval_dataset_path, # Glob specifing TFRecord\n # files with tf.examples.\n\n # Training paramaeters\n \"global_batch_size\": 512, # Global batch size for training.\n \"eval_global_batch_size\": 512, # Global batch size for eval.\n \"train_epochs\": 5, # Number of times to run train/eval loop.\n \"steps_per_epoch\": 100, # Number of training steps per epoch.\n \"num_eval_steps\": 10, # Number of eval steps per epoch\n\n # TPU parameters\n \"gcp_project\": FLAGS.gcp_project, # Project TPU is in.\n \"tpu_zone\": FLAGS.tpu_zone, # GCE zone the TPU is in.\n \"tpu\": FLAGS.tpu, # Name of the TPU.\n \"iterations_per_loop\": 200, # Number of iterations per device\n # training loop.\n \"pipeline_execution\": False, # If True, speed up training by\n # overlaping embedding lookups with\n # dense layer computations. 
Embedding\n # lookups will be one step old.\n \"use_gradient_accumulation\": True, # If False, speed up training by\n # applying embedding optimizer in\n # batches smaller than global batch\n # size.\n \"use_tpu\": True, # If False, uses CPU to train.\n\n # Model parameters\n \"model_dir\": FLAGS.model_dir, # Directory in which to store checkpoints.\n \"model_layers\": [100, 75, 50], # Sizes of dense layers for model\n \"num_categories\": 10, # Number of output categories.\n \"table_1_dimension\": 128, # Dimension of embedding table 1.\n \"table_1_rows\": 100, # Number of feature values in table 1.\n \"table_2_dimension\": 256, # Dimension of embedding table 2.\n \"table_2_rows\": 1000, # Number of feature values in table 2.\n }\n\n tf.logging.info(\"Params: {}\".format(params))\n\n return params", "def params():\n return utils.Params('../experiments/base-model/params.json')", "def get_embedding_model_params(self, output_dict):\n output_dict['model_params'] = self.trained_model_params", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n\n p.Define('task_dict', None, 'dataset_name -> task params')\n p.Define('task_name', None, 'High level task name')\n p.Define('logdir', None, 'Log directory')\n p.Define('train_program', None, 'Train program params')\n p.Define('train_executions_per_eval', 1, '')\n p.Define('dataset_names', [], 'List of all dataset names.')\n p.Define('num_splits_per_client', None, '')\n\n p.Define('ml_perf', hyperparams.Params(), 'MlPerf configuration.')\n\n mlp = p.ml_perf\n mlp.Define('benchmark_name', None, 'Benchmark name for compliance log.')\n mlp.Define('decoder_metric_name', None,\n 'Name of the decoder metric to report for compliance log.')\n mlp.Define('decoder_metric_success_threshold', None,\n 'Benchmark run must exceed this value to succeed.')\n mlp.Define('max_steps_to_train', None,\n 'Maximum number of steps to reach target accuracy')\n mlp.Define('steps_per_epoch', None, 'Number of training steps per epoch.')\n mlp.Define('global_batch_size', None, 'Global batch size.')\n mlp.Define('max_sequence_length', None, 'Maximum sequence length.')\n mlp.Define('optimizer_name', None, 'Optimizer used.')\n mlp.Define('base_learning_rate', None, 'Base learning rate.')\n mlp.Define('warmup_steps', None, 'Number of warm-up steps.')\n\n return p", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n p.Define('task_dict', None, 'dataset_name -> task params')\n p.Define('task_name', None, 'High level task name')\n p.Define('logdir', None, 'Log directory')\n p.Define('train_program', None, 'Train program params')\n p.Define('train_executions_per_eval', 1, '')\n p.Define('eval_programs', [], 'List of eval program params.')\n p.Define('num_splits_per_client', None, '')\n p.Define('dataset_names', [], 'List of all dataset names.')\n p.Define('emails', [], 'List of emails to send metrics.')\n p.Define('summary_exporter', None, 'The summary exporter Params.')\n p.Define('async_postprocess', True,\n 'whether to CPU postprocess asynchronously with TPU train')\n p.Define(\n 'checkpoint_to_load', None,\n 'If set, the program will initially load from this checkpoint, '\n 'ignoring train_dir. 
Typically used for oneoff decode.')\n\n # TODO(blee): Clean these up.\n p.Define('ml_perf', hyperparams.Params(), 'MlPerf configuration.')\n mlp = p.ml_perf\n mlp.Define('submission_metadata', None,\n 'A dictionary of static submission metadata')\n mlp.Define('benchmark_name', None, 'Benchmark name for compliance log.')\n mlp.Define('steps_per_epoch', None, 'Number of training steps per epoch.')\n mlp.Define('decoder_metric_name', None,\n 'Name of the decoder metric to report for compliance log.')\n mlp.Define('decoder_metric_success_threshold', None,\n 'Benchmark run must exceed this value to succeed.')\n mlp.Define('max_steps_to_train', None,\n 'Maximum number of steps to reach target accuracy')\n return p", "def get_fixed_params(self):\n\n fixed_params = {\n 'total_time_steps': 40,\n 'num_encoder_steps': 39,\n 'num_epochs': 100,\n 'early_stopping_patience': 10,\n 'multiprocessing_workers': 2,\n }\n\n return fixed_params", "def get_lstm_new_model_parameters():\n\n return (\n lstm_hyper_parameters.get_window_size(),\n lstm_hyper_parameters.get_encoding_dimension(),\n lstm_hyper_parameters.get_activation(),\n lstm_hyper_parameters.get_loss(),\n lstm_hyper_parameters.get_optimizer(),\n lstm_hyper_parameters.get_threshold(),\n lstm_hyper_parameters.get_epochs()\n )", "def densenet_params(model_name):\n params_dict = {\n # Coefficients: growth_rate, num_init_features, res\n 'densenet121': (32, 64, 224),\n 'densenet161': (48, 96, 224),\n 'densenet169': (32, 64, 224),\n 'densenet201': (32, 64, 224),\n }\n return params_dict[model_name]", "def get_fixed_params():\n fixed_params = {\n 'total_time_steps': 8 * 24,\n 'num_encoder_steps': 7 * 24,\n 'num_epochs': 100,\n 'early_stopping_patience': 5,\n 'multiprocessing_workers': 5\n }\n return fixed_params", "def collect_params():\n\n collect_params_start_time = time.time()\n\n emb_ls = ['user_emb_w', 'item_emb_w', 'cate_emb_w']\n mlp_ls = ['fcn1/kernel', 'fcn2/kernel', 'fcn3/kernel', 'fcn3/bias', 'fcn1/bias', 'fcn2/bias']\n\n # collect input model sequence from niu_dir\n emb_dict_ls = []\n mlp_dict_ls = []\n for prev_num in reversed(range(train_config['seq_length'])):\n date_alias = 'date' + str(i - prev_num)\n alias = os.path.join('../IU/ckpts', train_config['niu_dir_name'], date_alias, 'Epoch*')\n restored_ckpt = search_ckpt(alias, mode=train_config['restored_ckpt_mode'])\n print('restored model {}: {}'.format(i - prev_num, restored_ckpt))\n emb_dict = {name: tf.train.load_checkpoint(restored_ckpt).get_tensor(name)\n for name, _ in tf.train.list_variables(restored_ckpt) if name in emb_ls}\n mlp_dict = {name: tf.train.load_checkpoint(restored_ckpt).get_tensor(name)\n for name, _ in tf.train.list_variables(restored_ckpt) if name in mlp_ls}\n emb_dict_ls.append(emb_dict)\n mlp_dict_ls.append(mlp_dict)\n\n # concat sequence for different parameters on the last axis\n emb_ls_dict_ = {}\n for k in emb_dict_ls[0].keys():\n for emb_dict in emb_dict_ls:\n if k not in emb_ls_dict_.keys():\n emb_ls_dict_[k] = np.expand_dims(emb_dict[k], axis=-1)\n else:\n emb_ls_dict_[k] = np.concatenate((emb_ls_dict_[k], np.expand_dims(emb_dict[k], axis=-1)), axis=-1)\n\n mlp_ls_dict_ = {}\n for k in mlp_dict_ls[0].keys():\n for mlp_dict in mlp_dict_ls:\n if k not in mlp_ls_dict_.keys():\n mlp_ls_dict_[k] = np.expand_dims(mlp_dict[k], axis=-1)\n else:\n mlp_ls_dict_[k] = np.concatenate((mlp_ls_dict_[k], np.expand_dims(mlp_dict[k], axis=-1)), axis=-1)\n\n # check that the shapes are correct\n for k in emb_ls_dict_.keys():\n print(k, np.shape(emb_ls_dict_[k]))\n for k in 
mlp_ls_dict_.keys():\n print(k, np.shape(mlp_ls_dict_[k]))\n\n print('collect params time elapsed: {}'.format(\n time.strftime('%H:%M:%S', time.gmtime(time.time() - collect_params_start_time))))\n\n return emb_ls_dict_, mlp_ls_dict_", "def model_fn(features, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"input_type_ids\"]\n # label_ids = features[\"label_ids\"]\n vocab = vocab_list\n vocab_size = len(vocab_list)\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n\n\n \n # TRAIN\n if not is_predicting:\n\n (loss, predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=FLAGS.use_tpu)\n\n # if mode == tf.estimator.ModeKeys.TRAIN:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n ## else:\n # return tf.estimator.EstimatorSpec(mode=mode,\n # loss=loss,\n # eval_metric_ops=eval_metrics)\n else:\n (predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n predictions = {\n 'probabilities': log_probs,\n 'predictions': predictions\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions, scaffold_fn=scaffold_fn)\n\n return output_spec if use_tpu else output_spec.as_estimator_spec()", "def get_model_parameters(self, *model_parameters):\r\n\r\n parameters = {k: v for k, v in zip(self.model_parameter_names, model_parameters)}\r\n\r\n return parameters", "def model_2_parameters(num_features, num_classes):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n \n return parameters", "def extractModelParam(self):\n copasi_filename = self.genPathCopasi(\"extractor\")\n self.recentModel = model.loada(self.antString, copasi_filename)\n return self.recentModel.parameters.copy().squeeze().to_dict()", "def model_fn(features, labels, mode, params):\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation 
metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def get_params(self):\n return {\n 'dropout': self._dropout,\n 'layer_size': self._layer_size,\n 'num_layers': self._num_layers,\n 'embedding_layer_size': self._embedding_layer_size,\n 'controller_type': self._controller_type\n }" ]
[ "0.65184647", "0.65112454", "0.638817", "0.63505626", "0.6223993", "0.6148675", "0.6122482", "0.6062248", "0.6055513", "0.6027013", "0.600799", "0.59653383", "0.59652144", "0.5957283", "0.59522676", "0.5892172", "0.58268005", "0.5810189", "0.58081865", "0.57768404", "0.57738113", "0.57725066", "0.576187", "0.57617754", "0.5758887", "0.57571816", "0.57444537", "0.5732309", "0.57299453", "0.5728798" ]
0.7301645
0
Convert TF BERT model config to be compatible with JAX BERT model.
def convert_tf_config_to_jax_bert(config): unnecessary_keys = ['initializer_range', 'backward_compatible', 'embedding_size'] for key in unnecessary_keys: if key in config: config.pop(key) # change TF parameter names to match JAX parameter names mapping = { 'attention_dropout_rate': 'attention_probs_dropout_prob', 'hidden_activation': 'hidden_act', 'dropout_rate': 'hidden_dropout_prob', 'emb_dim': 'hidden_size', 'mlp_dim': 'intermediate_size', 'max_len': 'max_position_embeddings', 'num_heads': 'num_attention_heads', 'num_layers': 'num_hidden_layers' } for jax_key, tf_key in mapping.items(): config[jax_key] = config.pop(tf_key) return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bert_config(config):\n if config.model_size == \"large\":\n args = {\"hidden_size\": 1024, \"num_hidden_layers\": 24}\n elif config.model_size == \"base\":\n args = {\"hidden_size\": 768, \"num_hidden_layers\": 12}\n elif config.model_size == \"small\":\n args = {\"hidden_size\": 256, \"num_hidden_layers\": 12}\n else:\n raise ValueError(\"Unknown model size\", config.model_size)\n args[\"vocab_size\"] = config.vocab_size\n args.update(**config.model_hparam_overrides)\n # by default the ff size and num attn heads are determined by the hidden size\n args[\"num_attention_heads\"] = max(1, args[\"hidden_size\"] // 64)\n args[\"intermediate_size\"] = 4 * args[\"hidden_size\"]\n args.update(**config.model_hparam_overrides)\n return modeling.BertConfig.from_dict(args)", "def get_tf_config(config_path):\n return modeling.BertConfig.from_json_file(config_path).__dict__", "def convert_from_config(config):\n\n if isinstance(config, str):\n yamlConfig = parse_yaml_config(config)\n else:\n yamlConfig = config\n\n model = None\n if 'OnnxModel' in yamlConfig:\n if __onnx_enabled__:\n model = onnx_to_hls(yamlConfig)\n else:\n raise Exception(\"ONNX not found. Please install ONNX.\")\n elif 'PytorchModel' in yamlConfig:\n if __pytorch_enabled__:\n model = pytorch_to_hls(yamlConfig)\n else:\n raise Exception(\"PyTorch not found. Please install PyTorch.\")\n else:\n model = keras_to_hls(yamlConfig)\n\n return model", "def convert(encoder, bert_model):\n num_layers = encoder._config[\"num_layers\"]\n num_attention_heads = encoder._config[\"num_attention_heads\"]\n hidden_size = encoder._config[\"hidden_size\"]\n head_size = hidden_size // num_attention_heads\n assert head_size * num_attention_heads == hidden_size\n encoder._embedding_layer.set_weights(\n [bert_model[\"embeddings.word_embeddings.weight\"]])\n encoder._embedding_norm_layer.set_weights([\n bert_model[\"embeddings.LayerNorm.weight\"],\n bert_model[\"embeddings.LayerNorm.bias\"]\n ])\n encoder._type_embedding_layer.set_weights(\n [bert_model[\"embeddings.token_type_embeddings.weight\"]])\n encoder._position_embedding_layer.set_weights(\n [bert_model[\"embeddings.position_embeddings.weight\"]])\n for layer_num in range(num_layers):\n encoder._transformer_layers[\n layer_num]._attention_layer._key_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.key.weight\"].T\n .reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.key.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._query_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.query.weight\"]\n .T.reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.query.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._value_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.value.weight\"]\n .T.reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.value.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._output_dense.set_weights([\n bert_model[\n f\"encoder.layer.{layer_num}.attention.output.dense.weight\"].T\n .reshape((num_attention_heads, head_size, hidden_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.output.dense.bias\"]\n ])\n 
encoder._transformer_layers[layer_num]._attention_layer_norm.set_weights([\n bert_model[\n f\"encoder.layer.{layer_num}.attention.output.LayerNorm.weight\"],\n bert_model[f\"encoder.layer.{layer_num}.attention.output.LayerNorm.bias\"]\n ])\n encoder._transformer_layers[layer_num]._intermediate_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.intermediate.dense.weight\"].T,\n bert_model[f\"encoder.layer.{layer_num}.intermediate.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._output_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.output.dense.weight\"].T,\n bert_model[f\"encoder.layer.{layer_num}.output.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._output_layer_norm.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.output.LayerNorm.weight\"],\n bert_model[f\"encoder.layer.{layer_num}.output.LayerNorm.bias\"]\n ])", "def normalize_config(config):\n return json.loads(json.dumps(config, cls=NumpyEncoder))", "def convert_checkpoint(huggingface_model_name_or_path, output_path):\n output_dir, _ = os.path.split(output_path)\n tf.io.gfile.makedirs(output_dir)\n\n huggingface_bert_model, huggingface_bert_config = _get_huggingface_bert_model_and_config(\n huggingface_model_name_or_path)\n encoder = _create_fffner_model(huggingface_bert_config)\n sequence_length = 128\n batch_size = 2\n word_id_data = np.random.randint(\n 10, size=(batch_size, sequence_length), dtype=np.int32)\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length), dtype=np.int32)\n type_id_data = np.random.randint(\n 2, size=(batch_size, sequence_length), dtype=np.int32)\n is_entity_token_pos = np.zeros((batch_size, 1), dtype=np.int32)\n entity_type_token_pos = np.ones((batch_size, 1), dtype=np.int32)\n inputs = {\n \"input_word_ids\": word_id_data,\n \"input_mask\": mask_data,\n \"input_type_ids\": type_id_data,\n \"is_entity_token_pos\": is_entity_token_pos,\n \"entity_type_token_pos\": entity_type_token_pos,\n }\n encoder(inputs)\n convert(encoder, huggingface_bert_model)\n tf.train.Checkpoint(encoder=encoder).write(output_path)", "def create_config(config_dir: str) -> configs.BertConfig:\n with tf.io.gfile.GFile(config_dir) as config_file:\n bert_config = json.load(config_file)\n return configs.BertConfig(**bert_config)", "def load_bert(config: Config) -> Tuple[AutoModel, AutoTokenizer]:\n print(f\"Loading {config.bert_model}...\")\n\n base_bert_name = config.bert_model.split(\"/\")[-1]\n tokenizer_kwargs = config.tokenizer_kwargs.get(base_bert_name, {})\n if tokenizer_kwargs:\n print(f\"Using tokenizer kwargs: {tokenizer_kwargs}\")\n tokenizer = AutoTokenizer.from_pretrained(config.bert_model,\n **tokenizer_kwargs)\n\n model = AutoModel.from_pretrained(config.bert_model).to(config.device)\n\n print(\"Bert successfully loaded.\")\n\n return model, tokenizer", "def convert_mlperf_param_dict_to_jax(tf_params, emb_dim, num_heads):\n jax_params = {}\n # mapping between mlperf model and JAX model\n # works for model in //third_party/tensorflow_models/mlperf/models/rough/bert\n tf_key_to_jax_key = [\n ('cls/seq_relationship/', 'classification/predictions_transform_logits/'),\n ('output_weights', 'kernel'),\n ('transform_logits/output_bias', 'transform_logits/bias'),\n ('cls/predictions/', 'masked_lm/cls_predictions_'),\n ('transform/dense', 'transform_dense'),\n ('transform/LayerNorm', 'transform_layernorm'),\n ('predictions_output_bias', 'predictions_output_bias/bias'),\n ('bert/embeddings/word_embeddings', 'word_embeddings/embedding'),\n ('bert/', 
'transformer_encoder/'),\n ('embeddings/token_type_embeddings', 'type_embeddings/embedding'),\n ('embeddings/position_embeddings', 'position_embeddings/embedding'),\n ('attention/self', 'self_attention'),\n ('attention/output', 'self_attention_output'),\n ('layer_norm/layer_norm_', 'layer_norm/'),\n ('output/LayerNorm', 'output_layer_norm'),\n ('intermediate/dense', 'intermediate'),\n ('output/dense', 'output'),\n ('pooler/dense/', 'pooler_transform/'),\n ('self_attention_output_layer_norm', 'self_attention_layer_norm'),\n ('embeddings/LayerNorm', 'embeddings_layer_norm'),\n ('encoder/layer', 'encoder_layer'),\n (':0', ''),\n ('beta', 'bias'),\n ('gamma', 'scale')\n ]\n for tf_key, val in tf_params.items():\n jax_key = tf_key\n for tf_name, jax_name in tf_key_to_jax_key:\n jax_key = jax_key.replace(tf_name, jax_name)\n\n # Reshape kernels if necessary\n jax_params[jax_key] = tf_params[tf_key]\n if 'self_attention_output/kernel' in jax_key:\n param = tf_params[tf_key]\n jax_params[jax_key] = param.reshape(\n (num_heads, -1, emb_dim))\n\n # jax position embedding kernel has additional dimension\n pos_embedding = jax_params[\n 'transformer_encoder/position_embeddings/embedding']\n jax_params[\n 'transformer_encoder/position_embeddings/embedding'] = pos_embedding[\n np.newaxis, ...]\n\n # convert flat param dict into nested dict using `/` as delimeter\n outer_dict = {}\n for key, val in jax_params.items():\n tokens = key.split('/')\n inner_dict = outer_dict\n # each token except the very last should add a layer to the nested dict\n for token in tokens[:-1]:\n if token not in inner_dict:\n inner_dict[token] = {}\n inner_dict = inner_dict[token]\n inner_dict[tokens[-1]] = val\n\n return outer_dict", "def _GetChangesForBert(config_sed_input):\n config_sed = config_sed_input\n\n config_sed.append((r'.*config_DGXA100_common\\.sh',\n (r'export CONT=mlperf-nvidia:language_model\\n'\n r'export NEXP=1')))\n config_sed.append((\n r'DATADIR=.*',\n r'DATADIR=\\/data\\/bert_data\\/hdf5\\/training-4320\\/hdf5_4320_shards_varlength'\n ))\n config_sed.append((\n r'DATADIR_PHASE2=.*',\n r'DATADIR_PHASE2=\\/data\\/bert_data\\/hdf5\\/training-4320\\/hdf5_4320_shards_varlength'\n ))\n config_sed.append(\n (r'EVALDIR=.*', r'EVALDIR=\\/data\\/bert_data\\/hdf5\\/eval_varlength'))\n config_sed.append(\n (r'CHECKPOINTDIR=.*', r'CHECKPOINTDIR=\\/data\\/bert_data\\/phase1'))\n config_sed.append((r'CHECKPOINTDIR_PHASE1=.*',\n r'CHECKPOINTDIR_PHASE1=\\/data\\/bert_data\\/phase1'))\n if BERT_BATCH_SIZE.value:\n config_sed.append((r'BATCHSIZE=.*', fr'BATCHSIZE={BERT_BATCH_SIZE.value}'))\n\n return config_sed", "def create_model(max_seq_len, adapter_size=64):\n\n # adapter_size = 64 # see - arXiv:1902.00751\n\n # create the bert layer\n with tf.io.gfile.GFile(bert_config_file, \"r\") as reader:\n bc = StockBertConfig.from_json_string(reader.read())\n bert_params = map_stock_config_to_params(bc)\n bert_params.adapter_size = adapter_size\n bert = BertModelLayer.from_params(bert_params, name=\"bert\")\n\n input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"input_ids\")\n # token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"token_type_ids\")\n # output = bert([input_ids, token_type_ids])\n output = bert(input_ids)\n\n print(\"bert shape\", output.shape)\n cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(output)\n cls_out = keras.layers.Dropout(0.5)(cls_out)\n logits = keras.layers.Dense(units=1024, activation=\"tanh\")(cls_out)\n logits = 
keras.layers.Dropout(0.5)(logits)\n logits = keras.layers.Dense(units=2, activation=\"softmax\")(logits)\n\n # model = keras.Model(inputs=[input_ids, token_type_ids], outputs=logits)\n # model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])\n model = keras.Model(inputs=input_ids, outputs=logits)\n model.build(input_shape=(None, max_seq_len))\n\n # load the pre-trained model weights\n load_stock_weights(bert, bert_ckpt_file)\n\n # freeze weights if adapter-BERT is used\n if adapter_size is not None:\n freeze_bert_layers(bert)\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[keras.metrics.SparseCategoricalAccuracy(name=\"acc\")])\n\n model.summary()\n\n return model", "def get_bert_clf():\n model = tf.keras.models.model_from_json(get_object('distilbert_model.json', 'r'))\n model.load_weights(model_dir/'distilbert_weights.hdf5')\n return model", "def build_model(cls, args, task):\n # print(\"In build_model !!!\")\n default_architecture(args)\n assert args.load_hf_bert_from != ''\n encoder = HuggingFaceBertEncoder(args, task.dictionary)\n\n return cls(args, encoder, task)", "def _make_bert_compatifier(do_masking):\n\n def bert_compatibility(data):\n # data['input_word_ids'] = data.pop('maybe_masked_input_ids')\n # data['input_mask'] = data.pop('token_mask')\n\n if do_masking:\n x = {\n 'input_word_ids': data['maybe_masked_input_ids'],\n 'input_mask': data['op_token_mask'],\n 'input_type_ids': tf.zeros_like(data['op_token_mask']), # segment ids\n 'masked_lm_positions': data['masked_lm_positions'],\n 'masked_lm_ids': data['masked_lm_ids'],\n 'masked_lm_weights': data['masked_lm_weights'],\n # next_sentence_label = 1 if instance.is_random_next else 0\n 'next_sentence_labels': tf.constant([0], tf.int32)\n }\n\n # y = data['masked_lm_weights']\n\n else:\n x = {\n 'input_word_ids': data['maybe_masked_input_ids'],\n 'input_mask': data['op_token_mask'],\n 'input_type_ids': tf.zeros_like(data['op_token_mask']), # segment ids\n }\n\n y = {'outcome': data['outcome'], 'treatment': data['treatment'],\n 'in_dev': data['in_dev'], 'in_test': data['in_test'], 'in_train': data['in_train'],\n 'y0': data['y0'], 'y1': data['y1'],\n 'index': data['index'], 'subreddit':data['subreddit']}\n\n return x, y\n\n return bert_compatibility", "def get_model_config(model_name, args):\n if model_name == 'Tacotron2':\n model_config = dict(\n # optimization\n mask_padding=args.mask_padding,\n # audio\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=args.n_symbols,\n symbols_embedding_dim=args.symbols_embedding_dim,\n # encoder\n encoder_kernel_size=args.encoder_kernel_size,\n encoder_n_convolutions=args.encoder_n_convolutions,\n encoder_embedding_dim=args.encoder_embedding_dim,\n # attention\n attention_rnn_dim=args.attention_rnn_dim,\n attention_dim=args.attention_dim,\n # attention location\n attention_location_n_filters=args.attention_location_n_filters,\n attention_location_kernel_size=args.attention_location_kernel_size,\n # decoder\n n_frames_per_step=args.n_frames_per_step,\n decoder_rnn_dim=args.decoder_rnn_dim,\n prenet_dim=args.prenet_dim,\n max_decoder_steps=args.max_decoder_steps,\n gate_threshold=args.gate_threshold,\n p_attention_dropout=args.p_attention_dropout,\n p_decoder_dropout=args.p_decoder_dropout,\n # postnet\n postnet_embedding_dim=args.postnet_embedding_dim,\n postnet_kernel_size=args.postnet_kernel_size,\n postnet_n_convolutions=args.postnet_n_convolutions,\n 
decoder_no_early_stopping=args.decoder_no_early_stopping\n )\n return model_config\n elif model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n else:\n raise NotImplementedError(model_name)", "def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps):\n\n def model_fn(features, labels, mode, params):\n \"\"\"this is prototype syntax, all parameters are necessary.\"\"\"\n # obtain the data\n _info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features['input_ids'] # [batch_size, seq_length]\n input_mask = features['input_mask'] # [batch_size, seq_length]\n\n # if mode != tf.estimator.ModeKeys.PREDICT:\n # # segment_idx = features['segment_dis']\n # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer\n # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels\n # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask\n # # next_sentence_labels = features['next_sentence_labels']\n # else:\n masked_lm_positions = features['masked_lm_positions']\n masked_lm_ids = features['masked_lm_ids']\n masked_lm_weights = features['masked_lm_weights']\n\n if bert_config.train_type == 'seq2seq':\n _info('Training seq2seq task.')\n elif bert_config.train_type == 'lm':\n _info('Training language model task.')\n \n # build model\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask)\n \n # compute loss\n loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,\n model.get_sequence_output(),\n model.embedding_table,\n model.projection_table,\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n mode)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN: \n # restore from the checkpoint,\n # tf.estimator automatically restore from the model typically,\n # maybe here is for restore some pre-trained parameters\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n _info('*** Trainable Variables ***')\n for var in tvars:\n init_string = ''\n if var.name in initialized_variable_names:\n init_string = ', *INIT_FROM_CKPT*'\n _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))\n \n train_op = optimization.create_optimizer(\n loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)\n\n # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,\n # tf.train.get_or_create_global_step(),\n # num_train_steps,\n # end_learning_rate=0.0,\n # power=1.0,\n # cycle=False)\n # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n # 
gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)\n # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)\n \n def metric_fn(loss, label_ids, logits, is_real_example):\n \"\"\"\n Args:\n loss: tf.float32.\n label_ids: [b, s].\n logits: [b, s, v].\n \"\"\"\n # [b * s, v]\n logits = tf.reshape(logits, [-1, logits.shape[-1]])\n # [b * s, 1]\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n # [b * s]\n label_ids = tf.reshape(label_ids, [-1])\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=loss)\n return {'eval_accuracy': accuracy, 'eval_loss': loss}\n \n eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)\n output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)\n\n return output_spec\n \n return model_fn", "def __init__(self, config: BertConfig):\r\n super().__init__(config)\r\n ### YOUR CODE HERE\r\n self.num_labels = config.num_labels # [0, 1] (start or end)\r\n self.bert = BertModel(config)\r\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # TODO: Not a separate FFN ? (For Start_FFN and End_FFN)\r\n\r\n ### END YOUR CODE\r\n\r\n # Don't forget initializing the weights\r\n self.init_weights()", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec\n\n return model_fn", "def load_model(config, batchmanager):\n \n # this function returns a dictionary mapping\n # name of the task (string) --> number of classes in the task (int)\n tasks = batchmanager.getTasksWithNClasses()\n # this \"tasks\" object is used to initialize the model (with the right output layers)\n model = MultiTaskBERT(device = config.device, tasks = tasks)\n\n if not config.untrained_baseline:\n\n # if we evaluate only, model MUST be loaded.\n if config.k_shot_only:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--k_shot_only` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n raise Exception()\n \n # if we saved the state dictionary, load it.\n elif config.resume:\n try :\n 
model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--resume` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n else:\n if os.path.exists(path_to_dicts(config)):\n print(f\"WARNING: `--resume` flag was NOT passed, but `{path_to_dicts(config)}` was found!\") \n\n return model", "def bert_module_fn(is_training):\n\n input_ids = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_ids\")\n input_mask = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_mask\")\n token_type = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"segment_ids\")\n\n config = modeling.BertConfig.from_json_file(config_path)\n model = modeling.BertModel(config=config, is_training=is_training,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type)\n \n seq_output = model.all_encoder_layers[seq_layer]\n tok_output = model.all_encoder_layers[tok_layer]\n pool_output = model.get_pooled_output()\n\n config_file = tf.constant(value=config_path, dtype=tf.string, name=\"config_file\")\n vocab_file = tf.constant(value=vocab_path, dtype=tf.string, name=\"vocab_file\")\n lower_case = tf.constant(do_lower_case)\n\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, vocab_file)\n \n input_map = {\"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": token_type}\n \n output_map = {\"pooled_output\": pool_output,\n \"sequence_output\": seq_output,\n \"token_output\": tok_output}\n\n output_info_map = {\"vocab_file\": vocab_file,\n \"do_lower_case\": lower_case}\n \n hub.add_signature(name=\"tokens\", inputs=input_map, outputs=output_map)\n hub.add_signature(name=\"tokenization_info\", inputs={}, outputs=output_info_map)", "def load_model(config, bm):\n\n trainable_layers = [9, 10, 11]\n assert min(trainable_layers) >= 0 and max(trainable_layers) <= 11 # BERT has 12 layers!\n model = FineTunedBERT(device = config.device, n_classes = len(bm.classes()), trainable_layers = trainable_layers)\n\n # if we saved the state dictionary, load it.\n if config.resume:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--resume` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n else:\n if os.path.exists(path_to_dicts(config)):\n print(f\"WARNING: `--resume` flag was NOT passed, but `{path_to_dicts(config)}` was found!\") \n\n return model", "def base_model_config():\n return {\n # TFRecord file pattern containing Example protos.\n \"input_file_pattern\": \"\",\n\n # Number of examples to keep in the input queue.\n \"input_queue_capacity\": 5 * 640000, # 5 shards of the BookCorpus.\n\n # Number of threads for prefetching TFRecord values.\n \"num_input_reader_threads\": 1,\n\n # Whether to shuffle the input data.\n \"shuffle_input_data\": True,\n\n # Scale of the random uniform initializer.\n \"uniform_init_scale\": 0.1,\n\n # Number of unique words in the vocab.\n \"vocab_size\": 20000,\n\n # Batch size (training and evaluation only).\n \"batch_size\": 128,\n\n # Word embedding dimension.\n \"word_embedding_dim\": 620,\n\n # Whether to use a bidirectional or unidirectional encoder RNN.\n \"bidirectional_encoder\": False,\n\n # Number of output dimensions of the sentence encoder.\n \"encoder_dim\": 2400,\n\n # Operation for combining the final 
states of the encoder GRU\n \"pooling_operation\": \"last\",\n }", "def model_fn_builder(bert_config, \n init_checkpoint, \n layer_indexes, \n use_tpu,\n use_one_hot_embeddings):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, \n init_checkpoint)\n if use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold() \n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\"unique_id\": unique_ids}\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, \n predictions=predictions, \n scaffold_fn=scaffold_fn)\n return output_spec\n \n\n return model_fn", "def override_config(args):\n args.transformer_enc_config = (\n \"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3\"\n )", "def override_config(args):\n args.transformer_enc_config = (\n \"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3\"\n )", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n input_mask = features[\"input_mask\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n is_prediction = (mode == tf.estimator.ModeKeys.PREDICT)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings, is_prediction)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == 
tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, False)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=per_example_loss)\n eval_metrics = {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n export_outputs={'predict': tf.estimator.export.PredictOutput(outputs=probabilities)}\n )\n return output_spec\n\n return model_fn", "def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n logits = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"prediction\": tf.argmax(logits, axis=-1),\n }\n output_spec = tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n else:\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=3, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n label = features[\"label\"]\n\n loss = compute_loss(logits, label)\n predicted_classes = tf.argmax(logits, axis=-1)\n accuracy = tf.metrics.accuracy(labels=label, predictions=predicted_classes, name='acc_op')\n\n # global global_acc_list\n # global_acc_list.append(accuracy)\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(mode, 
loss=loss, eval_metric_ops=metrics)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def build_bert(self, verbose=True):\r\n # bert inputs\r\n bert_word_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name=\"bert_word_input\")\r\n bert_mask_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name='bert_mask_input')\r\n bert_segment_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name=\"bert_segment_input\")\r\n \r\n inputs = [bert_word_ids, bert_mask_ids, bert_segment_ids]\r\n\r\n bert_out = BertLayer(n_fine_tune_layers=self._params.n_fine_tune_layers, bert_path=self._params.bert_path, name=\"bert_layer\")([bert_word_ids, bert_mask_ids, bert_segment_ids])\r\n\r\n features = bert_out\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n\r\n features = Concatenate(name=\"bert_and_dict_features\")([features, dict_embeddings])\r\n\r\n z = Dense(self._params.fc_dim, activation='relu', name=\"fc_dense\")(features)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n\r\n # It is recommended that you use this optimizer for fine tuning, since this\r\n # is how the model was trained (note that the Adam m/v variables are NOT\r\n # loaded from init_checkpoint.)\r\n optimizer = AdamWeightDecayOptimizer(\r\n learning_rate=1e-5,\r\n weight_decay_rate=0.01,\r\n beta_1=0.9,\r\n beta_2=0.999,\r\n epsilon=1e-6,\r\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\r\n \r\n model.compile(loss=loss, optimizer=optimizer)\r\n\r\n self.model = model", "def model_fn_builder(\n bert_config,\n num_labels,\n init_checkpoint,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu,\n use_one_hot_embeddings,\n layer_indexes,\n):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n 
use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (\n assignment_map,\n initialized_variable_names,\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu\n )\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=total_loss, train_op=train_op, scaffold=scaffold_fn\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, probabilities, is_real_example):\n\n logits_split = tf.split(probabilities, num_labels, axis=-1)\n label_ids_split = tf.split(label_ids, num_labels, axis=-1)\n # metrics change to auc of every class\n eval_dict = {}\n for j, logits in enumerate(logits_split):\n label_id_ = tf.cast(label_ids_split[j], dtype=tf.int32)\n current_auc, update_op_auc = tf.metrics.auc(label_id_, logits)\n eval_dict[str(j)] = (current_auc, update_op_auc)\n eval_dict[\"eval_loss\"] = tf.metrics.mean(values=per_example_loss)\n return eval_dict\n\n\n eval_metrics = metric_fn(\n per_example_loss, label_ids, probabilities, is_real_example\n )\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics,\n scaffold=scaffold_fn,\n )\n else:\n out = {\n \"input_ids\": input_ids,\n \"label_ids\": label_ids,\n }\n all_layers = model.get_all_encoder_layers()\n for (i, layer_index) in enumerate(layer_indexes):\n out[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, predictions=out, scaffold=scaffold_fn\n )\n return output_spec\n\n return model_fn", "def load_bert(self):\n self.hermes.info(\"Load the bert...\")\n model = load_trained_model_from_checkpoint(self.conf.bert[\"config\"], self.conf.bert[\"checkpoint\"])\n\n self.hermes.info(\"Build the tokenizer...\")\n tokenizer = self.poseidon.build_tokenizer()\n\n return model, tokenizer" ]
[ "0.61332077", "0.6126715", "0.5987451", "0.5933029", "0.57321113", "0.57210857", "0.57176137", "0.568357", "0.56809187", "0.56193393", "0.55681133", "0.5533706", "0.5505984", "0.54603016", "0.54229367", "0.5420582", "0.5410694", "0.54047483", "0.53842753", "0.535301", "0.53173256", "0.5287168", "0.52839315", "0.5264656", "0.5264656", "0.5243691", "0.5242082", "0.523904", "0.52319175", "0.522272" ]
0.7478179
0
Modify TF mlperf model parameter dict to be compatible with JAX parameter dict. Convert parameter names in tf_params to match JAX parameter names and create a nested dictionary of parameters for each layer in the model using `/` in each key as a delimiter. This function uses the mlperf model naming convention. Use convert_tf_param_dict_to_jax when using the kerasBERT model configuration.
def convert_mlperf_param_dict_to_jax(tf_params, emb_dim, num_heads): jax_params = {} # mapping between mlperf model and JAX model # works for model in //third_party/tensorflow_models/mlperf/models/rough/bert tf_key_to_jax_key = [ ('cls/seq_relationship/', 'classification/predictions_transform_logits/'), ('output_weights', 'kernel'), ('transform_logits/output_bias', 'transform_logits/bias'), ('cls/predictions/', 'masked_lm/cls_predictions_'), ('transform/dense', 'transform_dense'), ('transform/LayerNorm', 'transform_layernorm'), ('predictions_output_bias', 'predictions_output_bias/bias'), ('bert/embeddings/word_embeddings', 'word_embeddings/embedding'), ('bert/', 'transformer_encoder/'), ('embeddings/token_type_embeddings', 'type_embeddings/embedding'), ('embeddings/position_embeddings', 'position_embeddings/embedding'), ('attention/self', 'self_attention'), ('attention/output', 'self_attention_output'), ('layer_norm/layer_norm_', 'layer_norm/'), ('output/LayerNorm', 'output_layer_norm'), ('intermediate/dense', 'intermediate'), ('output/dense', 'output'), ('pooler/dense/', 'pooler_transform/'), ('self_attention_output_layer_norm', 'self_attention_layer_norm'), ('embeddings/LayerNorm', 'embeddings_layer_norm'), ('encoder/layer', 'encoder_layer'), (':0', ''), ('beta', 'bias'), ('gamma', 'scale') ] for tf_key, val in tf_params.items(): jax_key = tf_key for tf_name, jax_name in tf_key_to_jax_key: jax_key = jax_key.replace(tf_name, jax_name) # Reshape kernels if necessary jax_params[jax_key] = tf_params[tf_key] if 'self_attention_output/kernel' in jax_key: param = tf_params[tf_key] jax_params[jax_key] = param.reshape( (num_heads, -1, emb_dim)) # jax position embedding kernel has additional dimension pos_embedding = jax_params[ 'transformer_encoder/position_embeddings/embedding'] jax_params[ 'transformer_encoder/position_embeddings/embedding'] = pos_embedding[ np.newaxis, ...] # convert flat param dict into nested dict using `/` as delimeter outer_dict = {} for key, val in jax_params.items(): tokens = key.split('/') inner_dict = outer_dict # each token except the very last should add a layer to the nested dict for token in tokens[:-1]: if token not in inner_dict: inner_dict[token] = {} inner_dict = inner_dict[token] inner_dict[tokens[-1]] = val return outer_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_tf_param_dict_to_jax(tf_params):\n jax_params = {}\n tf_key_to_jax_key = [\n ('embeddings/layer_norm', 'embeddings_layer_norm'),\n ('transformer/layer', 'encoder_layer'), ('embeddings:0', 'embedding'),\n (':0', ''), ('beta', 'bias'), ('gamma', 'scale'),\n ('position_embedding/', 'position_embeddings/')\n ]\n for tf_key in tf_params:\n jax_key = tf_key\n for tf_name, jax_name in tf_key_to_jax_key:\n jax_key = jax_key.replace(tf_name, jax_name)\n\n jax_params[jax_key] = tf_params[tf_key]\n\n # jax position embedding kernel has additional dimension\n pos_embedding = jax_params['position_embeddings/embedding']\n jax_params['position_embeddings/embedding'] = pos_embedding[np.newaxis, ...]\n\n # convert flat param dict into nested dict using `/` as delimeter\n outer_dict = {}\n for key, val in jax_params.items():\n tokens = key.split('/')\n inner_dict = outer_dict\n # each token except the very last should add a layer to the nested dict\n for token in tokens[:-1]:\n if token not in inner_dict:\n inner_dict[token] = {}\n inner_dict = inner_dict[token]\n inner_dict[tokens[-1]] = val\n\n # this layer doesn't have parameters, but key is required to be present\n outer_dict['self_attention_mask'] = 0.\n\n return outer_dict", "def update_parameters(\n model_param: Dict[str, Union[float, List[float]]]\n ) -> Dict[str, float]:\n\n updated_param = {}\n\n for i, _ in enumerate(model_param[\"teff\"]):\n updated_param[f\"teff_{i}\"] = model_param[\"teff\"][i]\n updated_param[f\"radius_{i}\"] = model_param[\"radius\"][i]\n\n if \"parallax\" in model_param:\n updated_param[\"parallax\"] = model_param[\"parallax\"]\n elif \"distance\" in model_param:\n updated_param[\"distance\"] = model_param[\"distance\"]\n\n return updated_param", "def _update_model_params(self, params, model_ID, model, param_grid):\n \n params = params.copy()\n param_grid = param_grid.copy()\n \n params_transform = {}\n \n for key in params.keys():\n \n if 'log10.' 
in key:\n log10_transform = True\n else:\n log10_transform = False\n \n key = key.replace('log10.','')\n \n type_str = str(type(param_grid[key][0]))\n \n if 'int' in type_str: \n if log10_transform:\n params_transform[key] = int(10**params['log10.'+key])\n else:\n params_transform[key] = int(params[key])\n \n elif 'float' in type_str:\n if log10_transform:\n params_transform[key] = float(10**params['log10.'+key])\n \n else:\n params_transform[key] = float(params[key])\n \n elif 'str' in type_str: #index the param grid for hyperparams using 'choice'\n params_transform[key] = param_grid[key][params[key]]\n \n if 'densenet' not in model_ID.lower(): \n model.__dict__[key] = params_transform[key]\n \n assert(type_str == str(type(params_transform[key]))), 'type(param_grid[key][0]) changed from '+type_str+' to '+str(type(param_grid[key][0]))+' after updating params for key:'+str(key)\n \n if 'str' in type_str:\n assert(params_transform[key] in param_grid[key]), 'params_transform['+key+']='+str(params_transform[key])+' is not in the list of valid parameter choices:'+str(param_grid[key])\n \n else:\n assert(params_transform[key]<=max(param_grid[key]) and params_transform[key]>=min(param_grid[key])), 'params_transform['+key+']='+str(params_transform[key])+' does not lie in the range of valid values:'+str([min(param_grid[key]),max(param_grid[key])] )\n \n if 'densenet' in model_ID.lower(): \n model = model(**params_transform)\n \n return params_transform, model", "def convert_tf_config_to_jax_bert(config):\n unnecessary_keys = ['initializer_range', 'backward_compatible',\n 'embedding_size']\n for key in unnecessary_keys:\n if key in config:\n config.pop(key)\n\n # change TF parameter names to match JAX parameter names\n mapping = {\n 'attention_dropout_rate': 'attention_probs_dropout_prob',\n 'hidden_activation': 'hidden_act',\n 'dropout_rate': 'hidden_dropout_prob',\n 'emb_dim': 'hidden_size',\n 'mlp_dim': 'intermediate_size',\n 'max_len': 'max_position_embeddings',\n 'num_heads': 'num_attention_heads',\n 'num_layers': 'num_hidden_layers'\n }\n for jax_key, tf_key in mapping.items():\n config[jax_key] = config.pop(tf_key)\n\n return config", "def _update_params(self, perf_params, loop_info):\n for vartype in list(perf_params.keys()):\n for var in perf_params[vartype]:\n self.tspec_params['performance_params'][var] = \\\n self.indent + 'param %s[] = %s;\\t#%s\\n' % (var, repr(default_perf_params[vartype]), vartype)\n\n #loop_info.vars: set of input vars", "def create_params():\n\n params = {\n # Optimizer parameters (for Adam)\n \"beta1\": 0.9,\n \"beta2\": 0.999,\n \"epsilon\": 1e-7,\n \"learning_rate\": 0.001,\n\n # Input pipeline parameters\n \"parallel_reads\": 8, # Number of parallel file\n # readers per host.\n \"train_dataset_path\": FLAGS.train_dataset_path, # Glob specifing TFRecord\n # files with tf.examples.\n \"eval_dataset_path\": FLAGS.eval_dataset_path, # Glob specifing TFRecord\n # files with tf.examples.\n\n # Training paramaeters\n \"global_batch_size\": 512, # Global batch size for training.\n \"eval_global_batch_size\": 512, # Global batch size for eval.\n \"train_epochs\": 5, # Number of times to run train/eval loop.\n \"steps_per_epoch\": 100, # Number of training steps per epoch.\n \"num_eval_steps\": 10, # Number of eval steps per epoch\n\n # TPU parameters\n \"gcp_project\": FLAGS.gcp_project, # Project TPU is in.\n \"tpu_zone\": FLAGS.tpu_zone, # GCE zone the TPU is in.\n \"tpu\": FLAGS.tpu, # Name of the TPU.\n \"iterations_per_loop\": 200, # Number of iterations per 
device\n # training loop.\n \"pipeline_execution\": False, # If True, speed up training by\n # overlaping embedding lookups with\n # dense layer computations. Embedding\n # lookups will be one step old.\n \"use_gradient_accumulation\": True, # If False, speed up training by\n # applying embedding optimizer in\n # batches smaller than global batch\n # size.\n \"use_tpu\": True, # If False, uses CPU to train.\n\n # Model parameters\n \"model_dir\": FLAGS.model_dir, # Directory in which to store checkpoints.\n \"model_layers\": [100, 75, 50], # Sizes of dense layers for model\n \"num_categories\": 10, # Number of output categories.\n \"table_1_dimension\": 128, # Dimension of embedding table 1.\n \"table_1_rows\": 100, # Number of feature values in table 1.\n \"table_2_dimension\": 256, # Dimension of embedding table 2.\n \"table_2_rows\": 1000, # Number of feature values in table 2.\n }\n\n tf.logging.info(\"Params: {}\".format(params))\n\n return params", "def update_parameters(mode = str(None), estimator_name = str(None), **kwargs):\n try:\n json_p = os.path.join(os.path.dirname(__file__), 'parameters.json')\n with open(json_p,'r',encoding='utf-8') as d_file:\n para = json.load(d_file)\n print(f\"Previous Parameters are: {para[mode][estimator_name]}\")\n para[mode][estimator_name] = kwargs\n print(f\"Current Parameters are updated as: {para[mode][estimator_name]}\")\n json_p = os.path.join(os.path.dirname(__file__), 'parameters.json')\n w_file = open(json_p, \"w\",encoding='utf-8')\n json.dump(para, w_file)\n w_file.close()\n print('Done with the parameters update.')\n except:\n print('Failed to update the parameters.')", "def param_name_dict():\n\n layer = caffe_pb2.LayerParameter()\n # get all parameter names (typically underscore case) and corresponding\n # type names (typically camel case), which contain the layer names\n # (note that not all parameters correspond to layers, but we'll ignore that)\n param_names = [s for s in dir(layer) if s.endswith('_param')]\n param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]\n # strip the final '_param' or 'Parameter'\n param_names = [s[:-len('_param')] for s in param_names]\n param_type_names = [s[:-len('Parameter')] for s in param_type_names]\n return dict(zip(param_type_names, param_names))", "def inject_params(model_name: str) -> ListenerParams:\n params_file = model_name + '.params'\n try:\n with open(params_file) as f:\n pr.__dict__.update(compatibility_params, **json.load(f))\n except (OSError, ValueError, TypeError):\n if isfile(model_name):\n print('Warning: Failed to load parameters from ' + params_file)\n return pr", "def model_fn(features, labels, mode, params):\n\n #------hyper parameters------\n field_size = params['field_size']\n feature_size = params['feature_size']\n embedding_size = params['embedding_size']\n l2_reg = params['l2_reg']\n learning_rate = params['learning_rate']\n\n dropout = params['dropout']\n attention_factor = params['attention_factor']\n\n #------build weights------\n Global_Bias = tf.get_variable(\"bias\", shape=[1], initializer=tf.constant_initializer(0.0))\n Feat_Wgts = tf.get_variable(\"linear\", shape=[feature_size], initializer=tf.glorot_normal_initializer())\n Feat_Emb = tf.get_variable(\"emb\", shape=[feature_size, embedding_size], initializer=tf.glorot_normal_initializer())\n\n #------build feature------\n feat_ids = features['feat_ids']\n feat_vals = features['feat_vals']\n feat_ids = tf.reshape(feat_ids, shape=[-1, field_size])\n feat_vals = tf.reshape(feat_vals, shape=[-1, 
field_size]) # None * F\n\n #------build f(x)------\n\n # FM部分: sum(wx)\n with tf.variable_scope(\"Linear-part\"):\n feat_wgts = tf.nn.embedding_lookup(Feat_Wgts, feat_ids) # None * F * 1\n y_linear = tf.reduce_sum(tf.multiply(feat_wgts, feat_vals), 1)\n\n #Deep部分\n with tf.variable_scope(\"Embedding_Layer\"):\n embeddings = tf.nn.embedding_lookup(Feat_Emb, feat_ids) # None * F * K\n feat_vals = tf.reshape(feat_vals, shape=[-1, field_size, 1]) # None * F * 1\n embeddings = tf.multiply(embeddings, feat_vals) # None * F * K\n\n\n with tf.variable_scope(\"Pair-wise_Interaction_Layer\"):\n num_interactions = field_size * (field_size - 1) / 2\n element_wise_product_list = []\n for i in range(0, field_size):\n for j in range(i + 1, field_size):\n element_wise_product_list.append(tf.multiply(embeddings[:, i, :], embeddings[:, j, :]))\n element_wise_product_list = tf.stack(element_wise_product_list) # (F*(F-1)/2) * None * K stack拼接矩阵\n element_wise_product_list = tf.transpose(element_wise_product_list, perm=[1,0,2]) # None * (F(F-1)/2) * K\n\n # 得到Attention Score\n with tf.variable_scope(\"Attention_Netowrk\"):\n\n deep_inputs = tf.reshape(element_wise_product_list, shape=[-1, embedding_size]) # (None*F(F-1)/2) * K\n\n deep_inputs = contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=attention_factor, activation_fn=tf.nn.relu, \\\n weights_regularizer=contrib.layers.l2_regularizer(l2_reg), scope=\"attention_net_mlp\")\n\n aij = contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=1, activation_fn=tf.identity, \\\n weights_regularizer=contrib.layers.l2_regularizer(l2_reg), scope=\"attention_net_out\") # (None*F(F-1)/2) * 1\n\n # 得到attention score之后,使用softmax进行规范化\n aij = tf.reshape(aij, shape=[-1, int(num_interactions), 1])\n aij_softmax = tf.nn.softmax(aij, dim=1, name=\"attention_net_softout\") # None * num_interactions\n\n # TODO: 为什么要对attention score进行dropout那?? 
这里不是很懂\n if mode == tf.estimator.ModeKeys.TRAIN:\n aij_softmax = tf.nn.dropout(aij_softmax, keep_prob=dropout[0])\n\n with tf.variable_scope(\"Attention-based_Pooling_Layer\"):\n deep_inputs = tf.multiply(element_wise_product_list, aij_softmax) # None * (F(F-1)/2) * K\n deep_inputs = tf.reduce_sum(deep_inputs, axis=1) # None * K Pooling操作\n\n # Attention-based Pooling Layer的输出也要经过Dropout\n if mode == tf.estimator.ModeKeys.TRAIN:\n deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[1])\n\n # 该层的输出是一个K维度的向量\n\n with tf.variable_scope(\"Prediction_Layer\"):\n # 直接跟上输出单元\n deep_inputs = contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=1, activation_fn=tf.identity, \\\n weights_regularizer=contrib.layers.l2_regularizer(l2_reg), scope=\"afm_out\") # None * 1\n y_deep = tf.reshape(deep_inputs, shape=[-1]) # None\n\n with tf.variable_scope(\"AFM_overall\"):\n y_bias = Global_Bias * tf.ones_like(y_deep, dtype=tf.float32)\n y = y_bias + y_linear + y_deep\n pred = tf.nn.sigmoid(y)\n\n # set predictions\n predictions = {\"prob\": pred}\n export_outputs = {tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)}\n # Provide an estimator spec for `ModeKeys.PREDICT`\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n export_outputs=export_outputs)\n\n #------build loss------\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=labels))+ l2_reg * tf.nn.l2_loss(Feat_Wgts) + l2_reg * tf.nn.l2_loss(Feat_Emb)\n log_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=labels))\n\n\n # Provide an estimator spec for `ModeKeys.EVAL`\n eval_metric_ops = {\n # \"logloss\": tf.losses.log_loss(pred, labels, weights=1.0, scope=None, epsilon=1e-07,loss_collection=tf.GraphKeys.LOSSES, reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS),\n \"auc\": tf.metrics.auc(labels, pred),\n }\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n\n #------build optimizer------\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8)\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n\n # Provide an estimator spec for `ModeKeys.TRAIN`\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=log_loss, # 只打印pure log_loss,但是训练依旧按照整个的loss来训练\n train_op=train_op)", "def apply_param_transformations(self,params_edict,**kwargs):\n if self.params_transforms == None:\n return params_edict\n for key,transform_function in self.params_transforms.items():\n params_edict[key] = transform_function(params_edict,**kwargs)\n return params_edict", "def hp_optim_parameters(parameter_dict, ms_key='model_struct'):\n model_structs = parameter_dict[ms_key]\n parameter_dict = {\n k: v for k, v in parameter_dict.iteritems() if k is not ms_key}\n combos = []\n for ms in model_structs:\n it_dict = {}\n for k, v in parameter_dict.iteritems():\n if '_domain' in k:\n if isinstance(v, np.ndarray):\n v = pd.Series(v).to_json(orient='values')\n elif isinstance(v, basestring):\n pass\n else:\n v = json.dumps(v)\n it_dict[k] = v # Handle special-case hp optim flags here.\n it_dict[ms_key] = ms\n combos += [it_dict]\n return combos", "def _write_model_parameters(self, param_dir):\n parameters = {\n 
\"training_epochs\" : self.training_parameters.training_epochs,\n \"learning_rate\" : self.model_parameters.learning_rate,\n \"momentum\" : self.model_parameters.momentum,\n \"model\" : self.model_parameters.model,\n \"input_keep_probability\" : self.model_parameters.input_keep_probability,\n \"output_keep_probability\" : self.model_parameters.output_keep_probability,\n \"sequence_length\" : self.model_parameters.sequence_length,\n \"input_dimension\" : self.model_parameters.input_dimension,\n \"batch_size\" : self.model_parameters.batch_size,\n \"state_size\" : self.model_parameters.state_size,\n \"n_layers\" : self.model_parameters.n_layers,\n \"n_classes\" : self.model_parameters.n_classes,\n \"log_dir\" : self.directories.log_dir,\n \"checkpoint_dir\" : self.directories.checkpoint_dir,\n }\n\n with open(self._parameters_file(param_dir), \"w\") as f:\n json.dump(parameters, f, indent=4)", "def _addParametersToPypet(self, traj, params):\n\n def addParametersRecursively(traj, params, current_level):\n # make dummy list if just string\n if isinstance(current_level, str):\n current_level = [current_level]\n # iterate dict\n for key, value in params.items():\n # if another dict - recurse and increase level\n if isinstance(value, dict):\n addParametersRecursively(traj, value, current_level + [key])\n else:\n param_address = \".\".join(current_level + [key])\n value = \"None\" if value is None else value\n traj.f_add_parameter(param_address, value)\n\n addParametersRecursively(traj, params, [])", "def update_model_parameters(parameters, grads, learning_rate):\n L = len(parameters) /2 # number of layers in the neural network\n\n for l in range(int(L)):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n return parameters\n # raise NotImplementedError", "def get_params(self, params):\n mapping = OrderedDict(\n (key, params[x]) if isinstance(x, str) else (key, float(x))\n for key, x in self.transformations.items()\n )\n return Params(**mapping)", "def pretrain_vit_param_trans(model, state_dicts, num_patches, seg_num, attention_type):\n if 'head' + '.weight' in state_dicts:\n del state_dicts['head' + '.weight']\n if 'head' + '.bias' in state_dicts:\n del state_dicts['head' + '.bias']\n\n total_len = len(model.state_dict())\n if num_patches + 1 != state_dicts['pos_embed'].shape[1]:\n pos_embed = state_dicts['pos_embed']\n cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)\n other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).unsqueeze(1).transpose((0, 1, 3, 2))\n new_pos_embed = F.interpolate(\n other_pos_embed,\n size=(other_pos_embed.shape[-2], num_patches),\n mode='nearest'\n )\n new_pos_embed = new_pos_embed.squeeze(0).transpose((0, 2, 1))\n new_pos_embed = paddle.concat((cls_pos_embed, new_pos_embed), axis=1)\n state_dicts['pos_embed'] = new_pos_embed\n time.sleep(0.01)\n\n if 'time_embed' in state_dicts and seg_num != state_dicts['time_embed'].shape[1]:\n time_embed = state_dicts['time_embed'].transpose((0, 2, 1)).unsqueeze(0)\n new_time_embed = F.interpolate(\n time_embed,\n size=(time_embed.shape[-2], seg_num),\n mode='nearest'\n )\n state_dicts['time_embed'] = new_time_embed.squeeze(0).transpose((0, 2, 1))\n time.sleep(0.01)\n with tqdm(total=total_len, position=1, bar_format='{desc}', desc=\"Loading weights\") as desc:\n if attention_type == 'divided_space_time':\n new_state_dicts = state_dicts.copy()\n for key 
in tqdm(state_dicts):\n if 'blocks' in key and 'attn' in key:\n desc.set_description(\"Loading %s\" % key)\n new_key = key.replace('attn', 'temporal_attn')\n if not new_key in state_dicts:\n new_state_dicts[new_key] = state_dicts[key]\n else:\n new_state_dicts[new_key] = state_dicts[new_key]\n if 'blocks' in key and 'norm1' in key:\n desc.set_description(\"Loading %s\" % key)\n new_key = key.replace('norm1', 'temporal_norm1')\n if not new_key in state_dicts:\n new_state_dicts[new_key] = state_dicts[key]\n else:\n new_state_dicts[new_key] = state_dicts[new_key]\n time.sleep(0.01)\n ret_str = \"loading {:<20d} weights completed.\".format(len(model.state_dict()))\n desc.set_description(ret_str)\n return new_state_dicts", "def checkpoint_filter_fn(state_dict, model):\n if 'patch_embed.proj.weight' in state_dict:\n # Remap FB ResMlp models -> timm\n out_dict = {}\n for k, v in state_dict.items():\n k = k.replace('patch_embed.', 'stem.')\n k = k.replace('attn.', 'linear_tokens.')\n k = k.replace('mlp.', 'mlp_channels.')\n k = k.replace('gamma_', 'ls')\n if k.endswith('.alpha') or k.endswith('.beta'):\n v = v.reshape(1, 1, -1)\n out_dict[k] = v\n return out_dict\n return state_dict", "def model_fn(features,labels,mode,params):\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 
'predictions':predictions,\n },\n )\n\n return output_spec", "def get_mlperf_model_variables(config_path, init_checkpoint):\n # Load saved model configuration\n bert_config = modeling.BertConfig.from_json_file(config_path)\n seq_length = bert_config.max_position_embeddings\n tf_variables = {}\n max_predictions_per_seq = 76\n\n # Generate BERT TF model and initiate variable update from checkpoint\n graph = tf.Graph()\n sess = tf.Session(graph=graph)\n with graph.as_default():\n input_ids = tf.zeros((4, seq_length), dtype=tf.int32)\n input_mask = tf.zeros((4, seq_length), dtype=tf.int32)\n segment_ids = tf.zeros((4, seq_length), dtype=tf.int32)\n masked_lm_positions = tf.zeros((4, max_predictions_per_seq), dtype=tf.int32)\n masked_lm_ids = tf.zeros((4, max_predictions_per_seq), dtype=tf.int32)\n masked_lm_weights = tf.zeros((4, max_predictions_per_seq), dtype=tf.float32)\n next_sentence_labels = tf.zeros((4), dtype=tf.int32)\n tf_model = modeling.BertModel(\n config=bert_config,\n is_training=True,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=True)\n (masked_lm_loss, _,\n _) = get_masked_lm_output(bert_config, tf_model.get_sequence_output(),\n tf_model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids,\n masked_lm_weights)\n\n (next_sentence_loss, _,\n _) = get_next_sentence_output(bert_config, tf_model.get_pooled_output(),\n next_sentence_labels)\n _ = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n (assignment_map,\n _) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n sess.run(tf.initializers.global_variables())\n tvars_vals = sess.run(tvars)\n for var, val in zip(tvars, tvars_vals):\n tf_variables[var.name[:-2]] = val\n\n tf_config = bert_config.__dict__\n\n return tf_config, tf_variables, tf_model", "def _MoeLayerParams(ff_p):\n assert issubclass(ff_p.cls,\n layers_with_attention.TransformerFeedForwardLayer)\n assert p.num_experts > 0\n moe_p = p.moe_layer_tpl.Copy()\n # Copy over the base params.\n base_layer.BaseLayer.CopyBaseParams(ff_p, moe_p)\n # Set other params.\n moe_p.name = ff_p.name\n moe_p.input_dim = ff_p.input_dim\n moe_p.output_dim = ff_p.output_dim\n moe_p.hidden_dim = ff_p.hidden_dim\n moe_p.activation = ff_p.activation\n moe_p.residual_dropout_prob = ff_p.residual_dropout_prob\n moe_p.relu_dropout_prob = ff_p.relu_dropout_prob\n moe_p.dropout_tpl = ff_p.residual_dropout_tpl.Copy()\n moe_p.num_groups = p.num_groups\n moe_p.min_group_size = p.min_group_size\n moe_p.num_experts = p.num_experts\n # weight_split_dims_mapping and activation_split_dims_mapping should have\n # been set through p.moe_layer_tpl params.\n return moe_p", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n p.Define('task_dict', None, 'dataset_name -> task params')\n p.Define('task_name', None, 'High level task name')\n p.Define('logdir', None, 'Log directory')\n p.Define('train_program', None, 'Train program params')\n p.Define('train_executions_per_eval', 1, '')\n p.Define('eval_programs', [], 'List of eval program params.')\n p.Define('num_splits_per_client', None, '')\n p.Define('dataset_names', [], 'List of all dataset names.')\n p.Define('emails', [], 'List of emails to send metrics.')\n p.Define('summary_exporter', None, 'The summary exporter Params.')\n p.Define('async_postprocess', True,\n 'whether to CPU postprocess asynchronously with TPU train')\n p.Define(\n 'checkpoint_to_load', None,\n 
'If set, the program will initially load from this checkpoint, '\n 'ignoring train_dir. Typically used for oneoff decode.')\n\n # TODO(blee): Clean these up.\n p.Define('ml_perf', hyperparams.Params(), 'MlPerf configuration.')\n mlp = p.ml_perf\n mlp.Define('submission_metadata', None,\n 'A dictionary of static submission metadata')\n mlp.Define('benchmark_name', None, 'Benchmark name for compliance log.')\n mlp.Define('steps_per_epoch', None, 'Number of training steps per epoch.')\n mlp.Define('decoder_metric_name', None,\n 'Name of the decoder metric to report for compliance log.')\n mlp.Define('decoder_metric_success_threshold', None,\n 'Benchmark run must exceed this value to succeed.')\n mlp.Define('max_steps_to_train', None,\n 'Maximum number of steps to reach target accuracy')\n return p", "def from_name_params(cls, name, params=None):\n layer = name\n p = params\n\n d = None\n if p is None:\n p = {}\n definition = {}\n # If one of the special names are in the params list, then add params to the root layer\n if \"node\" in p or \"plugin\" in p or \"style\" in p or \"attrs\" in p:\n definition.update(p)\n else:\n definition[\"attrs\"] = p\n definition.update({\"node\": layer}) # The user-specified node name ALWAYS takes precidence.\n d = OrderedDict({layer.replace(\".\", \"-\"): definition})\n\n return cls.from_definition(d)", "def from_name_params(cls, name, params=None):\n layer = name\n p = params\n\n d = None\n if p is None:\n p = {}\n definition = {}\n # If one of the special names are in the params list, then add params to the root layer\n if \"node\" in p or \"plugin\" in p or \"style\" in p or \"attrs\" in p:\n definition.update(p)\n else:\n definition[\"attrs\"] = p\n definition.update({\"node\": layer}) # The user-specified node name ALWAYS takes precidence.\n d = OrderedDict({layer.replace(\".\", \"-\"): definition})\n\n return cls.from_definition(d)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (subject_logits, property_logits, value_logits) = self.create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n params=params,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions, depth):\n one_hot_positions = 
tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # subject, property, value로 나오도록\n subject_label = features[\"subject\"]\n property_label = features[\"property\"]\n value_label = features[\"value\"]\n res_length = params[\"res_length\"]\n ont_length = params[\"ont_length\"]\n\n subject_loss = compute_loss(subject_logits, subject_label, res_length)\n property_loss = compute_loss(property_logits, property_label, ont_length)\n value_loss = compute_loss(value_logits, value_label, res_length)\n\n total_loss = (subject_loss + property_loss + value_loss) / 3.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"subject_logits\": subject_logits,\n \"property_logits\": property_logits,\n \"value_logits\": value_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n\n p.Define('task_dict', None, 'dataset_name -> task params')\n p.Define('task_name', None, 'High level task name')\n p.Define('logdir', None, 'Log directory')\n p.Define('train_program', None, 'Train program params')\n p.Define('train_executions_per_eval', 1, '')\n p.Define('dataset_names', [], 'List of all dataset names.')\n p.Define('num_splits_per_client', None, '')\n\n p.Define('ml_perf', hyperparams.Params(), 'MlPerf configuration.')\n\n mlp = p.ml_perf\n mlp.Define('benchmark_name', None, 'Benchmark name for compliance log.')\n mlp.Define('decoder_metric_name', None,\n 'Name of the decoder metric to report for compliance log.')\n mlp.Define('decoder_metric_success_threshold', None,\n 'Benchmark run must exceed this value to succeed.')\n mlp.Define('max_steps_to_train', None,\n 'Maximum number of steps to reach target accuracy')\n mlp.Define('steps_per_epoch', None, 'Number of training steps per epoch.')\n mlp.Define('global_batch_size', None, 'Global batch size.')\n mlp.Define('max_sequence_length', None, 'Maximum sequence length.')\n mlp.Define('optimizer_name', None, 'Optimizer used.')\n mlp.Define('base_learning_rate', None, 'Base learning rate.')\n mlp.Define('warmup_steps', None, 'Number of warm-up steps.')\n\n return p", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n input_mask = features[\"input_mask\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n is_prediction = (mode == tf.estimator.ModeKeys.PREDICT)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings, is_prediction)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, 
initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, False)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=per_example_loss)\n eval_metrics = {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n export_outputs={'predict': tf.estimator.export.PredictOutput(outputs=probabilities)}\n )\n return output_spec", "def _LayerParams(ii):\n if isinstance(p.transformer_layer_params_tpl, list):\n factor = p.num_layers // len(p.transformer_layer_params_tpl)\n i = ii // factor\n p_ii = p.transformer_layer_params_tpl[i].Copy()\n else:\n p_ii = p.transformer_layer_params_tpl.Copy()\n p_ii.name = 'layer_%d' % ii\n p_ii.has_aux_atten = p.has_aux_atten\n p_ii.mask_self_atten = p.mask_self_atten\n p_ii.input_dim = p.mdl_dim or p_ii.input_dim\n p_ii.output_dim = p.mdl_dim or p_ii.output_dim\n p_ii.packed_input = p.packed_input\n if (not isinstance(p_ii.tr_atten_tpl.num_heads, list) and\n p.num_atten_heads is not None):\n p_ii.tr_atten_tpl.num_heads = p.num_atten_heads\n if p.dropout_prob is not None:\n p_ii.tr_atten_tpl.atten_dropout_prob = p.dropout_prob\n p_ii.tr_atten_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.relu_dropout_prob = p.dropout_prob\n if p.stochastic_depth_droppath_prob is not None:\n ratio = p.stochastic_depth_droppath_prob * ii / (p.num_layers - 1)\n p_ii.tr_atten_tpl.residual_droppath_prob = ratio\n p_ii.tr_fflayer_tpl.residual_droppath_prob = ratio\n if p.hidden_dim is not None:\n p_ii.tr_fflayer_tpl.hidden_dim = p.hidden_dim\n p_ii.tr_atten_tpl.add_unnormalized_input = p.add_unnormalized_input\n if ii in p.moe_layers:\n p_ii.tr_fflayer_tpl = _MoeLayerParams(p_ii.tr_fflayer_tpl)\n return p_ii", "def getParametersFromTraj(self, traj):\n # DO NOT use short names for star notation dicts\n runParams = self.traj.parameters.f_to_dict(short_names=not self.parameterSpace.star, fast_access=True)\n runParams = self._validatePypetParameters(runParams)\n return dotdict(runParams)", "def setParams(self, tfInputGraph=None, inputMapping=None, outputMapping=None, tfHParms=None):\n super(TFTransformer, self).__init__()\n kwargs = self._input_kwargs\n # Further conanonicalization, e.g. converting dict to sorted str pairs happens here\n return self._set(**kwargs)" ]
[ "0.7303152", "0.59408104", "0.544858", "0.5378973", "0.531307", "0.5166872", "0.50446033", "0.5018701", "0.49990663", "0.4953636", "0.49412417", "0.49221116", "0.49118844", "0.49070784", "0.49015772", "0.48552454", "0.48360878", "0.48341933", "0.48306793", "0.48175794", "0.48110288", "0.47620612", "0.4760321", "0.4760321", "0.4747372", "0.474568", "0.47451627", "0.47421262", "0.47368085", "0.47192532" ]
0.80498666
0
Modify TF parameter dict to be compatible with JAX parameter dict. Convert parameter names in tf_params to match JAX parameter names and create a nested dictionary of parameters for each layer in the model using `/` in each key as a delimiter.
def convert_tf_param_dict_to_jax(tf_params):
    jax_params = {}
    tf_key_to_jax_key = [
        ('embeddings/layer_norm', 'embeddings_layer_norm'),
        ('transformer/layer', 'encoder_layer'),
        ('embeddings:0', 'embedding'),
        (':0', ''),
        ('beta', 'bias'),
        ('gamma', 'scale'),
        ('position_embedding/', 'position_embeddings/')
    ]
    for tf_key in tf_params:
        jax_key = tf_key
        for tf_name, jax_name in tf_key_to_jax_key:
            jax_key = jax_key.replace(tf_name, jax_name)
        jax_params[jax_key] = tf_params[tf_key]

    # jax position embedding kernel has additional dimension
    pos_embedding = jax_params['position_embeddings/embedding']
    jax_params['position_embeddings/embedding'] = pos_embedding[np.newaxis, ...]

    # convert flat param dict into nested dict using `/` as delimiter
    outer_dict = {}
    for key, val in jax_params.items():
        tokens = key.split('/')
        inner_dict = outer_dict
        # each token except the very last should add a layer to the nested dict
        for token in tokens[:-1]:
            if token not in inner_dict:
                inner_dict[token] = {}
            inner_dict = inner_dict[token]
        inner_dict[tokens[-1]] = val

    # this layer doesn't have parameters, but key is required to be present
    outer_dict['self_attention_mask'] = 0.
    return outer_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_mlperf_param_dict_to_jax(tf_params, emb_dim, num_heads):\n jax_params = {}\n # mapping between mlperf model and JAX model\n # works for model in //third_party/tensorflow_models/mlperf/models/rough/bert\n tf_key_to_jax_key = [\n ('cls/seq_relationship/', 'classification/predictions_transform_logits/'),\n ('output_weights', 'kernel'),\n ('transform_logits/output_bias', 'transform_logits/bias'),\n ('cls/predictions/', 'masked_lm/cls_predictions_'),\n ('transform/dense', 'transform_dense'),\n ('transform/LayerNorm', 'transform_layernorm'),\n ('predictions_output_bias', 'predictions_output_bias/bias'),\n ('bert/embeddings/word_embeddings', 'word_embeddings/embedding'),\n ('bert/', 'transformer_encoder/'),\n ('embeddings/token_type_embeddings', 'type_embeddings/embedding'),\n ('embeddings/position_embeddings', 'position_embeddings/embedding'),\n ('attention/self', 'self_attention'),\n ('attention/output', 'self_attention_output'),\n ('layer_norm/layer_norm_', 'layer_norm/'),\n ('output/LayerNorm', 'output_layer_norm'),\n ('intermediate/dense', 'intermediate'),\n ('output/dense', 'output'),\n ('pooler/dense/', 'pooler_transform/'),\n ('self_attention_output_layer_norm', 'self_attention_layer_norm'),\n ('embeddings/LayerNorm', 'embeddings_layer_norm'),\n ('encoder/layer', 'encoder_layer'),\n (':0', ''),\n ('beta', 'bias'),\n ('gamma', 'scale')\n ]\n for tf_key, val in tf_params.items():\n jax_key = tf_key\n for tf_name, jax_name in tf_key_to_jax_key:\n jax_key = jax_key.replace(tf_name, jax_name)\n\n # Reshape kernels if necessary\n jax_params[jax_key] = tf_params[tf_key]\n if 'self_attention_output/kernel' in jax_key:\n param = tf_params[tf_key]\n jax_params[jax_key] = param.reshape(\n (num_heads, -1, emb_dim))\n\n # jax position embedding kernel has additional dimension\n pos_embedding = jax_params[\n 'transformer_encoder/position_embeddings/embedding']\n jax_params[\n 'transformer_encoder/position_embeddings/embedding'] = pos_embedding[\n np.newaxis, ...]\n\n # convert flat param dict into nested dict using `/` as delimeter\n outer_dict = {}\n for key, val in jax_params.items():\n tokens = key.split('/')\n inner_dict = outer_dict\n # each token except the very last should add a layer to the nested dict\n for token in tokens[:-1]:\n if token not in inner_dict:\n inner_dict[token] = {}\n inner_dict = inner_dict[token]\n inner_dict[tokens[-1]] = val\n\n return outer_dict", "def _addParametersToPypet(self, traj, params):\n\n def addParametersRecursively(traj, params, current_level):\n # make dummy list if just string\n if isinstance(current_level, str):\n current_level = [current_level]\n # iterate dict\n for key, value in params.items():\n # if another dict - recurse and increase level\n if isinstance(value, dict):\n addParametersRecursively(traj, value, current_level + [key])\n else:\n param_address = \".\".join(current_level + [key])\n value = \"None\" if value is None else value\n traj.f_add_parameter(param_address, value)\n\n addParametersRecursively(traj, params, [])", "def update_parameters(\n model_param: Dict[str, Union[float, List[float]]]\n ) -> Dict[str, float]:\n\n updated_param = {}\n\n for i, _ in enumerate(model_param[\"teff\"]):\n updated_param[f\"teff_{i}\"] = model_param[\"teff\"][i]\n updated_param[f\"radius_{i}\"] = model_param[\"radius\"][i]\n\n if \"parallax\" in model_param:\n updated_param[\"parallax\"] = model_param[\"parallax\"]\n elif \"distance\" in model_param:\n updated_param[\"distance\"] = model_param[\"distance\"]\n\n return updated_param", "def 
update_params(coeffs_dict, params):\n for key in coeffs_dict.keys():\n try:\n # Batman\n params.__dict__[key] = coeffs_dict[key]\n except:\n # Normal dictionary\n params[key] = coeffs_dict[key]\n return params", "def tweak_params(it_exp):\n proc_it_exp = {}\n for k, v in it_exp.iteritems():\n if not isinstance(v, list):\n v = [v]\n elif any(isinstance(el, list) for el in v):\n v = flatten_list(v)\n proc_it_exp[k] = v\n return proc_it_exp", "def getParametersFromTraj(self, traj):\n # DO NOT use short names for star notation dicts\n runParams = self.traj.parameters.f_to_dict(short_names=not self.parameterSpace.star, fast_access=True)\n runParams = self._validatePypetParameters(runParams)\n return dotdict(runParams)", "def update_parameters(parameters: Dict,\n grads: Dict, learning_rate: float) -> Dict:\n L = len(parameters)//2 # number of layers\n\n for l in range(1, L+1):\n parameters['W'+str(l)] -= learning_rate * grads['dW'+str(l)]\n parameters['b'+str(l)] -= learning_rate * grads['db'+str(l)]\n\n return parameters", "def convert_tf_config_to_jax_bert(config):\n unnecessary_keys = ['initializer_range', 'backward_compatible',\n 'embedding_size']\n for key in unnecessary_keys:\n if key in config:\n config.pop(key)\n\n # change TF parameter names to match JAX parameter names\n mapping = {\n 'attention_dropout_rate': 'attention_probs_dropout_prob',\n 'hidden_activation': 'hidden_act',\n 'dropout_rate': 'hidden_dropout_prob',\n 'emb_dim': 'hidden_size',\n 'mlp_dim': 'intermediate_size',\n 'max_len': 'max_position_embeddings',\n 'num_heads': 'num_attention_heads',\n 'num_layers': 'num_hidden_layers'\n }\n for jax_key, tf_key in mapping.items():\n config[jax_key] = config.pop(tf_key)\n\n return config", "def _LayerParams(ii):\n if isinstance(p.transformer_layer_params_tpl, list):\n factor = p.num_layers // len(p.transformer_layer_params_tpl)\n i = ii // factor\n p_ii = p.transformer_layer_params_tpl[i].Copy()\n else:\n p_ii = p.transformer_layer_params_tpl.Copy()\n p_ii.name = 'layer_%d' % ii\n p_ii.has_aux_atten = p.has_aux_atten\n p_ii.mask_self_atten = p.mask_self_atten\n p_ii.input_dim = p.mdl_dim or p_ii.input_dim\n p_ii.output_dim = p.mdl_dim or p_ii.output_dim\n p_ii.packed_input = p.packed_input\n if (not isinstance(p_ii.tr_atten_tpl.num_heads, list) and\n p.num_atten_heads is not None):\n p_ii.tr_atten_tpl.num_heads = p.num_atten_heads\n if p.dropout_prob is not None:\n p_ii.tr_atten_tpl.atten_dropout_prob = p.dropout_prob\n p_ii.tr_atten_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.relu_dropout_prob = p.dropout_prob\n if p.stochastic_depth_droppath_prob is not None:\n ratio = p.stochastic_depth_droppath_prob * ii / (p.num_layers - 1)\n p_ii.tr_atten_tpl.residual_droppath_prob = ratio\n p_ii.tr_fflayer_tpl.residual_droppath_prob = ratio\n if p.hidden_dim is not None:\n p_ii.tr_fflayer_tpl.hidden_dim = p.hidden_dim\n p_ii.tr_atten_tpl.add_unnormalized_input = p.add_unnormalized_input\n if ii in p.moe_layers:\n p_ii.tr_fflayer_tpl = _MoeLayerParams(p_ii.tr_fflayer_tpl)\n return p_ii", "def update_parameters(params, grads, alpha):\n n_layers = len(params) // 2\n for i in range(n_layers):\n params['w%s' % (i+1)] = (\n params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])\n params['b%s' % (i+1)] = (\n params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])\n return params", "def update_param_vals(pars, prefix, **kwargs):\n for key, val in kwargs.items():\n pname = \"%s%s\" % (prefix, key)\n if pname in pars:\n 
pars[pname].value = val\n return pars", "def unflatten_complex_params(input_dict, param_name):\n items_to_process = {}\n for k in input_dict.keys():\n if k.startswith(param_name):\n items_to_process[k] = input_dict[k]\n if len(items_to_process) == 0:\n return\n\n for k in items_to_process.keys():\n del input_dict[k]\n\n for k in items_to_process.keys():\n Unflattener._set_deep(k, input_dict, items_to_process[k])", "def process_params(nested_params: DictConfig, keys: list[str], flattened_params: DictConfig) -> None:\n if len({\"values\", \"min\", \"max\"}.intersection(nested_params.keys())) > 0:\n key = \".\".join(keys)\n flattened_params[key] = nested_params\n else:\n for name, cfg in nested_params.items():\n if isinstance(cfg, DictConfig):\n process_params(cfg, keys + [str(name)], flattened_params)", "def from_name_params(cls, name, params=None):\n layer = name\n p = params\n\n d = None\n if p is None:\n p = {}\n definition = {}\n # If one of the special names are in the params list, then add params to the root layer\n if \"node\" in p or \"plugin\" in p or \"style\" in p or \"attrs\" in p:\n definition.update(p)\n else:\n definition[\"attrs\"] = p\n definition.update({\"node\": layer}) # The user-specified node name ALWAYS takes precidence.\n d = OrderedDict({layer.replace(\".\", \"-\"): definition})\n\n return cls.from_definition(d)", "def from_name_params(cls, name, params=None):\n layer = name\n p = params\n\n d = None\n if p is None:\n p = {}\n definition = {}\n # If one of the special names are in the params list, then add params to the root layer\n if \"node\" in p or \"plugin\" in p or \"style\" in p or \"attrs\" in p:\n definition.update(p)\n else:\n definition[\"attrs\"] = p\n definition.update({\"node\": layer}) # The user-specified node name ALWAYS takes precidence.\n d = OrderedDict({layer.replace(\".\", \"-\"): definition})\n\n return cls.from_definition(d)", "def flatten(params, key=None):\n flat = {}\n for name, val in list(params.items()):\n if key is not None and not isinstance(key, int):\n name = \"%s[%s]\" % (key, name)\n if isinstance(val, dict):\n flat.update(flatten(val, name))\n elif isinstance(val, list):\n flat.update(flatten(dict(enumerate(val)), name))\n elif val is not None:\n flat[name] = val\n return flat", "def assign_params_from_flat(x, params):\n flat_size = lambda p: int(np.prod(p.shape.as_list()))\n splits = tf.split(x, [flat_size(p) for p in params])\n new_params = [tf.reshape(p_new, p.shape) for p, p_new in zip(params, splits)]\n return tf.group([tf.assign(p, p_new) for p, p_new in zip(params, new_params)])", "def update_model_parameters(parameters, grads, learning_rate):\n L = len(parameters) /2 # number of layers in the neural network\n\n for l in range(int(L)):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n return parameters\n # raise NotImplementedError", "def _prepare_params(self, params):\n for key, value in params.items():\n if type(value) is list:\n params[key] = [(6, 0, value)]\n\n return params", "def param_name_dict():\n\n layer = caffe_pb2.LayerParameter()\n # get all parameter names (typically underscore case) and corresponding\n # type names (typically camel case), which contain the layer names\n # (note that not all parameters correspond to layers, but we'll ignore that)\n param_names = [s for s in dir(layer) if s.endswith('_param')]\n param_type_names = 
[type(getattr(layer, s)).__name__ for s in param_names]\n # strip the final '_param' or 'Parameter'\n param_names = [s[:-len('_param')] for s in param_names]\n param_type_names = [s[:-len('Parameter')] for s in param_type_names]\n return dict(zip(param_type_names, param_names))", "def substitute_params_keys(params, new_keys):\n for p in params:\n p['type'] = new_keys[p['type']]", "def _update_params(self, perf_params, loop_info):\n for vartype in list(perf_params.keys()):\n for var in perf_params[vartype]:\n self.tspec_params['performance_params'][var] = \\\n self.indent + 'param %s[] = %s;\\t#%s\\n' % (var, repr(default_perf_params[vartype]), vartype)\n\n #loop_info.vars: set of input vars", "def build_complex_list_params(self, params, items, label, names):\n for i, item in enumerate(items, 1):\n current_prefix = '%s.%s' % (label, i)\n for key, value in zip(names, item):\n full_key = '%s.%s' % (current_prefix, key)\n params[full_key] = value", "def get_params(self, params):\n mapping = OrderedDict(\n (key, params[x]) if isinstance(x, str) else (key, float(x))\n for key, x in self.transformations.items()\n )\n return Params(**mapping)", "def _update_parameters(self, topology, parameters):\n for pkey, parameter in self.parameters.items():\n\n value = parameters[pkey]\n name = parameter.attr_name()\n key = parameter.key()\n\n if isinstance(parameter, NodeParameter):\n topology.node_attribute(key=key, name=name, value=value)\n elif isinstance(parameter, EdgeParameter):\n topology.edge_attribute(key=key, name=name, value=value)\n else:\n msg = \"Parameter {} is neither a node nor an edge parameter! {}\"\n raise TypeError(msg.format(type(parameter)))", "def set_params(self, **params):\n\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for regressor %s. '\n 'Check the list of available parameters '\n 'with `regressor.get_params().keys()`.' 
%\n (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self._regressor, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self", "def update_parameters(mode = str(None), estimator_name = str(None), **kwargs):\n try:\n json_p = os.path.join(os.path.dirname(__file__), 'parameters.json')\n with open(json_p,'r',encoding='utf-8') as d_file:\n para = json.load(d_file)\n print(f\"Previous Parameters are: {para[mode][estimator_name]}\")\n para[mode][estimator_name] = kwargs\n print(f\"Current Parameters are updated as: {para[mode][estimator_name]}\")\n json_p = os.path.join(os.path.dirname(__file__), 'parameters.json')\n w_file = open(json_p, \"w\",encoding='utf-8')\n json.dump(para, w_file)\n w_file.close()\n print('Done with the parameters update.')\n except:\n print('Failed to update the parameters.')", "def initialize_parameters():\n\n W1 = tf.get_variable('W1', [3,3,3,64], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W2 = tf.get_variable('W2', [3,3,64,128], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W3 = tf.get_variable('W3', [3,3,128,256], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W4 = tf.get_variable('W4', [3,3,256,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W5 = tf.get_variable('W5', [3,3,512,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"W2\": W2,\n \"W3\": W3,\n \"W4\": W4,\n \"W5\": W5\n }\n\n return parameters", "def _clean_and_encode_params(params: Mapping):\n # Keep only the parameters that were given a value\n params = {k: v for k, v in params.items() if v is not None}\n\n # All query parameters are later urlencoded - for projection, comma-separated\n # list is supported only on literal comma; convert comma-separated list\n # to a list of values which will be encoded to multiple query parameters\n try:\n params[\"projection\"] = [x.strip() for x in params[\"projection\"].split(\",\")]\n except KeyError:\n pass\n return params", "def transformer(root, modify_dict):\n AddParamTransformer(root).add(modify_dict)\n DelParamTransformer(root).delete(modify_dict)\n RenameParamTransformer(root).replace(modify_dict)\n RepAttributeTransformer(root).replace(modify_dict)\n return root" ]
[ "0.6692091", "0.574194", "0.57411915", "0.55651945", "0.5472306", "0.54638326", "0.53691083", "0.53339547", "0.5273296", "0.5266139", "0.5258614", "0.5244856", "0.52444565", "0.5239768", "0.5239768", "0.52044886", "0.5198123", "0.5191683", "0.51802343", "0.51680446", "0.5162708", "0.51570076", "0.5147552", "0.51311266", "0.51192474", "0.51170737", "0.5104814", "0.5097399", "0.50558525", "0.5054871" ]
0.7722715
0
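A minimal, runnable sketch of the one step the convert_tf_param_dict_to_jax entry above relies on most: rebuilding a nested parameter tree from flat, '/'-delimited keys. The helper name nest_flat_params and the parameter names in the toy dict are made up for illustration only and are not taken from the dataset rows.

def nest_flat_params(flat_params):
    """Turn {'a/b/c': value} style keys into nested dicts, splitting on '/'."""
    outer = {}
    for key, val in flat_params.items():
        tokens = key.split('/')
        inner = outer
        # every token except the last adds (or reuses) one level of nesting
        for token in tokens[:-1]:
            inner = inner.setdefault(token, {})
        # the final token holds the actual parameter value
        inner[tokens[-1]] = val
    return outer

if __name__ == '__main__':
    flat = {
        'encoder_layer_0/self_attention/kernel': 1.0,   # hypothetical key names
        'encoder_layer_0/self_attention/bias': 2.0,
        'embeddings_layer_norm/scale': 3.0,
    }
    print(nest_flat_params(flat))
    # {'encoder_layer_0': {'self_attention': {'kernel': 1.0, 'bias': 2.0}},
    #  'embeddings_layer_norm': {'scale': 3.0}}

The setdefault call is the only real difference from the loop in the entry above, which spells out the same reuse-or-create logic with an explicit membership test.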
>>> get_comparison_value('AS KS QS JS TS'.split())[1]
'Royal Flush'
>>> get_comparison_value('KS QS JS TS 9S'.split())[1]
'Straight Flush'
>>> get_comparison_value('8S 8C 8D 8H 3S'.split())[1]
'Four of a Kind'
>>> get_comparison_value('8S 8C 8D 3H 3S'.split())[1]
'Full House'
>>> get_comparison_value('2S QS JS TS 9S'.split())[1]
'Flush'
>>> get_comparison_value('KD QS JS TS 9S'.split())[1]
'Straight'
>>> get_comparison_value('8S 8C 8D 4H 3S'.split())[1]
'Three of a Kind'
>>> get_comparison_value('8S 8C 4D 4H 3S'.split())[1]
'Two Pairs'
>>> get_comparison_value('8S 8C 5D 4H 3S'.split())[1]
'One Pair'
>>> get_comparison_value('8S 7C 5D 4H 3S'.split())[1]
'High Card'
def get_comparison_value(hand):
    suits = set(get_suit(card) for card in hand)
    values = set(get_value(card) for card in hand)
    is_flush = len(suits) == 1
    is_straight = (len(values) == 5 and min(values) + 4 == max(values))
    kinds = get_kinds(hand)
    kind_counts = [k.count for k in kinds]
    if is_flush and values == {10, 11, 12, 13, 14}:
        result = (100, 'Royal Flush')
    elif is_flush and is_straight:
        result = (90, 'Straight Flush')
    elif kind_counts == [4, 1]:
        result = (80, 'Four of a Kind')
    elif kind_counts == [3, 2]:
        result = (70, 'Full House')
    elif is_flush:
        result = (60, 'Flush')
    elif is_straight:
        result = (50, 'Straight')
    elif kind_counts == [3, 1, 1]:
        result = (40, 'Three of a Kind')
    elif kind_counts == [2, 2, 1]:
        result = (30, 'Two Pairs')
    elif kind_counts == [2, 1, 1, 1]:
        result = (20, 'One Pair', kinds[0].value)
    else:
        assert kind_counts == [1]*5
        result = (10, 'High Card')
    return result + (max(values),)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comparison(self) -> str:\n return self._values.get('comparison')", "def comparison(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comparison\")", "def testWinkler(self): # - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n approx_str_value = stringcmp.winkler(pair[0],pair[1])\n\n assert (isinstance(approx_str_value,float)), \\\n '\"Winkler\" does not return a floating point number for:'+ \\\n str(pair)\n\n assert (approx_str_value >= 0.0), \\\n '\"Winkler\" returns a negative number for:'+str(pair)\n\n assert (approx_str_value <= 1.0), \\\n '\"Winkler\" returns a number larger than 1.0 for:'+str(pair)\n\n approx_str_value_1 = stringcmp.winkler(pair[0],pair[1])\n approx_str_value_2 = stringcmp.winkler(pair[1],pair[0])\n\n assert (approx_str_value_1 == approx_str_value_2), \\\n '\"Winkler\" returns different values for pair and swapped ' + \\\n 'pair: '+str(pair)+': '+str(approx_str_value_1)+', '+ \\\n str(approx_str_value_2)\n\n # Check for value 1.0 if the strings are the same\n #\n if (pair[0] == pair[1]):\n\n assert (approx_str_value == 1.0), \\\n '\"Winkler\" does not return 1.0 if strings are equal: '+str(pair)\n\n # Winkler should always return a value equal to or larger than Jaro\n #\n approx_str_value_winkler = stringcmp.winkler(pair[0],pair[1])\n approx_str_value_jaro = stringcmp.jaro(pair[0],pair[1])\n\n assert (approx_str_value_winkler >= approx_str_value_jaro), \\\n '\"Winkler\" value smaller than \"Jaro\" value for:'+str(pair)", "def get_compare_value_texts(self):\n return self.compare_value_texts", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def compare_snp_calls(gt_call, vcf_call):\n if vcf_call == 'NA':\n print(vcf_call)\n return 'NA'\n\n if not set('ACGT ').issuperset(gt_call):\n return 'NA'\n\n allele_freq, ref_allele, alt_allele = vcf_call.split(':')\n\n if gt_call == '{0} {1}'.format(ref_allele, ref_allele):\n gt_call = 0\n\n elif gt_call == '{0} {1}'.format(alt_allele, alt_allele):\n gt_call = 1\n\n elif ref_allele not in gt_call and alt_allele not in gt_call:\n gt_call = 0.5\n\n # 'If alleles do not match, e.g. 
genotyping gives C/T and SNP calling A/G!'\n else:\n print('')\n print(gt_call)\n print('{0} {1}'.format(ref_allele, ref_allele))\n print('{0} {1}'.format(alt_allele, alt_allele))\n print('')\n return 'NA'\n\n diff = gt_call - float(allele_freq)\n return diff", "def compare(string1: str, string2: str, /) -> int:\n ...", "def get (self,x):\r\n\r\n COMPTERMS = ['==','>=','<=','!=','>','<',]\r\n\r\n\r\n def contains_comp (x):\r\n \"\"\"True is x contains any of the COMP Terms\"\"\"\r\n\r\n for comp in COMPTERMS:\r\n if comp in x:\r\n return True\r\n return False\r\n\r\n def comp_split (phrase):\r\n \"\"\" Splits the phrase into a list of terms to be compared\"\"\"\r\n \r\n level = 0\r\n\r\n phrase = list(phrase)\r\n for index, x in enumerate(phrase):\r\n if 'x' == '(':\r\n level += 1\r\n elif 'x' == ')':\r\n level -= 1 \r\n if level == 0:\r\n found = False\r\n for comp in COMPTERMS:\r\n if len(comp) == 2 and x == comp[0] and phrase[index+1]==comp[1]:\r\n phrase[index] = '#'+comp[0]\r\n phrase[index+1] = comp[1]+'#'\r\n found = True \r\n \r\n elif not found and len(comp) == 1 and x == comp:\r\n \r\n phrase[index] = '#'+x+'#'\r\n\r\n phrase = ''.join(phrase).split('#')\r\n\r\n newphrase = []\r\n for x in phrase:\r\n if x in COMPTERMS:\r\n newphrase.append(x)\r\n else:\r\n newphrase.append(self.calculator.calculate(x))\r\n return newphrase\r\n \r\n def evaluate_comp_list (phrase):\r\n\r\n def compare (a,R,b):\r\n\r\n\r\n if R == '==':\r\n return a==b\r\n elif R == '!=':\r\n return a!=b\r\n elif R == '>':\r\n return a>b\r\n elif R == '<':\r\n return a<b\r\n elif R == '>=':\r\n return a>=b\r\n elif R == '<=':\r\n return a<=b\r\n def get_triads (phrase):\r\n triads = []\r\n for count, x in enumerate(phrase):\r\n\r\n if count % 2 == 0 and count+2 < len(phrase):\r\n triads.append((phrase[count],phrase[count+1],phrase[count+2]))\r\n return triads\r\n \r\n comp_results = []\r\n for x in get_triads(phrase):\r\n if not compare(x[0],x[1],x[2]):\r\n return False\r\n return True\r\n\r\n\r\n if x in self.variables.variables:\r\n val = self.variables.variables[x]\r\n\r\n\r\n return val\r\n else:\r\n\r\n if contains_comp(x):\r\n \r\n \r\n phrase = comp_split(x)\r\n if contains_comp(phrase):\r\n return evaluate_comp_list(phrase)\r\n else:\r\n phrase = [self.logic.interpret(self.logic.parse(x)) for x in phrase]\r\n for phr in phrase:\r\n if not phr:\r\n return False\r\n return True\r\n elif x in ['True','False','bTrue','bFalse']:\r\n return {'True':True,\r\n 'False':False,\r\n 'bTrue':True,\r\n 'bFalse':False}[x]\r\n \r\n else:\r\n x=self.calculator.calculate(x)\r\n if not x:\r\n \r\n return False\r\n return True", "def compare_to(this: str, other: str) -> int:\n return cmp(this, other, StringComparison.CurrentCulture)", "def get_best_alignment_score(dna1, dna2, match = 1, mismatch = -1, gap = -2):\n if dna1 == '':\n return (gap*len(dna2), '-'*len(dna2) , dna2)\n if dna2 == '':\n return (gap*len(dna1), dna1 , '-'*len(dna1))\n \n best_case = list(get_best_alignment_score(dna1[1:], dna2[1:], match, \\\n mismatch, gap))\n best_case[0] = get_alignment_score(dna1[0],dna2[0], match, mismatch,\\\n gap) + best_case[0]\n best_case[1] = dna1[0] + best_case[1]\n best_case[2] = dna2[0] + best_case[2]\n \n best_case_attempt = list(get_best_alignment_score(dna1, dna2[1:], match, \\\n mismatch, gap))\n \n best_case_attempt[0] = get_alignment_score('-',dna2[0], match, mismatch,\\\n gap) + best_case_attempt[0]\n best_case_attempt[1] = '-' + best_case_attempt[1]\n best_case_attempt[2] = dna2[0] + best_case_attempt[2]\n\n if 
best_case[0] < best_case_attempt[0]:\n best_case = best_case_attempt\n\n best_case_attempt = list(get_best_alignment_score(dna1[1:], dna2, match, \\\n mismatch, gap))\n best_case_attempt[0] = get_alignment_score(dna1[0],'-', match, mismatch,\\\n gap) + best_case_attempt[0]\n best_case_attempt[1] = dna1[0] + best_case_attempt[1]\n best_case_attempt[2] = '-' + best_case_attempt[2]\n \n if best_case[0] < best_case_attempt[0]:\n best_case = best_case_attempt\n \n return tuple(best_case)", "def compare_moves(p_move, c_move):\n#\ttie = \"It's a tie\"\n \tif p_move == \"r\" and c_move == \"p\":\n \t\treturn \"computer\"\n\telif p_move == \"r\" and c_move == \"s\":\n \t\treturn \"human\"\n\telif p_move == \"p\" and c_move == \"s\":\n \t\treturn \"computer\"\n\telif p_move == \"p\" and c_move == \"r\":\n\t\treturn \"human\"\n\telif p_move == \"s\" and c_move == \"r\":\n\t\treturn \"computer\"\n\telif p_move == \"s\" and c_move == \"p\":\n\t\treturn \"human\"", "def for_comparison(self):\n\t\tif len(self.values) < 5:\n\t\t\treturn unicode(self)\n\t\telse:\n\t\t\treturn u'-'.join(self._string_values(increment=1))", "def test_compare_difference_string_slower(self):\n test_algorithm = 'bubble'\n test_algorithm_time = 5\n test_sorted_time = 1\n result = calculate_compare_time_difference(test_algorithm_time, test_sorted_time, test_algorithm)\n self.assertEqual('bubble was 4 seconds slower.', result)", "def find_best_candidate(s_array):\n best_string = ''\n max_val = 0\n for s in s_array:\n score = compare(s)\n if score > max_val:\n max_val = score\n best_string = s\n return best_string", "def get_result(mishkaScore: int, chrisScore: int) -> bool:\n if mishkaScore > chrisScore:\n return \"M\"\n if mishkaScore < chrisScore:\n return \"C\"\n return \"D\"", "def comparator(a, b):\n a = re.split(\"[_=]\", a)[-2]\n b = re.split(\"[_=]\", b)[-2]\n if a > b:\n return 1\n elif a < b:\n return -1\n else:\n return 0", "def compare_strings(s1, s2):\n print(s1)\n for idx, c in enumerate(s2):\n if s1[idx] == c:\n cprint(c, fg='c', end='')\n else:\n cprint(c, bg='r', style='b', end='')\n print()", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def fuzzy_match_strings(ref, val):\n if not ref or not val:\n return 0\n ref_q = to_q(ref)\n val_q = to_q(val)\n if ref_q or val_q:\n return 100 if ref_q == val_q else 0\n simplified_val = unidecode(val).lower()\n simplified_ref = unidecode(ref).lower()\n\n # Return symmetric score\n r1 = fuzz.token_sort_ratio(simplified_val, simplified_ref)\n r2 = fuzz.token_sort_ratio(simplified_ref, simplified_val)\n r2 = r1\n return int(0.5*(r1+r2))", "def _compare(a, b):\n a = _split(a)\n b = _split(b)\n if a[0] != b[0]:\n if a[0] > b[0]:\n return 1\n else:\n return -1\n max_len = max(len(a[1]), len(b[1]))\n for i in range(max_len):\n if i > len(b[1]):\n return 1\n elif i > len(a[1]):\n return -1\n schar = a[1][i]\n ochar = b[1][i]\n if schar > ochar:\n return 1\n elif schar < ochar:\n return -1", "def Calcification(self):\n s = self.calcification\n assert s in range(1,7), \"Calcification score out of bounds.\"\n if s == 1: return 'Popcorn'\n elif s == 2: return 'Laminated'\n elif s == 3: return 'Solid'\n elif s == 4: return 'Non-central'\n elif s == 5: return 'Central'\n elif s == 6: return 'Absent'", "def get_result(self):\n\n x = self.rps_data[0][1].upper()\n y = self.rps_data[1][1].upper()\n if x[0] == '|':\n x = 
x[2:3]\n if y[0] == '|':\n y = y[2:3]\n if x == y:\n self.write_scores(\"Draw\")\n return \"Draw\"\n elif (x == 'R' and y == 'S') or (x == 'S' and y == 'P') or (x == 'P' and y == 'R'):\n self.write_scores(\"First\")\n return \"First\"\n else:\n self.write_scores(\"Second\")\n return \"Second\"", "def print_comparison(node, comparisons, search_string):\n\n # dostane sa na dummy kluc\n if node is None:\n print(\"\\nPorovnanie\", str(comparisons + 1) + \":\",\n colored(\"\\n -- DUMMY kluc: \" + search_string + \" --\", \"green\", attrs=['bold']),\n \"\\n Hladany retazec:\", colored(search_string, \"green\", attrs=['bold']),\n \"\\n Zhoda:\", colored(True, \"green\", attrs=['bold']),\n \"\\n\\n*******************\")\n\n # medzivysledok\n else:\n color = \"green\" if node.value == search_string else \"red\"\n print(\"\\nPorovnanie\", str(comparisons) + \":\",\n \"\\n Aktualny kluc:\", colored(node.value, color, attrs=['bold']),\n \"\\n Hladany retazec:\", colored(search_string, color, attrs=['bold']),\n \"\\n Zhoda:\", colored(node.value == search_string, color, attrs=['bold']),\n \"\\n\\n*******************\")", "def _rval(self, s):\n if common.is_num(s):\n return float(s)\n elif s.startswith('#'):\n return self.parent.constants[s[1:].lower()]\n else: # time-based ycomp code\n return s.lower()", "def compare_cards(card1, card2, deck, assigned_card_value):\n if card1 not in deck: \n raise ValueError(\"The card doesn't exist\")\n if card2 not in deck: \n raise ValueError(\"The card doesn't exist\")\n card1 = assigned_card_value.get(card1)\n card2 = assigned_card_value.get(card2)\n if card1 < card2:\n return 1\n elif card1 > card2:\n return 0\n elif card1 == card2:\n return -1", "def compare(self, other_sequence):\n response = []\n if self.name != other_sequence.name:\n response.append('DIFF: Sequence names: %s' % self.name)\n response.append('and %s' % other_sequence.name)\n if self.increment_by != other_sequence.increment_by:\n response.append('DIFF: Increment interval')\n response.append('is %d,' % self.increment_by)\n response.append('for %s' % other_sequence.name)\n response.append('it is %d' % other_sequence.increment_by)\n if self.min_value != other_sequence.min_value:\n response.append('DIFF: Min value is %d' % self.min_value)\n response.append(' for %s' % other_sequence.name)\n response.append('it is %d' % other_sequence.min_value)\n if self.max_value != other_sequence.max_value:\n response.append('DIFF: Max value is %d' % self.max_value)\n response.append(', for %s ' % other_sequence.name)\n response.append('it is %d' % other_sequence.max_value)\n # The only attribute we don't check is currval, becuase it will be \n # different in 999 cases out of a 1000\n return response", "def question_6():\n data_strings = [\"Result = 95%\", \"Final Score = 8%\", \"Relative Value = 178%\",\n \"Something else that's very important = 9.2%\", \"x = 42%\"]\n for string in data_strings:\n start_index = string.find(\"= \")\n value = float(string[start_index + 2:-1])\n print(value)", "def parse_query_value(combined_value):\n split = len(combined_value) - len(combined_value.lstrip('<>='))\n operator = combined_value[:split]\n if operator == '':\n operator = '='\n try:\n operator_func = search_operators[operator]\n except KeyError:\n raise ValueError(\n 'Numeric query should start with operator, choose from %s'\n % ', '.join(search_operators.keys()))\n value = combined_value[split:].strip()\n return operator_func, value" ]
[ "0.578843", "0.5683622", "0.5322386", "0.5317316", "0.5244176", "0.5244176", "0.52301955", "0.51247126", "0.5097643", "0.50959706", "0.5091257", "0.5083387", "0.50707597", "0.5040682", "0.5037145", "0.5023796", "0.5014674", "0.50132555", "0.50123906", "0.50123906", "0.49933115", "0.49746397", "0.49722785", "0.49670413", "0.49648923", "0.496185", "0.49558342", "0.49541515", "0.4943424", "0.49375087" ]
0.6191718
0
Compute the transformation matrix from Galactic spherical to Magellanic Stream coordinates.
def galactic_to_MS(): return MS_MATRIX
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MS_to_galactic():\n return matrix_transpose(MS_MATRIX)", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = np.linalg.inv(S)\n #old_coords = np.array([[2, 2, 1], [6, 6, 1]]).T\n #new_coords = np.matmul(S, old_coords)\n #recovered_coords = np.matmul(S_inv, new_coords)\n #print('new coords: ', new_coords)\n #print('recovered coords: ', recovered_coords)\n return S", "def get_transformation_matrix(self):\n\n s = self.sin()\n c = self.cos()\n return np.array(\n [\n c ** 2,\n c * s,\n -(c ** 2),\n -c * s,\n c * s,\n s ** 2,\n -c * s,\n -(s ** 2),\n -(c ** 2),\n -c * s,\n c ** 2,\n c * s,\n -c * s,\n -(s ** 2),\n c * s,\n s ** 2,\n ],\n dtype=np.float64,\n ).reshape(4, 4)", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n \n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n\n return S", "def get_shower_trans_matrix (azimuth,altitude):\n\n cos_z = sin(altitude)\n sin_z = cos(altitude)\n cos_az = cos(azimuth)\n sin_az = sin(azimuth)\n\n trans = np.zeros([3,3])\n trans[0][0] = cos_z*cos_az\n trans[1][0] = sin_az\n trans[2][0] = sin_z*cos_az\n\n trans[0][1] = -cos_z*sin_az\n trans[1][1] = cos_az\n trans[2][1] = -sin_z*sin_az\n\n trans[0][2] = -sin_z\n trans[1][2] = 0.\n trans[2][2] = cos_z\n\n return trans", "def SM2m(sm):\n return sm * 1609.344", "def _calc_average_rotation_matrix(self):\n # unit vectors to be transformed by astropy\n x_c = np.array([1.0, 0, 0])\n y_c = np.array([0, 1.0, 0])\n z_c = np.array([0, 0, 1.0])\n\n if isinstance(self.telescope_location, EarthLocation):\n axes_icrs = SkyCoord(\n x=x_c,\n y=y_c,\n z=z_c,\n obstime=self.time,\n location=self.telescope_location,\n frame=\"icrs\",\n representation_type=\"cartesian\",\n )\n axes_altaz = axes_icrs.transform_to(\"altaz\")\n else:\n axes_icrs = LunarSkyCoord(\n x=x_c,\n y=y_c,\n z=z_c,\n obstime=self.time,\n location=self.telescope_location,\n frame=\"icrs\",\n representation_type=\"cartesian\",\n )\n axes_altaz = axes_icrs.transform_to(\"lunartopo\")\n\n axes_altaz.representation_type = \"cartesian\"\n\n # This transformation matrix is generally not orthogonal to better than 10^-7,\n # so let's fix that.\n\n R_screwy = axes_altaz.cartesian.xyz\n R_really_orthogonal, _ = ortho_procr(R_screwy, np.eye(3))\n\n # Note the transpose, to be consistent with calculation in sct\n R_really_orthogonal = np.array(R_really_orthogonal).T\n\n return R_really_orthogonal", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n\n origin = np.array([location.x, location.y, location.z])\n return matrix, origin", "def generate_GS_matrix(self, matrix):\n GS_matrix = Matrix(QQ, 
matrix.transpose().gram_schmidt()[0]).transpose()\n return GS_matrix", "def get_matrix(self, transform):\r\n\r\n rotation = transform.rotation\r\n location = transform.location\r\n c_y = np.cos(np.radians(rotation.yaw))\r\n s_y = np.sin(np.radians(rotation.yaw))\r\n c_r = np.cos(np.radians(rotation.roll))\r\n s_r = np.sin(np.radians(rotation.roll))\r\n c_p = np.cos(np.radians(rotation.pitch))\r\n s_p = np.sin(np.radians(rotation.pitch))\r\n matrix = np.matrix(np.identity(4))\r\n matrix[0, 3] = location.x\r\n matrix[1, 3] = location.y\r\n matrix[2, 3] = location.z\r\n matrix[0, 0] = c_p * c_y\r\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\r\n matrix[1, 0] = s_y * c_p\r\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\r\n matrix[2, 0] = s_p\r\n matrix[2, 1] = -c_p * s_r\r\n matrix[2, 2] = c_p * c_r\r\n return matrix", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def _get_transformation_matrix(rotation, shear, height_zoom, width_zoom, height_shift, width_shift):\n\n # CONVERT DEGREES TO RADIANS\n rotation = math.pi * rotation / 180.\n shear = math.pi * shear / 180.\n\n # ROTATION MATRIX\n c1 = tf.math.cos(rotation)\n s1 = tf.math.sin(rotation)\n one = tf.constant([1], dtype='float32')\n zero = tf.constant([0], dtype='float32')\n rotation_matrix = tf.reshape(tf.concat([\n c1, s1, zero, -s1, c1, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # SHEAR MATRIX\n c2 = tf.math.cos(shear)\n s2 = tf.math.sin(shear)\n shear_matrix = tf.reshape(tf.concat([\n one, s2, zero, zero, c2, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # ZOOM MATRIX\n zoom_matrix = tf.reshape(tf.concat([\n one/height_zoom, zero, zero, zero, 
one/width_zoom, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # SHIFT MATRIX\n shift_matrix = tf.reshape(tf.concat([\n one, zero, height_shift, zero, one, width_shift, zero, zero, one\n ], axis=0), [3, 3])\n\n return keras.backend.dot(\n keras.backend.dot(rotation_matrix, shear_matrix),\n keras.backend.dot(zoom_matrix, shift_matrix))", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[0, 0]=c\n self.matrix[0, 1]=s\n self.matrix[1, 0]=-s\n self.matrix[1, 1]=c\n return self.matrix", "def get_M(self, theta, phi, gamma, dx, dy, dz):\n w = self.width\n h = self.height\n f = self.focal\n # Projection 2D -> 3D matrix\n A1 = np.array([[1, 0, -w / 2],\n [0, 1, -h / 2],\n [0, 0, 1],\n [0, 0, 1]])\n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n # Composed rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n # Translation matrix\n T = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n # Projection 3D -> 2D matrix\n A2 = np.array([[f, 0, w / 2, 0],\n [0, f, h / 2, 0],\n [0, 0, 1, 0]])\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[2, 2]=c\n self.matrix[2, 0]=s\n self.matrix[0, 2]=-s\n self.matrix[0, 0]=c\n return self.matrix", "def matrix(self):\n\t\treturn Matrix((\n\t\t\t( math.cos(self.val), -math.sin(self.val) ),\n\t\t\t( math.sin(self.val), math.cos(self.val) )\n\t\t))", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[1, 1]=c\n self.matrix[1, 2]=s\n self.matrix[2, 1]=-s\n self.matrix[2, 2]=c\n return self.matrix", "def momentToMatrix(m):\n angle = vectorops.norm(m)\n axis = vectorops.div(m,angle)\n return so3.rotation(axis,angle)", "def get_matrix_list(transform):\n c_y = np.cos(np.radians(transform[5]))\n s_y = np.sin(np.radians(transform[5]))\n c_r = np.cos(np.radians(transform[3]))\n s_r = np.sin(np.radians(transform[3]))\n c_p = np.cos(np.radians(transform[4]))\n s_p = np.sin(np.radians(transform[4]))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = transform[0]\n matrix[1, 3] = transform[1]\n matrix[2, 3] = transform[2]\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n origin = np.array(transform[:3])\n return matrix, origin", "def _init_transformation_matrix(self):\n # Set up basic transformation matrix\n c_transform = np.zeros((self.n_beads, self.n_beads))\n\n # Get auxiliary array with bead indices\n n = np.arange(1, self.n_beads + 1)\n\n # for k = 0\n c_transform[0, :] = 1.0\n\n for k in range(1, self.n_beads // 2 + 1):\n 
c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)\n\n for k in range(self.n_beads // 2 + 1, self.n_beads):\n c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)\n\n if self.n_beads % 2 == 0:\n c_transform[self.n_beads // 2, :] = (-1) ** n\n\n # Since matrix is initialized as C(k,n) does not need to be transposed\n c_transform /= np.sqrt(self.n_beads)\n c_transform = torch.from_numpy(c_transform)\n\n return c_transform", "def transform(o):\n\t\t\tp = o.pos() - self.pos()\n\t\t\tx = cosa * p.x + sina * p.y\n\t\t\ty = -sina * p.x + cosa * p.y\n\t\t\treturn tuple(px_to_mm(x,y))", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def trans_hellinger(m):\n m = asmatrix(m)\n row_sums = sum(m, axis=1)\n result = sqrt(m / row_sums)\n return result", "def matrix(self):\n m = Matrix.identity(4, 4)\n\n m[:3, :3] = self.rotation.matrix.data\n m[:3, 3:4] = self.translation.matrix.data\n\n return m", "def convert_to_slit(m,x,y,nx,ny,gamma=1.0,expand=1.0):\n # sanity\n if len(m) == 0: return []\n if type(m) == ma.core.MaskedArray:\n if m.count() == 0: return []\n # apply gamma factor\n logging.debug(\"Gamma = %f\" % gamma)\n mw = ma.power(m,gamma)\n # first find a rough center\n smx = ma.sum(mw*x)\n smy = ma.sum(mw*y)\n sm = ma.sum(mw)\n xm = smx/sm\n ym = smy/sm\n logging.debug('MOI::center: %f %f' % (xm,ym))\n (xpeak,ypeak) = np.unravel_index(mw.argmax(),mw.shape)\n logging.debug('PEAK: %f %f' % (xpeak,ypeak))\n if True:\n # center on peak\n # @todo but if (xm,ym) and (xpeak,ypeak) differ too much, e.g.\n # outside of the MOI body, something else is wrong\n xm = xpeak\n ym = ypeak\n # take 2nd moments w.r.t. 
this center\n x = x-xm\n y = y-ym\n mxx=m*x*x\n mxy=m*x*y\n myy=m*y*y\n #\n smxx=ma.sum(mxx)/sm\n smxy=ma.sum(mxy)/sm\n smyy=ma.sum(myy)/sm\n # MOI2\n moi = np.array([smxx,smxy,smxy,smyy]).reshape(2,2)\n w,v = la.eig(moi)\n a = math.sqrt(w[0])\n b = math.sqrt(w[1])\n phi = -math.atan2(v[0][1],v[0][0])\n if a < b: \n phi = phi + 0.5*np.pi\n logging.debug('MOI::a,b,phi(deg): %g %g %g' % (a,b,phi*180.0/np.pi))\n # ds9.reg format (image coords)\n sinp = np.sin(phi)\n cosp = np.cos(phi)\n # compute the line take both a and b into account,\n # since we don't even know or care which is the bigger one\n r = np.sqrt(a*a+b*b)\n x0 = xm - expand*r*cosp \n y0 = ym - expand*r*sinp \n x1 = xm + expand*r*cosp \n y1 = ym + expand*r*sinp \n # add 1 for ds9, which used 1 based pixels\n logging.debug(\"ds9 short line(%g,%g,%g,%g)\" % (x0+1,y0+1,x1+1,y1+1))\n if nx > 0:\n s = expand_line(x0,y0,x1,y1,nx,ny)\n logging.debug(\"ds9 full line(%g,%g,%g,%g)\" % (s[0],s[1],s[2],s[3]))\n return [float(s[0]),float(s[1]),float(s[2]),float(s[3])]\n else:\n return [float(x0),float(y0),float(x1),float(y1)]", "def sfm(points):\n # Construct the required W/Rh/Sh matrices.\n\t\n # Get ih/jh from Rh and use them to find Q.\n\n # Use Q, Rh, and Sh to get R and S.\n\n # Extract the F 2x3 rotation matrices from R and form an (F,2,3) array of\n # rotation matrices.\n\n # Build an orthonormal matrix that rotates the first R matrix into an\n # identity matrix.\n\n # Apply the computed rotation matrix to the rotation matrices and the\n # points in S.\n\n # Return the R matrices and an ** Nx3 ** matrix containing the\n # reconstructed 3D points (note that S is 3xN).\n return None", "def getMatrixFromSkeletonToGeometry(self):\n return _osgAnimation.RigGeometry_getMatrixFromSkeletonToGeometry(self)" ]
[ "0.6793291", "0.6397168", "0.6347205", "0.6225888", "0.6009966", "0.5733559", "0.5673262", "0.56630766", "0.56511235", "0.5618374", "0.5604634", "0.5580951", "0.5580951", "0.5563582", "0.55473304", "0.5542603", "0.55239236", "0.547165", "0.546832", "0.5466667", "0.54391974", "0.5428619", "0.5406545", "0.535259", "0.5340377", "0.5335498", "0.53167963", "0.5306502", "0.5302164", "0.5292859" ]
0.6850105
0
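For context, `galactic_to_MS` above returns a precomputed 3x3 rotation (`MS_MATRIX`). Frames of this kind are typically built by composing Euler-angle rotations; the sketch below is illustrative only — the angles are placeholders, not the published Magellanic Stream pole — and uses astropy's matrix utilities, including the same `matrix_transpose` helper the snippets here rely on.

import numpy as np
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_transpose

# Placeholder Euler angles in degrees -- illustrative values only.
phi, theta, psi = 8.5, 90.0, 180.0

# Compose z-x-z rotations into a single Galactic -> MS rotation matrix.
MS_MATRIX = (
    rotation_matrix(psi, "z")
    @ rotation_matrix(theta, "x")
    @ rotation_matrix(phi, "z")
)

def galactic_to_MS():
    return MS_MATRIX

# Orthogonality check: composing with the transpose recovers the identity.
assert np.allclose(MS_MATRIX @ matrix_transpose(MS_MATRIX), np.eye(3))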
Compute the transformation matrix from Magellanic Stream coordinates to spherical Galactic.
def MS_to_galactic(): return matrix_transpose(MS_MATRIX)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def galactic_to_MS():\n return MS_MATRIX", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = np.linalg.inv(S)\n #old_coords = np.array([[2, 2, 1], [6, 6, 1]]).T\n #new_coords = np.matmul(S, old_coords)\n #recovered_coords = np.matmul(S_inv, new_coords)\n #print('new coords: ', new_coords)\n #print('recovered coords: ', recovered_coords)\n return S", "def get_transformation_matrix(self):\n\n s = self.sin()\n c = self.cos()\n return np.array(\n [\n c ** 2,\n c * s,\n -(c ** 2),\n -c * s,\n c * s,\n s ** 2,\n -c * s,\n -(s ** 2),\n -(c ** 2),\n -c * s,\n c ** 2,\n c * s,\n -c * s,\n -(s ** 2),\n c * s,\n s ** 2,\n ],\n dtype=np.float64,\n ).reshape(4, 4)", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n \n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n\n return S", "def get_shower_trans_matrix (azimuth,altitude):\n\n cos_z = sin(altitude)\n sin_z = cos(altitude)\n cos_az = cos(azimuth)\n sin_az = sin(azimuth)\n\n trans = np.zeros([3,3])\n trans[0][0] = cos_z*cos_az\n trans[1][0] = sin_az\n trans[2][0] = sin_z*cos_az\n\n trans[0][1] = -cos_z*sin_az\n trans[1][1] = cos_az\n trans[2][1] = -sin_z*sin_az\n\n trans[0][2] = -sin_z\n trans[1][2] = 0.\n trans[2][2] = cos_z\n\n return trans", "def generate_GS_matrix(self, matrix):\n GS_matrix = Matrix(QQ, matrix.transpose().gram_schmidt()[0]).transpose()\n return GS_matrix", "def _calc_average_rotation_matrix(self):\n # unit vectors to be transformed by astropy\n x_c = np.array([1.0, 0, 0])\n y_c = np.array([0, 1.0, 0])\n z_c = np.array([0, 0, 1.0])\n\n if isinstance(self.telescope_location, EarthLocation):\n axes_icrs = SkyCoord(\n x=x_c,\n y=y_c,\n z=z_c,\n obstime=self.time,\n location=self.telescope_location,\n frame=\"icrs\",\n representation_type=\"cartesian\",\n )\n axes_altaz = axes_icrs.transform_to(\"altaz\")\n else:\n axes_icrs = LunarSkyCoord(\n x=x_c,\n y=y_c,\n z=z_c,\n obstime=self.time,\n location=self.telescope_location,\n frame=\"icrs\",\n representation_type=\"cartesian\",\n )\n axes_altaz = axes_icrs.transform_to(\"lunartopo\")\n\n axes_altaz.representation_type = \"cartesian\"\n\n # This transformation matrix is generally not orthogonal to better than 10^-7,\n # so let's fix that.\n\n R_screwy = axes_altaz.cartesian.xyz\n R_really_orthogonal, _ = ortho_procr(R_screwy, np.eye(3))\n\n # Note the transpose, to be consistent with calculation in sct\n R_really_orthogonal = np.array(R_really_orthogonal).T\n\n return R_really_orthogonal", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[0, 0]=c\n self.matrix[0, 1]=s\n self.matrix[1, 0]=-s\n self.matrix[1, 1]=c\n return self.matrix", "def get_M(self, theta, phi, gamma, dx, dy, dz):\n w = self.width\n h = self.height\n f = self.focal\n # Projection 2D -> 3D matrix\n A1 = np.array([[1, 0, -w / 2],\n [0, 1, -h / 2],\n [0, 0, 1],\n [0, 0, 1]])\n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n # Composed 
rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n # Translation matrix\n T = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n # Projection 3D -> 2D matrix\n A2 = np.array([[f, 0, w / 2, 0],\n [0, f, h / 2, 0],\n [0, 0, 1, 0]])\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))", "def get_matrix(self, transform):\r\n\r\n rotation = transform.rotation\r\n location = transform.location\r\n c_y = np.cos(np.radians(rotation.yaw))\r\n s_y = np.sin(np.radians(rotation.yaw))\r\n c_r = np.cos(np.radians(rotation.roll))\r\n s_r = np.sin(np.radians(rotation.roll))\r\n c_p = np.cos(np.radians(rotation.pitch))\r\n s_p = np.sin(np.radians(rotation.pitch))\r\n matrix = np.matrix(np.identity(4))\r\n matrix[0, 3] = location.x\r\n matrix[1, 3] = location.y\r\n matrix[2, 3] = location.z\r\n matrix[0, 0] = c_p * c_y\r\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\r\n matrix[1, 0] = s_y * c_p\r\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\r\n matrix[2, 0] = s_p\r\n matrix[2, 1] = -c_p * s_r\r\n matrix[2, 2] = c_p * c_r\r\n return matrix", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n\n origin = np.array([location.x, location.y, location.z])\n return matrix, origin", "def SM2m(sm):\n return sm * 1609.344", "def matrix(self):\n\t\treturn Matrix((\n\t\t\t( math.cos(self.val), -math.sin(self.val) ),\n\t\t\t( math.sin(self.val), math.cos(self.val) )\n\t\t))", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r 
= np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def scalar_g2r(al, be, ga, lon, lat):\n\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n \n #rotate_matrix = np.linalg.pinv(rotate_matrix)\n \n lat = lat * rad\n lon = lon * rad\n\n # geographical Cartesian coordinates:\n xr = np.cos(lat) * np.cos(lon)\n yr = np.cos(lat) * np.sin(lon)\n zr = np.sin(lat)\n\n # rotated Cartesian coordinates:\n xg = rotate_matrix[0, 0] * xr + rotate_matrix[0, 1] * yr + rotate_matrix[0, 2] * zr\n yg = rotate_matrix[1, 0] * xr + rotate_matrix[1, 1] * yr + rotate_matrix[1, 2] * zr\n zg = rotate_matrix[2, 0] * xr + rotate_matrix[2, 1] * yr + rotate_matrix[2, 2] * zr\n\n # rotated coordinates:\n rlat = np.arcsin(zg)\n rlon = np.arctan2(yg, xg)\n\n a = np.where((np.abs(xg) + np.abs(yg)) == 0)\n if a:\n lon[a] = 0\n\n rlat = rlat / rad\n rlon = rlon / rad\n\n return (rlon, rlat)", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[2, 2]=c\n self.matrix[2, 0]=s\n self.matrix[0, 2]=-s\n self.matrix[0, 0]=c\n return self.matrix", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[1, 1]=c\n self.matrix[1, 2]=s\n self.matrix[2, 1]=-s\n self.matrix[2, 2]=c\n return self.matrix", "def sfm(points):\n # Construct the required W/Rh/Sh matrices.\n\t\n # Get ih/jh from Rh and use them to find Q.\n\n # Use Q, Rh, and Sh to get R and S.\n\n # Extract the F 2x3 rotation matrices from R and form an (F,2,3) array of\n # rotation matrices.\n\n # Build an orthonormal matrix that rotates the first R matrix into an\n # identity matrix.\n\n # Apply the computed rotation matrix to the rotation matrices and the\n # points in S.\n\n # Return the R matrices and an ** Nx3 ** matrix containing the\n # reconstructed 3D points (note that S is 3xN).\n return None", "def CartesianToGalactic(Cartesian,SolarPosition): \n\t \n # x,y,z->l,b,s\n tmp1 = SolarPosition[0]-Cartesian[:,0]\n tmp2 = Cartesian[:,1]\n tmp3 = Cartesian[:,2]-SolarPosition[1]\n s = np.sqrt(tmp1*tmp1+tmp2*tmp2+tmp3*tmp3)\n l = np.arctan2(tmp2,tmp1)\n b = np.arcsin(tmp3/s)\n l[l<0.] 
+= 2.*np.pi; \n\n if(len(Cartesian[0,:])==3):\n Galactic = np.column_stack((l,b,s))\n else:\n \t # vx,vy,vz -> vlos,mu_lcos(b),mu_b\n vx = -Cartesian[:,3]-SolarPosition[2]\n vy = Cartesian[:,4]-SolarPosition[3]\n vz = Cartesian[:,5]-SolarPosition[4]\n cl = np.cos(l)\n sl = np.sin(l)\n cb = np.cos(b)\n sb = np.sin(b)\n vlos = vx*cl*cb+vy*sl*cb+vz*sb;\n mul = (-vx*sl+vy*cl)/(pm2vel*s)\n mub = (-vx*cl*sb-vy*sl*sb+vz*cb)/(pm2vel*s)\n Galactic = np.column_stack((l,b,s,vlos,mul,mub))\n \n return Galactic", "def _get_transformation_matrix(rotation, shear, height_zoom, width_zoom, height_shift, width_shift):\n\n # CONVERT DEGREES TO RADIANS\n rotation = math.pi * rotation / 180.\n shear = math.pi * shear / 180.\n\n # ROTATION MATRIX\n c1 = tf.math.cos(rotation)\n s1 = tf.math.sin(rotation)\n one = tf.constant([1], dtype='float32')\n zero = tf.constant([0], dtype='float32')\n rotation_matrix = tf.reshape(tf.concat([\n c1, s1, zero, -s1, c1, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # SHEAR MATRIX\n c2 = tf.math.cos(shear)\n s2 = tf.math.sin(shear)\n shear_matrix = tf.reshape(tf.concat([\n one, s2, zero, zero, c2, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # ZOOM MATRIX\n zoom_matrix = tf.reshape(tf.concat([\n one/height_zoom, zero, zero, zero, one/width_zoom, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # SHIFT MATRIX\n shift_matrix = tf.reshape(tf.concat([\n one, zero, height_shift, zero, one, width_shift, zero, zero, one\n ], axis=0), [3, 3])\n\n return keras.backend.dot(\n keras.backend.dot(rotation_matrix, shear_matrix),\n keras.backend.dot(zoom_matrix, shift_matrix))", "def compute_matrix(phi): # pylint: disable=arguments-differ\n c = qml.math.cos(phi / 2)\n s = qml.math.sin(phi / 2)\n\n if qml.math.get_interface(phi) == \"tensorflow\":\n c = qml.math.cast_like(c, 1j)\n s = qml.math.cast_like(s, 1j)\n\n js = 1j * s\n off_diag = qml.math.cast_like(\n qml.math.array(\n [\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n like=js,\n ),\n 1j,\n )\n if qml.math.ndim(phi) == 0:\n return qml.math.diag([1, c, c, 1]) + js * off_diag\n\n ones = qml.math.ones_like(c)\n diags = stack_last([ones, c, c, ones])[:, :, np.newaxis]\n return diags * np.eye(4) + qml.math.tensordot(js, off_diag, axes=0)", "def spherical2cylindrical(sph):\n cyl = np.zeros(sph.shape)\n cyl[:, 0] = sph[:, 0] * np.sin(sph[:, 2])\n cyl[:, 1] = sph[:, 1]\n cyl[:, 2] = sph[:, 0] * np.cos(sph[:, 2])\n return cyl", "def rotated_coord_transform(lon, lat, np_lon, np_lat,\n direction='rot2geo'):\n\n # Convert degrees to radians\n lon = (lon * math.pi) / 180.\n lat = (lat * math.pi) / 180.\n\n# SP_lon = SP_coor(1)\n# SP_lat = SP_coor(2)\n\n theta = 90. - np_lat # Rotation around y-axis\n phi = np_lon + 180. 
# Rotation around z-axis\n\n # Convert degrees to radians\n phi = (phi * math.pi) / 180.\n theta = (theta * math.pi) / 180.\n\n # Convert from spherical to cartesian coordinates\n x = math.cos(lon) * math.cos(lat)\n y = math.sin(lon) * math.cos(lat)\n z = math.sin(lat)\n\n # Regular -> Rotated\n if direction == 'geo2rot':\n\n x_new = (math.cos(theta) * math.cos(phi) * x +\n math.cos(theta) * math.sin(phi) * y +\n math.sin(theta) * z)\n y_new = (- math.sin(phi) * x +\n math.cos(phi) * y)\n z_new = (- math.sin(theta) * math.cos(phi) * x -\n math.sin(theta) * math.sin(phi) * y +\n math.cos(theta) * z)\n\n # Rotated -> Regular\n elif direction == 'rot2geo':\n \n phi = - phi\n theta = - theta\n \n x_new = (math.cos(theta) * math.cos(phi) * x +\n math.sin(phi) * y +\n math.sin(theta) * math.cos(phi) * z)\n y_new = (- math.cos(theta) * math.sin(phi) * x +\n math.cos(phi) * y -\n math.sin(theta) * math.sin(phi) * z)\n z_new = (- math.sin(theta) * x +\n math.cos(theta) * z)\n\n # Convert cartesian back to spherical coordinates\n lon_new = math.atan2(y_new, x_new)\n lat_new = math.asin(z_new)\n\n # Convert radians back to degrees\n lon_new = (lon_new * 180.) / math.pi\n lat_new = (lat_new * 180.) / math.pi;\n\n return (lon_new, lat_new)", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def trans_hellinger(m):\n m = asmatrix(m)\n row_sums = sum(m, axis=1)\n result = sqrt(m / row_sums)\n return result", "def spheric2cart(theta, phi):\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z 
= np.cos(theta)\n return x, y, z", "def calculate_mass_matrix(self, m, jsa):\n\n rho_i = self.r_i.local_coordinates - self.r_g_loc\n tr_rho_i = rho_i.transpose()\n x = np.array([self.r_j.local_coordinates - self.r_i.local_coordinates,\n self.u.local_coordinates, self.v.local_coordinates])\n a = np.matmul(inv(x), self.r_g_loc - self.r_i.local_coordinates)\n j_i = jsa + m * np.outer(rho_i, tr_rho_i)\n z = inv(x).dot(j_i).dot(inv(x.transpose()))\n\n self.mass_matrix = mass_matrix_assembly(m, z, a)" ]
[ "0.6758652", "0.6351473", "0.6289319", "0.6249192", "0.5900926", "0.58043545", "0.5739055", "0.55864763", "0.558487", "0.55819327", "0.55792", "0.55535346", "0.5547185", "0.5533851", "0.55322266", "0.55322266", "0.55189437", "0.551152", "0.5510809", "0.54767853", "0.5444235", "0.54396665", "0.54292464", "0.5412831", "0.54121965", "0.5396991", "0.5385427", "0.5383213", "0.5370137", "0.5367218" ]
0.67547804
1
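`MS_to_galactic` above is the inverse direction, implemented as a plain transpose. A short self-contained round-trip check shows why that is sufficient; a random orthogonal matrix stands in for the real `MS_MATRIX`, and the test direction is arbitrary.

import numpy as np

# Stand-in for the real MS_MATRIX: any orthogonal 3x3 matrix behaves the same here.
MS_MATRIX, _ = np.linalg.qr(np.random.default_rng(42).normal(size=(3, 3)))

def galactic_to_MS():
    return MS_MATRIX

def MS_to_galactic():
    return MS_MATRIX.T  # transpose == inverse for an orthogonal matrix

# Galactic (l, b) -> unit vector, rotate into MS coordinates, rotate back.
l, b = np.radians(280.5), np.radians(-32.9)
v_gal = np.array([np.cos(b) * np.cos(l), np.cos(b) * np.sin(l), np.sin(b)])
v_ms = galactic_to_MS() @ v_gal
assert np.allclose(MS_to_galactic() @ v_ms, v_gal)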
Add the ``request.raven`` method and configure the `ravenjs` panel.
def includeme(config, get_raven=None, panel=None): # Compose. if get_raven is None: #pragma: no cover get_raven = get_raven_client if panel is None: #pragma: no cover panel = raven_js_panel # Unpack. settings = config.registry.settings # Provide the client as ``request.raven``. config.add_request_method(get_raven, 'raven', reify=True) # Configure the ``raven-js`` panel. if hasattr(config, "add_panel"): # Soft detect if we have pyramid_layout installed default_tmpl = 'pyramid_raven:templates/panel.mako' panel_tmpl = settings.get('pyramid_raven.panel_tmpl', default_tmpl) config.add_panel(panel, 'raven-js', renderer=panel_tmpl)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def includeme(config):\n config.add_subscriber(add_renderer_globals, BeforeRender)\n config.add_subscriber(add_localizer, NewRequest)\n config.add_subscriber(add_csrf_validation, NewRequest)\n config.add_subscriber(add_resources, NewRequest)", "def enable(self):\n LOGGER.info('Enabling WebAPI plugin WEB ...')", "def _add_sentry(app, level=logging.NOTSET):\n Sentry(app, logging=True, level=level)", "def includeme(config):\n\n document_path = config.registry.settings['{}.document'.format(MODULE_NAME)]\n\n definition = api.Api(document_path)\n config.registry.registerUtility(definition, api.IApi)\n\n config.add_directive('set_media_renderer', api.set_media_renderer)\n config.add_directive('add_deserializer', api.add_deserializer)\n config.add_directive('set_media_deserializer', api.set_media_deserializer)\n\n config.add_view(\n views.exception_view,\n context=Exception,\n renderer='json',\n )\n config.add_view(\n views.http_exception_view,\n context=pyramid.httpexceptions.HTTPException,\n renderer='json',\n )\n\n return None", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def includeme(config):\r\n config.add_translation_dirs('faapp:locale', )\r\n config.add_subscriber('faapp.locale.add_renderer_globals', 'pyramid.events.BeforeRender')\r\n config.add_subscriber('faapp.locale.add_localizer', 'pyramid.events.NewRequest')", "def includeme(config):\n add_view(config)", "def setup(hass, config):\n hass.http.register_view(APIAIWebhookView)\n return True", "def includeme(config):\n config.add_subscriber(on_GET_request_setup_csrf_cookie, NewResponse)\n config.set_default_csrf_options(require_csrf=True, header=HEADER_NAME)", "def add(self, middleware):\n pass # pragma: no cover", "def includeme(config):\n\n config.add_translation_dirs('kotti_dashboard:locale')\n config.add_static_view('static-kotti_dashboard', 'kotti_dashboard:static')\n\n config.scan(__name__)", "def add_renderer_globals(event):\n request = event.get('request')\n # add globals for i18n\n event['_'] = request.translate\n event['localizer'] = request.localizer\n # add application globals from the config file\n settings = request.registry.settings\n event['brand_name'] = settings['anuket.brand_name']", "def add_routes(self):\n pass", "def includeme(config: pyramid.config.Configurator) -> None:\n if auth.is_enabled(config, ENV_KEY, CONFIG_KEY):\n config.add_route(\n \"c2c_db_maintenance\",\n config_utils.get_base_path(config) + r\"/db/maintenance\",\n request_method=\"GET\",\n )\n config.add_view(_db_maintenance, route_name=\"c2c_db_maintenance\", renderer=\"fast_json\", http_cache=0)\n _restore(config)\n LOG.info(\"Enabled the /db/maintenance API\")", "def register_to_event(request):\n pass", "def add_view( *args, **kwargs ):", "def _register_routes(self):\n dashboard = self\n\n @dashboard.app.after_request\n def prevent_caching(response):\n if 'Cache-Control' not in response.headers:\n response.headers['Cache-Control'] = 'no-store'\n return response\n\n @dashboard.app.context_processor\n def injections():\n session.setdefault('enabled_modules',\n [i for i in range(len(self.modules))\n if self.modules[i].enabled])\n return {\n 'APP_NAME': 'signac-dashboard',\n 'APP_VERSION': __version__,\n 'PROJECT_NAME': self.project.config['project'],\n 'PROJECT_DIR': self.project.config['project_dir'],\n 'modules': self.modules,\n 'enabled_modules': session['enabled_modules'],\n 'module_assets': self._module_assets\n 
}\n\n # Add pagination support from http://flask.pocoo.org/snippets/44/\n @dashboard.app.template_global()\n def url_for_other_page(page):\n args = request.args.copy()\n args['page'] = page\n return url_for(request.endpoint, **args)\n\n @dashboard.app.template_global()\n def modify_query(**new_values):\n args = request.args.copy()\n for key, value in new_values.items():\n args[key] = value\n return '{}?{}'.format(request.path, url_encode(args))\n\n @dashboard.app.errorhandler(404)\n def page_not_found(error):\n return self._render_error(str(error))\n\n self.add_url('views.home', ['/'])\n self.add_url('views.settings', ['/settings'])\n self.add_url('views.search', ['/search'])\n self.add_url('views.jobs_list', ['/jobs/'])\n self.add_url('views.show_job', ['/jobs/<jobid>'])\n self.add_url('views.get_file', ['/jobs/<jobid>/file/<path:filename>'])\n self.add_url('views.change_modules', ['/modules'], methods=['POST'])", "def init(config: pyramid.config.Configurator) -> None:\n broadcast.subscribe(\"c2c_sql_profiler\", _setup_profiler)\n\n config.add_route(\n \"c2c_sql_profiler\", config_utils.get_base_path(config) + r\"/sql_profiler\", request_method=\"GET\"\n )\n config.add_view(_sql_profiler_view, route_name=\"c2c_sql_profiler\", renderer=\"fast_json\", http_cache=0)\n LOG.info(\"Enabled the /sql_profiler API\")", "def __init__(self):\n self.websock_handlers = {}\n self.ajax_handlers = {'__dashboard__': self.get_dashboard_ui}\n self.dashboard_handlers = {}", "def configure_routing(config):\n # Static file access. Separate root for each subdirectory, because Pyramid\n # treats these as first-class routables rather than a last-ditch fallback\n config.add_static_view('/css', 'floof:assets/css')\n config.add_static_view('/files', 'floof:assets/files') # dummy file store\n config.add_static_view('/icons', 'floof:assets/icons')\n config.add_static_view('/images', 'floof:assets/images')\n config.add_static_view('/js', 'floof:assets/js')\n # TODO this doesn't actually work\n config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico')\n\n\n r = config.add_route\n\n # Miscellaneous root stuff\n r('root', '/')\n r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator)\n r('reproxy', '/reproxy')\n r('log', '/log')\n\n # Registration and auth\n r('account.login', '/account/login')\n r('account.login_begin', '/account/login_begin')\n r('account.login_finish', '/account/login_finish')\n r('account.register', '/account/register')\n r('account.add_identity', '/account/add_identity')\n r('account.persona.login', '/account/persona/login')\n r('account.logout', '/account/logout')\n\n r('account.profile', '/account/profile')\n\n # Regular user control panel\n r('controls.index', '/account/controls')\n r('controls.auth', '/account/controls/authentication')\n r('controls.persona', '/account/controls/persona')\n r('controls.persona.add', '/account/controls/persona/add')\n r('controls.persona.remove', '/account/controls/persona/remove')\n r('controls.openid', '/account/controls/openid')\n r('controls.openid.add', '/account/controls/openid/add')\n r('controls.openid.add_finish', '/account/controls/openid/add_finish')\n r('controls.openid.remove', '/account/controls/openid/remove')\n r('controls.rels', '/account/controls/relationships')\n r('controls.rels.watch', '/account/controls/relationships/watch')\n r('controls.rels.unwatch', '/account/controls/relationships/unwatch')\n r('controls.info', '/account/controls/user_info')\n\n r('controls.certs', '/account/controls/certificates')\n 
r('controls.certs.add', '/account/controls/certificates/add')\n r('controls.certs.generate_server',\n '/account/controls/certificates/gen/cert-{name}.p12')\n r('controls.certs.details',\n '/account/controls/certificates/details/{serial:[0-9a-f]+}')\n r('controls.certs.download',\n '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem')\n r('controls.certs.revoke',\n '/account/controls/certificates/revoke/{serial:[0-9a-f]+}')\n\n # User pages\n kw = sqla_route_options('user', 'name', model.User.name)\n r('users.view', '/users/{name}', **kw)\n r('users.art', '/users/{name}/art', **kw)\n r('users.art_by_album', '/users/{name}/art/{album}', **kw)\n r('users.profile', '/users/{name}/profile', **kw)\n r('users.watchstream', '/users/{name}/watchstream', **kw)\n r('albums.user_index', '/users/{name}/albums', **kw)\n\n r('api:users.list', '/users.json')\n\n # Artwork\n kw = sqla_route_options('artwork', 'id', model.Artwork.id)\n kw['pregenerator'] = artwork_pregenerator\n r('art.browse', '/art')\n r('art.upload', '/art/upload')\n r('art.view', r'/art/{id:\\d+}{title:(-.+)?}', **kw)\n r('art.add_tags', r'/art/{id:\\d+}/add_tags', **kw)\n r('art.remove_tags', r'/art/{id:\\d+}/remove_tags', **kw)\n r('art.rate', r'/art/{id:\\d+}/rate', **kw)\n\n # Tags\n # XXX what should the tag name regex be, if anything?\n # XXX should the regex be checked in the 'factory' instead? way easier that way...\n kw = sqla_route_options('tag', 'name', model.Tag.name)\n r('tags.list', '/tags')\n r('tags.view', '/tags/{name}', **kw)\n r('tags.artwork', '/tags/{name}/artwork', **kw)\n\n # Albums\n # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has\n user_router = SugarRouter(config, '/users/{user}', model.User.name)\n album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user)\n album_router.add_route('albums.artwork', '')\n\n # Administration\n r('admin.dashboard', '/admin')\n r('admin.log', '/admin/log')\n\n # Debugging\n r('debug.blank', '/debug/blank')\n r('debug.crash', '/debug/crash')\n r('debug.mako-crash', '/debug/mako-crash')\n r('debug.status.303', '/debug/303')\n r('debug.status.400', '/debug/400')\n r('debug.status.403', '/debug/403')\n r('debug.status.404', '/debug/404')\n\n # Comments; made complex because they can attach to different parent URLs.\n # Rather than hack around how Pyramid's routes works, we can just use our\n # own class that does what we want!\n\n # XXX 1: make this work for users as well\n # XXX 2: make the other routes work\n # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes\n parent_route_names = ('art.view', 'user.view')\n mapper = config.get_routes_mapper()\n parent_routes = [mapper.get_route(name) for name in parent_route_names]\n commentables = dict(\n users=model.User.name,\n art=model.Artwork.id,\n )\n\n def comments_factory(request):\n # XXX prefetching on these?\n type = request.matchdict['type']\n identifier = request.matchdict['identifier']\n\n try:\n sqla_column = commentables[type]\n entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one()\n except (NoResultFound, KeyError):\n # 404!\n raise NotFound()\n\n if 'comment_id' not in request.matchdict:\n return contextualize(entity.discussion)\n\n # URLs to specific comments should have those comments as the context\n try:\n return contextualize(\n model.session .query(model.Comment)\n .with_parent(entity.discussion)\n 
.filter(model.Comment.id == request.matchdict['comment_id'])\n .one())\n except NoResultFound:\n raise NotFound()\n\n\n def comments_pregenerator(request, elements, kw):\n resource = None\n comment = kw.get('comment', None)\n\n if comment:\n kw['comment_id'] = comment.id\n\n if 'resource' not in kw:\n resource = comment.discussion.resource\n\n if not resource:\n resource = kw['resource']\n\n # XXX users...\n entity = resource.member\n kw['type'] = 'art'\n kw['identifier'] = entity.id\n return elements, kw\n\n r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory)\n r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator)", "def includeme(config):\n config.add_route('home', '/')\n config.add_route('detail', '/detail/{id:\\d+}')\n config.add_route('update', '/edit/{id:\\d+}')\n config.add_route('create', '/create')", "def configure(app):\n api.add_resource(Event, '/event/')\n api.add_resource(EventItem, '/event/<event_id>')\n app.register_blueprint(bp_restapi)", "def add_handlers(web_app, config):\n base_url = web_app.settings['base_url']\n url = ujoin(base_url, config.page_url)\n assets_dir = config.assets_dir\n\n package_file = os.path.join(assets_dir, 'package.json')\n with open(package_file) as fid:\n data = json.load(fid)\n\n config.version = (config.version or data['jupyterlab']['version'] or\n data['version'])\n config.name = config.name or data['jupyterlab']['name']\n\n handlers = [\n (url + r'/?', LabHandler, {\n 'lab_config': config\n }),\n (url + r\"/(.*)\", FileFindHandler, {\n 'path': assets_dir\n }),\n\n ]\n\n # Backward compatibility.\n if 'publicPath' in data['jupyterlab']:\n handlers.append(\n (data['jupyterlab']['publicPath'] + r\"/(.*)\", FileFindHandler, {\n 'path': assets_dir\n })\n )\n\n web_app.add_handlers(\".*$\", handlers)", "def register(self, wsgi_app):\n wsgi_app.add_url_rule(\n rule=self.path,\n view_func=self.controller,\n methods=self.methods)", "def main(**settings):\n # Pyramid requires an authorization policy to be active.\n # Enable JWT authentication.\n all_routes = []\n for route in routes:\n if route not in all_routes:\n all_routes.append(route)\n config.add_route(*route)\n print route\n else:\n print \"Found conflicting routes, ignoring \"\n print route\n config.scan('app.base.api.main')\n return CORS(config.make_wsgi_app(), headers=\"*\", methods=\"*\", origin=\"*\")", "def configure_app(self):\n self.app.route('/', callback=self.get_api)", "def register_module():\n\n namespaced_handlers = [(ForceResponseHandler.URL, ForceResponseHandler)]\n return custom_modules.Module(\n 'FakeVisualizations', 'Provide visualizations requiring simple, '\n 'paginated, and multiple data streams for testing.',\n [], namespaced_handlers, register_on_enable, None)", "def registerWithSitemap(self):\n\n self.core.requireUniqueService('registerWithSitemap')\n\n #from soc.modules.seeder.views import seeder\n #self.core.registerSitemapEntry(seeder.view.getDjangoURLPatterns())", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_chameleon')\n\n 
config.include('velruse.providers.google_oauth2')\n config.add_google_oauth2_login_from_settings()\n config.add_subscriber(before_render, BeforeRender)\n\n my_session_factory = session_factory_from_settings(settings)\n config.set_session_factory(my_session_factory)\n\n authentication_policy = AuthTktAuthenticationPolicy('seekrit',\n callback=None, hashalg='sha512')\n authorization_policy = ACLAuthorizationPolicy()\n\n config.set_authentication_policy(authentication_policy)\n config.set_authorization_policy(authorization_policy)\n\n mongo = MongoClient(settings['db_uri'])\n db = mongo[settings['db_name']]\n config.registry.db_mongo = db\n config.registry.admin_list = settings['admin'].split(',')\n config.registry.upload_path = settings['upload_path']\n config.registry.news_path = settings['news_path']\n config.registry.admin_path = settings['admin_path']\n config.registry.public_path = settings['public_path']\n config.registry.dataset_path = settings['dataset_path']\n config.registry.script_path = settings['script_path']\n config.registry.download_path = settings['download_path']\n config.registry.studies_path = settings['studies_path']\n config.registry.jbrowse_path = settings['jbrowse_path']\n config.registry.base_url = settings['base_url']\n\n\n # by default we don't sniff, ever\n #config.registry.es = Elasticsearch( [settings['elastic_host']])\n #config.registry.es_db = settings['elastic_db']\n #config.registry.es.indices.create(index=settings['elastic_db'], ignore=400)\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_static_view('app', 'rgv:webapp/app')\n config.add_route('autocomplete', '/autocomplete')\n config.add_route('checkgene', '/checkgene')\n config.add_route('browser_stat','/browser_stat')\n config.add_route('d_getter', '/data_frame')\n config.add_route('file_dataset','/dataset_download/{dir}/{file}')\n config.add_route('genelevel', '/genelevel')\n config.add_route('home', '/')\n config.add_route('hmtData', '/hmtData')\n config.add_route('login', '/user/login')\n config.add_route('logged', '/user/logged')\n config.add_route('newsfeed', '/newsfeed')\n config.add_route('scData', '/scData')\n config.add_route('scDataGenes', '/scDataGenes')\n config.add_route('studyfeed', '/studyfeed')\n config.add_route('user', '/user')\n config.add_route('user_register', '/user/register')\n config.add_route('user_recover', '/user/recover')\n config.add_route('user_confirm_recover', '/user/confirm_recover')\n config.add_route('user_confirm_email', '/user/confirm_email')\n config.add_route('user_validate', '/user/validate')\n config.add_route('user_delete', '/user/delete')\n config.add_route('user_info', '/user/{id}')\n config.add_route('read_file','/browser_genelevel_init')\n config.add_route('search', '/search')\n\n\n config.scan()\n\n # automatically serialize bson ObjectId and datetime to Mongo extended JSON\n json_renderer = JSON()\n def pymongo_adapter(obj, request):\n return json_util.default(obj)\n json_renderer.add_adapter(ObjectId, pymongo_adapter)\n json_renderer.add_adapter(datetime.datetime, pymongo_adapter)\n\n config.add_renderer('json', json_renderer)\n\n return config.make_wsgi_app()", "def add_renderer_globals(event):\n def fake_url(controller=None, action=None, **kwargs):\n if action == \"css\":\n return \"/css\"\n if action and controller:\n path = {}\n for key in 'name', 'pocket', 'subpath':\n if key in kwargs:\n path[key] = kwargs.pop(key)\n path['_query'] = dict((k,v) for k,v in kwargs.items() if v is not None)\n return 
request.route_path(controller+\"/\"+action, **path)\n if controller and controller.startswith(\"/\"):\n return controller\n return \"/unknown\"\n\n def fake_url_current(**kwargs):\n path = {}\n # XXX request.matchdict?\n if 'name' in kwargs:\n path['name'] = kwargs.pop('name')\n if 'action' in kwargs:\n path['_route_name'] = 'dex/'+kwargs.pop('action')\n path['_query'] = dict((k,v) for k,v in kwargs.items() if v is not None)\n return request.current_route_path(**path)\n\n def fake_translate(message, plural=None, n=None, context=None, comment=None):\n return unicode(message)\n\n renderer_globals = event\n request = event.get(\"request\") #or threadlocal.get_current_request()\n if not request:\n return\n config = request.registry.settings\n renderer_globals[\"config\"] = config\n renderer_globals[\"h\"] = splinehelpers\n renderer_globals[\"r\"] = request\n renderer_globals[\"c\"] = request.tmpl_context\n #renderer_globals[\"url\"] = request.url_generator\n renderer_globals[\"url\"] = fake_url\n fake_url.current = fake_url_current\n renderer_globals[\"_\"] = fake_translate\n renderer_globals[\"flash\"] = lib.Flash(request.session)\n\n request.tmpl_context.links = config['spline.plugins.links']\n\n # start timer\n request.tmpl_context.timer = lib.ResponseTimer()" ]
[ "0.59271264", "0.52645963", "0.5064856", "0.5035225", "0.5023455", "0.50157934", "0.5003367", "0.49658138", "0.49607152", "0.49281287", "0.48860234", "0.4869646", "0.48251504", "0.48205826", "0.48137787", "0.47873187", "0.47489318", "0.46791732", "0.46751204", "0.46577823", "0.46526173", "0.46136925", "0.46114415", "0.46098492", "0.46062037", "0.46033165", "0.45862976", "0.45791665", "0.4572065", "0.45703351" ]
0.7125961
0
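The `includeme` above registers a reified `request.raven` client and, if `pyramid_layout` is available, a `raven-js` debug panel. A minimal usage sketch follows; the route name and view are hypothetical, the `pyramid_raven.panel_tmpl` key is the one the snippet reads, and the exception-capture call follows the legacy raven client API.

from pyramid.config import Configurator
from pyramid.view import view_config


def main(global_config, **settings):
    # Optional override read by includeme():
    #   pyramid_raven.panel_tmpl = myapp:templates/raven_panel.mako
    config = Configurator(settings=settings)
    config.include('pyramid_raven')   # adds request.raven and the raven-js panel
    config.add_route('boom', '/boom')
    config.scan()
    return config.make_wsgi_app()


@view_config(route_name='boom', renderer='string')
def boom_view(request):
    try:
        1 / 0
    except ZeroDivisionError:
        # request.raven is the reified client added via add_request_method();
        # captureException() is the legacy raven client call.
        request.raven.captureException()
        return 'error reported'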
Loading model weights and meta information from cfg and checkpoint. Subclasses could override this method to load extra meta information from ``checkpoint`` and ``cfg`` to model.
def _load_weights_to_model(self, model: nn.Module, checkpoint: Optional[dict], cfg: Optional[ConfigType]) -> None: if checkpoint is not None: _load_checkpoint_to_model(model, checkpoint) else: warnings.warn('Checkpoint is not loaded, and the inference ' 'result is calculated by the randomly initialized ' 'model!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n pass\n else:\n raise TypeError('pretrained must be a str or None')", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def update_params(self):\n if self._hyper_params[\"pretrain_model_path\"] != \"\":\n model_path = self._hyper_params[\"pretrain_model_path\"]\n state_dict = torch.load(model_path,\n map_location=torch.device(\"cpu\"))\n if \"model_state_dict\" in state_dict:\n state_dict = state_dict[\"model_state_dict\"]\n try:\n self.load_state_dict(state_dict, strict=True)\n except:\n self.load_state_dict(state_dict, strict=False)\n logger.info(\"Pretrained weights loaded from {}\".format(model_path))\n logger.info(\"Check md5sum of Pretrained weights: %s\" %\n md5sum(model_path))", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_weights(self):\n try:\n print('loading weights from {}'.format(self.cfg.class_model_dir))\n self.load_state_dict(torch.load(self.cfg.class_model_dir + self.class_model_name + '.pth'))\n except Exception as e:\n print(\"load weights exception: {}\".format(e))", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = 
checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} 
...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load_model(sess, meta_file, checkpoint_file):\n saver = tf.train.import_meta_graph(meta_file)\n saver.restore(sess, checkpoint_file)\n \n configs = tf.get_collection('configs')\n pvars = tf.get_collection('placeholders')\n \n model_settings = dict()\n for c in configs:\n name = c.name.split(':')[0]\n model_settings[name] = sess.run(c)\n \n model_vars = dict()\n for p in pvars:\n name = p.name.split(':')[0]\n model_vars[name] = p\n model_vars['probs'] = tf.get_collection('probs')[0]\n \n return model_settings, model_vars", "def load_model_weights(self):\n raise NotImplementedError", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_hf = kwargs.pop(\"from_hf\", False)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n default_gpu = kwargs.pop(\"default_gpu\", True)\n\n # Load config\n assert config is not None\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n logger.error(\"Couldn't reach server at '{}' to download pretrained weights.\".format(archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path, \", \".join(cls.pretrained_model_archive_map.keys()), archive_file)\n )\n return None\n if default_gpu:\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Rename Bert parameters for our framework\n # NB: Assume 1 Bert layer is mapped to 1 layer only (cannot be used to init multiple layers)\n old_keys = []\n new_keys = []\n nums = []\n for key in state_dict.keys():\n new_key = None\n if \".layer.\" in key and from_hf:\n num = int(key.split(\".layer.\")[-1].split(\".\")[0])\n if \".attention.\" in key:\n new_key = key.replace(\".layer.%d.attention.\" % num,\n \".layer.%d.attention_\" % config.bert_layer2attn_sublayer.get(str(num), num))\n elif \".intermediate.\" in key:\n new_key = key.replace(\".layer.%d.intermediate.\" % num,\n \".layer.%d.intermediate.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n elif \".output.\" in key:\n new_key = key.replace(\".layer.%d.output.\" % num,\n \".layer.%d.output.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n nums.append(num)\n for old_key, new_key, _ in sorted(zip(old_keys, new_keys, nums), key=lambda x: x[2], reverse=True):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Load from a PyTorch state_dict\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n logger.info(start_prefix)\n load(model_to_load, prefix=start_prefix)\n if 
len(missing_keys) > 0 and default_gpu:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys)\n )\n if len(unexpected_keys) > 0 and default_gpu:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(model.__class__.__name__, unexpected_keys)\n )\n if len(error_msgs) > 0 and default_gpu:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(model.__class__.__name__, \"\\n\\t\".join(error_msgs))\n )\n\n if hasattr(model, \"tie_weights\"):\n model.tie_weights() # make sure word embedding weights are still tied\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"error_msgs\": error_msgs,\n }\n return model, loading_info\n\n return model", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # 
https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def load_model(self):\n if torch.cuda.is_available():\n map_location=lambda storage, loc: storage.cuda()\n else:\n map_location='cpu'\n\n for index, agent in enumerate(self.agents):\n agent.actor_local.load_state_dict(torch.load('agent{}_checkpoint_actor.pth'.format(index + 1), map_location=map_location))\n agent.critic_local.load_state_dict(torch.load('agent{}_checkpoint_critic.pth'.format(index + 1), map_location=map_location))", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n elif isinstance(m, nn.Linear):\n normal_init(m, std=0.01)\n elif isinstance(m, nn.BatchNorm2d):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n elif isinstance(m, nn.Linear):\n normal_init(m, std=0.01)\n else:\n raise TypeError('pretrained must 
be a str or None')", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def get_weights(self, extra=None):\n self._set_cuda_device()\n\n if extra is None:\n extra = {}\n\n # by default return current weights, return best if requested via model type.\n self.phase = FlPhase.GET_WEIGHTS\n\n if ExtraItems.MODEL_TYPE in extra:\n model_type = extra.get(ExtraItems.MODEL_TYPE)\n if not isinstance(model_type, ModelType):\n raise ValueError(\n f\"Expected requested model type to be of type `ModelType` but received {type(model_type)}\"\n )\n if model_type in self.model_filepaths:\n model_path = os.path.join(self.bundle_root, cast(str, self.model_filepaths[model_type]))\n if not os.path.isfile(model_path):\n raise ValueError(f\"No best model checkpoint exists at {model_path}\")\n weights = torch.load(model_path, map_location=\"cpu\")\n # if weights contain several state dicts, use the one defined by `save_dict_key`\n if isinstance(weights, dict) and self.save_dict_key in weights:\n weights = weights.get(self.save_dict_key)\n weigh_type: WeightType | None = WeightType.WEIGHTS\n stats: dict = {}\n self.logger.info(f\"Returning {model_type} checkpoint weights from {model_path}.\")\n else:\n raise ValueError(\n f\"Requested model type {model_type} not specified in `model_filepaths`: {self.model_filepaths}\"\n )\n else:\n if self.trainer:\n weights = get_state_dict(self.trainer.network)\n # returned weights will be on the cpu\n for k in weights.keys():\n weights[k] = weights[k].cpu()\n weigh_type = WeightType.WEIGHTS\n stats = self.trainer.get_stats()\n # calculate current iteration and epoch data after training.\n stats[FlStatistics.NUM_EXECUTED_ITERATIONS] = self.trainer.state.iteration - self.iter_of_start_time\n # compute weight differences\n if self.send_weight_diff:\n weights = compute_weight_diff(global_weights=self.global_weights, local_var_dict=weights)\n weigh_type = WeightType.WEIGHT_DIFF\n self.logger.info(\"Returning current weight differences.\")\n else:\n self.logger.info(\"Returning current weights.\")\n else:\n weights = None\n weigh_type = None\n stats = dict()\n\n if not isinstance(stats, dict):\n raise ValueError(f\"stats is not a dict, {stats}\")\n return_weights = ExchangeObject(\n weights=weights,\n optim=None, # could be self.optimizer.state_dict()\n weight_type=weigh_type,\n statistics=stats,\n )\n\n # filter weights if needed (use to apply differential privacy, encryption, compression, etc.)\n if self.post_weight_filters is not None:\n for _filter in self.post_weight_filters:\n return_weights = _filter(return_weights, extra)\n\n return return_weights", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = 
checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load_state_dict(self, checkpoint):\n self.net.load_state_dict(checkpoint['Net'])\n self.optimizer.load_state_dict(checkpoint['Optimizer'])\n\n if ADVERSARIAL_FLAG:\n self.adv_net.load_state_dict(checkpoint['AdvNet'])\n self.adv_optimizer.load_state_dict(checkpoint['AdvOptimizer'])\n\n self.history = checkpoint['History']\n self.stats = checkpoint['Stats']\n\n # The following loops are used to fix a bug that was\n # discussed here: https://github.com/pytorch/pytorch/issues/2830\n # (it is supposed to be fixed in recent PyTorch version)\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.net.device)\n if ADVERSARIAL_FLAG:\n for adv_state in self.adv_optimizer.state.values():\n for k, v in adv_state.items():\n if isinstance(v, torch.Tensor):\n adv_state[k] = v.to(self.adv_net.device)" ]
[ "0.6934471", "0.69131255", "0.69076866", "0.67372805", "0.6686757", "0.6633907", "0.66300243", "0.6628675", "0.6621558", "0.65934366", "0.6577404", "0.6577404", "0.6514096", "0.6467032", "0.64553356", "0.64483273", "0.64396846", "0.6426327", "0.6404203", "0.6404203", "0.63859785", "0.6381733", "0.6380123", "0.6378871", "0.63752985", "0.63594675", "0.6356797", "0.63567364", "0.63563406", "0.6319989" ]
0.7453869
0
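Every negative snippet in the record above restores model weights from a saved checkpoint (torch.load followed by load_state_dict, sometimes restoring the optimizer and epoch counter as well). A minimal, self-contained sketch of that shared pattern is given below; the file name, checkpoint keys, and the tiny demo model are illustrative assumptions rather than details taken from any one snippet.

from typing import Optional

import torch
import torch.nn as nn


def load_checkpoint(model: nn.Module,
                    path: str = 'best_model.pth',
                    optimizer: Optional[torch.optim.Optimizer] = None) -> int:
    # Map to CPU so the checkpoint can be restored on a machine without a GPU.
    checkpoint = torch.load(path, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    if optimizer is not None and 'optimizer' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
    # Assumed convention: the epoch counter is stored alongside the weights.
    return checkpoint.get('epoch', 0)


if __name__ == '__main__':
    net = nn.Linear(4, 2)
    torch.save({'state_dict': net.state_dict(), 'epoch': 3}, 'best_model.pth')
    print(load_checkpoint(nn.Linear(4, 2)))  # -> 3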
Initialize the ``collate_fn`` with the given config. The returned ``collate_fn`` will be used to collate the batch data.
def _init_collate(self, cfg: ConfigType) -> Callable:
    try:
        with FUNCTIONS.switch_scope_and_registry(self.scope) as registry:
            collate_fn = registry.get(cfg.test_dataloader.collate_fn)
    except AttributeError:
        collate_fn = pseudo_collate
    return collate_fn  # type: ignore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collate_fn(self, *args):\n return TupleMiniBatch(default_collate(*args))", "def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n raise NotImplementedError", "def collate_fn(batch):\n\n flattened_batch = []\n for data in batch:\n num_examples = len(data['image'])\n for i in range(num_examples):\n flattened_batch.append({\n k: v[i] for k, v in data.items()\n })\n\n return default_collate(flattened_batch)", "def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch", "def view(\n self,\n collate_fn: Union[callable, str] = \"batch_of_g_and_y\",\n *args,\n **kwargs\n ):\n # provide default collate function\n if isinstance(collate_fn, str):\n collate_fn = getattr(self, collate_fn)\n\n return torch.utils.data.DataLoader(\n dataset=self,\n collate_fn=collate_fn,\n *args,\n **kwargs,\n )", "def collate_fn(batch):\n pad_index = 1 # the <PAD> index in vocabulary\n src_list = [sample[0] for sample in batch] # list of each language sentences\n trg_list = [sample[1] for sample in batch]\n\n def padding(sentence_list):\n \"\"\"padding each sentence to the right\"\"\"\n max_len = max([sentence.size(0) for sentence in sentence_list])\n pad_sen = [sen.tolist() + [pad_index] * max(0, max_len - len(sen))\n for sen in sentence_list]\n return torch.LongTensor(pad_sen).transpose(0, 1) # shape of (T, B)\n\n return padding(src_list), padding(trg_list)", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def get_collate_fn(mixer_name: str, alpha: float) -> Callable:\n fn = cutmix if mixer_name == \"cutmix\" else mixup\n collate_fn = CustomCollate(alpha=alpha, mixer=fn)\n return collate_fn", "def customize_collate(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n # this is the main part to handle varied length data in a batch\n # batch = [data_tensor_1, data_tensor_2, data_tensor_3 ... 
]\n # \n batch_new = pad_sequence(batch)\n \n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n\n # allocate the memory based on maximum numel\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n # this will go to loop in the last case\n return customize_collate([torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n \n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: customize_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple\n return elem_type(*(customize_collate(samples) \\\n for samples in zip(*batch)))\n elif isinstance(elem, container_abcs.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n \n # zip([[A, B, C], [a, b, c]]) -> [[A, a], [B, b], [C, c]]\n transposed = zip(*batch)\n return [customize_collate(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def pad_collate_fn(batch):\n length = [len(sentence) for sentence in batch]\n return pad_sequence([torch.LongTensor(s) for s in batch], batch_first=True), torch.LongTensor(length)", "def _collate_fn(batch):\n def _pad(seqs, dtype=torch.float32):\n \"\"\" Pads a batch of sequences of varying seq_len. 
\"\"\"\n assert len(seqs) > 0 and all(x.shape[1:] == seqs[0].shape[1:] for x in seqs)\n lens = torch.LongTensor([len(x) for x in seqs])\n max_seq_len = torch.max(lens)\n\n # padded_seq_dims: (batch, max_seq_len, ...).\n padded_seq_dims = (len(seqs), max_seq_len,) + seqs[0].shape[1:]\n res = torch.zeros(padded_seq_dims, dtype=dtype)\n for i, seq in enumerate(seqs):\n src_len = lens[i]\n res[i, :src_len] = torch.Tensor(seq)\n return res, lens\n\n assert all(len(x) == 2 for x in batch)\n # (1, batch, (seq_len, 68, 3))\n frames, captions = zip(*batch)\n\n # Merge sequences (from tuple of 1D tensor to 2D tensor)\n # (batch, seq_len, ...)\n src_seqs, src_lens = _pad(frames, dtype=torch.float32)\n tgt_seqs, tgt_lens = _pad(captions, dtype=torch.long)\n return src_seqs, src_lens, tgt_seqs, tgt_lens", "def __init__(self, *args, **kwargs):\n super(AudioDataLoader, self).__init__(*args, **kwargs)\n self.collate_fn = _collate_fn", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def custom_collate_fn(batch):\n images, bboxes, context_indices, labels = zip(*batch)\n # images = (img_1, ..., img_N) each element of size [3, img_H, img_W]\n # bboxes = (bboxes_1, ..., bboxes_N) each element of size [n_bboxes_in_image, 4]\n # context_indices = (ci_1, ..., ci_N) each element of size [n_bboxes_in_image, 2*context_size]\n # labels = (labels_1, ..., labels_N) each element of size [n_bboxes_in_image]\n \n images = torch.stack(images, 0)\n \n bboxes_with_batch_index = []\n observed_bboxes = 0\n for i, bbox in enumerate(bboxes):\n batch_indices = torch.Tensor([i]*bbox.shape[0]).view(-1,1)\n bboxes_with_batch_index.append(torch.cat((batch_indices, bbox), dim=1))\n context_indices[i][context_indices[i] != -1] += observed_bboxes\n observed_bboxes += bbox.shape[0]\n bboxes_with_batch_index = torch.cat(bboxes_with_batch_index)\n context_indices = torch.cat(context_indices)\n \n labels = torch.cat(labels)\n \n return images, bboxes_with_batch_index, context_indices, labels", "def collate_fn(self, batch):\r\n batch = list(map(torch.stack, zip(*batch)))\r\n max_seq_len = torch.max(torch.sum(batch[1], 1)).item()\r\n for i in range(len(batch) - 1):\r\n if batch[i].size()[1] > max_seq_len:\r\n batch[i] = batch[i][:, :max_seq_len]\r\n if self.truncate_label:\r\n batch[-1] = batch[-1][:, :max_seq_len]\r\n return batch", "def collate_fn(self, image_column_names: Optional[List] = None, per_gpu_batch_size: Optional[int] = None) -> Dict:\n fn = {}\n if self.requires_column_info:\n return NotImplementedError(\n f\"requires_column_info={self.requires_column_info} not implemented for OVD tasks.\"\n )\n\n fn.update(\n {\n self.image_key: PadCollator(pad_val=0),\n self.prompt_key: ListCollator(),\n self.image_meta_key: ListCollator(),\n }\n )\n return fn", "def dynamic_padding_collate_fn(batch_list):\n batch_uncollated = [[] for i in range(3)]\n\n for features in batch_list:\n length = features[1].sum().item()\n for i, feature in enumerate(features):\n batch_uncollated[i].append(feature[:length])\n\n batch_collated = []\n for batch in batch_uncollated:\n batch_collated.append(pad_sequence(batch, batch_first=True))\n\n return batch_collated", "def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)", "def 
customize_collate_from_batch(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n batch_new = pad_sequence(batch) \n out = None\n if torch.utils.data.get_worker_info() is not None:\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n # here is the difference\n return torch.cat(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n return customize_collate_from_batch(\n [torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, tuple):\n # concatenate two tuples\n tmp = elem\n for tmp_elem in batch[1:]:\n tmp += tmp_elem \n return tmp\n elif isinstance(elem, container_abcs.Sequence):\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n transposed = zip(*batch)\n return [customize_collate_from_batch(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def collate_fn(batch):\n text = [item[0] for item in batch]\n audio = [item[1] for item in batch]\n\n text_lengths = [len(x) for x in text]\n audio_lengths = [len(x) for x in audio]\n\n max_text = max(text_lengths)\n max_audio = max(audio_lengths)\n\n text_batch = np.stack(pad_text(x, max_text) for x in text)\n audio_batch = np.stack(pad_spectrogram(x, max_audio) for x in audio)\n\n return (torch.LongTensor(text_batch),\n torch.FloatTensor(audio_batch).permute(1, 0, 2),\n text_lengths, audio_lengths)", "def __init__(self, config, augment = None, balance=False):\n self.batch_size = config['batch_size']\n self.dataloaders = []\n self.num_dl = 0\n self.shuffle = config['shuffle']\n self.data_mode = config['slip_filter']\n self.create_eval_data = config['eval_data']\n self.eval_len = 0\n self.transform_type = config['data_transform']['type'] if 'data_transform' in config else None\n self.series_len = config['series_len']\n self.config = config\n self.augment = augment\n self.balance_data = balance\n\n if self.transform_type:\n assert self.transform_type == 'standard' or self.transform_type == 'minmax'", "def collate(\n batch,\n config,\n plate,\n base_directory=\"../..\",\n column=None,\n munge=False,\n csv_dir=\"analysis\",\n aws_remote=None,\n aggregate_only=False,\n tmp_dir=\"/tmp\",\n overwrite=False,\n add_image_features=True,\n image_feature_categories=[\"Granularity\", \"Texture\", \"ImageQuality\", \"Threshold\"],\n printtoscreen=True,\n):\n\n from pycytominer.cyto_utils.cells import SingleCells\n\n # Set up directories (these need to be abspaths to keep from confusing makedirs later)\n input_dir = pathlib.Path(f\"{base_directory}/analysis/{batch}/{plate}/{csv_dir}\")\n backend_dir = pathlib.Path(f\"{base_directory}/backend/{batch}/{plate}\")\n cache_backend_dir = pathlib.Path(f\"{tmp_dir}/backend/{batch}/{plate}\")\n\n aggregated_file = pathlib.Path(f\"{backend_dir}/{plate}.csv\")\n backend_file = 
pathlib.Path(f\"{backend_dir}/{plate}.sqlite\")\n cache_backend_file = pathlib.Path(f\"{cache_backend_dir}/{plate}.sqlite\")\n\n if not aggregate_only:\n if os.path.exists(cache_backend_file):\n if not overwrite:\n sys.exit(\n f\"An SQLite file for {plate} already exists at {cache_backend_file} and overwrite is set to False. Terminating.\"\n )\n else:\n os.remove(cache_backend_file)\n\n for eachdir in [input_dir, backend_dir, cache_backend_dir]:\n if not os.path.exists(eachdir):\n os.makedirs(eachdir, exist_ok=True)\n\n if aws_remote:\n remote_input_dir = f\"{aws_remote}/analysis/{batch}/{plate}/{csv_dir}\"\n\n remote_backend_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.sqlite\"\n\n remote_aggregated_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.csv\"\n\n sync_cmd = f\"aws s3 sync --exclude * --include */Cells.csv --include */Nuclei.csv --include */Cytoplasm.csv --include */Image.csv {remote_input_dir} {input_dir}\"\n if printtoscreen:\n print(f\"Downloading CSVs from {remote_input_dir} to {input_dir}\")\n run_check_errors(sync_cmd)\n\n if printtoscreen:\n print(f\"Ingesting {input_dir}\")\n # Run cytominer-database ingest\n if munge:\n cytominer_database.munge.munge(config_path=config, source=input_dir)\n\n cytominer_database.ingest.seed(\n source=input_dir,\n target=f\"sqlite:///{cache_backend_file}\",\n config_file=config,\n )\n\n # Create a sqlite3 connection\n with sqlite3.connect(cache_backend_file, isolation_level=None) as connection:\n cursor = connection.cursor()\n if column:\n if print:\n print(f\"Adding a Metadata_Plate column based on column {column}\")\n cursor.execute(\"ALTER TABLE Image ADD COLUMN Metadata_Plate TEXT;\")\n cursor.execute(f\"UPDATE image SET Metadata_Plate ={column};\")\n\n if printtoscreen:\n print(f\"Indexing database {cache_backend_file}\")\n cursor.execute(\n \"CREATE INDEX IF NOT EXISTS table_image_idx ON Image(TableNumber, ImageNumber);\"\n )\n for eachcompartment in [\"Cells\", \"Cytoplasm\", \"Nuclei\"]:\n cursor.execute(\n f\"\"\"CREATE INDEX IF NOT EXISTS table_image_object_{eachcompartment.lower()}_idx \n ON {eachcompartment}(TableNumber, ImageNumber, ObjectNumber);\"\"\"\n )\n cursor.execute(\n \"CREATE INDEX IF NOT EXISTS plate_well_image_idx ON Image(Metadata_Plate, Metadata_Well);\"\n )\n cursor.close()\n connection.close()\n\n if aws_remote:\n if printtoscreen:\n print(f\"Uploading {cache_backend_file} to {remote_backend_file}\")\n cp_cmd = [\"aws\", \"s3\", \"cp\", cache_backend_file, remote_backend_file]\n run_check_errors(cp_cmd)\n\n if printtoscreen:\n print(\n f\"Removing analysis files from {input_dir} and {cache_backend_dir}\"\n )\n import shutil\n\n shutil.rmtree(input_dir)\n\n if printtoscreen:\n print(f\"Renaming {cache_backend_file} to {backend_file}\")\n os.rename(cache_backend_file, backend_file)\n\n if printtoscreen:\n print(f\"Aggregating sqlite:///{backend_file}\")\n\n if aggregate_only and aws_remote:\n remote_backend_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.sqlite\"\n\n remote_aggregated_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.csv\"\n\n cp_cmd = [\"aws\", \"s3\", \"cp\", remote_backend_file, backend_file]\n if printtoscreen:\n print(\n f\"Downloading SQLite files from {remote_backend_file} to {backend_file}\"\n )\n run_check_errors(cp_cmd)\n\n if not os.path.exists(backend_file):\n sys.exit(f\"{backend_file} does not exist. 
Exiting.\")\n\n if add_image_features:\n pass\n else:\n image_feature_categories = None # defensive but not sure what will happen if we give a list but set to False\n\n database = SingleCells(\n f\"sqlite:///{backend_file}\",\n aggregation_operation=\"mean\",\n add_image_features=add_image_features,\n image_feature_categories=image_feature_categories,\n )\n database.aggregate_profiles(output_file=aggregated_file)\n\n if aws_remote:\n if printtoscreen:\n print(f\"Uploading {aggregated_file} to {remote_aggregated_file}\")\n csv_cp_cmd = [\"aws\", \"s3\", \"cp\", aggregated_file, remote_aggregated_file]\n run_check_errors(csv_cp_cmd)\n\n if printtoscreen:\n print(f\"Removing backend files from {backend_dir}\")\n import shutil\n\n shutil.rmtree(backend_dir)", "def list_data_collate(batch: Sequence):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n key = None\n try:\n if config.USE_META_DICT:\n data = pickle_operations(data) # bc 0.9.0\n if isinstance(elem, Mapping):\n ret = {}\n for k in elem:\n key = k\n data_for_batch = [d[key] for d in data]\n ret[key] = collate_meta_tensor(data_for_batch)\n else:\n ret = collate_meta_tensor(data)\n return ret\n except RuntimeError as re:\n re_str = str(re)\n if \"equal size\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create images of different shapes, creating your \"\n + \"`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its \"\n + \"documentation).\"\n )\n _ = dev_collate(data)\n raise RuntimeError(re_str) from re\n except TypeError as re:\n re_str = str(re)\n if \"numpy\" in re_str and \"Tensor\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create mixtures of torch Tensor and numpy ndarray, \"\n + \"creating your `DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem \"\n + \"(check its documentation).\"\n )\n _ = dev_collate(data)\n raise TypeError(re_str) from re", "def initialize(config = None):\n\n # initialize the cache\n cache.initialize(config)\n\n # initialize the translator\n translator.initialize(config)", "def collate_batch(self) -> Dict[str, Any]:\n pass", "def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]", "def initialize(self, embedding_fn: EmbeddingFn, inputs: Optional[torch.Tensor], sequence_length: Optional[torch.LongTensor]) ->HelperInitTuple:\n raise NotImplementedError", "def _init_pipeline(self, cfg: ConfigType) -> Callable:", "def _var_len_collate_fn(batch):\n def func(p):\n return p[0].size(1)\n\n longest_sample = max(batch, key=func)[0]\n freq_size = longest_sample.size(2)\n minibatch_size = len(batch)\n max_seqlength = longest_sample.size(1)\n inputs = torch.zeros(minibatch_size, 1, max_seqlength, freq_size)\n targets = []\n seq_lengths = []\n for x in range(minibatch_size):\n sample = batch[x]\n tensor = sample[0]\n target = sample[1]\n seq_length = tensor.size(1)\n seq_lengths.append(seq_length)\n inputs[x].narrow(1, 0, seq_length).copy_(tensor)\n targets.append(target)\n targets = torch.LongTensor(targets)\n return seq_lengths, inputs, targets" ]
[ "0.6244159", "0.599894", "0.5865807", "0.5771731", "0.56962913", "0.5471204", "0.5424939", "0.5283143", "0.52531415", "0.52518207", "0.5231545", "0.51880026", "0.5077874", "0.5077874", "0.49971217", "0.49632436", "0.49623293", "0.4961835", "0.4936827", "0.49166656", "0.48937365", "0.48364875", "0.48214546", "0.48180023", "0.47950238", "0.4794209", "0.47760612", "0.4770956", "0.4749742", "0.4734129" ]
0.7657155
0
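For context on the record above: `_init_collate` resolves a collate function by name from a registry and falls back to a pseudo-collate when the config names none. The sketch below shows the same lookup-with-fallback idea in a self-contained form; the registry dict, decorator, and function names are illustrative assumptions and not the real mmengine API.

from typing import Any, Callable, Dict, List, Optional

COLLATE_FUNCTIONS: Dict[str, Callable] = {}


def register_collate(name: str) -> Callable:
    # Record a collate function under a string key so configs can refer to it by name.
    def _wrap(fn: Callable) -> Callable:
        COLLATE_FUNCTIONS[name] = fn
        return fn
    return _wrap


def pseudo_collate(batch: List[Any]) -> List[Any]:
    # Fallback: keep the samples as a plain list instead of stacking them.
    return list(batch)


@register_collate('transpose_collate')
def transpose_collate(batch: List[Any]) -> List[List[Any]]:
    # Turn a batch of (x, y) samples into [xs, ys].
    return [list(column) for column in zip(*batch)]


def init_collate(collate_name: Optional[str]) -> Callable:
    # Mirror of the lookup-with-fallback logic: a missing or unknown name
    # resolves to the pseudo collate.
    if collate_name is None:
        return pseudo_collate
    return COLLATE_FUNCTIONS.get(collate_name, pseudo_collate)


if __name__ == '__main__':
    collate_fn = init_collate('transpose_collate')
    print(collate_fn([(1, 'a'), (2, 'b')]))  # [[1, 2], ['a', 'b']]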
List models defined in metafile of corresponding packages.
def list_models(scope: Optional[str] = None, patterns: str = r'.*'):
    matched_models = []
    if scope is None:
        default_scope = DefaultScope.get_current_instance()
        assert default_scope is not None, (
            'scope should be initialized if you want '
            'to load config from metafile.')
    assert scope in MODULE2PACKAGE, (
        f'{scope} not in {MODULE2PACKAGE}!, please make pass a valid '
        'scope.')
    root_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(scope)
    for model_cfg in BaseInferencer._get_models_from_metafile(
            root_or_mim_dir):
        model_name = [model_cfg['Name']]
        model_name.extend(model_cfg.get('Alias', []))
        for name in model_name:
            if re.match(patterns, name) is not None:
                matched_models.append(name)
    output_str = ''
    for name in matched_models:
        output_str += f'model_name: {name}\n'
    print_log(output_str, logger='current')
    return matched_models
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_path)\n yield from metainfo['Models']", "def get_models():\n all_models = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))\n model_filenames = [os.path.basename(m) for m in all_models]\n model_numbers_names = sorted([\n (shipname.detect_model_num(m), shipname.detect_model_name(m))\n for m in model_filenames])\n return model_numbers_names", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def peak_all_models(self) -> List:\n models = list(self.meta.name)\n print(models)\n return models", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def models(self):\n return self.config.models()", "def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models", "def models():\n return list(alg2module.keys())", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass", "def models() -> list[str]:\n return list(models_url.keys())", "def get_data_models(models_file):\n list_models = []\n model = []\n pos_numeric = [] # Position of numeric fields\n info_keys = [] # Info. 
about keys\n re_field = re.compile('\\s+\\w+\\s*=\\s*models\\.') # Line with field name\n re_class = re.compile('\\s+class ') # For Admin and Meta\n re_def = re.compile('\\s+def ')\n is_new_model = False\n\n for line in open(models_file):\n # The models start with 'class'\n if not is_new_model and line.startswith('class'):\n model_name = line.replace('class','').split('(')[0].strip()\n model.append(model_name)\n is_new_model = True\n elif is_new_model:\n if re_field.match(line):\n field_name = line.split('=')[0].strip()\n model.append(field_name)\n\n if 'models.DecimalField' in line or 'models.IntegerField' in line:\n pos_numeric.append(len(model)-2) # Discard model name.\n elif 'models.ForeignKey' in line:\n key_name = line.split('(')[-1].strip().strip(')')\n position = len(model)-2 # Discard model name.\n info_keys.append(':')\n info_keys.append(str(position) + ',')\n info_keys.append(key_name)\n # It is supposed that models in localization has at the end:\n # ('class Meta', 'class Admin', or some 'def')\n elif re_class.match(line) or re_def.match(line):\n if pos_numeric:\n pos_num2str = '#'\n for num in pos_numeric:\n pos_num2str += str(num)\n model.append(pos_num2str)\n model.append(':N') # To detect the numeric field.\n pos_numeric = []\n if info_keys:\n all_keys = \"\"\n for key in info_keys:\n all_keys += key\n model.append(all_keys)\n model.append(':K') # To detect fastly some key.\n info_keys = []\n list_models.append(model)\n model = []\n is_new_model = False\n\n return list_models", "def availablemodels(self):\n return self.__models.keys()", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def list_model_packages(CreationTimeAfter=None, CreationTimeBefore=None, MaxResults=None, NameContains=None, NextToken=None, SortBy=None, SortOrder=None):\n pass", "def _get_model_list(self):\n model_filelist = os.listdir(os.path.join(self.pack_file_path, self.model_dir))\n for i in model_filelist:\n model_file = os.path.join(self.model_dir, i)\n self.filelist.append(model_file)", "def get_models(self):\n return [Doc(system_object) for system_object in self._get_documents()]", "def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models", "def _get_model_list(self):\n self.model_arch_file = os.path.join(self.model_dir, self.model_name + '.prototxt')\n self.model_weight_file = os.path.join(self.model_dir, self.model_name + '.caffemodel')\n self.filelist.append(self.model_arch_file)\n 
self.filelist.append(self.model_weight_file)", "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models", "def list_dashdb_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='DashDB In-database Model', fields=fields)\n\t\treturn models", "def get_models(self):\n self.load()\n return self._models", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def generate_model_list():\n\t\n\tmodels = [\n\t\tapi.v1.models.job.Job,\n\t]\n\treturn models" ]
[ "0.72860724", "0.6853126", "0.6804148", "0.6785551", "0.6757944", "0.66406256", "0.6595081", "0.65213096", "0.6515239", "0.64433444", "0.6440567", "0.6412613", "0.6402561", "0.63477504", "0.63247734", "0.63229364", "0.6300605", "0.6296517", "0.6291255", "0.6263", "0.62552387", "0.6249753", "0.6240447", "0.6235324", "0.6235324", "0.62267435", "0.6181433", "0.6172824", "0.61347264", "0.6113558" ]
0.70225334
1
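The `list_models` document above (and its negatives) walk a model index/metafile and collect entries whose names match a pattern. Below is a small, framework-free sketch of that walk over an in-memory metafile; it mirrors the 'Models'/'Name'/'Alias' keys used in the document above, but the overall layout and helper names are simplifying assumptions for illustration.

import re
from typing import Dict, Iterable, List


def iter_metafile_models(metafile: Dict) -> Iterable[Dict]:
    # The metafile is assumed to carry a top-level 'Models' list whose entries
    # have a 'Name' and an optional list of 'Alias' names.
    yield from metafile.get('Models', [])


def list_models(metafile: Dict, patterns: str = r'.*') -> List[str]:
    matched = []
    for model_cfg in iter_metafile_models(metafile):
        names = [model_cfg['Name']] + list(model_cfg.get('Alias', []))
        matched.extend(name for name in names if re.match(patterns, name))
    return matched


if __name__ == '__main__':
    demo = {'Models': [{'Name': 'resnet50', 'Alias': ['r50']},
                       {'Name': 'vit-base'}]}
    print(list_models(demo, patterns=r'res.*'))  # ['resnet50']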
By this save method we create a user and make a relationship with the contact model; the second step depends on the contact type.
def save(self, commit=True):
    instance = super(ClientSignupForm, self).save(commit=False)
    if commit:
        instance.username = self.cleaned_data['email']
        instance.is_active = False
        instance.save()
        contact = Client.objects.create(first_name=self.cleaned_data['first_name'],
                                        last_name=self.cleaned_data['last_name'],
                                        user_role=self.cleaned_data['user_role'],
                                        department=self.cleaned_data['department'],
                                        user=instance)
    return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SUPERVISOR\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(user.id)\n user.save()\n UserExtra.objects.create(uid=user)\n UserBase.objects.create(\n uid=user,\n phone=user.username\n )\n UserBusiness.objects.create(uid=user)\n else:\n super().save_model(request, obj, form, change)", "def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SALESMAN\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(user.id)\n user.save()\n UserExtra.objects.create(uid=user)\n UserBase.objects.create(\n uid=user,\n phone=user.username\n )\n leader = Team.objects.get(id=form.data.get('team')).leader\n inviter_queryset = InviteRelationManager.objects.filter(invitee=leader)\n if inviter_queryset.exists():\n inviter_obj = inviter_queryset.first()\n superior = f'{inviter_obj.superior}|{leader.id}'\n else:\n superior = f'{leader.id}'\n InviteRelationManager.objects.create(inviter=leader, invitee=user, level=1, superior=superior)\n UserBusiness.objects.create(uid=user)\n super().save_model(request, obj, form, change)", "def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.set_password(self.cleaned_data['password1'])\n\n user.save()\n\n # Making user profile and assigning to CPCESU\n # CPCESU\n #group = Organization.objects.get(name='Colorado Plateau')\n\n # New profile with group\n profile = UserProfile(user=user, first_name=self.cleaned_data.get('first_name'),\n last_name=self.cleaned_data.get('last_name'))\n profile.save()\n\n return user", "def users_create():", "def create(self,contact: Contact) -> bool:\n try:\n contact_new=ContactSet(name=contact.name,birthdate=contact.birthdate\n ,contact_type=contact.contact_type, description=contact.description, phone=contact.phone)\n db.session.add(contact_new)\n db.session.commit()\n return True\n except Exception as ex:\n app.logger.error('Error creating a new Contact. {}'.format(ex))\n return False", "def save_model(self, request, obj, form, change):\n obj.propietario = request.user\n obj.save()", "def save_model(self, request, obj, form, change):\n if not change:\n obj.creator = request.user\n obj.save()", "def forwards(self, orm):\n \n location_type = orm['mooi.Location']\n if len(location_type.objects.all()) == 0:\n l = location_type(city=\"Lethbridge\", country=\"Canada\")\n l.save()\n else:\n l = location_type.objects.all()[0]\n \n \n user_type = orm['auth.User']\n user = user_type(username=self.suUsername, email=self.suEmail, password=self.suPassword)\n user.first_name = self.suFirstName\n user.last_name = self.suLastName\n user.is_superuser = True\n user.save()\n \n profile_type = orm['mooi.Profile']\n userProfile = profile_type(user=user, location=l, phone=self.suPhone)\n userProfile.location = l\n userProfile.phone = self.suPhone\n userProfile.save()", "def perform_create(self, serializer):\n serializer.save(using=UserConf.db)", "def commit(self):\n\t\t#firstly, get all variables and values of this model\n\t\tcontent = self.__dict__.copy() \n\t\t#if '_rev' is one of the variables of this model instance,\n\t\t#it means this user is retrived from database. 
\n\t\t#We are actually going to update the model document in database\n\t\t#instead of creating a new user document.\n\t\tres = dbop.update_create_user_in_database(self._id, content) \n\t\tself._id = res['id']\n\t\tself._rev = res['rev']", "def save(self)->None:\n database.cursor.execute(\n \"INSERT INTO users(firstname,lastname,othernames,email,phone,username,password,role) VALUES (%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.first_name,\n self.last_name,\n self.other_name,\n self.email,\n self.phone_number,\n self.user_name,\n self.password,\n self.is_admin\n ))\n super().save()", "def _create_user(self, email, mobile_number, password, **extra_fields):\n\n print('model number')\n print(mobile_number)\n \n user = self.model(email=email,mobile_number = mobile_number, **extra_fields)\n user.set_password(password)\n \n user.save(using=self._db)\n return user", "def save_user_ref(sender, created, instance, **_):\n if created:\n UserExtend.objects.create(user=instance)\n UserSettings.objects.create(user=instance)", "def save_user(self):\n db.session.add(self)\n db.session.commit()", "def create_user(self, phone_number, type, password, is_staff):\n return self.__create_user(phone_number, type, password, is_staff, False, False)", "def add_contact_to_db(self):\n self.init_db(self._testing)\n\n # make sure that the object is not in the db\n assert self.uid == \"\"\n\n self._insert_row_into_db(Contact.table_name, Contact.columns, self.values)\n\n # update this objects uid\n self.uid = self._get_id_of_last_row(Contact.table_name)", "def save(self, commit=True):\n\t\tprint('save django.user ')\n\t\tprint(self.cleaned_data)\n\t\tusr = User.objects.create_user(self.cleaned_data['username'], self.cleaned_data['email'], self.cleaned_data['pass1'])\n\t\tkuser = users.models.KUser()\n\t\tkuser.user = usr\n\t\tkuser.realName = self.cleaned_data['realName']\n\t\tkuser.job = self.cleaned_data['job']\n\t\tkuser.privilege = self.cleaned_data['privilege']\n\t\tkuser.employeeId = self.cleaned_data['employeeId']\n\t\tkuser.isManager = self.cleaned_data['isManager']\n\t\tkuser.gender = self.cleaned_data['gender']\n\t\tprint('create kuser:')\n\t\tprint(kuser)\n\n\t\tif commit:\n\t\t\tkuser.save()\n\t\treturn kuser", "def on_user_create(self, user):", "def perform_create(self,serializer):\n serializer.save(user_profile=self.request.user)", "def perform_create(self,serializer):\n serializer.save(user_profile=self.request.user)", "def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)", "def save_to_db(self):\n # update\n if self.user_db:\n self.db.session.query(UserDB).filter(UserDB.login == self.params['login']).\\\n update({'access_token': self.params['access_token'],\n# 'social_net': self.params['social_net'] or 'social_net',\n 'profile_url': self.params.get('profile_url', None),\n 'fio': self.params.get('name', None),\n 'email': self.params.get('email', None)},\n synchronize_session='fetch')\n self.db.commit()\n log.debug('Updated social user: %s', self.params['login'],)\n # create\n else:\n user = UserDB(self.params['login'],\n self.params['email'],\n fio = self.params['name'],\n avatar = '',\n access_token = self.params['access_token'],\n social_net = self.params['social_net'],\n profile_url = self.params['link']\n )\n\n 
self.db.create(user)\n log.debug('Social user <%s> created', self.params['login'])\n return {'success': True}", "def perform_create(self, serializer): # this method runs everytime a POST method is called\n serializer.save(user_profile=self.request.user)", "def save(self, *args, **kwargs):\n create = self.id is None\n if create:\n self.username = u'lc_' + self.city.lower()\n # Give the user a randomly generated password\n password = generate_random_string()\n self.password = password\n # Compose the email to send to the LC\n context = {\n 'lc': self,\n 'password': password,\n }\n subject = render_to_string(\n 'registration/lc_register_email_subject.txt')\n subject = subject.strip()\n message = render_to_string(\n 'registration/lc_register_email_text.txt',\n context)\n send_mail(subject,\n message,\n settings.DEFAULT_FROM_EMAIL,\n [self.email])\n # Delegate upward to the parent classes\n super(LocalCommittee, self).save(*args, **kwargs)", "def save(self):\n data = self.cleaned_data\n # Como este metodo no nos sirve para nada por eso tenemos que sacarlo,\n # se saca ya que solo es con el proposito de tener una contraseña con\n # su respectiva confirmacion. el modelo User no tiene ese campo por eso\n # se lo saca con el metodo *pop*\n data.pop('password_confirmation')\n # Los asteriscos lo que hacen es enviar la estructura desvaratada\n user = User.objects.create_user(**data)\n profile = Profile(user=user)\n profile.save()", "def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n # Get the profile, the `save` method above creates a profile for each\n # user because it calls the manager method `create_user`.\n # See: https://github.com/django-userena-ce/django-userena-ce/blob/master/userena/managers.py#L65\n profile = new_user.my_profile\n profile.gender = self.cleaned_data['gender']\n profile.education = self.cleaned_data['education']\n profile.birthday = self.cleaned_data['birthday']\n profile.annual_income = self.cleaned_data['annual_income']\n profile.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user", "def _create(cls, model_class, *args, **kwargs):\n manager = cls._get_manager(model_class)\n # The default would use ``manager.create(*args, **kwargs)``\n return manager.create_user(*args, **kwargs)", "def _create_user(self, telephone,username, password, email=None, **kwargs):\n if not username:\n raise ValueError('请输入用户名')\n if not telephone:\n raise ValueError('请输入手机号')\n if not password:\n raise ValueError('请输入密码')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(telephone=telephone, username=username, email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def post(self):\r\n return create_user(request)", "def save(self, commit=False):\n mail_result = self.send_email()\n if mail_result:\n self.instance.is_admin_notified = True\n\n contact = super().save(commit=commit)\n\n return contact" ]
[ "0.64130485", "0.6218375", "0.61476517", "0.60348797", "0.6014504", "0.5965818", "0.59625626", "0.5833716", "0.5819595", "0.5818768", "0.5805604", "0.5791148", "0.5760427", "0.575658", "0.5729577", "0.56765777", "0.5676002", "0.5647934", "0.56215864", "0.56215864", "0.56146765", "0.5602065", "0.56014323", "0.55961716", "0.5585647", "0.558099", "0.5552456", "0.5548479", "0.55462354", "0.55437565" ]
0.64635587
0
Must preserve data used at construction. Specifically for default averaging/length adjustments. averaging/length adjustments recalculate the underlying data
def _original_data(self, data: np.ndarray): if self._raw_data is None: self._raw_data = data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_avg(self):\n if self._data_type == 'coords':\n # default averaging is supported only for 'matrix' dataTypes\n return\n elif self._data_type == 'image':\n\n x, y = self._averaging, self._averaging\n\n if (x,y) == (1, 1):\n self.vectors = self._original_data\n # calling original data\n return\n\n tempdat = self._original_data\n range_x = tempdat.shape[0]\n range_y = tempdat.shape[1]\n x_offset = int((x - 1) / 2)\n y_offset = int((y - 1) / 2)\n\n kernel = np.ones(shape=(x, y)) / (x*y)\n\n output_mat = np.zeros_like(tempdat)\n output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel,\n mode='same', boundary='wrap')\n output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel,\n mode='same', boundary='wrap')\n\n output_mat[:, :, 0] = output_mat_x\n output_mat[:, :, 1] = output_mat_y\n\n self.vectors = (output_mat[x_offset:range_x-x_offset:x,\n y_offset:range_y-y_offset:y])", "def extend_model_data(self, newdata):\n logger.warning(\"Extend data is untested and may have unexpected consequences\")\n data_temp = newdata.copy()\n data_temp['X'] -= self.origin[0]\n data_temp['Y'] -= self.origin[1]\n data_temp['Z'] -= self.origin[2]\n data_temp['X'] /= self.scale_factor\n data_temp['Y'] /= self.scale_factor\n data_temp['Z'] /= self.scale_factor\n self.data.concat([self.data, data_temp], sort=True)", "def __call__(self,raw):\n\n #replace any \"out of range\" values in T_Arrays by NaN's\n self.cleanup(raw)\n\n #for those variables that are best represented as sums,\n #multiply by ntimes_ave to compensate for pre averaging\n \n #if hasattr(raw,'seeded_shots'):\n # raw.seeded_shots*=self.ntime_ave\n #if hasattr(raw,'shot_count'):\n # raw.shot_count*=self.ntime_ave\n\n if self.post_operator:\n self.post_operator(raw)\n return raw", "def _reset_dimensional_data(self, dataset):\n # local reference to input data\n raw = dataset.get_source_data('prep')\n\n nfids = raw.shape[-2]\n \n nfids = int(nfids/self.set.fids_to_average)\n \n data_shape = list(raw.shape)\n data_shape[-2] = nfids\n\n self.frequency_shift = np.zeros([nfids])\n self.phase_0 = np.zeros([nfids])\n self.measure_time = np.arange(nfids)\n\n self.data = np.zeros(data_shape, dtype=raw.dtype)\n if self.chain is not None:\n self.chain.reset_results_arrays()", "def reBuild(self): # redefine the rebuild method for loss function (polymorphism)\n self.updateRange()\n self.buildLine()\n self.normalize() # normalize loss function to have total area of 1 ", "def update_original_data(self):\n pass", "def apply_parameters(self):\n n_bins = int(self.record_length / self.bin_width)\n time_bins = self.bin_width * np.arange(n_bins)\n \n\n self.tau = np.arange(self.tau_start, self.tau_end+self.tau_delta, self.tau_delta)\n sequence = self.generate_sequence()\n self.n_laser = find_laser_pulses(sequence)\n \n FC.Configure(self.record_length, self.bin_width, self.n_laser)\n\n if self.keep_data and sequence == self.sequence and np.all(time_bins == self.time_bins): # if the sequence and time_bins are the same as previous, keep existing data\n self.old_count_data = self.count_data.copy()\n else:\n self.old_count_data = np.zeros_like(FC.GetData())\n \n self.sequence = sequence\n self.time_bins = time_bins\n self.n_bins = n_bins\n \n self.MW_source = {'mw':mw, 'mw2':mw2}[self.mw_source]", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= 
np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def __init__(self):\n self.last_obs = -1.0\n self.last_timestamp = -1.0\n self._fitted = False", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def evolution_pre_allocation(self, dataset_length):\n #it will store the intial basis too, thus +1 on the dataset length \n self.basis_history=[[[np.zeros(self.basis[layer][sublayer].shape) for recording in range(dataset_length+1)] for sublayer in range(2**layer)] for layer in range(self.layers)]\n for layer in range(self.layers):\n for sublayer in range(2**layer):\n self.basis_history[layer][sublayer][0]=self.basis[layer][sublayer]\n self.noise_history=[[[0 for recording in range(dataset_length)] for sublayer in range(2**layer)] for layer in range(self.layers)]\n self.sparsity_history=[[[0 for recording in range(dataset_length)] for sublayer in range(2**layer)] for layer in range(self.layers)]\n self.sensitivity_history=[[[0 for recording in range(dataset_length)] for sublayer in range(2**layer)] for layer in range(self.layers)]\n self.learning_history=[[[0 for recording in range(dataset_length)] for sublayer in range(2**layer)] for layer in range(self.layers)]\n self.basis_distance=[[[np.zeros(len(self.basis[layer][sublayer])) for recording in range(dataset_length)] for sublayer in range(2**layer)] for layer in range(self.layers)]", "def normalize_dataset(self):", "def augment_train_data(self):\n # do not augment on evaluation dataset\n original_len = len(self.data_train)\n for i in range(len(self.data_train)):\n if i % 100 == 0:\n print(f\"Augmenting train data, progress: {i} / {original_len}\")\n title = self.data_train[i][\"Title\"]\n abstract = self.data_train[i][\"Abstract\"]\n label = self.data_train[i][\"Label\"]\n\n title = self.augmenter.augment(title)\n abstract = self.augmenter.augment(abstract)\n\n self.data_train.append({\"Title\": title, \"Abstract\": abstract, \"Label\": label})\n print(f\"Train data amount after augmenting: {len(self.data_train)}\")", "def _numberOfPoints_changed(self):\n self.reinitialiseData()", "def _reset(self):\n\n # 
Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def reinit(self):\n self.data_updating = {}\n self.reinitialization = True\n # force the bounds to be defined again\n self.bounds = None", "def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n self.stdval = self.data.std()", "def extend(self,data):\n n = float(len(data))\n if n == 0:\n return self\n M2 = 0\n M3 = 0\n M4 = 0\n mean = 0\n vmin = None\n vmax = None\n for x in data:\n mean += x/n \n if vmin is None:\n vmax = x\n vmin = x\n if x < vmin:\n vmin = x\n if x > vmax:\n vmax = x\n for x in data:\n d = x-mean\n M2 += (d**2)\n M3 += (d**3)\n M4 += (d**4)\n x = LiveStat(self.name)\n x.vmin = vmin\n x.vmax = vmax\n x.vmean = mean\n x.vm2 = M2\n x.vm3 = M3\n x.vm4 = M4\n x.vcount = int(n)\n x.vcountsq = x.vcount**2\n x.dirty = True\n self.merge(x)\n return self", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n if self._validate:\n if len(data.shape) <= self._axis:\n raise DataProcessorError(\n f\"Cannot average the {len(data.shape)} dimensional \"\n f\"array along axis {self._axis}.\"\n )\n\n return data", "def __init__(self):\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False", "def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True", "def calculateDataRate(self):\n pass", "def calculate(self):\n self.results['max'] = numpy.max(self.data)\n self.results['min'] = numpy.min(self.data)\n if self.type == 0:\n self.group_discrete_data()\n if self.type == 1:\n self.group_continuous_data()\n\n self.results['arithAvg'] = self.average([self.data[i] * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences)\n self.results['quadAvg'] = math.sqrt(\n self.average([(self.data[i] * self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n if self.results['min'] > 0:\n self.results['geoAvg'] = math.exp(\n self.average([numpy.log(self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n self.results['harmAvg'] = 1 / self.average(\n [(self.occurrences[i] / self.data[i]) for i in range(len(self.data))],\n self.totalOccurrences)\n else:\n self.results['geoAvg'] = self.results['harmAvg'] = \"N/A\"\n self.results['momentsR'] = self.moments(self.data, self.occurrences, 4)\n self.results['centralMomentsR'] = self.moments([(i - self.results['arithAvg']) for i in self.data],\n self.occurrences, 4)\n self.results['std'] = self.average(\n [self.occurrences[i] * 
abs(self.data[i] - self.results['arithAvg']) for i in range(len(self.data))],\n self.totalOccurrences)", "def recalculate() -> None:\n NotImplemented", "def __init__(self, size):\n self.data = list()\n self.data_len = 0\n self.start_idx = -1\n self.size = size\n self.average = None", "def readdata(self, fname):\n\t\tif not hasattr(self, 'sweepNumber') or not hasattr(self, 'channel'):\n\t\t\tself.sweepNumber=0\n\t\t\tself.channel=0\n\n\t\t# additional meta data\n\t\tself.fileFormat='abf'\n\n\t\tabf=pyabf.ABF(fname)\n\t\tabf.setSweep(sweepNumber=self.sweepNumber, channel=self.channel)\n\t\tscale=self._currentScale(abf)\n\n\t\t# If the Fs attribute doesn't exist set it\n\t\tif not hasattr(self, 'Fs'):\t\n\t\t\tself.Fs=abf.dataRate\n\t\t# else check if it s the same as before\n\t\telse:\n\t\t\tif self.Fs!=abf.dataRate:\n\t\t\t\traise metaTrajIO.SamplingRateChangedError(\"The sampling rate in the data file '{0}' has changed.\".format(f))\n\n\t\treturn abf.sweepY*scale" ]
[ "0.60771775", "0.5992512", "0.5975735", "0.5974339", "0.58970016", "0.5867703", "0.58252496", "0.58230865", "0.58149874", "0.57894266", "0.5773829", "0.5726125", "0.5725895", "0.570828", "0.56946415", "0.56483364", "0.56139916", "0.56139916", "0.5607044", "0.5600587", "0.55963033", "0.55920905", "0.5582612", "0.5579361", "0.55742526", "0.5564162", "0.5557394", "0.55548453", "0.5535021", "0.55293477" ]
0.60240406
1
Fully refresh the underlying visual.
def _refresh(self): self._need_display_update = True self._update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redraw(self):\n self.vispy_viewer.canvas.update()", "def refresh(self):\n\t\tself.win.refresh()\n\t\tfor c in self.components:\n\t\t\tc.refresh()", "def refresh(self):\n self.getWindow().getDecorView().postInvalidate()", "def update_visualization(self) -> None:\n pass", "def redraw(self):\r\n self.c.update()", "def redraw(self):\n self.vispy_widget.canvas.update()", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def refresh(self):\n self.Refresh()", "def refresh(self) -> None:\n self.screen.refresh()", "def redraw(self):\n raise NotImplementedError()", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def update_figure(self):\n\n self.draw()", "def redraw(self) -> None:\n self.canvas.draw_idle()\n self.Refresh()", "def _update(self):\n if self._need_display_update:\n self._need_display_update = False\n\n self._set_view_slice(self.viewer.dims.indices)\n\n if self._need_visual_update:\n self._need_visual_update = False\n self._node.update()", "def update_visualizer(self):\n if self.visualizer:\n if self.frame_count == 2:\n self.visualizer.add_geometry(self.vis_points)\n self.visualizer.update_geometry(self.vis_points)\n self.visualizer.poll_events()\n self.visualizer.update_renderer()\n time.sleep(0.001)\n self.frame_count += 1", "def Refresh(self):\n pass", "def redraw(self):\n self._create()", "def refresh(self):\n self.__refresh()", "def plot_refresh():\n figure.canvas.draw()", "def repaint(self):\n pass", "def refresh_screen(self):", "def refresh_HDV(self):\n self.canvas.draw()\n self.dicom_navigation.parent.dicom_right_window.top_info.canvas_HDV.draw()", "def refresh(self):\n\n for w in self.windows.values():\n w.refresh()", "def redraw_figures(self):\n pass", "def redraw_figures(self):\n pass", "def redraw_viz():\n\tglobal g_last_draw\n\tif (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)):\n\t\tg_last_draw = rospy.Time.now().to_sec()\n\t\t# redraw imu box\n\t\tdoDraw()", "def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure", "def refresh(self):\n pass", "def refresh(self):\n pass" ]
[ "0.7797752", "0.76777506", "0.7624245", "0.7607786", "0.75885034", "0.74661344", "0.74655384", "0.74655384", "0.7456086", "0.7437477", "0.7407238", "0.7366163", "0.7329085", "0.7314451", "0.722998", "0.7220857", "0.7200671", "0.7197712", "0.7181533", "0.71769416", "0.7154286", "0.70699596", "0.7057484", "0.7045358", "0.70298964", "0.70298964", "0.7003218", "0.698895", "0.69641334", "0.69641334" ]
0.80525124
0
Generates list of mesh vertices and triangles from a list of vectors
def _generate_meshes(self, vectors, width): centers = np.repeat(vectors, 2, axis=0) offsets = segment_normal(vectors[::2, :], vectors[1::2, :]) offsets = np.repeat(offsets, 4, axis=0) signs = np.ones((len(offsets), 2)) signs[::2] = -1 offsets = offsets*signs vertices = centers + width*offsets/2 triangles = np.array([[2*i, 2*i+1, 2*i+2] if i % 2 == 0 else [2*i-1, 2*i, 2*i+1] for i in range(len(vectors))]).astype(np.uint32) return vertices, triangles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vertices(tri, vertex_list):\n dim = len(vertex_list[0])\n p = numpy.zeros((3, dim))\n for j in range(3):\n p[j] = vertex_list[tri[j]]\n return p", "def get_vertices(self):\n vertices = []\n V = [[-self.base_vectors[:,n], self.base_vectors[:,n]] for n in range(self.base_vectors.shape[1])]\n combs = list(itertools.product(*V))\n for cb in combs:\n cb = np.sum(np.array(cb).T, axis=1, keepdims=True)\n vertices.append(self.base_vertices + cb)\n\n vertices = np.concatenate(vertices,axis=1)\n return vertices", "def extract_triangles(mesh, materials_list):\n tri_list = []\n do_uv = bool(mesh.tessface_uv_textures)\n\n for mat in materials_list:\n for i, face in enumerate(mesh.tessfaces):\n f_v = face.vertices\n if mesh.materials[face.material_index].name != mat: continue\n\n uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None\n\n fmt = 0\n if(do_uv): fmt = face.material_index\n\n if do_uv:\n f_uv = uf.uv\n\n if len(f_v) == 3:\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n else: new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n tri_list.append(new_tri)\n\n else: # it's a quad\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), fmt)\n\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])\n else:\n new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n new_tri_2.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n\n tri_list.append(new_tri)\n tri_list.append(new_tri_2)\n\n return tri_list", "def get_verts(ulist, vlist, func):\n verts = []\n for u in ulist:\n for v in vlist:\n verts.append(func(u, v))\n return verts", "def simplex(v1, v2, v3):\n res = polyhedron([], [], [])\n p0 = res.addVertex(vertex(0, 0, 0))\n p1 = res.addVertex(vertex(v1))\n p2 = res.addVertex(vertex(v2))\n p3 = res.addVertex(vertex(v3))\n p12 = res.addVertex(vertex(v1 + v2))\n p13 = res.addVertex(vertex(v1 + v3))\n p23 = res.addVertex(vertex(v2 + v3))\n p123 = res.addVertex(vertex(v1 + v2 + v3))\n res.addEdge(p0, p1)\n res.addEdge(p0, p2)\n res.addEdge(p0, p3)\n res.addEdge(p1, p12)\n res.addEdge(p1, p13)\n res.addEdge(p2, p12)\n res.addEdge(p2, p23)\n res.addEdge(p3, p13)\n res.addEdge(p3, p23)\n res.addEdge(p1, p123)\n res.addEdge(p2, p123)\n res.addEdge(p3, p123)\n res.addFace([p0, p2, p12, p1])\n res.addFace([p0, p1, p13, p3])\n res.addFace([p0, p3, p23, p2])\n res.addFace([p1, p12, p123, p13])\n res.addFace([p2, p23, p123, p12])\n res.addFace([p3, p13, p123, p23])\n return res", "def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)", "def plotVects(vList, colors='k'):\n polyhedron([vertex(v) for v in vList] + [vertex(0, 0, 0)], [edge(v) for v in vList], []).plot(plotEdges=True)", "def create_inner_tri(point, v1, v2, v3):\n return [(point, v1, v2), (point, v1, v3), (point, v2, v3)]", "def get_faces(ulist, vlist):\n width = len(ulist)\n faces = []\n for i in range(len(ulist) - 1):\n for j in range(len(vlist) - 1):\n topleft = j * width + i\n topright = topleft + 1\n bottomleft = ((j + 1) * width) + i\n bottomright = bottomleft + 1\n one = [topleft, topright, bottomleft]\n two = 
[bottomleft, topright, bottomright]\n faces.append(one)\n faces.append(two)\n\n return faces", "def split_triangles(mesh):\n triangles = np.asarray(mesh.triangles).copy()\n vertices = np.asarray(mesh.vertices).copy()\n\n triangles_3 = np.zeros_like(triangles)\n vertices_3 = np.zeros((len(triangles) * 3, 3), dtype=vertices.dtype)\n\n for index_triangle, t in enumerate(triangles):\n index_vertex = index_triangle * 3\n vertices_3[index_vertex] = vertices[t[0]]\n vertices_3[index_vertex + 1] = vertices[t[1]]\n vertices_3[index_vertex + 2] = vertices[t[2]]\n\n triangles_3[index_triangle] = np.arange(index_vertex, index_vertex + 3)\n\n mesh_return = deepcopy(mesh)\n mesh_return.triangles = o3d.utility.Vector3iVector(triangles_3)\n mesh_return.vertices = o3d.utility.Vector3dVector(vertices_3)\n mesh_return.triangle_normals = mesh.triangle_normals\n mesh_return.paint_uniform_color([0.5, 0.5, 0.5])\n return mesh_return", "def create_triangles(list_of_points):\n # create the first two triangle using the create_two_init_triangles with\n # the first 4 points in the given list\n tri_list = create_two_init_triangles(list_of_points[0:FIRST_FOUR_POINTS])\n # run over the point list from the 5th point and on\n for i in range(FIRST_FOUR_POINTS, len(list_of_points)):\n # run on the existing triangles\n for j in range(0, len(tri_list)):\n # check if the point is inside the current triangle\n if is_point_inside_triangle(list_of_points[i], tri_list[j][0],\n tri_list[j][1], tri_list[j][2])[0]:\n # if the point is inside the current triangle, create 3 new\n # triangles using the old triangle vertexes and the new point\n # adding them to the triangle list instead of the triangle the\n # point was in\n tri_list[j:j+1] = create_inner_tri(list_of_points[i],\n tri_list[j][0],\n tri_list[j][1],\n tri_list[j][2])\n break\n return tri_list", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def normals(t, v):\n n = numpy.zeros((len(t), 3))\n for i in range(0, len(t)):\n p = vertices(t[i], v)\n n[i] = triangle.normal(p)\n return n", "def strang_mesh(filename):\n\n from math import pi\n from anuga.utilities.numerical_tools import anglediff\n\n\n fid = open(filename)\n points = [] # List of x, y coordinates\n triangles = [] # List of vertex ids as listed in the file\n\n for line in fid.readlines():\n fields = line.split()\n if len(fields) == 2:\n # we are reading vertex coordinates\n points.append([float(fields[0]), float(fields[1])])\n elif len(fields) == 3:\n # we are reading triangle point id's (format ae+b)\n triangles.append([int(float(fields[0]))-1,\n int(float(fields[1]))-1,\n int(float(fields[2]))-1])\n else:\n raise Excetion('wrong format in %s' % filename)\n\n elements = [] #Final list of elements\n\n for t in triangles:\n #Get vertex coordinates\n v0 = t[0]\n v1 = t[1]\n v2 = t[2]\n\n x0 = points[v0][0]\n y0 = points[v0][1]\n x1 = points[v1][0]\n y1 = points[v1][1]\n x2 = points[v2][0]\n y2 = points[v2][1]\n\n #Check that points are arranged in counter clock-wise order\n vec0 = [x1-x0, y1-y0]\n vec1 = [x2-x1, y2-y1]\n vec2 = [x0-x2, y0-y2]\n\n a0 = anglediff(vec1, vec0)\n a1 = anglediff(vec2, vec1)\n a2 = anglediff(vec0, vec2)\n\n if a0 < pi and a1 < pi and a2 < pi:\n elements.append([v0, v1, v2])\n else:\n elements.append([v0, v2, v1])\n\n return points, elements", "def create_single_triangle_mesh():\n vertices = np.array(\n ((0, 0, 0), (1, 0, 0), (0, 1, 0)), dtype=np.float32)\n faces = np.array(((0, 1, 2),), dtype=np.int32)\n return 
vertices, faces", "def faces_as_vertices(self) -> Iterable[List[Vec3]]:\n v = self.vertices\n for face in self.faces:\n yield [v[index] for index in face]", "def vertexes(self):\n theta = self.orientation\n shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a\n return self.coords + (shifts[:, None] * [-1, 1]).T", "def getVectors(self):\n l = len(self.points)\n return [Vector.createFromTwoPoints(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def get_sound_vertices(self):\n\n vals = np.sum(np.abs(self.base_vectors), axis=1, keepdims=True)\n V = [[v,-v] for v in vals]\n combs = list(itertools.product(*V))\n\n vertices = []\n for cb in combs:\n vertices.append(self.base_vertices+np.array(cb))\n vertices = np.concatenate(vertices, axis=1)\n return vertices", "def get_vectors(nodes, mode=\"xform\"):\n for each in nodes:\n position = (0, 0, 0)\n\n if mode == \"xform\":\n position = cmds.xform(\n each,\n query=True,\n translation=True,\n worldSpace=True,\n )\n\n elif mode == \"pivot\":\n position = cmds.xform(\n each,\n query=True,\n translation=True,\n rotatePivot=True,\n worldSpace=True,\n )\n\n # when using xform on component like faces or edge, the returned value\n # will be a list of each vertices position, so we need to average that\n if len(position) > 3:\n vectors = [\n MVector(position[i : i + 3])\n for i in range(0, len(position), 3)\n ]\n result = MVector()\n for vector in vectors:\n result += vector\n position = result / len(vectors)\n\n yield MVector(position)", "def create( vertices, faces, colour=(0.6,0.6,0.6) ):\r\n \r\n mesh = GLUtils.GLMesh()\r\n \r\n for vertex in vertices:\r\n mesh.addVertex( PyUtils.toPoint3d(vertex) )\r\n \r\n for face in faces:\r\n poly = GLUtils.GLIndexedPoly()\r\n for index in face:\r\n poly.addVertexIndex( index )\r\n mesh.addPoly(poly)\r\n \r\n try:\r\n mesh.setColour( *colour )\r\n except TypeError:\r\n mesh.setColour( *(colour + (1,)) )\r\n\r\n mesh.computeNormals()\r\n\r\n return mesh", "def construct_convex_hull(vertices: Sequence[Point]) -> Polyhedron:\n coords = np.zeros((len(vertices),3))\n for i,vertex in enumerate(vertices):\n coords[i,:] = vertex.coordinates\n hull = qconvex(\"i\", coords)\n n_facets = int(hull[0])\n facets = []\n for facet_vertices_str in hull[1:]:\n facet_vertices_idx = [int(x) for x in facet_vertices_str.split(' ')]\n facet_vertices = [vertices[i] for i in facet_vertices_idx]\n facet = Facet([Contour.from_vertices(facet_vertices)])\n facets.append(facet)\n polyhedron = Polyhedron(facets)\n return polyhedron", "def creaLados(vertices):\n\tlados = []\n\tfor i in range(len(vertices)-1):\n\t\tlados.append((i, i+1))\n\tlados.append((i+1,0))\n\treturn lados", "def calculateMeshInv(mesh_face_vertices):\n mesh_inv = []\n for mesh in mesh_face_vertices:\n U = np.array([\n [mesh[0, 0], mesh[1, 0], mesh[2, 0]],\n [mesh[0, 1], mesh[1, 1], mesh[2, 1]],\n [1, 1, 1],\n ])\n mesh_inv.append(np.linalg.inv(U))\n return np.array(mesh_inv)", "def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)", "def create_hull(vertices):\n dt = np.dtype([('vertex', np.float64, (2,)),\n ('length', np.float64),\n ('is_processed', bool)])\n\n hull = np.empty(len(vertices), dtype=dt)\n for i, v in enumerate(vertices):\n j = 0 if i == len(vertices)-1 else i+1\n hull[i] = (v, dist(v, vertices[j]), False)\n\n return np.rec.array(hull)", "def cube_vertices(x, y, z, n):\r\n return [\r\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\r\n 
x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\r\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\r\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\r\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\r\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\r\n ]", "def createMesh(width, height):\r\n mesh = [(x,y) for x in range(0, width+1) for y in range(0,height+1)]\r\n return mesh", "def getVectors(self,graph):\n return [Vector.createFromTwoTuples(graph[i],graph[i+1]) for i in range(len(graph)-1)]", "def get_mesh_boundary(triangles):\n # Create edges and sort each vertices on each edge.\n edge0 = triangles[:,0:2]\n edge1 = triangles[:,1:3]\n edge2 = triangles.take((0,2), axis=1)\n edges = np.concatenate((edge0, edge1, edge2), axis=0)\n edge_sort = np.sort(edges, axis=1)\n\n # Get unique edges that are only present once.\n (uniq, uniq_ids, counts) = np.unique(edge_sort, axis=0, return_index=True, return_counts=True)\n edge_inds = np.arange(edge_sort.shape[0], dtype=int)\n outer_edge_ids = edge_inds[np.in1d(edge_inds, uniq_ids[counts==1])]\n outer_edges = edge_sort[outer_edge_ids,:]\n num_outer_edges = outer_edges.shape[0]\n\n # Assume we need to close the polygon.\n num_outer_verts = num_outer_edges + 1\n\n # Loop over outer edges and use traversal method to get ordered vertices.\n v_start = outer_edges[0,0]\n v_end = outer_edges[0,1]\n vert_inds = -1*np.ones(num_outer_verts, dtype=int)\n vert_inds[0] = v_start\n vert_inds[1] = v_end\n vert_num = 2\n outer_edges[0,:] = -1\n for edge_num in range(1,num_outer_edges):\n edge_inds_next = np.where(outer_edges == v_end)\n if (edge_inds_next[0].shape[0] < 1):\n msg = \"Next edge not found for vertex %d\" % v_end\n raise ValueError(msg)\n edge_ind_next = edge_inds_next[0][0]\n vert_ind_next = 0\n if (edge_inds_next[1][0] == 0):\n vert_ind_next = 1\n vert_inds[vert_num] = outer_edges[edge_ind_next, vert_ind_next]\n outer_edges[edge_ind_next, :] = -1\n v_end = vert_inds[vert_num]\n vert_num += 1\n\n return vert_inds" ]
[ "0.70674706", "0.67971975", "0.6688014", "0.6595674", "0.65747494", "0.65282524", "0.6382806", "0.63815814", "0.63469017", "0.623005", "0.62253344", "0.6019658", "0.60096925", "0.60038024", "0.6003564", "0.59902227", "0.59812725", "0.5978787", "0.59492654", "0.59366006", "0.5893783", "0.5866787", "0.58515954", "0.58126247", "0.580837", "0.5798436", "0.57900834", "0.5790011", "0.5785579", "0.575019" ]
0.6878562
1
Sets the view given the indices to slice with.
def _set_view_slice(self, indices): vertices = self._mesh_vertices faces = self._mesh_triangles if len(faces) == 0: self._node.set_data(vertices=None, faces=None) else: self._node.set_data(vertices=vertices[:, ::-1], faces=faces, color=self.color) self._need_visual_update = True self._update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, indices: Tuple[int, ...], slices: Tuple[slice, ...] = (slice(0, 0, 0),)):\n self.indices = indices\n self.slices = slices", "def _set_neighs_slice(self, key):\n ## Condition to use slice type\n self._constant_neighs = True\n self.ks = range(1) if self.ks is None else self.ks\n ## Possible options\n if key is None:\n self.idxs = slice(0, self._n, 1)\n elif isinstance(key, slice):\n start = 0 if key.start is None else key.start\n stop = self._n if key.stop is None else key.stop\n stop = self._n if key.stop > 10*16 else key.stop\n step = 1 if key.step is None else key.step\n self.idxs = slice(start, stop, step)\n elif type(key) in inttypes:\n self.idxs = slice(0, key, 1)\n elif type(key) == tuple:\n self.idxs = slice(key[0], key[1], 1)\n self._setted = True", "def _setSubset(self, label, indices):\n\n for label in FLAG_ALIASES.get(label, [label]):\n self._subsets[label] = indices", "def _set_view_slice(self):\n nd = self.dims.not_displayed\n\n if self.multichannel:\n # if multichannel need to keep the final axis fixed during the\n # transpose. The index of the final axis depends on how many\n # axes are displayed.\n order = self.dims.displayed_order + (self.dims.ndisplay,)\n else:\n order = self.dims.displayed_order\n\n # Slice thumbnail\n indices = np.array(self.dims.indices)\n downsampled = indices[nd] / self.level_downsamples[-1, nd]\n downsampled = np.round(downsampled.astype(float)).astype(int)\n downsampled = np.clip(downsampled, 0, self.level_shapes[-1, nd] - 1)\n indices[nd] = downsampled\n\n image = np.asarray(self.data[-1][tuple(indices)]).transpose(order)\n\n if self.multichannel and image.dtype.kind == 'f':\n self._data_thumbnail = np.clip(image, 0, 1)\n else:\n self._data_thumbnail = image\n\n # Slice currently viewed level\n indices = np.array(self.dims.indices)\n level = self.data_level\n downsampled = indices[nd] / self.level_downsamples[level, nd]\n downsampled = np.round(downsampled.astype(float)).astype(int)\n downsampled = np.clip(downsampled, 0, self.level_shapes[level, nd] - 1)\n indices[nd] = downsampled\n\n disp_shape = self.level_shapes[level, self.dims.displayed]\n scale = np.ones(self.ndim)\n for d in self.dims.displayed:\n scale[d] = self.level_downsamples[self.data_level][d]\n self._scale = scale\n self.events.scale()\n\n if np.any(disp_shape > self._max_tile_shape):\n for d in self.dims.displayed:\n indices[d] = slice(\n self._top_left[d],\n self._top_left[d] + self._max_tile_shape,\n 1,\n )\n self.translate = self._top_left * self.scale\n else:\n self.translate = [0] * self.ndim\n\n image = np.asarray(self.data[level][tuple(indices)]).transpose(order)\n\n if self.multichannel and image.dtype.kind == 'f':\n self._data_view = np.clip(image, 0, 1)\n else:\n self._data_view = image\n\n self._update_thumbnail()\n self._update_coordinates()\n self.events.set_data()", "def ResetSliceViews(self):\n num = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceCompositeNode')\n for i in range(num):\n sliceViewer = slicer.mrmlScene.GetNthNodeByClass(i, 'vtkMRMLSliceCompositeNode')\n sliceViewer.SetBackgroundVolumeID(None)\n sliceViewer.SetForegroundVolumeID(None)", "def set_subset(self):\r\n if self._random_subset:\r\n perm = torch.randperm(len(self._indices))\r\n self._subset = self._indices[perm][:self._subset_size]\r\n else:\r\n self._subset = torch.Tensor(self._indices[:self._subset_size])", "def __setitem__(self, index, value):\n if isinstance(index, types.SliceType):\n keys = self._main._sequence[index]\n if len(keys) != len(value):\n raise 
ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(keys)))\n # FIXME: efficiency? Would be better to calculate the indexes\n # directly from the slice object\n # NOTE: the new keys can collide with existing keys (or even\n # contain duplicates) - these will overwrite\n for key, val in zip(keys, value):\n self._main[key] = val\n else:\n self._main[self._main._sequence[index]] = value", "def set_preds_as_viewed(preds):\n predictions.set_preds_as_viewed(preds)", "def SetViewParameters(ref, args, request):\n del ref # unused\n\n if not args.view:\n request.table.view = None\n\n return request", "def test_slice_setslice_forbidden(self):\n global setVal\n class foo:\n def __setslice__(self, i, j, value):\n global setVal\n setVal = i, j, value\n def __setitem__(self, index, value):\n global setVal\n setVal = index, value\n\n foo()[::] = 23\n self.assertEqual(setVal, (slice(None, None, None), 23))\n foo()[::None] = 23\n self.assertEqual(setVal, (slice(None, None, None), 23))", "def setPosition(self, position, view) -> None:\n ...", "def views(self, views):\n\n self._views = views", "def index_update(tensor, indices, values):\n tensor[indices] = values\n return tensor", "def test_write_slices(self):\n dt = np.dtype('(3,)i')\n\n data1 = np.ones((2,), dtype=dt)\n data2 = np.ones((4,5), dtype=dt)\n\n dset = self.f.create_dataset('x', (10,9,11), dtype=dt)\n\n dset[0,0,2:4] = data1\n self.assertArrayEqual(dset[0,0,2:4], data1)\n\n dset[3, 1:5, 6:11] = data2\n self.assertArrayEqual(dset[3, 1:5, 6:11], data2)", "def set_indexes(self, indexes):\n if not isinstance(indexes, list) or not all(isinstance(i, int) for i in indexes):\n raise ValueError(\"The indexes should be a list and all its elements should be int\")\n self._indexes = indexes\n return self", "def SetDataSlice(vDataSet,arr,aIndexZ,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"SetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(arr.shape)\r\n print(arr.dtype)\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n #Make sure the data is in range and convert the array\r\n s = arr\r\n if dtype != arr.dtype:\r\n miset,maset = GetTotalRange(vDataSet)\r\n arr[arr<miset]=miset\r\n arr[arr>maset]=maset\r\n s = arr.astype(dtype)\r\n\r\n s = s.swapaxes(0,1)\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataSliceBytes\r\n elif dtype == np.uint16:\r\n SetData = vDataSet.SetDataSliceShorts\r\n elif dtype == np.float32:\r\n SetData = vDataSet.SetDataSliceFloat32\r\n\r\n SetData(s,aIndexZ,aIndexC,aIndexT)\r\n #vDataSet.SetChannelRange(aIndexC,miset,maset)\r", "def set_view(self, index):\n view = index\n self.reset_scroll_area()\n self.clearSelection()\n\n if view == self.SURFACE_VIEW:\n self.toggle_surface_mode()\n self.view = \"Surface View\"\n elif view == self.BORDER_VIEW:\n self.toggle_border_mode()\n self.view = \"Border View\"\n elif view == self.GEOM_VIEW:\n if self.show_geom() == \"Canceled\":\n self.canceled = True\n else:\n self.view = \"Geom View\"\n elif view == self.MESH_VIEW:\n if self.show_mesh() == \"Canceled\":\n self.canceled = True\n else:\n self.view = \"Mesh View\"", "def slice(self, start_date, end_date = None):\n\n if end_date is None:\n end_date = self.series.index[-1]\n self.series = self.series.loc[start_date:end_date]", "def subset_from_indices(self, indices):\n return self.extract_inds(indices)", "def _read_index_slice(self, *args, **kwargs): # 
real signature unknown\n pass", "def slice(self,*Indices):\n \n Ind = tuple(Indices)\n\n\n try:\n \n OutShape=shape((1*self[(0,)*Dimension])[Indices])\n except:\n raise IndexError(\"Wrong format for indices\")\n \n Out = BZO(shape=OutShape)\n \n for Ind in self.IndList():\n\n Out[Ind]=array(self[Ind][Indices])\n \n Out.CleanUp()\n \n return Out", "def index_points(points, idx):\n device = points.device\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n batch_indices = torch.arange(B, dtype=torch.long).view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points", "def slice(self, num_slices, slice_index):\n return ClippedDataset(CachedGenotypeDataset(self.basename, self.vector_names,\n _ceiling_partition(len(self), num_slices),\n self.sample_id),\n num_slices=num_slices, slice_index=slice_index)", "def direct_set(self, neighs, sp_relative_pos=None):\n self.idxs = neighs\n self.sp_relative_pos = sp_relative_pos\n self.assert_goodness()", "def set_index(self, index):\n self.index = index", "def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]", "def _index_dset(dset, indices):\n # get dset and arr shape\n dset_shape = dset.shape\n arr_shape = _get_dset_shape(dset, indices)\n\n # create empty array of dset dtype\n arr = np.empty(arr_shape, dtype=dset.dtype)\n\n # get arr and dset indices for each dimension in indices\n dset_indices = []\n arr_indices = []\n for i, dset_inds in enumerate(indices):\n if isinstance(dset_inds, (int, np.integer)):\n # this dimension is len 1, so slice is fine\n arr_indices.append([slice(None)])\n dset_indices.append([[dset_inds]])\n\n elif isinstance(dset_inds, slice):\n # this dimension is just a slice, so slice is fine\n arr_indices.append([slice(None)])\n dset_indices.append([dset_inds])\n\n elif isinstance(dset_inds, list):\n if isinstance(dset_inds[0], (int, np.integer)):\n # this is a list of integers, append slice\n arr_indices.append([slice(None)])\n dset_indices.append([dset_inds])\n elif isinstance(dset_inds[0], slice):\n # this is a list of slices, need list of slice lens\n slens = [_get_slice_len(s, dset_shape[i]) for s in dset_inds]\n ssums = [sum(slens[:j]) for j in range(len(slens))]\n arr_inds = [slice(s, s + l) for s, l in zip(ssums, slens)]\n arr_indices.append(arr_inds)\n dset_indices.append(dset_inds)\n\n # iterate over each of the 4 axes and fill the array\n for blt_arr, blt_dset in zip(arr_indices[0], dset_indices[0]):\n for spw_arr, spw_dset in zip(arr_indices[1], dset_indices[1]):\n for freq_arr, freq_dset in zip(arr_indices[2], dset_indices[2]):\n for pol_arr, pol_dset in zip(arr_indices[3], dset_indices[3]):\n # index dset and assign to arr\n arr[blt_arr, spw_arr, freq_arr, pol_arr] = dset[\n blt_dset, spw_dset, freq_dset, pol_dset\n ]\n\n return arr", "def view_list(self, view_list):\n\n self._view_list = view_list", "def get_slice(x, indices):\n return x[indices]", "def set_next_slice(self, start_idx, end_idx, strip_slice: bool = False):\n si, ei = int(start_idx), int(end_idx)\n if strip_slice:\n si, ei = _strip_slice_of_string(self.string, si, ei)\n self._next_slice = (si, ei)" ]
[ "0.62833464", "0.61686224", "0.6001683", "0.58963263", "0.5595757", "0.5592054", "0.5561091", "0.55347645", "0.5515838", "0.55009526", "0.5443674", "0.5426867", "0.5424508", "0.53574157", "0.52848315", "0.52843475", "0.5270382", "0.5265261", "0.52636987", "0.5232416", "0.52287483", "0.5218011", "0.52006", "0.5191687", "0.5168884", "0.51576096", "0.5157263", "0.51524496", "0.51509464", "0.5142711" ]
0.77993363
0
Utilities for spatio temporal analysis zed.uchicago.edu Fit dataproc with specified grid parameters and create timeseries for date boundaries specified by INIT, THRESHOLD, and END which do not have to match the arguments first input to the dataproc
def fit(self,grid=None,INIT=None,END=None,THRESHOLD=None,csvPREF='TS'): if INIT is not None: self._INIT=INIT if END is not None: self._END=END if grid is not None: self._grid=grid assert(self._END is not None) assert(self._coord1 in self._grid) assert(self._coord2 in self._grid) assert('Eps' in self._grid) if self._types is not None: for key in self._types: self.timeseries(self._grid[self._coord1], self._grid[self._coord2], self._grid['Eps'], key, CSVfile=csvPREF+stringify(key)+'.csv', THRESHOLD=THRESHOLD) return else: assert(self._value_limits is not None) self.timeseries(self._grid[self._coord1], self._grid[self._coord2], self._grid['Eps'], None, CSVfile=csvPREF+'.csv', THRESHOLD=THRESHOLD) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_timeseries(xdates, ydata):\n\n pass", "def fit_surface(tdata,\n scale_params=[40.0, 1.25]):\n agg_tdata = aggregate_data(tdata)\n\n # unpack data\n mt = agg_tdata.mean_temp\n dt = agg_tdata.daily_temp\n scaled_dt = utils.scale_daily_temp(mt, dt, scale_params)\n\n obs_mean = agg_tdata.obs_mean\n obs_std = agg_tdata.obs_std*15.0\n study_sizes = agg_tdata.study_sizes\n\n # mt = tdata.mean_temp\n # dt = tdata.daily_temp\n # scaled_dt = utils.scale_daily_temp(mt, dt, scale_params)\n\n # obs_mean = tdata.obs_mean\n # obs_std = tdata.obs_std\n # study_sizes = np.array([1]*obs_mean.size)\n\n # create spline\n mt_knots = np.linspace(mt.min(), mt.max(), 2)\n dt_knots = np.linspace(scaled_dt.min(), scaled_dt.max(), 2)\n mt_degree = 3\n dt_degree = 3\n spline_list = [xspline.ndxspline(2,\n [mt_knots, dt_knots],\n [mt_degree, dt_degree])]\n\n # create mrbrt object\n x_cov_list = [{\n 'cov_type': 'ndspline',\n 'spline_id': 0,\n 'mat': np.vstack((mt, scaled_dt))\n }]\n z_cov_list = [{\n 'cov_type': 'linear',\n 'mat': np.ones(mt.size)\n }]\n\n mr = mrbrt.MR_BRT(obs_mean,\n obs_std,\n study_sizes,\n x_cov_list,\n z_cov_list,\n spline_list)\n\n # add priors\n prior_list = [\n {\n 'prior_type': 'ndspline_shape_function_uprior',\n 'x_cov_id': 0,\n 'interval': [[mt.min(), mt.max()],\n [scaled_dt.min(), scaled_dt.max()]],\n 'indicator': [-1.0, 1.0],\n 'num_points': [20, 20]\n },\n # {\n # 'prior_type': 'x_cov_gprior',\n # 'x_cov_id': 0,\n # 'prior': np.array([[0.0]*mr.k_beta, [10.0]*mr.k_beta])\n # },\n {\n 'prior_type': 'ndspline_shape_monotonicity',\n 'x_cov_id': 0,\n 'dim_id': 1,\n 'interval': [[mt.min(), mt.max()],\n [0.5, scaled_dt.max()]],\n 'indicator': 'increasing',\n 'num_points': [20, 10]\n },\n {\n 'prior_type': 'ndspline_shape_monotonicity',\n 'x_cov_id': 0,\n 'dim_id': 1,\n 'interval': [[mt.min(), mt.max()],\n [scaled_dt.min(), -0.75]],\n 'indicator': 'decreasing',\n 'num_points': [20, 10]\n },\n {\n 'prior_type': 'z_cov_uprior',\n 'z_cov_id': 0,\n 'prior': np.array([[1e-6]*mr.k_gamma, [1e-6]*mr.k_gamma])\n }\n ]\n mr.addPriors(prior_list)\n\n # fit the model and store the result\n mr.fitModel()\n\n # compute posterior variance for the beta\n # extract the matrix\n k_beta = mr.lt.k_beta\n k_gamma = mr.lt.k_gamma\n X = mr.lt.JF(mr.lt.beta)\n S = mr.lt.S\n V = S**2\n\n beta_var = (X.T/V).dot(X)\n\n if mr.lt.use_regularizer:\n H = np.vstack([mr.lt.H(np.hstack(np.eye(1, k_beta, i).reshape(k_beta,),\n np.zeros(k_gamma)))\n for i in range(k_beta)]).T\n SH = mr.lt.h[1]\n VH = SH**2\n beta_var += (H.T/VH).dot(VH)\n\n beta_var = np.linalg.inv(beta_var)\n\n\n surface_result = utils.SurfaceResult(\n mr.beta_soln,\n beta_var,\n spline_list[0],\n scale_params=scale_params)\n\n return surface_result, agg_tdata", "def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology 
used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models", "def __init__(self, ts_df, time_format=\"%Y-%m-%d %H:%M:%S\", freq='D',\n fill_method='ffill',\n n_test=0, n_val=0,\n hyper_params=None,\n test='adf',\n trend=None,\n seasonal=False,\n seasonal_periods=1,\n **kwds):\n self._ts_df_cols = ['ds', 'y']\n\n self.ts_df = ts_df\n self.time_format = time_format\n self.freq = freq\n self.fill_method = fill_method.lower()\n self.n_test = int(n_test)\n self.n_val = int(n_val)\n self.transform = None\n self._boxcox_lmbda = None\n\n self._mode = ''\n\n self._train_dt = None\n self._test_dt = None\n self._val_dt = None\n\n self.model_fit = None\n self.fittedvalues = None\n self.residuals = None\n self.rmse = 0\n self._gs = tsa.GridSearchClass()\n self.hyper_params = hyper_params\n self.best_model = dict()\n\n \"\"\"\n self.rmse_test = 0\n self.rmse_val = 0\n \"\"\"\n\n self.upper_whisker_res = None\n self.lower_conf_int = None\n self.upper_conf_int = None\n\n self.forecast = None\n self.residuals_forecast = None\n\n self._res_decomp = None\n self._arr_seasonal = None\n self._arr_trend = None\n self._arr_baseline = None\n\n self._test = test\n self._trend = trend\n if self._trend is not None:\n self._trend = self._trend.lower()\n self._seasonal = seasonal\n if isinstance(self._seasonal, str):\n self._seasonal = self._seasonal.lower()\n self._seasonal_periods = seasonal_periods\n\n self._uvts_cls_logger = Logger('uvts_cls')\n\n UVariateTimeSeriesClass.assertions(self)\n # work with ts_df\n self.ts_df = self.ts_df.reset_index()\n self.ts_df.columns = self._ts_df_cols\n self.ts_df['y'] = self.ts_df['y'].apply(np.float64, errors='coerce')\n self.ts_df.set_index('ds', inplace=True)\n self._uvts_cls_logger.info(\n \"Received time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n if not isinstance(self.ts_df.index, pd.DatetimeIndex):\n self._uvts_cls_logger.warning(\"Time conversion required...\")\n self.ts_df = self.ts_df.reset_index()\n try:\n self.ts_df['ds'] = self.ts_df['ds'].apply(\n lambda x: datetime.datetime.strptime(\n str(x).translate({ord('T'): ' ', ord('Z'): None})[:-1],\n 
self.time_format))\n except ValueError as e:\n self._uvts_cls_logger.warning(\"Zulu time conversion not successful: {}\".format(e))\n self._uvts_cls_logger.warning(\"Will try without assuming zulu time...\")\n try:\n self.ts_df['ds'] = self.ts_df['ds'].apply(\n lambda x: datetime.datetime.strptime(str(x), self.time_format))\n except ValueError as e:\n self._uvts_cls_logger.info(\"Time conversion not successful. Check your time_format: {}\".format(e))\n sys.exit(\"STOP\")\n else:\n self._uvts_cls_logger.info(\"Time conversion successful!\")\n else:\n self._uvts_cls_logger.info(\"Time conversion successful!\")\n # set index\n self.ts_df.set_index('ds', inplace=True)\n #\n self.ts_df.index = pd.to_datetime(self.ts_df.index)\n self.ts_df.sort_index(inplace=True)\n # resample\n self.ts_resample()\n UVariateTimeSeriesClass.assertions(self, post=True)\n #\n if self.n_val > len(self.ts_df) - self.n_test:\n self.n_val = len(self.ts_df) - self.n_test\n\n if self.n_test == 0 and self.n_val == 0:\n self._mode = 'forecast'\n elif self.n_test > 0:\n self._mode = 'test'\n elif self.n_test == 0 and self.n_val > 0:\n self._mode = 'validate'\n \n # delegate just for good programming style here\n super(UVariateTimeSeriesClass, self).__init__(**kwds)", "def prepare(gpi, start_date, end_date, models, satellites, kind=\"clim\", window=35):\n # read data\n ts_input = data_object.read_gpi(gpi, start_date, end_date, models, satellites)\n #ts_input = interp.add_nan(ts_input)\n ts_input = interp.iter_fill(ts_input, max_gap=5)\n\n # either calc climatology, apply moving average filter, or do nothing\n if kind == 'clim':\n ts_smooth = smooth.iter_climats(ts_input)\n plot_title = 'Climatology'\n elif kind == 'movav':\n ts_smooth = smooth.iter_movav(ts_input, window)\n #ts_smooth = ts_gapfill\n plot_title = 'Moving average'\n elif kind == None:\n # return original data\n ts_smooth = ts_input\n else:\n raise NotImplementedError\n pass\n\n # drop rows with missing values\n #ts_smooth = ts_smooth.dropna()\n\n # scale satellite data to model data\n ts_scaled = scaling.scale(ts_smooth, 'mean_std_nan', 0)\n # drop nan rows for slope funcs\n return ts_scaled #.dropna()", "def prep_data_fn(self, st_train_dt, end_train_dt, st_val_dt, end_val_dt, st_test_dt, end_test_dt):\n df = self.get_prep_data()\n train = df[(df['ft_data_dt'] >= st_train_dt) & (df['ft_data_dt'] <= end_train_dt)]\n val = df[(df['ft_data_dt'] >= st_val_dt) & (df['ft_data_dt'] <= end_val_dt)].sample(frac=0.4, random_state=2021)\n test = df[(df['ft_data_dt'] >= st_test_dt) & (df['ft_data_dt'] <= end_test_dt)]\n print(f'----train----')\n print(train[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----validation----')\n print(val[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----test----')\n print(test[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n self.set_train(train)\n self.set_validation(val)\n self.set_test(test)\n train_X = train[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n train_y = train['target']\n val_X = val[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n val_y = val['target']\n test_X = test[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n test_y = test['target']\n self.set_train_X(train_X)\n self.set_train_y(train_y)\n self.set_val_X(val_X)\n self.set_val_y(val_y)\n self.set_test_X(test_X)\n self.set_test_y(test_y)", "def 
prepare_data_for_dfe(data, size_timeseries, num_timeseries):\n num_timesteps = size_timeseries\n window_size = size_timeseries\n\n if num_timeseries > CORRELATION_MAX_NUM_TIMESERIES:\n sys.stderr.write(\n 'Number of Time series should be less or equal to ' +\n str(CORRELATION_MAX_NUM_TIMESERIES) +\n '. Terminating!')\n sys.stderr.flush()\n sys.exit(-1)\n elif window_size < 2:\n sys.stderr.write(\n 'Window size must be equal or greater than 2. Terminating!')\n sys.stderr.flush()\n sys.exit(-1)\n elif num_timesteps > size_timeseries:\n sys.stderr.write(\n 'Number of Time steps should be less or equal to' +\n ' size of Time series. Terminating!')\n sys.stderr.flush()\n sys.exit(-1)\n\n precalculations = []\n data_pairs = []\n\n sums = [[0.0] * num_timeseries] * num_timesteps\n sums_sq = [[0.0] * num_timeseries] * num_timesteps\n inv = [[0.0] * num_timeseries] * num_timesteps\n\n # 2 DFE input streams: precalculations and data pairs\n for i in range(num_timesteps):\n for j in range(num_timeseries):\n old = 0.0 if i < window_size else data[j][i - window_size]\n new = data[j][i]\n\n if i == 0:\n sums[i][j] = new\n sums_sq[i][j] = new * new\n else:\n sums[i][j] = sums[i-1][j] + new - old\n sums_sq[i][j] = sums_sq[i-1][j] + new * new - old * old\n\n inv[i][j] = 1 / math.sqrt(window_size * sums_sq[i][j] -\n sums[i][j] * sums[i][j])\n\n # Precalculations REORDERED in DFE ORDER\n precalculations.append(sums[i][j])\n precalculations.append(inv[i][j])\n\n # Data pairs REORDERED in DFE ORDER\n data_pairs.append(new)\n data_pairs.append(old)\n\n return (precalculations, data_pairs)", "def build_shape_data(self, start=None, end=None):\n # If start and end are None, then set them to be min/max of self.df_demand\n if start is None:\n start = self.df_demand['date'].min()\n if end is None:\n end = self.df_demand['date'].max()\n print(f\"date range for shape data is from {start} to {end}\")\n # Extract part of df_demand that is within start and end\n df_sub = self.df_demand[(self.df_demand['date'] >= start) & (self.df_demand['date'] <= end)]\n assert df_sub['date'].min() >= start\n assert df_sub['date'].max() <= end\n num_days = len(pd.date_range(iso8601.parse_date(start), iso8601.parse_date(end), freq='d'))\n print(f\"number of days is {num_days}\")\n # When finding variance and mean, add in missing days as 0s\n # Obtain the counts for each lat/lng region\n counts = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).size().reset_index(name='counts')\n # Group demand data by lat/lng region and average across other cols\n df = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])[['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']].mean().reset_index()\n df = df.merge(counts, on=['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])\n # print(df.head())\n # Modify averages by multiplying each by count and divide by num_days\n vars = ['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']\n for var in vars:\n df[var] = df[var]*df['counts']/num_days\n # print(df.head())\n # Calculate the variance for prob_scooter_avail\n probVariance = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).apply(lambda x: ((x['prob_scooter_avail'] - (x['prob_scooter_avail'].sum()/num_days))**2).sum()/(num_days-1)).reset_index(name='prob_scooter_avail')\n # print(probVariance.head())\n df['prob_scooter_avail_var'] = probVariance['prob_scooter_avail']\n # Check to see if there are any Nan values\n print(f\"Nan values in df? 
{df.isnull().values.any()}\")\n # print(df.head())\n # For each var col, create corresponding color columns (log and unlog)\n # Also create the factors list that get passed into self.create_rectangle_lst\n factors = [('avail_count', 'decimal'), ('avail_mins', 'decimal'),\n ('trips', 'decimal'), ('prob_scooter_avail', 'percent'), ('adj_trips', 'decimal')]\n i = 0\n original_len = len(factors)\n while i < original_len:\n name, type = factors[i]\n # print(f\"name={name}, type={type}\")\n # Create color column\n df = self.map_values_to_color(df, name)\n # If type is not percent than create log version\n if type != 'percent':\n df = self.create_log_column(df, name)\n factors.append(('log_'+name, type))\n i += 1\n # Deal with estimated demand and unmet demand\n # Filter out rows where prob_scooter_avail sig diff from 0\n sigDiffIdx = df.apply(lambda x: utils.sig_diff_from_zero(x['prob_scooter_avail'], x['prob_scooter_avail_var']), axis=1)\n # print(sigDiffIdx.head())\n df_sig_diff = df[sigDiffIdx]\n # Calculate estimated demand and unmet demand\n df_sig_diff = self.calculate_demand(df_sig_diff)\n # print(df_sig_diff.head())\n # Create color column and log column for unmet demand\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'unmet_demand')\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'estimated_demand')\n df_sig_diff = self.create_log_column(df_sig_diff, 'unmet_demand')\n factors.extend([('estimated_demand', 'decimal'), ('unmet_demand', 'decimal'), ('log_unmet_demand', 'decimal')])\n # Fill in the colors for the grid cells that aren't significantly different\n df_not_sig_diff = df[~sigDiffIdx]\n # print(df_not_sig_diff.head())\n df = pd.concat([df_sig_diff, df_not_sig_diff])\n # df.to_csv('../../../data_files/20210427_estimatedDemand.csv', index=False)\n # Create Rectangle information\n rectangles = self.create_rectangle_lst(df, factors)\n return rectangles, start, end", "def create_grid(spl,\n var_to_plot,\n xmin=None,\n xmax=None,\n ymin=None,\n ymax=None):\n\n # Find the lowest and the highest heights to calibrate the ylims\n\n max_heights, min_heights, dates = [], [], []\n for df in spl:\n max_heights.append(df['height [> 0: top, < 0: bottom of elem.] (cm)'].iloc[-1])\n min_heights.append(df['height [> 0: top, < 0: bottom of elem.] (cm)'].iloc[0])\n dates.append(df['dates'].iloc[0])\n\n if ymin and ymax:\n max_height = ymax\n min_height = ymin\n else:\n max_height = np.max(max_heights)\n min_height = np.min(min_heights)\n max_height = round(max_height ,5 ,'up')\n min_height = round(min_height ,5 ,'down')\n\n grid_resolution = 100\n vertical_grid = np.linspace(min_height ,max_height ,grid_resolution)\n\n # Trim spl to fit specified xmin, xmax\n\n if xmin and xmax:\n\n spl = [sp for (date,sp) in zip(dates,spl) if (xmin < date < xmax)]\n dates = [date for date in dates if (xmin < date < xmax)]\n\n grid = np.full((grid_resolution, len(spl)), np.nan)\n\n for count, df in enumerate(spl):\n\n heights = np.array(df['height [> 0: top, < 0: bottom of elem.] 
(cm)'])\n variables = np.array(df[var_to_plot])\n\n # regular_variables = np.interp(vertical_grid, heights, variables, left = np.nan, right = np.nan)\n\n kind = 'nearest'\n\n my_interp = interpolate.interp1d(heights,\n variables,\n kind=kind,\n bounds_error = False,\n fill_value = (np.nan ,np.nan))\n\n regular_variables = my_interp(vertical_grid)\n\n grid[: ,count] = np.flip(regular_variables, axis=0)\n\n return_dict = {'grid':grid,\n 'max_height':max_height,\n 'min_height':min_height,\n 'dates':dates}\n\n return(return_dict)", "def _training(self, data, ts_start, ts_end, min_ts_length=None, min_ts_mean=None, min_ts_mean_window=None,\n max_ft_freq=None, include_holidays=None, optimize=None, **kwargs):\n from numpy.linalg import LinAlgError\n\n freq = self._params['freq']\n\n try:\n\n if data is None:\n raise ValueError('Not enough data to train due to recent change point')\n\n endog = data[self._imputed_metric]\n\n index = pd.date_range(start=ts_start, end=ts_end, freq=freq) # Holidays are always daily.\n\n de_obj = DataExploration()\n exog_data = de_obj._get_exog_data(ts_start, ts_end, index) if self._params[\n 'include_holidays_exog'] else None\n\n # always run the model first without holiday exogenous variables\n result, order = self._fit(endog=endog, endog_end=ts_end, min_ts_mean=min_ts_mean,\n min_ts_mean_window=min_ts_mean_window, include_holidays=include_holidays,\n min_ts_length=min_ts_length, max_ft_freq=max_ft_freq, exog_data=exog_data,\n optimize=optimize)\n\n result['training_tail'] = data.loc[:ts_end].values.tolist()[-3:]\n\n except(LinAlgError, ValueError, LADStructuralError) as e:\n result = {'ErrorMessage': str(e)}\n return result, None\n\n return result, order", "def grid_data(self, method='mean'): \n \n # Add a few extra variables in that I print out each month as I can then\n # compare these with the IDL output to see if there are discrepancies in\n # the number of profiles being used:\n rej_profiles = 0.\n nWOD9 = 0.\n nGTSPP0 = 0.\n nGTSPP999 = 0.\n index = np.where(self.qc == False)\n print('No. of rejected temperature values', np.shape(index))\n \n # Make the salinity values missing where the salinity QC flag is bad:\n fv = self.z.fill_value\n \n # Having set fv to be the general fill_value, check that this is the \n # recognised fill value for the other variables that are assessed by it\n # and raise a ValueError if not:\n if not self.data.fill_value == fv:\n raise ValueError('Incompatible fill value between depth and data')\n \n if not self.ps.fill_value == fv:\n raise ValueError('Incompatible fill value between depth and psal.')\n \n # Now make the salinity values missing where the QC flag is bad:\n self.ps[np.where(self.psalqc == False)] = fv\n index = np.where(self.psalqc == False)\n print('No. 
of rejected salinity values', np.shape(index))\n\n # Restrict to profiles with good posqc:\n self.data = self.data[self.posqc] \n self.x = self.x[self.posqc]\n self.y = self.y[self.posqc]\n self.p = self.p[self.posqc]\n self.pn = self.pn[self.posqc]\n self.ir = self.ir[self.posqc]\n self.ps = self.ps[self.posqc]\n self.z = self.z[self.posqc]\n self.qc = self.qc[self.posqc]\n self.posqc = self.posqc[self.posqc]\n \n # Store the maximum depth (with a good quality flag) of each profile:\n zvar1 = np.ma.masked_where(self.qc == False, self.z)\n maxdepth = np.amax(zvar1, axis = 1)\n # Get only the profiles that aren't entirely bad:\n self.data = self.data[maxdepth.mask == False]\n self.x = self.x[maxdepth.mask == False]\n self.y = self.y[maxdepth.mask == False]\n self.p = self.p[maxdepth.mask == False]\n self.pn = self.pn[maxdepth.mask == False]\n self.ir = self.ir[maxdepth.mask == False]\n self.ps = self.ps[maxdepth.mask == False]\n self.z = self.z[maxdepth.mask == False]\n self.qc = self.qc[maxdepth.mask == False]\n self.posqc = self.posqc[maxdepth.mask == False] \n maxdepth = maxdepth[maxdepth.mask == False]\n \n # I could thin to the second element of zbounds as if profiles don't go\n # that deep then I won't be able to use them (this would be true even if\n # the first element of zbounds wasn't zero) - it also removes the need \n # for an OHC dep value in my configuration file:\n self.data = self.data[maxdepth >= self.zbounds[1]]\n self.x = self.x[maxdepth >= self.zbounds[1]]\n self.y = self.y[maxdepth >= self.zbounds[1]]\n self.p = self.p[maxdepth >= self.zbounds[1]]\n self.pn = self.pn[maxdepth >= self.zbounds[1]]\n self.ir = self.ir[maxdepth >= self.zbounds[1]]\n self.ps = self.ps[maxdepth >= self.zbounds[1]]\n self.z = self.z[maxdepth >= self.zbounds[1]]\n self.qc = self.qc[maxdepth >= self.zbounds[1]]\n self.posqc = self.posqc[maxdepth >= self.zbounds[1]]\n year = int(self.fname[-9:-5])\n \n # Filter out low quality XBTs:\n rem = []\n for p in range(len(self.p)):\n xbt = inst_type.is_xbt(self.pn[p], self.ir[p], self.ps[p], fv, \n self.z[p], fv)\n if xbt[0][0] >= 0:\n # Remove any XBTs sourced from WOD where the fall rate equation\n # is unknown:\n if xbt[3] == 9:\n rem.append(p)\n nWOD9 += 1\n # Remove any GTSPP XBTs where the type is unknown and year is \n # >= 1995. \n # Or if type is unknown and it may not be a T4/T6/T7/DB because \n # the depth it reaches is too deep. Some of these will have been\n # given the Hanawa correction and so be inaccurate - this \n # happens in EN processing if probe code is zero:\n projectName = ''.join(self.pn[p])\n if projectName[0:5] == 'GTSPP':\n if (xbt[4] == 0 or xbt[4] == 99 or xbt[4] == 999) and year >= 1995:\n rem.append(p)\n if xbt[4] == 0:\n nGTSPP0 += 1\n else:\n nGTSPP999 += 1\n if (xbt[4] == 0 and xbt[1] > 900):\n rem.append(p)\n \n # Get rid of the low quality XBTs:\n nolowxbt = np.array(np.setdiff1d(range(len(self.p)),rem))\n self.data = self.data[nolowxbt]\n self.x = self.x[nolowxbt]\n self.y = self.y[nolowxbt]\n self.p = self.p[nolowxbt]\n self.pn = self.pn[nolowxbt]\n self.ps = self.ps[nolowxbt]\n self.ir = self.ir[nolowxbt]\n self.z = self.z[nolowxbt]\n self.qc = self.qc[nolowxbt]\n self.posqc = self.posqc[nolowxbt]\n \n # Do the vertical averaging:\n self.p = np.array(range(len(self.p)))\n \n # Check if the maxgap really is fixed:\n print('Fixed maxgap? 
', self.fixedgap == True)\n \n # Loop over these profiles and do vertical averages:\n # Storage vectors need to start off filled with fill values otherwise\n # you get zero values being included in the averages instead of being\n # discarded. Make these storage matrices, not vectors so you can store \n # avergae values for multiple depth levels in them.\n all_mT = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_mT.fill(fv)\n all_lT = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_lT.fill(fv)\n all_dep = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_dep.fill(fv)\n all_x = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_x.fill(fv)\n all_y = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_y.fill(fv)\n for p in range(len(self.p)):\n # 1. Select the profile of interest and make sure no tar_t1 values\n # are carried over from a previous profile:\n x_p = self.x[p]\n y_p = self.y[p]\n qc_p = np.where(np.logical_and(self.qc[p] == True, self.z[p].mask == False))\n data_p = self.data[p][qc_p]\n z_p = self.z[p][qc_p].data\n tar_t1 = fv\n # 1a. Sanity check to make sure there are no missing data going into\n # the averaging process:\n tempanddeppres = np.where(np.logical_and(data_p != fv, z_p != fv))[0]\n data_p = data_p[tempanddeppres]\n z_p = z_p[tempanddeppres]\n # 2. Make sure that depths are in correct order, but sorting takes time\n # therefore only sort if I've identified non-ascending depths:\n if np.any((z_p[1:] - z_p[:-1]) < 0):\n sortz = np.argsort(z_p)\n data_p = data_p[sortz]\n z_p = z_p[sortz]\n # To be in line with the IDL code also need to check for very wrong \n # depth values that might have slipped through:\n udep = np.where(np.logical_and(z_p > -99.9, z_p < 10000))\n z_p = z_p[udep]\n data_p = data_p[udep]\n # 3. 
Find the temperature at the exact depth level - this is now \n # more in depth as there are multiple depth levels to look at - AT \n # THE MOMENT THIS STILL ASSUMES THAT THE FIRST LEVEL IS 0 TO SOME\n # DEPTH - THIS IS STILL A SLIGHT SIMPLIFICATION:\n dval = 0\n for dep in self.zbounds[1:]:\n # Have added in this switch to allow you to specify whether\n # you're using a fixed gap or not:\n if self.fixedgap == True:\n maxgap = 200\n else:\n maxgap = max(0.3*(self.zbounds[dval+1]),100)\n # Get only the levels of the profile in the depth range of \n # interest:\n LTi1 = np.where(np.logical_and(z_p < dep, \n z_p >= self.zbounds[dval]))\n GEi1 = np.where(z_p >= dep)\n if (np.shape(LTi1)[1] != 0 and np.shape(GEi1)[1] != 0):\n # Get the depth differences between layers and the mean temps across\n # layers:\n nk = np.shape(LTi1)[1] + 1\n dz = np.zeros(nk)\n mt = np.zeros(nk)\n if dval == 0:\n dz[0] = z_p[0]\n mt[0] = data_p[0]\n for kk in range(1, nk):\n dz[kk] = z_p[kk] - z_p[kk-1]\n mt[kk] = 0.5 * (data_p[kk] + data_p[kk-1])\n else:\n # Effectively missing the first layer as dz will be zero\n # there if you've calculated a temperature at that depth\n # for tar_t1 on the previous loop, but it won't exist if\n # you haven't been able to calculate a tar_t1 value, so\n # then you'll have to do what you do when you're at the \n # first depth level and aren't garunteed a value at 0m.\n if tar_t1 != fv:\n dz[0] = z_p[LTi1[0][0]] - self.zbounds[dval]\n mt[0] = 0.5 * (data_p[LTi1[0][0]] + tar_t1)\n else:\n dz[0] = z_p[LTi1[0][0]] - self.zbounds[dval]\n mt[0] = data_p[LTi1[0][0]]\n for kk in range(0, nk-1):\n dz[kk+1] = z_p[LTi1[0][kk]+1] - z_p[LTi1[0][kk]]\n mt[kk+1] = 0.5 * (data_p[LTi1[0][kk]+1] + data_p[LTi1[0][kk]])\n \n # Work out the temp at the target depth:\n if z_p[GEi1[0][0]] == dep:\n #print('A sampled depth is equal to the desired level')\n tar_t1 = data_p[GEi1[0][0]]\n else:\n deltaT = data_p[GEi1[0][0]] - data_p[GEi1[0][0] -1]\n deltaZ = z_p[GEi1[0][0]] - z_p[GEi1[0][0] -1]\n tar_t1 = (dep - z_p[GEi1[0][0] -1])*(deltaT/deltaZ) + data_p[GEi1[0][0] -1]\n dz[nk -1] = dep - z_p[GEi1[0][0] -1]\n mt[nk -1] = 0.5*(data_p[GEi1[0][0] -1] + tar_t1)\n \n # Check if there are unacceptable gaps between layers:\n test_gap = np.where(dz > maxgap)\n if np.shape(test_gap)[1] != 0:\n mean_t1 = fv\n mean_t2 = fv\n tar_t1 = fv\n else:\n mean_t1 = sum(np.multiply(mt,dz))/(self.zbounds[dval+1] - self.zbounds[dval])\n \n # Make sure there are no crazy mean values:\n if (abs(mean_t1) > 100 and mean_t1 != fv):\n raise ValueError('Extreme values found')\n # Save the mean temperature at that depth and the temp at the target \n # depth, also save the depth and profile number:\n all_mT[p,dval] = mean_t1\n all_lT[p,dval] = tar_t1\n all_dep[p,dval] = dep # Lower bound of depth\n all_x[p,dval] = x_p\n all_y[p,dval] = y_p\n else:\n tar_t1 = fv # Make sure that if you have no data in a \n # specific depth range you don't carry an old tar_t1 value\n # over.\n \n dval +=1\n\n # Reshape\n self.data_1d = self.reshape_1d(self.data)\n self.x_1d = self.reshape_1d(self.x)\n self.y_1d = self.reshape_1d(self.y)\n self.p_1d = self.reshape_1d(self.p)\n self.z_1d = self.reshape_1d(self.z)\n self.qc_1d = self.reshape_1d(self.qc)\n self.posqc_1d = self.reshape_1d(self.posqc)\n \n # Apply QC - Technically the self.posqc_1d step shouldn't be needed as \n # these profiles will have got filtered out with the earlier QC, but \n # it's good to leave it in there as then if I remove the depth \n # restriction step, this step will still catch 
badly positioned profiles:\n #qcind = (self.qc_1d == True) & (self.posqc_1d == True)\n qcind = (self.qc_1d == True)\n self.qc_1d = self.qc_1d[qcind]\n self.posqc_1d = self.posqc_1d[qcind]\n self.data_1d = self.data_1d[qcind] # Still seems to have 99999 values in\n # it, and I can't see where they get filtered out, but they must get \n # filtered out somewhere or the mean values wouldn't be sensible.\n self.x_1d = self.x_1d[qcind]\n self.y_1d = self.y_1d[qcind]\n self.p_1d = self.p_1d[qcind]\n self.z_1d = self.z_1d[qcind]\n # Trying to work out why I use self.posqc here, not qcind, I think it's \n # because qcind will be one value for each level for each profile,\n # whereas posqc is one value for a whole profile - though I don't think\n # there should be any values left with a bad self.posqc - so this step \n # is probably redundant:\n #all_mTqc = all_mT[self.posqc]\n # Make sure I'm not including values where a mean_t1 value couldn't be \n # calculated:\n #all_mTqc1 = all_mTqc[np.where(all_mTqc != 99999.0)]\n # Need to remember that all_mT may well now be an array not a vector...\n all_mTqc1 = all_mT[all_mT != 99999.0]\n all_depqc = all_dep[all_mT != 99999.0]\n all_xqc = all_x[all_mT != 99999.0]\n all_yqc = all_y[all_mT != 99999.0]\n \n # Prepare data for gridding\n self.init_xgrid()\n self.init_ygrid()\n self.init_zgrid()\n \n # Getting the unique profile references:\n punique = np.unique(self.p_1d, return_index = True)[1]\n \n # From the unique profile references selecting only those profiles that \n # have a mean temperature over the depth of interest:\n #puniqueqc = punique[all_mT != 99999.0]\n \n # Getting references for all the points - so every temp value will have \n # a depth, lat and long i.e. you will still have multiple points per \n # profile:\n points = np.vstack([self.z_1d, self.y_1d, self.x_1d]).transpose()\n \n # Getting a single reference for each profile, this will just take the \n # first value in points, for each profile => points2 will have a \n # latitude, a longitude and the shallowest accepted depth of the profile\n # => This point will always (pretty much) be put into the top set of \n # grid boxes, would therefore need to consider this further if I wanted\n # to populate multiple depth level grid boxes at once:\n points2 = np.vstack([self.z_1d[punique], self.y_1d[punique], \n self.x_1d[punique]]).transpose()\n \n # points3 is like points 2, but gets coordinates for only profiles that\n # have a mean temperature down to the depth of interest:\n #points3 = np.vstack([np.ones(len(puniqueqc)), self.y_1d[puniqueqc], \n # self.x_1d[puniqueqc]]).transpose()\n points3 = np.vstack([all_depqc - 1, all_yqc, all_xqc]).transpose()\n \n # Pretty self explanatory - the boundaries of the grid boxes:\n bins = [self.zbounds, self.ybounds, self.xbounds]\n \n # Grid data:\n grid_count, binedges, binno = scipy.stats.binned_statistic_dd(\n points, self.data_1d, statistic='count', bins=bins)\n grid_sum, binedges, binno = scipy.stats.binned_statistic_dd(\n points, self.data_1d, statistic='sum', bins=bins)\n grid_meansum, binedges, binno = scipy.stats.binned_statistic_dd(\n points3, all_mTqc1, statistic = 'sum', bins = bins)\n grid_pcount, binedges, binno = scipy.stats.binned_statistic_dd(\n points3, all_mTqc1, statistic='count', bins=bins)\n# grid_pcount, binedges, binno = scipy.stats.binned_statistic_dd(\n# points3, self.data_1d[puniqueqc], statistic='count', bins=bins)\n# grid_max, binedges, binno = scipy.stats.binned_statistic_dd(\n# points, self.data_1d, statistic = 'max', 
bins=bins)\n# grid_min, binedges, binno = scipy.stats.binned_statistic_dd(\n# points, self.data_1d, statistic = 'min', bins=bins)\n# grid_med, binedges, binno = scipy.stats.binned_statistic_dd(\n# points, all_mTqc1, statistic = 'median', bins = bins)\n \n # Sum of valid temps/ number of valid obs:\n grid_tmean = grid_sum / grid_count\n grid_tmean = np.ma.MaskedArray(grid_tmean, mask = (grid_count == 0))\n # Sum of valid mean temps/ number of valid profiles:\n grid_meantmean = grid_meansum / grid_pcount\n grid_meantmean = np.ma.MaskedArray(grid_meantmean, mask = (grid_pcount == 0))\n self.grid_tmean = grid_tmean\n self.grid_count = grid_count\n self.grid_sum = grid_sum\n self.grid_meansum = grid_meansum\n self.grid_pcount = grid_pcount\n self.grid_meantmean = grid_meantmean", "def main(\n ts_dir: str,\n save_dir: str,\n figure_dir: str,\n arima_order: Tuple[int] = (1, 0, 1),\n start = datetime(2000, 1, 1),\n end = datetime(2019, 9, 30),\n verbose: bool = True\n) -> None:\n # Load dataset\n df = pd.read_csv(\n ts_dir,\n index_col=0,\n header=0,\n parse_dates=[\"DATE\"],\n date_parser=lambda d: datetime.strptime(d, \"%Y-%m-%d\")\n )\n df.replace(\".\", np.NaN, inplace=True)\n df = df.astype(np.float32)\n if verbose:\n print(\"** Raw Dataset **\")\n df.info()\n print(f\"Nan ratio: {np.mean(np.isnan(df.values.squeeze())) * 100: .2}%\")\n\n # select subset.\n def _select_range(df):\n return df[np.logical_and(\n df.index >= start, df.index <= end\n )]\n df = _select_range(df)\n if verbose:\n print(\"** Raw Dataset **\")\n df.info()\n\n df_filled = utils.arima_interpolate(\n raw=df,\n arima_order=arima_order,\n verbose=verbose\n )\n df_filled.to_csv(save_dir)\n # Visualize Interpolation results.\n utils.visualize_interpolation(df, df_filled, figure_dir)\n # return df, df_filled\n return None", "def gendata(params,xmin,xmax,npts=4000):\n F = lorentzian.ForwardFactory\n def gensample(F, xmin, xmax):\n from numpy import arange\n import random\n a = arange(xmin, xmax, (xmax-xmin)/200.)\n ymin = 0\n ymax = F(a).max()\n while 1:\n t1 = random.random() * (xmax-xmin) + xmin\n t2 = random.random() * (ymax-ymin) + ymin\n t3 = F(t1)\n if t2 < t3:\n return t1\n fwd = F(params)\n return array([gensample(fwd, xmin,xmax) for i in xrange(npts)])", "def _create_historic_forecasts(\n data, time_dt, frt_dt, standard_grid_metadata=\"uk_ens\", number_of_days=5, **kwargs\n):\n historic_forecasts = iris.cube.CubeList([])\n for day in range(number_of_days):\n new_frt_dt = frt_dt + datetime.timedelta(days=day)\n new_time_dt = time_dt + datetime.timedelta(days=day)\n historic_forecasts.append(\n set_up_variable_cube(\n data - 2 + 0.2 * day,\n time=new_time_dt,\n frt=new_frt_dt,\n standard_grid_metadata=standard_grid_metadata,\n **kwargs,\n )\n )\n return historic_forecasts", "def generate_obs_grid(start_date, end_date, storm_report_path, model_grid_path, proj_str):\n\n grid = xr.open_dataset(model_grid_path)\n for coord in ['lon', 'lat']:\n grid[coord].values = grid[coord].astype('float32')\n valid_dates = pd.date_range(start_date, end_date, freq='1h')\n\n obs_list = []\n\n for report_type in ['filtered_torn', 'filtered_wind', 'filtered_hail']:\n\n ds_list = []\n\n obs = combine_storm_reports(valid_dates.min(), valid_dates.max(), storm_report_path, report_type)\n\n for valid_date in valid_dates:\n\n ds = grid.expand_dims('time').assign_coords(valid_time=('time', [valid_date]))\n ds[report_type.split('_')[-1]] = ds['lat'] * 0\n\n obs_sub = obs[obs['Actual_Date'] == valid_date]\n obs_indx = 
find_coord_indices(ds['lon'].values, ds['lat'].values, obs_sub['Lon'], obs_sub['Lat'], proj_str)\n for i in obs_indx:\n if i is not None:\n ds[report_type.split('_')[-1]][i[0], i[1]] += 1\n else:\n continue\n ds_list.append(ds)\n\n obs_list.append(xr.concat(ds_list, dim='time'))\n\n return xr.merge(obs_list)", "def main(args, **kwargs):\n data_file = os.path.join(kwargs['data_dir'],\n '{}_{}m_{}_{}.npz'.format(args.area, args.resolution, args.year, args.tar_date))\n data = np.load(data_file)\n tar_label_mat = data['label_mat']\n tar_dynamic_mat = data['dynamic_mat']\n static_mat, tar_static_mat = data['static_mat'], data['static_mat']\n mapping_mat = data['mapping_mat']\n dynamic_features, static_features = list(data['dynamic_features']), list(data['static_features'])\n\n \"\"\" extract information from all time period \"\"\"\n dynamic_mat, label_mat = [], []\n for date in args.dates:\n data_file = os.path.join(kwargs['data_dir'],\n '{}_{}m_{}_{}.npz'.format(args.area, args.resolution, args.year, date))\n data = np.load(data_file)\n dynamic_mat.append(data['dynamic_mat'])\n label_mat.append(data['label_mat'])\n dynamic_mat = np.concatenate(dynamic_mat)\n label_mat = np.concatenate(label_mat)\n\n data_obj = DataObj(label_mat, dynamic_mat, static_mat,\n tar_label_mat, tar_dynamic_mat, tar_static_mat,\n dynamic_features, static_features, mapping_mat)\n\n \"\"\" load train, val, test locations \"\"\"\n data_obj.train_loc, data_obj.val_loc, data_obj.test_loc = load_train_val_test(kwargs['train_val_test_file'], args)\n\n data_obj.train_y = data_obj.gen_train_val_test_label(data_obj.label_mat, data_obj.train_loc)\n data_obj.val_y = data_obj.gen_train_val_test_label(data_obj.label_mat, data_obj.val_loc)\n data_obj.test_y = data_obj.gen_train_val_test_label(data_obj.tar_label_mat, data_obj.test_loc)\n\n logging.info('Number of features = {}.'.format(data_obj.n_features))\n logging.info('Number of dynamic features = {}.'.format(data_obj.n_dynamic_features))\n logging.info('Number of static features = {}.'.format(data_obj.n_static_features))\n logging.info('Number of time points = {}.'.format(data_obj.n_times))\n logging.info('Shape of the matrix = ({}, {}).'.format(data_obj.n_rows, data_obj.n_cols))\n\n \"\"\" normalize data \"\"\"\n data_obj.dynamic_x = normalize_mat(data_obj.dynamic_mat, if_retain_last_dim=True)\n data_obj.static_x = normalize_mat(data_obj.static_mat, if_retain_last_dim=True)\n data_obj.tar_dynamic_x = normalize_mat(data_obj.tar_dynamic_mat, if_retain_last_dim=True)\n data_obj.tar_static_x = normalize_mat(data_obj.tar_static_mat, if_retain_last_dim=True)\n\n \"\"\" load auto-encoder model \"\"\"\n ae = torch.load(os.path.join(kwargs['model_dir'], kwargs['ae_model_name'] + '.pkl'))\n\n \"\"\" define DeepAP model \"\"\"\n dap = DeepAP(in_dim=data_obj.n_features,\n ae_en_h_dims=[64, 32, 16],\n ae_de_h_dims=[16, 32, 64],\n\n conv_lstm_in_size=(data_obj.n_rows, data_obj.n_cols),\n conv_lstm_in_dim=args.ae_h_dim, # ae_h_dim\n conv_lstm_h_dim=[args.dap_h_dim], # dap_h_dim\n conv_lstm_kernel_sizes=args.kernel_sizes, # kernel_sizes\n conv_lstm_n_layers=1,\n\n fc_in_dim=args.dap_h_dim * len(args.kernel_sizes),\n fc_h_dims=args.fc_h_dims, # fc_h_dims\n fc_out_dim=1,\n\n ae_pretrain_weight=ae.state_dict(),\n if_trainable=True,\n fc_p_dropout=0.1,\n\n mask_thre=args.mask_thr,\n device=kwargs['device'])\n\n dap = dap.to(kwargs['device'])\n train(dap, data_obj, args, **kwargs)", "def get_time_series(this_lat, this_lon, case, varnames):\n\n cesmdir = 
'/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly'\n\n if 'LE' in case:\n\n from observational_large_ensemble.params import karen_params_cesm\n\n mode_lag = karen_params_cesm.mode_lag\n cvdp_loc = karen_params_cesm.cvdp_loc\n AMO_cutoff_freq = karen_params_cesm.AMO_cutoff_freq\n\n name_conversion = {'tas': 'TREFHT', 'pr': 'PRECC', 'slp': 'PSL'}\n cesm_names = [name_conversion[v] for v in varnames]\n this_member = int((case).split('-')[-1])\n cvdp_file = '%s/CESM1-CAM5-BGC-LE_#%i.cvdp_data.1920-2018.nc' % (cvdp_loc, this_member)\n\n # Historical filenames for CESM. Will need to append part of RCP8.5 to get full period\n filenames = []\n for var in cesm_names:\n file_str = '%s/%s/b.e11.B20TRC5CNBDRD.f09_g16.%03d.cam.h0.%s.??????-200512.nc' % (cesmdir, var,\n this_member, var)\n this_file = glob(file_str)[0]\n filenames.append(this_file)\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames,\n karen_params_cesm.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n else:\n\n from observational_large_ensemble.params import karen_params_obs\n\n mode_lag = karen_params_obs.mode_lag\n cvdp_loc = karen_params_obs.cvdp_loc\n AMO_cutoff_freq = karen_params_obs.AMO_cutoff_freq\n\n tas_dir = karen_params_obs.tas_dir\n pr_dir = karen_params_obs.pr_dir\n slp_dir = karen_params_obs.slp_dir\n cvdp_file = '%s/HadISST.cvdp_data.1920-2018.nc' % cvdp_loc\n file_dict = {'tas': '%s/Complete_TAVG_LatLong1.nc' % tas_dir,\n 'pr': '%s/full_data_monthly_v2020.nc' % pr_dir,\n 'slp': '%s/prmsl.mon.mean.nc' % slp_dir}\n\n filenames = []\n for var in varnames:\n filenames.append(file_dict[var])\n\n name_conversion = {'tas': 'temperature', 'pr': 'precip', 'slp': 'prmsl'}\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames[0],\n karen_params_obs.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n return this_ts, df_shifted", "def run_regression(auto_input,\n variables: list = ['magasin', 'tilsig'],\n regions: list = ['NO1', 'NO2', 'NO3', 'NO4', 'NO5', 'SE1', 'SE2', 'SE3', 'SE4'],\n jupyter: bool = False,\n backup: bool = False,\n loop: bool = False,\n write: bool = True,\n week_nb: int = False,\n year: int = False) -> None:\n\n start_tuning = utctime_now()\n\n for region in regions:\n\n if not region in ['NO1', 'NO2', 'NO3', 'NO4', 'NO5', 'SE1', 'SE2', 'SE3', 'SE4']:\n sys.exit(\"Region must be one out of: 'NO1', 'NO2', 'NO3', 'NO4', 'NO5', 'SE1', 'SE2', 'SE3', 'SE4'\")\n\n for variable in variables:\n\n if not variable in ['magasin', 'tilsig']:\n sys.exit(\"Variable must be either 'tilsig' or 'magasin'\")\n\n print('---------------------------------------------------------------')\n print(' {}, {} '.format(region, variable))\n print('---------------------------------------------------------------')\n\n \n \n df_week, MagKap = auto_input[variable]\n \n period, forecast_time, read_start = rs.get_timeperiods(variable, week_nb, year)\n \n reg_end = (pd.to_datetime(time.strftime(forecast_time), format=\"%Y.%m.%d\") - Timedelta(days=7)).strftime(\n '%Y.%m.%d')\n\n if (0 <= today.weekday() <= 1) or (today.weekday() == 2 and today.hour < 14): # True for tipping\n last_forecast = forecast_time\n else:\n last_forecast = forecast_time\n\n df_cleaned = deletingNaNs(df_week.loc[:last_forecast])\n\n if loop:\n if variable == 'tilsig':\n max_kandidater = 196\n min_kandidater = 2\n\n else:\n 
max_kandidater = 171\n min_kandidater = 2\n\n max_weeks = 238 #288\n min_weeks = 16\n print('max ant. kandidater: {}, min ant. kandidater: {}'.format(max_kandidater, min_kandidater))\n print('max ant. uker: {}, min ant. uker: {}'.format(max_weeks, min_weeks))\n\n start_time_loop = utctime_now()\n fasit, fasit_key = rs.make_fasit(variable, region, reg_end, period)\n print('Fasit er lest inn.\\n')\n\n if fasit[fasit_key][:reg_end].isnull().any():\n print('OBS: Det mangler verdier på fasiten! Går videre til neste region i loopen..')\n print(fasit[fasit_key][:reg_end].isnull())\n continue\n\n sorted_r2 = get_R2_sorted(variable, df_cleaned, fasit, fasit_key)\n\n if loop:\n max_p = 0.001 # Beste testresultat SvF\n\n # First loop: Tuning number of candidates for best possible R2 combined\n df_ant_kandidater = pd.DataFrame(columns=columns)\n for antall in range(min_kandidater, max_kandidater + 1, 1):\n if antall > len(sorted_r2):\n chosen_r2 = sorted_r2\n else:\n chosen_r2 = sorted_r2[:antall]\n output = make_estimate(df_cleaned, fasit, fasit_key, last_forecast, first_period, max_p, chosen_r2,\n loop=True)\n df_ant_kandidater = df_ant_kandidater.append(\n {columns[0]: output[0], columns[1]: output[1], columns[2]: output[2], columns[3]: output[3],\n columns[4]: output[4], columns[5]: output[5], columns[6]: output[6]}, ignore_index=True)\n if antall > len(sorted_r2):\n print('Feilmelding: Ønsket antall kandidater overskrider maks (%i).\\n' % len(sorted_r2))\n break\n idx_max = df_ant_kandidater.r2_samlet.idxmax(skipna=True)\n ant_kandidater_beste = int(df_ant_kandidater.ant_kandidater.values[idx_max])\n print('Beste ant_kandidater loop 1: ', ant_kandidater_beste)\n\n # Second loop: tuning length of the short regression for best possible R2 combined, using the best number of\n # candidates found in the First loop.\n final_chosen_r2 = sorted_r2[:ant_kandidater_beste]\n df_short_period = pd.DataFrame(columns=columns)\n for short_period in range(min_weeks, max_weeks + 1, 1):\n short_period = int(short_period)\n output = make_estimate(df_cleaned, fasit, fasit_key, last_forecast, short_period, max_p,\n final_chosen_r2, loop=True)\n df_short_period = df_short_period.append(\n {columns[0]: output[0], columns[1]: output[1], columns[2]: output[2], columns[3]: output[3],\n columns[4]: output[4], columns[5]: output[5], columns[6]: output[6]}, ignore_index=True)\n idx_max = df_short_period.r2_samlet.idxmax(skipna=True)\n short_period_beste = int(df_short_period.short_period.values[idx_max])\n print('Beste short_period loop 2: ', short_period_beste)\n\n # Getting the best input variables from loop and write to input_variables_from_tuning.txt\n df_all_methods = pd.concat([df_ant_kandidater, df_short_period], ignore_index=True, sort=False)\n idx_max = df_all_methods.r2_samlet.idxmax(skipna=True)\n ant_kandidater_beste = int(df_all_methods.ant_kandidater.values[idx_max])\n chosen_r2_beste = sorted_r2[:ant_kandidater_beste]\n short_period_beste = df_all_methods.short_period.values[idx_max]\n ws.write_input_variables_to_file(region, variable, max_p, ant_kandidater_beste, short_period_beste)\n\n print('\\nTuning for regionen tok %.0f minutter. 
\\n' % ((utctime_now() - start_time_loop) / 60))\n\n else:\n # getting the best variables from input_variables_from_tuning.txt or input_variables_backup.txr\n short_period_beste, max_p, ant_kandidater_beste, input_file = rs.get_input_variables_from_file(variable,region,backup)\n chosen_r2_beste = sorted_r2[:ant_kandidater_beste]\n print(\"Input variables was read from: \", input_file)\n\n # MAKE ESTIMATE AND SHOW RESULTS\n input1 = make_estimate(df_cleaned, fasit, fasit_key, last_forecast, short_period_beste, max_p,\n chosen_r2_beste, loop=False)\n input2 = fasit_key, ant_kandidater_beste, max_p, reg_end, read_start\n\n\n #WRITE RESULTS\n if write:\n # Write results from the regression to SMG.\n fasit, long_results, short_results, df_tot, chosen_p, chosen_r2, r2_modelled, r2_modelled_long, prediction, tipping_ps, short_period, nb_weeks_tipping = input1\n\n # write to SMG:\n ws.write_SMG_regresjon(variable, region, tipping_ps[-1:])\n\n # write to SMG, virtual:\n ws.write_V_SMG_Regresjon(short_results, chosen_p, fasit_key, r2_modelled, r2_modelled_long, short_period_beste, MagKap)\n\n if jupyter:\n ws.show_result_jupyter(input1, input2)\n else:\n ws.show_result(input1, input2, input_file)\n\n print('---------------------------------------------------------------')\n print(' SLUTT ')\n print('---------------------------------------------------------------')\n print('\\nRegresjon for alle regioner og variabler brukte totalt %.0f minutter. \\n' % (\n (utctime_now() - start_tuning) / 60))", "def par_fit(init_file):\n check_presence_init(init_file)\n\n dict_ = read(init_file)\n np.random.seed(dict_[\"SIMULATION\"][\"seed\"])\n\n # We perform some basic consistency checks regarding the user's request.\n check_presence_estimation_dataset(dict_)\n # check_initialization_dict2(dict_)\n # check_init_file(dict_)\n\n # Distribute initialization information.\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n num_treated = dict_[\"AUX\"][\"num_covars_treated\"]\n num_untreated = num_treated + dict_[\"AUX\"][\"num_covars_untreated\"]\n\n _, X1, X0, Z1, Z0, Y1, Y0 = process_data(data, dict_)\n\n if dict_[\"ESTIMATION\"][\"maxiter\"] == 0:\n option = \"init\"\n else:\n option = dict_[\"ESTIMATION\"][\"start\"]\n\n # Read data frame\n\n # define starting values\n x0 = start_values(dict_, data, option)\n opts, method = optimizer_options(dict_)\n dict_[\"AUX\"][\"criteria\"] = calculate_criteria(dict_, X1, X0, Z1, Z0, Y1, Y0, x0)\n dict_[\"AUX\"][\"starting_values\"] = backward_transformation(x0)\n rslt_dict = bfgs_dict()\n if opts[\"maxiter\"] == 0:\n rslt = adjust_output(None, dict_, x0, X1, X0, Z1, Z0, Y1, Y0, rslt_dict)\n else:\n opt_rslt = minimize(\n minimizing_interface,\n x0,\n args=(dict_, X1, X0, Z1, Z0, Y1, Y0, num_treated, num_untreated, rslt_dict),\n method=method,\n options=opts,\n )\n rslt = adjust_output(\n opt_rslt, dict_, opt_rslt[\"x\"], X1, X0, Z1, Z0, Y1, Y0, rslt_dict\n )\n # Print Output files\n print_logfile(dict_, rslt)\n\n if \"comparison\" in dict_[\"ESTIMATION\"].keys():\n if dict_[\"ESTIMATION\"][\"comparison\"] == 0:\n pass\n else:\n write_comparison(data, rslt)\n else:\n write_comparison(data, rslt)\n\n return rslt", "def form_data(stocks, init_param):\r\n \r\n rs = stocks[1].rsi\r\n ts = stocks[1].tsi\r\n a = 1\r\n \r\n \r\n for date in init_param.train_dates:\r\n try:\r\n training_data\r\n except NameError:\r\n training_data = LearningData()\r\n training_data.construct(stocks, date, init_param.future_day, init_param.features)\r\n else:\r\n training_data.append(stocks, date, 
init_param.future_day, init_param.features)\r\n \r\n for date in init_param.test_dates:\r\n try:\r\n test_data\r\n except NameError:\r\n test_data = LearningData()\r\n test_data.construct(stocks, date, init_param.future_day, init_param.features)\r\n else:\r\n test_data.append(stocks, date, init_param.future_day, init_param.features)\r\n \r\n #reference_date = dateutl.days_since_1900('1991-01-01')\r\n #test_data.construct(stocks,[reference_date, day_history, init_param.future_day])\r\n \r\n return training_data, test_data", "def grid_interp_ts(df, time_col, x_col, y_col, data_col, grid_res, from_crs=None, to_crs=2193, interp_fun='cubic', agg_ts_fun=None, period=None, digits=2):\n\n #### Create the grids\n df1 = df.copy()\n\n #### Resample the time series data\n if agg_ts_fun is not None:\n df1a = df1.set_index(time_col)\n if agg_ts_fun == 'sum':\n df2 = df1a.groupby([TimeGrouper(period), Grouper(y_col), Grouper(x_col)])[data_col].sum().reset_index()\n elif agg_ts_fun == 'mean':\n df2 = df1a.groupby([TimeGrouper(period), Grouper(y_col), Grouper(x_col)])[data_col].mean().reset_index()\n else:\n raise ValueError(\"agg_ts_fun should be either 'sum' or 'mean'.\")\n time = df2[time_col].unique()\n else:\n df2 = df1\n\n time = df2[time_col].sort_values().unique()\n\n if from_crs is None:\n x = df2.loc[df2[time_col] == time[0], x_col].values\n y = df2.loc[df2[time_col] == time[0], y_col].values\n else:\n data1 = df2.loc[df2[time_col] == time[0]]\n from_crs1 = convert_crs(from_crs, pass_str=True)\n to_crs1 = convert_crs(to_crs, pass_str=True)\n geometry = [Point(xy) for xy in zip(data1[x_col], data1[y_col])]\n gpd = GeoDataFrame(data1.index, geometry=geometry, crs=from_crs1)\n gpd1 = gpd.to_crs(crs=to_crs1)\n x = gpd1.geometry.apply(lambda p: p.x).round(digits).values\n y = gpd1.geometry.apply(lambda p: p.y).round(digits).values\n\n xy = column_stack((x, y))\n\n max_x = x.max()\n min_x = x.min()\n\n max_y = y.max()\n min_y = y.min()\n\n new_x = arange(min_x, max_x, grid_res)\n new_y = arange(min_y, max_y, grid_res)\n x_int, y_int = meshgrid(new_x, new_y)\n\n #### Create new df\n x_int2 = x_int.flatten()\n y_int2 = y_int.flatten()\n xy_int = column_stack((x_int2, y_int2))\n time_df = repeat(time, len(x_int2))\n x_df = tile(x_int2, len(time))\n y_df = tile(y_int2, len(time))\n new_df = DataFrame({'time': time_df, 'x': x_df, 'y': y_df, data_col: repeat(0, len(time) * len(x_int2))})\n\n new_lst = []\n for t in to_datetime(time):\n set1 = df2.loc[df2[time_col] == t, data_col]\n# index = new_df[new_df['time'] == t].index\n new_z = griddata(xy, set1.values, xy_int, method=interp_fun).round(digits)\n new_z[new_z < 0] = 0\n new_lst.extend(new_z.tolist())\n# print(t)\n new_df.loc[:, data_col] = new_lst\n\n #### Export results\n return(new_df[new_df[data_col].notnull()])", "def build_params(clsite, dt):\n hp = GaugeParams()\n lp = GaugeParams()\n if clsite is not None:\n with get_sqlalchemy_conn(\"coop\") as conn:\n df = pd.read_sql(\n \"SELECT year, high, low from alldata WHERE \"\n \"station = %s and sday = %s\",\n conn,\n params=(clsite, f\"{dt:%m%d}\"),\n index_col=\"year\",\n )\n hp.minval = df[\"high\"].min()\n hp.maxval = df[\"high\"].max()\n hp.avgval = df[\"high\"].mean()\n hp.stddev = df[\"high\"].std()\n hp.ptiles = df[\"high\"].quantile(np.arange(0.1, 0.91, 0.1)).to_list()\n lp.maxval = df[\"low\"].max()\n lp.minval = df[\"low\"].min()\n lp.avgval = df[\"low\"].mean()\n lp.stddev = df[\"low\"].std()\n lp.ptiles = df[\"low\"].quantile(np.arange(0.1, 0.91, 0.1)).to_list()\n\n return hp, lp", "def 
__init__(self, prefix, params, begin, end, simthreshold_NS = 0.5, \r\n simthreshold_pbias = 25., simthreshold_rsr = 0.7, \r\n with_valid_data = False, shift_one_day = False):\r\n self.simthreshold_NS = simthreshold_NS\r\n self.simthreshold_pbias = simthreshold_pbias\r\n self.simthreshold_rsr = simthreshold_rsr\r\n self.with_valid_data = with_valid_data\r\n self.shift_one_day = shift_one_day\r\n # Open the simulation file\r\n try:\r\n task_id = int(os.environ.get('SGE_TASK_ID',0))\r\n except:\r\n task_id = 0\r\n postfix = '.%04i' % task_id if task_id else ''\r\n self.outfile_sim = open(prefix + '-simulation.csv' + postfix,'w')\r\n # Open the parameter file\r\n self.outfile_param = open(prefix + '-parameters.csv' + postfix,'w')\r\n # Make the header\r\n if task_id<2:\r\n header = 'logNS,' + \"pbias,\" + \"rsr,\" + ','.join(p.name for p in params)\r\n self.outfile_param.write(header + '\\n')\r\n # Prolong the header with the data\r\n t = begin\r\n # add the validation period if wanted\r\n if self.with_valid_data:\r\n end = datetime.datetime(1988,12,31)\r\n # if the whole timeseries is shifted add one day to the header\r\n if self.shift_one_day:\r\n end = datetime.datetime(1989,1,1)\r\n while t <= end:\r\n header += ', ' + datetime.datetime.strftime(t,'%Y-%m-%d')\r\n t += datetime.timedelta(days=1)\r\n self.outfile_sim.write(header + '\\n')", "def extract_dates(self,dates, tol= 0.05 , in_place=False, verbose=True):\n###################################################################\n\n # import \n import inspect\n import numpy as np\n import pyacs.gts\n\n # check data is not None\n from pyacs.gts.lib.errors import GtsInputDataNone\n \n try:\n if self.data is None:\n # raise exception\n raise GtsInputDataNone(inspect.stack()[0][3],__name__,self)\n except GtsInputDataNone as error:\n # print PYACS WARNING\n print( error )\n return( self )\n\n # working gts\n new_gts = self.copy()\n \n # case .data_xyz is None\n \n if new_gts.data_xyz is None:\n new_gts.neu2xyz(corr=True)\n\n else:\n # check data/data_xyz consistency\n try:\n if not new_gts.cdata(data=True):\n # raise exception\n from pyacs.gts.lib.errors import GtsCDataError\n raise GtsCDataError( inspect.stack()[0][3],__name__,self )\n except GtsCDataError as error:\n print( error )\n return( self )\n \n \n new_data=None\n \n # extract dates\n \n index = np.array( pyacs.gts.Gts.get_index_from_dates(dates, self.data, tol=tol) )\n \n if verbose:\n print('-- Extracting ',index.shape[0],' entries from Gts or code: ',self.code)\n \n if index.shape[0] > 0:\n new_data_xyz= self.data_xyz[index,:]\n new_sigma = self.data[index,4:]\n else:\n new_data=None\n if verbose:\n print(\"-- time series \",self.code,\" does not have dates at the requested dates \")\n\n \n # handles outliers\n\n if new_data is not None: \n ldate_outliers=self.data[:,0][self.outliers]\n lupdated_outliers=pyacs.gts.Gts.get_index_from_dates(ldate_outliers, new_data, tol=tol)\n else:\n lupdated_outliers = []\n\n if verbose:\n print('-- Transmitting ',len(lupdated_outliers),' outliers to the extracted Gts ')\n \n # case observations\n \n new_gts.data_xyz = new_data_xyz\n \n # handle outliers\n\n ldate_outliers=self.data[:,0][self.outliers]\n lupdated_outliers=pyacs.gts.Gts.get_index_from_dates(ldate_outliers, new_data_xyz, tol=0.05)\n \n # handles offsets_date\n \n upd_offsets=[]\n for offset_date in self.offsets_dates:\n if offset_date>=new_data_xyz[0,0] and offset_date<=new_data_xyz[-1,0]:\n upd_offsets.append(offset_date)\n \n # handles X0,Y0,Z0\n \n new_gts.X0 = new_data_xyz[0,1]\n 
new_gts.Y0 = new_data_xyz[0,2]\n new_gts.Z0 = new_data_xyz[0,3]\n \n # re-generate NEU time series\n new_gts.xyz2neu(corr=False)\n\n # re-populate the uncertainties columns\n new_gts.data[:,4:] = new_sigma\n \n # offsets & outliers\n \n new_gts.offsets_dates=upd_offsets\n new_gts.outliers=lupdated_outliers\n \n if in_place:\n self = new_gts\n return(self)\n else:\n return(new_gts)", "def make_data(args):\n mass_MJ = 1.142\n radius_RJ = 1.138\n gravity_SI = 23.970 \n Rs_Rsun = 0.805\n inc = 85.71\n t0 = 2454037.612\n sma = 8.839304998 # semi major axis in stellar radiu\n orb_per = 2.21857545 #in days\n ecc = 0.0041\n w_peri = -24.1 # longiutude of periastron\n limbdark = \"linear\"\n \n u_limbdark = [0.35]\n \n num_transit = 1\n \n dates = [2458383.77055943, 2458383.77384704, 2458383.77707875,\n 2458383.78030307, 2458383.78358918, 2458383.78681399,\n 2458383.79004101, 2458383.79326712, 2458383.79655574,\n 2458383.79984545, 2458383.80307906, 2458383.80629228,\n 2458383.80958299, 2458383.8128124 , 2458383.81603942,\n 2458383.81925973, 2458383.82248474, 2458383.82577195,\n 2458383.82900097, 2458383.83223048, 2458383.8354501 ,\n 2458383.83874811, 2458383.84196822, 2458383.84520053,\n 2458383.84847654, 2458383.85170346, 2458383.85493727,\n 2458383.85821578, 2458383.86144419, 2458383.86466921,\n 2458383.86790322, 2458383.87118233, 2458383.87441074,\n 2458383.87763435, 2458383.88092406, 2458383.88414957],\n #don't forget the coma at the end if there is only one transit !!!!!\n \n\n\n # Wmean = [2400.695909757236,2328.5343131275904,1972.9809993156186,\n # 1927.2107049022654,]\n # Wmean = [1634.5200937047302,1600.8109822367207],[1670.071564637037,1634.5459486709924,1600.8124596368639],\n Wmean = [2328.5343131275904], \n orderstot = [33]\n orders = [33],\n # orderstot = [46,47,48]\n # orders = [47,48],[46,47,48],\n \n # Vfiles = [\"Vcorr47_DRS2.txt\",\n # \"Vcorr48_DRS2.txt\",\n # ],[\"Vcorr46_Jun19-1_DRS2.txt\",\n # \"Vcorr47_Jun19-1_DRS2.txt\",\n # \"Vcorr48_Jun19-1_DRS2.txt\"\n # ],\n Vfiles = [\"V33_CO.txt\"], \n \n Ifiles = [\"I33_CO.txt\"],\n \n # if Stdfiles are not needed, for example with the Brogi likelihood, \n # uncomment the next line\n #Stdfiles = []\n Stdfiles = [\"Std33_CO.txt\"],\n \n lambdas = np.array([[ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [2291.84518119, 2362.55271775],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [1939.42197854, 1998.81548771],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [1758.50261646, 1812.39702422],\n [1718.50054581, 1771.64067835],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [1512.43747007, 1558.89713666],\n [1484.77586677, 1528.30354258],\n [1457.06015806, 1498.88570675],\n [1429.75333156, 1470.19096444],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [1306.967007 , 1343.21643463],\n [1285.02046052, 1320.56072659],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [1167.78440327, 1198.13940642],\n [1150.59417256, 1178.48372217],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. 
],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ],\n [ 0. , 0. ]])\n\n return dict(\n mass_MJ=mass_MJ,\n radius_RJ=radius_RJ,\n\t\tgravity_SI = gravity_SI,\n\t\tRs_Rsun = Rs_Rsun,\n\t\tinc = inc,\n\t\tt0 = t0,\n\t\tsma = sma,\n\t\torb_per = orb_per,\n\t\tecc = ecc,\n\t\tw_peri = w_peri,\n Wmean = Wmean,\n\t\tlimbdark = limbdark,\n\t\tu_limbdark = u_limbdark,\n\t\tdates = dates,\n\t\tlambdas = lambdas,\n orders = orders,\n orderstot=orderstot,\n num_transit=num_transit,\n\t\tVfiles = Vfiles,\n\t\tIfiles = Ifiles,\n\t\tStdfiles = Stdfiles\n\t\t )", "def params(timeseries_input):\n # Settings for Nelder Mead Algorithm\n global timeseries\n timeseries=timeseries_input\n\n NumIters = 1 # First Iteration\n MaxIters = 1e3 # Maximum number of iterations\n Tolerance = 1e-5 # Tolerance on best and worst function values\n N = 5 # Number of Heston and Nandi parameters\n r = 0.01 / 252.0 # Risk Free Rate\n\n # Heston and Nandi parameter starting values (vertices) in vector form\n\n x = [[0 for i in range(N + 1)] for j in range(N)]\n x[0][0] = 5.02e-6;\n x[0][1] = 5.12e-6;\n x[0][2] = 5.00e-6;\n x[0][3] = 4.90e-6;\n x[0][4] = 4.95e-6;\n x[0][5] = 4.99e-6 # omega\n x[1][0] = 1.32e-6;\n x[1][1] = 1.25e-6;\n x[1][2] = 1.35e-6;\n x[1][3] = 1.36e-6;\n x[1][4] = 1.30e-6;\n x[1][5] = 1.44e-6 # alpha\n x[2][0] = 0.79;\n x[2][1] = 0.80;\n x[2][2] = 0.78;\n x[2][3] = 0.77;\n x[2][4] = 0.81;\n x[2][5] = 0.82 # beta\n x[3][0] = 427.0;\n x[3][1] = 421.0;\n x[3][2] = 425.0;\n x[3][3] = 419.1;\n x[3][4] = 422.1;\n x[3][5] = 430.0 # gamma\n x[4][0] = 0.21;\n x[4][1] = 0.20;\n x[4][2] = 0.22;\n x[4][3] = 0.19;\n x[4][4] = 0.18;\n x[4][5] = 0.205 # lambda\n\n # Run Nelder Mead and output Nelder Mead results\n B = NelderMead(LogLike, N, NumIters, MaxIters, Tolerance, x, r)\n\n #\tprint(\"Nelder Mead Minimization of Log-Likelihood for Heston and Nandi parameters\")\n #\tprint(\"---------------------------------\")\n #\tprint(\"omega = \", B[0])\n #\tprint(\"alpha = \", B[1])\n #\tprint(\"beta = \", B[2])\n #\tprint(\"gamma = \", B[3])\n #\tprint(\"lambda = \", B[4])\n #\tprint(\"Value of Objective Function = \", B[N])\n #\tprint(\"Number of Iterations = \", B[N+1])\n #\tprint(\"Persistence \", B[2]+B[1]*(B[3]**2) )\n #\tprint(\"---------------------------------\")\n\n # alpha,beta,gamma,omega,lambda\n return [B[1], B[2], B[3], B[0], B[4]]", "def find_calibration_parameters(df, temperature, cal_mode, calibration_statistics, num_iterations, optimal_t=25):\n\n if \"temp\" in cal_mode:\n # create a column of T - optimal_T (mean temperature for each still bout minus the optimal temperature)\n # i.e. 
the deviation in T from the optimal\n df[\"T_dev\"] = temperature.data - optimal_t\n\n for i in range(num_iterations):\n # do linear regression:\n x_results, y_results, z_results = dataframe_regression(df, cal_mode, do_or_undo=\"do\")\n\n # results.params() gives the calibration parameters thus:\n # x_results.params() = [x_scale, x_offset, x_temp_offset] (last item only applies if temperature is used)\n df = dataframe_transformation(df, x_results.params, y_results.params, z_results.params,\n cal_mode)\n # update the \"matched\" arrays to reflect the new \"closest points\" after the dataframe transformation\n update_matched(df)\n\n # Regress the backup copy of the original input against the transformed version,\n # to calculate offset, scale and temperature offset scalar (if temperature used)\n x_results_final, y_results_final, z_results_final = dataframe_regression(df, cal_mode, do_or_undo=\"undo\")\n\n calibration_parameters = {\"x_offset\": x_results_final.params[1],\n \"x_scale\": x_results_final.params[0],\n \"y_offset\": y_results_final.params[1],\n \"y_scale\": y_results_final.params[0],\n \"z_offset\": z_results_final.params[1],\n \"z_scale\": z_results_final.params[0]\n }\n\n if \"temp\" in cal_mode:\n calibration_parameters[\"x_temp_offset\"] = x_results_final.params[2]\n calibration_parameters[\"y_temp_offset\"] = y_results_final.params[2]\n calibration_parameters[\"z_temp_offset\"] = z_results_final.params[2]\n else:\n calibration_parameters[\"x_temp_offset\"] = 0\n calibration_parameters[\"y_temp_offset\"] = 0\n calibration_parameters[\"z_temp_offset\"] = 0\n\n # if enhanced calibration statistics are required...\n if calibration_statistics:\n\n ######################\n\n # extract the error in the final regression fit for each axis\n calibration_parameters[\"x_rsquared\"] = x_results_final.rsquared\n calibration_parameters[\"y_rsquared\"] = y_results_final.rsquared\n calibration_parameters[\"z_rsquared\"] = z_results_final.rsquared\n\n x_bse = x_results_final.bse\n y_bse = y_results_final.bse\n z_bse = z_results_final.bse\n\n calibration_parameters[\"x_scale_se\"] = x_bse[0]\n calibration_parameters[\"y_scale_se\"] = y_bse[0]\n calibration_parameters[\"z_scale_se\"] = z_bse[0]\n\n calibration_parameters[\"x_offset_se\"] = x_bse[1]\n calibration_parameters[\"y_offset_se\"] = y_bse[1]\n calibration_parameters[\"z_offset_se\"] = z_bse[1]\n\n if \"temp\" in cal_mode:\n calibration_parameters[\"x_temp_offset_se\"] = x_bse[2]\n calibration_parameters[\"y_temp_offset_se\"] = y_bse[2]\n calibration_parameters[\"z_temp_offset_se\"] = z_bse[2]\n\n #########################\n\n return calibration_parameters", "def start_params(unique_id):\n\n temp = unique_id.GradientTemp\n temp_k = unique_id.Temp_K\n logtraitval = unique_id.logTraitValue\n midpoint = unique_id.logTraitValue.idxmax()\n midtemp = unique_id.GradientTemp[unique_id.logTraitValue.idxmax()]\n y = NaN\n\n # at midpoint, split the data into two arrays\n EhVect = where(unique_id.GradientTemp >= midtemp, logtraitval, y)\n EVect = where(unique_id.GradientTemp < midtemp, logtraitval, y)\n\n Len_EVect = len(EVect)\n Len_EhVect = len(EhVect)\n\n if Len_EVect < 3: # all the points are one side of the 'midpoint'):\n EGrad = stats.linregress(temp, logtraitval)\n else:\n try:\n EGrad = stats.linregress(\n temp[:midpoint], logtraitval[:midpoint])\n except ValueError as error:\n logging.error(\"ValueError encountered %r\", error)\n EGrad = stats.linregress(temp, logtraitval)\n\n if Len_EhVect < 3:\n EhGrad = stats.linregress(temp, 
logtraitval)\n else:\n try:\n EhGrad = stats.linregress(\n temp[midpoint:], logtraitval[midpoint:])\n except ValueError as error:\n #logging.error(\"ValueError encountered %r\", error)\n EhGrad = stats.linregress(temp, logtraitval)\n\n B0 = exp(EGrad[0] * (1 / (k * 283.15)) + EGrad[1])\n\n if Len_EhVect < 2:\n Th = midtemp\n else:\n try:\n Th = (\n ((mean(logtraitval) - logtraitval[B0]) / EhGrad[0]))**(-1) / k\n except BaseException:\n Th = temp_k[logtraitval.idxmax()] # close to where 50% deactivation would occur\n\n return DataFrame({\n \"E\": EGrad[0],\n \"Eh\": EhGrad[0],\n \"B0\": B0,\n \"Th\": Th\n }, index=[0]\n ) # index assignment as passing scalar values", "def create_lat_lon_date_data(gt_id,\n target_horizon,\n experiment,\n past_gt_ids=[\"contest_precip\", \"contest_tmp2m\"],\n forecast_models=[\"nmme\",\"nmme0\"],\n other_lat_lon_date_features=[\"contest_rhum.sig995\",\n \"contest_pres.sfc.gauss\"]):\n\n time_start = time.time()\n\n # Add forecasts to list of forecast IDs\n forecast_variable = get_forecast_variable(gt_id) # 'prate' or 'tmp2m'\n forecast_ids = ['{}-{}-{}'.format(forecast, forecast_variable, target_horizon)\n for forecast in forecast_models]\n\n # -----------\n # Generate relevant variable and column names\n # -----------\n\n # Identify measurement variable name\n measurement_variable = get_measurement_variable(gt_id) # 'tmp2m' or 'prate'\n\n # Keep track of relevant column names\n gt_col = measurement_variable\n clim_col = measurement_variable+\"_clim\"\n anom_col = measurement_variable+\"_anom\"\n\n # Inverse of standard deviation of anomalies for each start_date\n anom_inv_std_col = anom_col+\"_inv_std\"\n\n # --------\n # Prepare experiment cache directory and saved file names\n # --------\n\n # Name of cache directory for storing non-submission-date specific\n # intermediate files\n cache_dir = os.path.join('results', experiment, 'shared',\n '{}_{}'.format(gt_id, target_horizon))\n # e.g., cache_dir = 'results/regression/shared/contest_precip_34w'\n\n # if cache_dir doesn't exist, create it\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n # Filenames for data file to be stored in cache_dir\n lat_lon_date_data_file = os.path.join(\n cache_dir, \"lat_lon_date_data-{}_{}.h5\".format(gt_id, target_horizon))\n\n # --------\n # Load mask indicating which grid points count in the contest (1=in, 0=out)\n # --------\n print \"Loading contest mask\"\n t = time.time()\n mask_df = get_contest_mask()\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # --------\n # Creates and saves lat_lon_date_data dataframe\n # --------\n # Load masked lat lon date features restricted to years >= get_first_year(gt_id)\n # Note: contest lat lon date features and forecasts are pre-masked, so there\n # is no need to mask explcitily\n print \"Loading lat lon date features\"\n num_gt_ids = len(past_gt_ids)\n # For each measurement,\n # get number of days between start date of observation period used for prediction\n # (2 weeks + 1 submission day behind for most predictors) and start date of\n # target period (2 or 4 weeks ahead)\n past_start_deltas = [get_start_delta(target_horizon, past_gt_id)\n for past_gt_id in past_gt_ids]\n other_start_deltas = [get_start_delta(target_horizon, other_gt_id)\n for other_gt_id in other_lat_lon_date_features]\n # Additionally keep track of days between forecast date and start date of\n # target period\n forecast_delta = get_forecast_delta(target_horizon)\n\n lat_lon_date_data = get_lat_lon_date_features(\n 
gt_ids=other_lat_lon_date_features + other_lat_lon_date_features\n + other_lat_lon_date_features,\n gt_masks=None,\n gt_shifts=other_start_deltas +\n [2*delta for delta in other_start_deltas] +\n [365]*len(other_lat_lon_date_features),\n forecast_ids=forecast_ids + forecast_ids,\n forecast_masks=None,\n forecast_shifts=[None]*len(forecast_ids) + [forecast_delta]*len(forecast_ids),\n anom_ids=[gt_id] + past_gt_ids + past_gt_ids + past_gt_ids,\n anom_masks=None,\n anom_shifts=[None] + past_start_deltas +\n [2*delta for delta in past_start_deltas] +\n [365]*len(past_gt_ids),\n first_year=get_first_year(gt_id)\n )\n\n print \"Loading additional lat lon date features\"\n t = time.time()\n # Add CFSv2 mean as feature\n if 'cfsv2' in forecast_models:\n cfsv2_models = ['cfsv2_op_delta_2w_1d_6h', 'cfsv2_op_delta_2w_1d_12h',\n 'cfsv2_op_delta_2w_1d_18h', 'cfsv2_op_delta_2w_2d_0h',\n 'cfsv2_op_delta_2w_2d_6h', 'cfsv2_op_delta_2w_2d_12h',\n 'cfsv2_op_delta_2w_2d_18h', 'cfsv2_op_delta_2w_3d_0h']\n lat_lon_date_data['cfsv2_mean'] = lat_lon_date_data[cfsv2_models].mean(axis=1)\n lat_lon_date_data[\"cfsv2_mean_shift\"+str(start_delta)] = lat_lon_date_data[\n [model+\"_shift\"+str(start_delta) for model in cfsv2_models]].mean(axis=1)\n # Add inverse of standard deviation of anomalies for each start_date\n lat_lon_date_data[anom_inv_std_col] = \\\n 1.0/lat_lon_date_data.groupby([\"start_date\"])[anom_col].transform('std')\n\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # Save lat lon date features to disk\n print \"Saving lat lon date features to \"+lat_lon_date_data_file\n t = time.time()\n lat_lon_date_data.to_hdf(lat_lon_date_data_file, key=\"data\", mode=\"w\")\n subprocess.call(\"chmod a+w \"+lat_lon_date_data_file, shell=True)\n print \"Elapsed: {}s\".format(time.time() - t)\n print \"Finished generating lat_lon_date_data matrix.\"\n print \"Total time elapsed: {}s\".format(time.time()-time_start)\n return list(lat_lon_date_data)", "def generate_testdata(dt, maxt=10., meanrate=10., amp=20.e-12, ampvar=5.e-12, \n noise=2.5e-12, taus=[0.001, 0.010], baseclass=None, func=None, sign=1, \n expseed=None, noiseseed=None,\n bigevent=None):\n if baseclass is None and func is not None:\n raise ValueError('Need base class definition')\n tdur = 0.020\n timebase = np.arange(0., maxt, dt) # in ms\n t_psc = np.arange(0., tdur, dt) # time base for single event template in ms\n if func is None:\n tau_1 = taus[0] # ms\n tau_2 = taus[1] # ms\n Apeak = amp # pA\n Aprime = (tau_2/tau_1)**(tau_1/(tau_1-tau_2))\n g = Aprime * (-np.exp(-t_psc/tau_1) + np.exp((-t_psc/tau_2)))\n gmax = np.max(g)\n print('gmax: ', gmax)\n g = sign*g*amp/gmax\n print(f'max g: {np.min(g):.6e}')\n else:\n baseclass._make_template()\n gmax = np.min(baseclass.template)\n g = sign*amp*baseclass.template/gmax\n print('gmaxb: ', np.max(gmax))\n \n testpsc = np.zeros(timebase.shape)\n if expseed is None:\n eventintervals = np.random.exponential(1./meanrate, int(maxt*meanrate))\n else:\n np.random.seed(expseed)\n eventintervals = np.random.exponential(1./meanrate, int(maxt*meanrate))\n eventintervals = eventintervals[eventintervals < 10.]\n events = np.cumsum(eventintervals)\n if bigevent is not None:\n events = np.append(events, bigevent['t'])\n events = np.sort(events)\n t_events = events[events < maxt] # time of events with exp distribution\n i_events = np.array([int(x/dt) for x in t_events])\n testpsc[i_events] = np.random.normal(1., ampvar/amp, len(i_events))\n if bigevent is not None:\n ipos = int(bigevent['t']/dt) # position in array\n 
testpsc[ipos] = bigevent['I']\n testpsc = scipy.signal.convolve(testpsc, g, mode='full')[:timebase.shape[0]]\n # f, ax = mpl.subplots(1,1)\n # ax.plot(dt*np.arange(len(testpsc)), testpsc)\n # mpl.show()\n if noise > 0:\n if noiseseed is None:\n testpscn = testpsc + np.random.normal(0., noise, testpsc.shape)\n else:\n np.random.seed(noiseseed)\n testpscn = testpsc + np.random.normal(0., noise, testpsc.shape)\n else:\n testpscn = testpsc\n return timebase, testpsc, testpscn, i_events" ]
[ "0.6216409", "0.61393374", "0.60502726", "0.59144783", "0.5902388", "0.56675595", "0.56113106", "0.55587673", "0.55573833", "0.5554575", "0.55389374", "0.55208224", "0.55186224", "0.54923666", "0.5484279", "0.5448603", "0.5447749", "0.5442355", "0.5438024", "0.5410568", "0.5405809", "0.54031134", "0.5401165", "0.5400915", "0.53947645", "0.53747934", "0.5360897", "0.53567594", "0.53286064", "0.53182304" ]
0.6764931
0
Utility function zed.uchicago.edu Converts list into string separated by dashes or empty string if input list is not list or is empty
def stringify(List):
    if List is None:
        return ''
    if not List:
        return ''
    return '-'.join(str(elem) for elem in List)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_to_string(in_list):\n if not in_list:\n return \"[]\"\n else:\n return \"\\n- \" + \"\\n- \".join(in_list)", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def process_list(a_list: list):\n\n return ', '.join(str(s) for s in a_list) if a_list else Presenter.DEFAULT", "def convertListToString(list):\n return re.sub(r'[^\\w ]', '', str(list))", "def unicode_list_to_str(u_code_list): #This is just a function for me. Has nothing to do with flask or anything, okay?\n out_list = \"\"\n for item in u_code_list:\n out_list = out_list + str(item) + \"-\"\n return out_list.rstrip(\"-\") #removes the extra '-' (i.e 2-3-4-1-)", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def list_join(the_list):\n return ' '.join(the_list)", "def stringer(list):\n\tstring = \"\"\n\tfor x in list:\n\t\tstring = string + str(x)\n\treturn string", "def format_list(list):\n return \" \".join(str(tok) for tok in list)", "def format(lis):\n if lis:\n return \";\".join(\",\".join(str(i) for i in n) for n in lis)\n else:\n return \"NULL\"", "def list_str(lis):\r\n as_str = \"\"\r\n for item in lis:\r\n as_str += \" \" + str(item) + \",\"\r\n return as_str[:-1]", "def list_to_str(input_str):\r\n\r\n return \" \".join([str(val) for val in input_str])", "def list_string(join_list):\n joined_list = '[{}]'.format(join_list, join_list)\n return joined_list", "def __join_if_list(text_or_list: Union[List[str], str]) -> str:\n\n if isinstance(text_or_list, list):\n return ' '.join(text_or_list)\n return text_or_list", "def list_to_string(inputlist):\n outstring = \"\"\n numusers = len(inputlist)\n if numusers == 1: # foo\n outstring += inputlist[0]\n if numusers == 2: # foo and bar\n outstring += (inputlist[0] + \" and \" + inputlist[1])\n if numusers >= 3: # foo, bar and baz\n for x in range(numusers-2):\n outstring += inputlist[x] + \", \"\n outstring += (inputlist[-2] + \" and \" + inputlist[-1])\n return outstring", "def format_list(my_list):\n \n new_list = my_list[2: -1]\n new_list = new_list[: : 2]\n new_list = [my_list[0]] + new_list\n new_list = new_list + [\"and \" + my_list[-1]]\n \n string = ', '.join(new_list)\n print(string)", "def flatten_list(items: List[str]) -> str:\n if len(items) == 1:\n return f'\"{items[0]}\"'\n\n try:\n last = items[-1]\n except IndexError:\n # Empty list\n raise ValueError('Empty list of values received')\n\n return ', '.join(f'\"{item}\"' for item in items[:-1]) + f' or \"{last}\"'", "def formatlist(input_list):\n\n output_list = []\n for item in input_list:\n item = str(item)\n item = item.replace(\" \", \"_\")\n output_list.append(item)\n return output_list", "def format_list(my_list):\r\n\treturn \", \".join(my_list[::2]) + (\" and \" + my_list[-1])", "def pretty_list(input_list, separator=', '):\n if input_list:\n output = ' %s' % separator.join(input_list)\n else:\n output = ' empty'\n return output", "def listToString(s):\n # initialize an empty string\n str1 = \"\"\n\n # traverse in the string\n for ele in s:\n try:\n str1 = str1 + \" \" + ele\n except:\n pass\n\n # return string\n return str1", "def get_list_as_str(list_to_convert):\n return \", \".join([\"'{}'\".format(list_item) for list_item in list_to_convert])", "def listToStringFormat(self, list) ->str:\n string = ''\n for element in list:\n string = string + 
str(element) + \"\\n\"\n return string", "def list2string(a_list):\n\n the_string = ''\n for elem in a_list:\n the_string += str(elem)\n return the_string", "def str_transform_list(L):\n return [str(x) for x in L]", "def human_list(lst, connector='and'):\n # we don't want to listify non iterables\n if not getattr(lst, '__iter__', False):\n return lst\n else:\n s = ''\n max_idx = len(lst) - 1\n for i, item in enumerate(lst):\n if i == 0:\n t = '%s'\n elif i == max_idx and max_idx > 1:\n t = ', ' + connector + ' %s'\n elif i == max_idx and max_idx == 1:\n t = ' ' + connector + ' %s'\n else:\n t = ', %s'\n s += t % filter.conditional_escape(item)\n return mark_safe(s)", "def join_and_sanitize(list_):\n if isinstance(list_, str):\n return list_\n\n new_list = []\n for item in list_:\n if isinstance(item, str):\n new_list.append(item)\n elif isinstance(item, int):\n new_list.append(str(item))\n elif isinstance(item, float):\n new_list.append(str(item))\n elif isinstance(item, unicode):\n new_list.append(str(item))\n else:\n raise Exception('Invalid type when attempting to join and sanitize')\n\n return ' '.join(new_list)", "def _to_space_separated_string(l):\n s = '';\n for x in l:\n if len(s)>0: s += ' '\n s += repr(x)\n return s", "def join_and_sanitize(list_):\n if isinstance(list_, str):\n return list_\n\n new_list = []\n for item in list_:\n if isinstance(item, str):\n new_list.append(item)\n elif isinstance(item, int):\n new_list.append(str(item))\n elif isinstance(item, float):\n new_list.append(str(item))\n else:\n raise Exception('Invalid type when attempting to join and sanitize')\n\n return ' '.join(new_list)" ]
[ "0.7693768", "0.6974077", "0.6974077", "0.6955682", "0.6715619", "0.6706818", "0.66814554", "0.6577999", "0.6566847", "0.65541935", "0.65537137", "0.65157914", "0.6480404", "0.64604145", "0.6455032", "0.6404342", "0.63729745", "0.63619614", "0.63449293", "0.6314343", "0.62951", "0.6284942", "0.6260856", "0.62586826", "0.6239111", "0.622174", "0.6217191", "0.617336", "0.6147292", "0.6132864" ]
0.7753648
0
Utilities for storing and manipulating XPFSA models inferred by XGenESeSS zed.uchicago.edu Calculates the distance between all models and stores them under the distance key of each model; modifies instance in place No I/O
def augmentDistance(self):
    for key,value in self._models.iteritems():
        src=[float(i) for i in value['src'].replace('#',' ').split()]
        tgt=[float(i) for i in value['tgt'].replace('#',' ').split()]
        dist = haversine((np.mean(src[0:2]),np.mean(src[2:])),
                         (np.mean(tgt[0:2]),np.mean(tgt[2:])),
                         miles=True)
        self._models[key]['distance'] = dist
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance_train(self):\n\n for self.epoch in range(self.args.epochs):\n # switch to train mode\n self.set_train()\n data_loading_time = 0\n gpu_time = 0\n before_op_time = time.time()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n data_loading_time += (time.time() - before_op_time)\n before_op_time = time.time()\n # -- PUSH INPUTS DICT TO DEVICE --\n self.inputs_to_device(inputs)\n\n # -- DISTANCE ESTIMATION --\n outputs, features = self.predict_distances(inputs)\n\n # -- POSE ESTIMATION --\n outputs.update(self.predict_poses(inputs, features))\n\n # -- PHOTOMETRIC LOSSES --\n losses, outputs = self.photometric_losses(inputs, outputs)\n\n # -- COMPUTE GRADIENT AND DO OPTIMIZER STEP --\n self.optimizer.zero_grad()\n losses[\"distance_loss\"].mean().backward()\n self.optimizer.step()\n\n duration = time.time() - before_op_time\n gpu_time += duration\n\n if batch_idx % self.args.log_frequency == 0:\n self.log_time(batch_idx, duration, losses[\"distance_loss\"].mean().cpu().data,\n data_loading_time, gpu_time)\n self.distance_statistics(\"train\", inputs, outputs, losses)\n data_loading_time = 0\n gpu_time = 0\n\n self.step += 1\n before_op_time = time.time()\n\n self.lr_scheduler.step()\n\n if (self.epoch + 1) % self.args.save_frequency == 0:\n self.save_model()\n\n print(\"Training complete!\")", "def _forward_densepose_smooth_save(self, features: Dict[str, torch.Tensor], instances: List[Instances]):\n if not self.densepose_on:\n return {} if self.training else instances\n\n ## MLQ added\n assert not self.training\n self._register_hooks()\n self.cnt = 1\n self.smooth_k = cfg.SMOOTH_K\n self.prev_instances = None\n # self.data_dir = \"/esat/dragon/liqianma/datasets/Pose/youtube/youtube_single\"\n # self.data_dir = \"/esat/dragon/liqianma/datasets/Pose/youtube/liqian01\"\n self.data_dir = cfg.DATA_DIR\n print(\"--> data_dir: \", self.data_dir)\n self.in_dir = os.path.join(self.data_dir, \"DP_fea\")\n if self.smooth_k>0 and os.path.exists(self.in_dir) and len(os.listdir(self.in_dir))>0:\n self.out_dir = os.path.join(self.data_dir, \"DP_fea_smooth{}\".format(self.smooth_k))\n else:\n self.out_dir = os.path.join(self.data_dir, \"DP_fea\")\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n pred_boxes = [x.pred_boxes for x in instances]\n scores = [x.scores for x in instances]\n # pdb.set_trace()\n if self.smooth_k>0:\n pred_boxes, idx = self._smooth_bbox(self.in_dir, self.cnt, self.smooth_k, single_person=True)\n \n for i in range(len(instances)):\n if len(instances[i])==0:\n instances = copy.copy(self.prev_instances)\n pred_boxes = [instances[i].pred_boxes]\n elif len(instances[i])>1:\n try:\n instances[i] = instances[i][idx.item()]\n except:\n print(idx)\n instances[i] = instances[i][idx]\n instances[i].pred_boxes = pred_boxes[i]\n else:\n instances[i].pred_boxes = pred_boxes[i]\n # except:\n # pdb.set_trace()\n self.prev_instances = copy.copy(instances)\n\n if self.use_decoder:\n features = [self.decoder(features)]\n\n \"TODO: (1) smooth the pred_boxes with t+-1, save all bbox and load for (track) smooth;\" \n \"TODO: (2) save densepose_outputs, confidences\"\n \"TODO: (3) track bbox for multi-person via densepose similarity\"\n features_dp = self.densepose_pooler(features, pred_boxes)\n if len(features_dp) > 0:\n densepose_head_outputs = self.densepose_head(features_dp)\n densepose_outputs, _, confidences, _ = self.densepose_predictor(\n densepose_head_outputs\n )\n else:\n # If no detection occurred instances\n # set densepose_outputs to empty tensors\n 
empty_tensor = torch.zeros(size=(0, 0, 0, 0), device=features_dp.device)\n densepose_outputs = tuple([empty_tensor] * 4)\n confidences = tuple([empty_tensor] * 6)\n\n # pdb.set_trace()\n # out_dict = {\"pred_boxes\":pred_boxes, \"densepose_outputs\":densepose_outputs,\n # \"confidences\":confidences, \"scores\":scores}\n # pdb.set_trace()\n out_dict = {\"pred_boxes\":self.to_cpu(pred_boxes), \n \"densepose_outputs\":self.to_cpu(densepose_outputs),\n \"confidences\":self.to_cpu(confidences), \n \"scores\":self.to_cpu(scores),\n \"height\":instances[0].image_size[0],\n \"width\":instances[0].image_size[1],\n \"instances\":instances}\n # pdb.set_trace()\n path = os.path.join(self.out_dir, \"frame_{:06d}.pkl\".format(self.cnt))\n pickle.dump(out_dict, open(path,\"wb\"))\n self.cnt += 1\n\n densepose_inference(densepose_outputs, confidences, instances)\n return instances", "def fit(self, graph, instances):\n self.walks_ = []\n b_triples = self.sc.broadcast(graph)\n # for walker in self.walkers:\n # self.walks_ += list(walker.extract(graph, instances))\n # print('Extracted {} walks for {} instances!'.format(len(self.walks_), len(instances)))\n\n folder = \"./walks/\"\n # folder = walk_folder\n if os.path.isdir(folder):\n shutil.rmtree(folder)\n os.mkdir(folder)\n for walker in self.walkers:\n # self.walks_ += list(walker.extract(graph, instances))\n filename = os.path.join(\n folder, \"randwalks_n%d_depth%d_pagerank_uniform.txt\" % (walker.walks_per_graph, walker.depth)\n )\n print(filename)\n start_time = time.time()\n rdd = self.sc.parallelize(instances).map(lambda n: walk_sequence(walker, b_triples.value, n))\n rdd.saveAsTextFile(filename)\n elapsed_time = time.time() - start_time\n print(\"Time elapsed to generate features:\", time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n print(f\"Extracted {len(self.walks_)} walks for {len(instances)} instances!\")\n\n # sentences = [list(map(str, x)) for x in self.walks_]\n\n pattern = \"uniform\"\n\n # vector_output = './vectors/'\n # trainModel(entities, id2entity, walk_folder, model_folder, vector_file, pattern, maxDepth)\n\n sentences = MySentences(folder, filename=pattern)\n self.model_ = Word2Vec(\n sentences,\n size=self.vector_size,\n window=self.window,\n workers=self.n_jobs,\n sg=self.sg,\n iter=self.max_iter,\n negative=self.negative,\n min_count=self.min_count,\n seed=42,\n )", "def fit(self, graph, instances):\n if self.wl:\n graph.weisfeiler_lehman(iterations=self.wl_iterations)\n\n all_walks = []\n for i, instance in tqdm.tqdm(enumerate(instances)):\n if self.wl:\n walks = self._extract_wl_walks(graph, Vertex(str(instance)))\n else:\n walks = self._extract_random_walks(graph, \n Vertex(str(instance)))\n\n walks = sorted(walks)\n all_walks += list(walks)\n\n all_walks = sorted(all_walks)\n self.print_walks(all_walks)\n import pickle\n pickle.dump(self.label_map, open('label_map.p', 'wb+'))\n if self.wl:\n pickle.dump(graph._label_map, open('wl_label_map.p', 'wb+')) \n input()\n\n print('Extracted {} walks for {} instances!'.format(len(all_walks),\n len(instances)))\n sentences = [list(map(str, x)) for x in all_walks]\n\n self.model_ = Word2Vec(sentences, size=self.vector_size, \n window=self.window, workers=self.n_jobs, \n sg=self.sg, iter=self.max_iter, \n negative=self.negative, \n min_count=self.min_count, seed=42)", "def class_weights_embedding():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n 
experiment_name = 'class_weights_embedding_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 1, 1: 2}, {0: 15, 1: 85}]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(use_word_emb=1)\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n set_params(preproc_data_id=new_model_id)", "def get_models(self, offset=0, sum_=False):\n \n self.models = {}\n self.predict = pd.DataFrame()\n min_value = min(self.conf[\"w_sizes\"].values())\n \n output_width = int(30/self.conf[\"time_step\"])\n \n \n for name in self.conf[\"w_sizes\"].keys():\n \n size = self.conf[\"w_sizes\"][name]\n self.create_train_test(name=name, f_size=size, offset=offset, output_width=output_width, sum_=sum_)\n model, loss, val_loss = get_model(name, self.trainX, self.trainY)\n \n pred = pd.DataFrame({name: model.predict(self.testX).tolist()},\n index=range(size-min_value, len(self.testY)+(size-min_value)))\n \n pred[name] = pred[name].apply(lambda x: np.array(x))\n \n self.predict = pd.concat([self.predict, pred], axis=1)\n \n self.models[name] = model\n \n del model, pred\n \n self.create_train_test(name=\"CNN\", f_size=min_value, offset=offset, output_width=output_width, sum_=sum_)\n self.predict[\"test\"] = self.testY.tolist()\n self.create_train_test(name=\"MLP\", f_size=min_value, offset=offset, output_width=output_width, sum_=sum_)\n self.predict[\"test_dis\"] = self.testY.tolist()\n \n self.predict.dropna(inplace=True)", "def forc_model(self):\n lag1_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag1_by_store')\n lag2_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag2_by_store')\n for add in self.X.address1.unique():\n add_mask = self.X.address1 == add\n foo = self.X[ add_mask ].sort_values('visit_date', ascending=False)\n top_index = foo.index[0]\n clust = int(foo.cluster.values[0])\n # get values from last visit for store\n base_input = foo[self.model_mask_cols].values[0]\n base_actual = self.y[top_index]\n lag2_val = base_input[lag1_loc]\n lag1_val = base_actual\n\n for i in range(1, self.num_periods + 1):\n model = self.model_list[clust]\n inputs = base_input\n inputs[lag1_loc] = lag1_val\n inputs[lag2_loc] = lag2_val\n \n pred = model.predict(inputs.reshape(1, -1))\n self._update_cust_table(add, i, pred)\n \n lag2_val = lag1_val\n lag1_val = pred", "def 
trainModels():\n\n # load actives from ChEMBL\n actives = {}\n if not os.path.exists(DATA_FOLDER_PATH):\n os.mkdir(DATA_FOLDER_PATH)\n actives_file = [x for x in os.listdir(DATA_FOLDER_PATH) if x.startswith('actives_chembl') and x.endswith('.p')]\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n actives = chembl.loadChEMBLData(ACCESSION, IC_50_THRESHOLD, DATA_FOLDER_PATH)\n else:\n actives = pickle.load(open(DATA_FOLDER_PATH + actives_file[0], 'rb'))\n\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n chembl.computeConsensualIC50(actives, DATA_FOLDER_PATH)\n chembl.appendRDKitMols(actives, DATA_FOLDER_PATH)\n\n # load decoys downloaded from DUD\n decoys = {}\n if os.path.exists(DECOYS_SDF_FILE_PATH[:-4] + \".p\"):\n decoys = pickle.load(open(DECOYS_SDF_FILE_PATH[:-4] + \".p\", 'rb'))\n else:\n if os.path.exists(DECOYS_SDF_FILE_PATH):\n decoys = dud.getDecoys(DECOYS_SDF_FILE_PATH)\n else:\n print \"Decoys not found in: \" + DECOYS_SDF_FILE_PATH\n print \"Make sure you set the right path.\"\n exit()\n\n # merge both data sets\n compounds_all = {}\n compounds_all.update(actives)\n compounds_all.update(decoys)\n\n # compute Morgan fingerprints\n if os.path.exists(MERGED_DATASET_PATH) and not RELOAD_DATA:\n print \"Loading previously created dataset...\"\n compounds_all = pickle.load(open(MERGED_DATASET_PATH, 'rb'))\n else:\n fingerprinter.appendMorganFingerprints(compounds_all)\n\n actives = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if compounds_all[cmpndid]['active']}\n pickle.dump(actives, open(ACTIVES_DUMP, 'wb'))\n decoys = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if not compounds_all[cmpndid]['active']}\n\n # train and cross-validate multiple Naive Bayes Classifiers\n classification_results = dict()\n if not os.path.exists(CLASS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n classification_results = classification.naiveBayesClassifierTraining(compounds_all)\n print \"Saving results...\"\n pickle.dump(classification_results, open(CLASS_RESULTS_SAVE_FILE_PATH, 'wb'))\n print \"Finished analysis.\"\n else:\n print \"Loading previous results...\"\n classification_results = pickle.load(open(CLASS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n # have fun with the classification results\n print \"# CLASSIFICATION STATISTICS #\"\n classification.playWithResults(classification_results)\n\n # cluster actives according to their similarity and keep only the diverse molecules\n actives_testset = dict()\n if CLUSTER:\n clusters = utilities.clusterMols(actives)\n actives_kept = dict()\n for cluster in clusters:\n actives_kept[cluster[0]] = actives[cluster[0]]\n remains = cluster[1:]\n actives_filtered_out = {chmblid : actives[chmblid] for chmblid in remains}\n actives_testset.update(actives_filtered_out)\n actives = actives_kept\n\n # estimate maximum distances between active molecules to set threshold for the application domain\n # distance_actives = regression.estimateDistanceThreshold(actives) # median of distances between two actives\n # min_distance_decoys, max_distance_decoys = regression.compareDistances(actives, decoys) # average min/max distance of closest/farthest decoy from any of the actives\n # print \"median of distances between two actives: \" + str(distance_actives)\n # print \"average min/max distance of closest/farthest decoy from any of the actives: \" + str(min_distance_decoys) + \"/\" + str(max_distance_decoys)\n\n # Support vector regression\n regression_results = dict()\n if not 
os.path.exists(REGRESS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n regression_results = regression.supportVectorRegression(actives)\n pickle.dump(regression_results, open(REGRESS_RESULTS_SAVE_FILE_PATH, 'wb'))\n else:\n regression_results = pickle.load(open(REGRESS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n\n # do something with the regression results\n print \"# REGRESSION STATISTICS #\"\n regression.playWithResults(regression_results, decoys, actives_testset)\n\n return classification_results['final_model'], regression_results['final_model']", "def _create_models_by_madx(accel_inst, dpps):\n model_creator = creator.CREATORS[accel_inst.NAME][\"nominal\"]\n model_creator.prepare_run(accel_inst, accel_inst.model_dir)\n madx_script = accel_inst.get_multi_dpp_job(dpps)\n model_creator.run_madx(madx_script,\n logfile=os.path.join(accel_inst.model_dir, \"w_analysis_multidpp.log\"),\n writeto=os.path.join(accel_inst.model_dir, \"w_analysis_multidpp.madx\"),\n )", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])", "def wmd4Docs(docs):\n\n fullpath = path.join(prefix,results_folder, project_folder)\n fullname = fullpath + simWMDMatrixFile\n if os.path.exists(fullname):\n print(\" ... reading WMD matrix from disk ...\")\n start = timer()\n vals = []\n\n reader = csv.reader(open(fullname), delimiter=\",\")\n x = list(reader)\n vals = np.array(x).astype(\"float\")\n print(\" ... Done in {0:5.2f} seconds.\\n\".format(timer()-start))\n return vals\n\n wmd_corpus = []\n for doc in docs:\n # doc = nltkPreprocessing(doc) # already done outside\n wmd_corpus.append(doc)\n\n\n print(\"## Building model for WMD .... \")\n start = timer()\n modelWord2Vec = trainingModel4wmd(wmd_corpus)\n print(\"... Done in {0:5.2f} seconds.\\n\".format(timer()-start))\n\n print (\"## Computing distances using WMD (parallel version [p={0}])\\\n ...\".format(nCores))\n\n nDocs = len(wmd_corpus)\n start = timer()\n # create list of tasks for parallel processing\n tasks = []\n for i in range(nDocs):\n for j in range(i+1,nDocs):\n tasks.append([docs[i],docs[j]])\n\n p = Pool(nCores)\n # results = p.starmap(model.wmdistance, product(docs, repeat=2))\n results = p.starmap(modelWord2Vec.wmdistance, tasks)\n p.close()\n p.join()\n print(\"... 
done with distance computation in {0:5.2f} seconds.\\n\".format(timer()-start))\n\n\n print(\"Copying matrix ...\")\n start = timer()\n # copy upper triangular vector into matrix\n vals = [ [0 for i in range(nDocs)] for j in range(nDocs)]\n progr = 0\n for i in range(nDocs):\n for j in range(i):\n vals[i][j] = vals[j][i]\n for j in range(i+1,nDocs):\n vals[i][j] = results[progr]\n progr += 1\n \n print(\"... Done in {0:5.2f} seconds.\\n\".format(timer()-start))\n\n # save matrix on disk\n\n with open(fullname, \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(vals)\n\n print(\"Word Mover's Distances written on disk file '\",fullname, \"' \")\n \n return vals", "def distances(self):", "def main():\n\n # Create argument parser\n parser = ArgumentParser()\n parser.add_argument('datadir', type=str, help='Directory of LC files')\n parser.add_argument('metatable', type=str,\n help='Metatable containing each object, redshift, peak time guess, mwebv, object type')\n parser.add_argument('--zpt', type=float, default=DEFAULT_ZPT, help='Zero point of LCs')\n parser.add_argument('--lm', type=float, default=DEFAULT_LIM_MAG, help='Survey limiting magnitude')\n parser.add_argument('--outdir', type=str, default='./products/',\n help='Path in which to save the LC data (single file)')\n args = parser.parse_args()\n\n objs, redshifts, obj_types, peaks, ebvs = read_in_meta_table(args.metatable)\n\n # Grab all the LC files in the input directory\n file_names = []\n for obj in objs:\n file_name = args.datadir + 'PS1_PS1MD_' + obj + '.snana.dat'\n file_names.append(file_name)\n\n # Create a list of LC objects from the data files\n lc_list = read_in_LC_files(file_names, objs)\n\n # This needs to be redone when retrained\n # TODO: Need to change this whenever you retrain...\n filt_dict = {'g': 0, 'r': 1, 'i': 2, 'z': 3}\n wvs = np.asarray([5460, 6800, 7450, 8700])\n\n # Update the LC objects with info from the metatable\n my_lcs = []\n for i, my_lc in enumerate(lc_list):\n my_lc.add_LC_info(zpt=args.zpt, mwebv=ebvs[i],\n redshift=redshifts[i], lim_mag=args.lm,\n obj_type=obj_types[i])\n my_lc.get_abs_mags()\n my_lc.sort_lc()\n pmjd = my_lc.find_peak(peaks[i])\n my_lc.shift_lc(pmjd)\n my_lc.correct_time_dilation()\n my_lc.filter_names_to_numbers(filt_dict)\n my_lc.correct_extinction(wvs)\n my_lc.cut_lc()\n my_lc.make_dense_LC(4)\n my_lcs.append(my_lc)\n save_lcs(my_lcs, args.outdir)", "def fit(self,vector_graphs,perms):\n if self._interm_averaged_model_num_instances > 0:\n # starting from a previously saved model\n sum_weight_vectors = self._interm_averaged_model * self._interm_averaged_model_num_instances\n done_instances = self._interm_averaged_model_num_instances\n print('x')\n else:\n done_instances = 0\n sum_weight_vectors = np.zeros(shape=self._weights.shape)\n \n vector_graphs, perms = shuffle(vector_graphs,perms)\n learning_curve_pairs = [(0,0) for x in vector_graphs]\n learning_curve_triples = [(0,0) for x in vector_graphs]\n learning_curve_quads = [(0,0) for x in vector_graphs]\n exact_matches = [0 for x in vector_graphs]\n seen_instances = [0 for x in vector_graphs]\n total_instances_seen = 0\n save_gap = 5000 #000 # 5000 # the maximum number of instances between pickles\n for ind in range(self._num_iters):\n instance_index = 0\n error_instances = []\n for G,correct_perm in zip(vector_graphs,perms):\n if total_instances_seen >= done_instances:\n if self._greedy_inference:\n predicted_perm = self.greedy_inference(G)\n else:\n predicted_perm = 
find_min_hamiltonian_path(G,self._weights)\n if predicted_perm == None:\n continue\n self._weights = self._weights + \\\n (self._eta * ( G.get_sum_path(correct_perm) - G.get_sum_path(predicted_perm) )).todense()\n try:\n predicted_perm_c = convert_edges_perm(predicted_perm)\n correct_perm_c = convert_edges_perm(correct_perm)\n learning_curve_pairs[instance_index] = \\\n num_agreeing_tuples(predicted_perm_c,correct_perm_c,2)\n learning_curve_triples[instance_index] = \\\n num_agreeing_tuples(predicted_perm_c,correct_perm_c,3)\n learning_curve_quads[instance_index] = \\\n num_agreeing_tuples(predicted_perm_c,correct_perm_c,4)\n seen_instances[instance_index] = 1\n exact_matches[instance_index] = (1 if set(predicted_perm) == set(correct_perm) else 0) \n self.print_learning_curve(ind,instance_index,learning_curve_pairs,learning_curve_triples,\\\n learning_curve_quads,1.0*sum(exact_matches)/sum(seen_instances))\n sum_weight_vectors = sum_weight_vectors + self._weights\n except Exception:\n error_instances.append(instance_index)\n print('Incorrect decoding in training. Instance '+str(ind)+' '+str(instance_index))\n instance_index += 1\n total_instances_seen += 1\n if len(perms) > save_gap and self._model_pickle and total_instances_seen % save_gap == 0:\n self._interm_averaged_model = sum_weight_vectors / total_instances_seen\n self._interm_averaged_model_num_instances = total_instances_seen\n f_pickle = open(self._model_pickle+'_'+str(total_instances_seen)+'inst','w')\n pickle.dump(self,f_pickle,pickle.HIGHEST_PROTOCOL)\n f_pickle.close()\n if self._calc_train_acc:\n print('Training accuracy iteration #'+str(ind)+':'+str(self.test_on_data(vector_graphs,perms)))\n if self._model_pickle:\n self._interm_averaged_model = sum_weight_vectors / ((ind+1) * len(perms))\n self._interm_averaged_model_num_instances = (ind+1) * len(perms)\n f_pickle = open(self._model_pickle+'_iter'+str(ind),'w')\n pickle.dump(self,f_pickle,pickle.HIGHEST_PROTOCOL)\n f_pickle.close()\n if self._averaged:\n self._weights = sum_weight_vectors / (self._num_iters * len(perms))\n self._interm_averaged_model = self._weights\n self._interm_averaged_model_num_instances = (self._num_iters * len(perms))\n if self._model_pickle:\n f_pickle = open(self._model_pickle+'_averaged','w')\n pickle.dump(self,f_pickle,pickle.HIGHEST_PROTOCOL)\n f_pickle.close()\n if self._calc_train_acc:\n print('Training accuracy (after averaging):'+str(self.test_on_data(vector_graphs,perms)))\n return error_instances", "def run(model_filename, params, seed, H_max=14.0):\n\n model_file = ossssim.ModelOutputFile(model_filename)\n model_file.colnames = ['a', 'e', 'inc', 'node', 'peri', 'M', 'H', 'x', 'y', 'z', 'delta',\n 'vx', 'vy', 'vz', 'delta_v', 'dt', 'comp']\n # model_file.colors = ossssim.definitions.COLORS.values()\n model_file.longitude_neptune=6.876 * units.rad\n model_file.epoch=2456839.5 * units.day\n model_file.write_header(seed)\n\n res_table = Table.read(params, format='ascii')\n H_model = 8.66\n N1 = H_cfd(H_model)\n N2 = H_cfd(H_max)\n print(f\"Scaling up by {N2/N1}\")\n for row in res_table:\n models = {}\n j=int(row['j'])\n k=int(row['k'])\n q_c = float(row['q_c'])\n q_w = float(row['q_w'])\n sigma_i = float(row['sigma_i'])\n comp=f\"Res-{j}:{k}\"\n N = int(row['MedianPop']*N2/N1)\n print(f\"Generating {N} sources for row:\\n{row}\")\n components = ['sym', 'leading', 'trailing']\n selections = {components[0]: 1.0,\n components[1]: 2.0,\n components[2]: 2.0}\n models[components[0]] = Symmetric(j=j, k=k, component=comp,\n q_c=row['q_c'], 
q_w=row['q_w'], sigma_i=sigma_i, H_max=H_max, res_amp_low=20.0*units.degree,\n res_amp_high=160*units.degree, res_amp_mid=95*units.degree, size=min(N, 1000000))\n if k == 1:\n selections = {components[0]: 0.3,\n components[1]: 0.3 + 0.35,\n components[2]: 1}\n models[components[1]] = Asymmetric(j=j, k=k, component=comp,\n q_c=q_c, q_w=q_w, sigma_i=sigma_i, H_max=H_max, size=min(N, 1000000))\n models[components[2]] = Asymmetric(leading=False, j=j, k=k, component=comp,\n q_c=q_c, q_w=q_w, sigma_i=sigma_i, H_max=H_max, size=min(N, 1000000))\n\n niter = 0\n while niter<N:\n niter += 1\n selection = numpy.random.rand()\n # print(components, selection, selections)\n component = list(filter(lambda x: selection<selections[x], components))[0]\n particle = next(models[component])\n delta = (particle['x']**2 + particle['y']**2 + particle['z']**2)**0.5\n particle_v = (particle['vx']**2 + particle['vy']**2 + particle['vz']**2)**0.5\n if delta < nhtraj['r']:\n continue\n dpos = [particle[c] - nhtraj[c] for c in ['x', 'y', 'z']]\n dvel = [nhtraj[c] - particle[c] for c in ['vx', 'vy', 'vz']]\n long, lat, r = LongLat(dpos[0].to('au'), dpos[1].to('au'), dpos[2].to('au'))\n vlong, vlat, vel = LongLat(dvel[0].to('au/year'), dvel[1].to('au/year'), dvel[2].to('au/year'))\n dt = r / vel\n sep = r*numpy.arccos(numpy.sin(vlat)*numpy.sin(lat)+numpy.cos(vlat)*numpy.cos(lat)*numpy.cos(vlong-long))/units.rad\n delta_v = numpy.fabs(sep / dt)\n if delta_v < 3.00*units.km/units.second:\n tdict = {}\n for col in particle.colnames:\n tdict[col] = particle[col]\n tdict['delta_v'] = delta_v\n tdict['dt'] = dt\n tdict['delta'] = delta.to('au').value\n model_file.write_row(tdict)\n\n model_file.write_footer(n_iter=0, n_hits=0, n_track=0)", "def __init__(self, obs_tab, leg_tab, hap_tab_per_group, number_of_haplotypes_per_group, models_dict, ancestral_makeup):\n\n print('Initializing the algorithm for distant admixtures:')\n\n if not all(len(leg_tab)==len(hap_tab) for hap_tab in hap_tab_per_group.values()):\n raise Exception('Error: the number of SNPs in the LEGEND file differ from the number of SNPs in the HAP file.')\n\n if sum(ancestral_makeup.values())<0.999:\n raise Exception('Error: ancestry proportions must sum to one.')\n\n self.models_dict = models_dict\n self.number_of_haplotypes_per_group = number_of_haplotypes_per_group\n self.ancestral_makeup = ancestral_makeup #Keys correspond to group2 names and values to haplotype proportions.\n\n for group2 in ancestral_makeup:\n if group2 not in hap_tab_per_group:\n raise Exception('Error: %s is missing from the reference panel.' % group2)\n\n self.hap_dict_per_group = {}\n self.fraction_of_matches = {}\n\n for group2, hap_tab in hap_tab_per_group.items():\n\n self.hap_dict_per_group[group2], self.fraction_of_matches[group2] = \\\n self.build_hap_dict(obs_tab, leg_tab, hap_tab, number_of_haplotypes_per_group[group2])\n print('%.2f%% of the observed alleles matched the reference panel of %s.' 
% (100*self.fraction_of_matches[group2], group2))", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def test_input_mutations(self):\n local_sf = copy.copy(self.sf)\n local_dist = copy.deepcopy(self.distance)\n local_radius = copy.deepcopy(self.radius)\n local_min_core_neighbors = copy.deepcopy(self.min_core_neighbors)\n\n local_model = tc.dbscan.create(\n self.sf,\n distance=self.distance,\n radius=self.radius,\n min_core_neighbors=self.min_core_neighbors,\n verbose=False,\n )\n\n assert_sframe_equal(self.sf, local_sf)\n self.assertEqual(self.distance, local_dist)\n self.assertEqual(self.radius, local_radius)\n self.assertEqual(self.min_core_neighbors, local_min_core_neighbors)", "def main(**kwargs):\n data_file = kwargs.get('data_file', None)\n predict_unlabelled = kwargs.get('predict_unlabelled', False)\n output_preds = kwargs.get('output_preds', True)\n eval_results = kwargs.get('eval_results', True)\n\n # Prepare run_str\n run_str = datetime.now().strftime('%Y%m%d%H%M')\n\n initialise_print_logger('logs/prediction-' + run_str + '.log')\n\n print('Starting sharecast prediction:', run_str)\n\n # Load and divide data\n share_data = load_data(data_file)\n gc.collect()\n\n print('Number of \"NA\" symbols:',\n share_data[share_data['symbol'] == 'NA'].shape[0])\n\n # Divide data into symbols and general data for training an testing\n if predict_unlabelled:\n # Only return x values\n df_all_x, df_symbol_date = prepare_data_for_model(share_data, False)\n else:\n # Return x and y values\n df_all_x, df_all_y, df_all_actuals, df_symbol_date = prepare_data_for_model(\n share_data, True)\n\n del df_all_y\n\n del share_data\n gc.collect()\n\n print('Number of \"NA\" symbols:',\n df_symbol_date[df_symbol_date['symbol'] == 'NA'].shape[0])\n\n # Retain model names for train and test\n print('Retaining model name data. Number of rows:', len(df_all_x))\n model_names = df_all_x['model'].values\n gics_sectors = df_all_x['GICSSector'].values\n gics_industry_groups = df_all_x['GICSIndustryGroup'].values\n gics_industries = df_all_x['GICSIndustry'].values\n\n # Fix the names used in the GICS data - remove '&' ',' and ' '\n gics_sectors = fix_categorical(gics_sectors)\n gics_industry_groups = fix_categorical(gics_industry_groups)\n gics_industries = fix_categorical(gics_industries)\n\n # Drop model names and GICS values\n df_all_x = df_all_x.drop(\n ['model', 'GICSSector', 'GICSIndustryGroup', 'GICSIndustry'], axis=1)\n\n print('Loading pre-processing models')\n # Load pre-processing models\n symbol_encoder = load('models/se.pkl.gz')\n imputer = load('models/imputer.pkl.gz')\n scaler = load('models/scaler.pkl.gz')\n\n print('Executing pre-processing. Number of rows:', len(df_all_x))\n # Execute pre-processing\n df_all_x = execute_preprocessor(df_all_x, symbol_encoder, imputer, scaler)\n\n print('Loading keras models. 
Number of rows:', len(df_all_x))\n # Load keras models\n keras_models = {\n 'mape_model': load_model('models/keras-mape-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n 'mae_model': load_model('models/keras-mae-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n 'mae_intermediate_model': load_model('models/keras-mae-intermediate-model.h5',\n custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n }\n\n print('Loading xgboost model list')\n xgb_models = load_xgb_models()\n\n print('Loading xgboost industry model list')\n xgb_industry_models = load_xgb_models('industry')\n\n predictions = execute_model_predictions(\n df_all_x, model_names, gics_industry_groups, xgb_models, xgb_industry_models, keras_models)\n\n print('Loading bagging models')\n bagging_model = load_model('models/keras-bagging-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n })\n bagging_scaler = load('models/deep-bagging-scaler.pkl.gz')\n deep_bagged_predictions = execute_deep_bagging(\n bagging_model, bagging_scaler, predictions)\n predictions['deep_bagged_predictions'] = deep_bagged_predictions\n\n if eval_results:\n assess_results(predictions, model_names, df_all_actuals, run_str)\n\n if output_preds:\n output_predictions(predictions, df_symbol_date, run_str)\n\n print('Prediction completed')", "def __init__(self):\n\n # Loop over the models.\n for model_index in range(len(MODELS)):\n # Aliases.\n model = MODELS[model_index]\n model_text = MODEL_TEXT[model_index]\n\n # Loop over the tags.\n for tag in TAGS:\n # Set up the variables to loop over.\n if model in ['rotor', 'free_rotor']:\n vars = ['Z']\n elif model in ['iso_cone_free_rotor', 'iso_cone_torsionless']:\n vars = ['X']\n elif model in ['iso_cone']:\n vars = ['X', 'Z']\n elif model in ['double_rotor', 'pseudo-ellipse_free_rotor', 'pseudo-ellipse_torsionless']:\n vars = ['X', 'Y']\n elif model in ['pseudo-ellipse']:\n vars = ['X', 'Y', 'Z']\n else:\n raise RelaxError(\"Unknown model '%s'.\" % model)\n\n # Loop over the variables.\n for var in vars:\n # The file name.\n file_name = '_%s_%s_theta_%s_calc.agr' % (model, tag, lower(var))\n print(\"Creating the '*%s' files.\" % file_name)\n\n # Set up the eigenframe.\n self.setup_eigenframe(tag=tag)\n\n # The Kronecker product of the eigenframe rotation.\n Rx2_eigen = kron_prod(self.eigenframe, self.eigenframe)\n\n # Set the initial storage structures.\n self.init_storage()\n\n # Loop over the angle incs.\n for i in range(INC+1):\n # Get the angle for the increment.\n theta = self.get_angle(i-1, model=model, var=var)\n\n # Vary X.\n if var == 'X':\n theta_x = theta\n theta_y = THETA_Y\n theta_z = THETA_Z\n\n # Vary Y.\n elif var == 'Y':\n theta_x = THETA_X\n theta_y = theta\n theta_z = THETA_Z\n\n # Vary Z.\n elif var == 'Z':\n theta_x = THETA_X\n theta_y = THETA_Y\n theta_z = theta\n\n # Calculate the frame order matrices.\n if model == 'rotor':\n self.first_frame_order[i] = rotor.compile_1st_matrix_rotor(self.first_frame_order[i], self.eigenframe, theta_z)\n self.second_frame_order[i] = rotor.compile_2nd_matrix_rotor(self.second_frame_order[i], Rx2_eigen, theta_z)\n elif model == 'free_rotor':\n self.first_frame_order[i] = free_rotor.compile_1st_matrix_free_rotor(self.first_frame_order[i], 
self.eigenframe)\n self.second_frame_order[i] = free_rotor.compile_2nd_matrix_free_rotor(self.second_frame_order[i], Rx2_eigen)\n elif model == 'iso_cone':\n self.first_frame_order[i] = iso_cone.compile_1st_matrix_iso_cone(self.first_frame_order[i], self.eigenframe, theta_x, theta_z)\n self.second_frame_order[i] = iso_cone.compile_2nd_matrix_iso_cone(self.second_frame_order[i], Rx2_eigen, theta_x, theta_z)\n elif model == 'iso_cone_free_rotor':\n self.first_frame_order[i] = iso_cone_free_rotor.compile_1st_matrix_iso_cone_free_rotor(self.first_frame_order[i], self.eigenframe, theta_x)\n self.second_frame_order[i] = iso_cone_free_rotor.compile_2nd_matrix_iso_cone_free_rotor(self.second_frame_order[i], Rx2_eigen, theta_x)\n elif model == 'iso_cone_torsionless':\n self.first_frame_order[i] = iso_cone_torsionless.compile_1st_matrix_iso_cone_torsionless(self.first_frame_order[i], self.eigenframe, theta_x)\n self.second_frame_order[i] = iso_cone_torsionless.compile_2nd_matrix_iso_cone_torsionless(self.second_frame_order[i], Rx2_eigen, theta_x)\n elif model == 'pseudo-ellipse':\n self.first_frame_order[i] = pseudo_ellipse.compile_1st_matrix_pseudo_ellipse(self.first_frame_order[i], self.eigenframe, theta_x, theta_y, theta_z)\n self.second_frame_order[i] = pseudo_ellipse.compile_2nd_matrix_pseudo_ellipse(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y, theta_z)\n elif model == 'pseudo-ellipse_free_rotor':\n self.first_frame_order[i] = pseudo_ellipse_free_rotor.compile_1st_matrix_pseudo_ellipse_free_rotor(self.first_frame_order[i], self.eigenframe, theta_x, theta_y)\n self.second_frame_order[i] = pseudo_ellipse_free_rotor.compile_2nd_matrix_pseudo_ellipse_free_rotor(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y)\n elif model == 'pseudo-ellipse_torsionless':\n self.first_frame_order[i] = pseudo_ellipse_torsionless.compile_1st_matrix_pseudo_ellipse_torsionless(self.first_frame_order[i], self.eigenframe, theta_x, theta_y)\n self.second_frame_order[i] = pseudo_ellipse_torsionless.compile_2nd_matrix_pseudo_ellipse_torsionless(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y)\n elif model == 'double_rotor':\n self.first_frame_order[i] = double_rotor.compile_1st_matrix_double_rotor(self.first_frame_order[i], self.eigenframe, theta_y, theta_x)\n self.second_frame_order[i] = double_rotor.compile_2nd_matrix_double_rotor(self.second_frame_order[i], Rx2_eigen, theta_y, theta_x)\n else:\n raise RelaxError(\"Unknown model '%s'.\" % model)\n\n # Write the data.\n self.write_data(file_name=file_name, model=model, model_text=model_text, var=var)", "def from_naadsm_file(self, root, ns):\n models=root.find(\"models\")\n self.disease_by_type=dict()\n self.disease_by_id=dict()\n for disease_model in models.findall(\"disease-model\", ns):\n production_type=disease_model.attrib[\"production-type\"]\n production_id=disease_model.attrib[\"production-type-id\"]\n dm=DiseaseModel()\n dm.from_naadsm_file(disease_model, ns)\n self.disease_by_type[production_type]=dm\n self.disease_by_id[production_id]=dm\n logger.debug(\"result of find quarantine {0}\".format(models.find(\n \"quarantine-model\", ns)))\n if models.find(\"quarantine-model\", ns) is not None:\n logger.debug(\"Using quarantine model\")\n self.quarantine=QuarantineModel()\n else:\n self.quarantine=NoQuarantineModel()\n\n self.global_detection=GlobalDetectionModel()\n self.detect_models=dict()\n for detect_model in models.findall(\"detection-model\", ns):\n production_type=detect_model.attrib[\"production-type\"]\n 
production_id=detect_model.attrib[\"production-type-id\"]\n dm=DetectionModel(self.global_detection)\n dm.from_naadsm_file(detect_model, ns)\n self.detect_models[production_type]=dm\n logger.debug(dm)\n\n self.spread_models=dict()\n for airborne in [\"airborne-spread-exponential-model\",\n \"airborne-spread-model\"]:\n for neighbor_model in models.findall(airborne, ns):\n from_production=neighbor_model.attrib[\"from-production-type\"]\n to_production=neighbor_model.attrib[\"to-production-type\"]\n im=InfectNeighborModel()\n im.from_naadsm_file(neighbor_model, airborne, ns)\n self.spread_models[(from_production, to_production)]=im\n logger.debug(\"Spread models for {0}\".format(self.spread_models.keys()))\n\n self.contact_models=collections.defaultdict(list)\n for indirect_model in models.findall(\"contact-spread-model\", ns):\n from_production=indirect_model.attrib[\"from-production-type\"]\n to_production=indirect_model.attrib[\"to-production-type\"]\n contact_type=indirect_model.attrib[\"contact-type\"]\n if contact_type==\"indirect\":\n inm=IndirectModel()\n inm.from_naadsm_file(indirect_model, ns)\n logger.debug(\"from_naadsm_file: indirect {0}\".format(inm))\n self.contact_models[from_production].append(inm)\n elif contact_type==\"direct\":\n logger.warn(\"Ignoring direct contact model\")\n else:\n logger.warn(\"Unknown contact spread model {0}\".format(\n contact_type))\n\n self.farm_models=dict() # production_type => farm model\n for production_type in self.disease_by_type.keys():\n f=Farm()\n f.production_type=production_type\n f.disease=self.disease_by_type[production_type]\n f.quarantine=self.quarantine\n f.detection=self.detect_models[production_type]\n self.farm_models[production_type]=f\n\n self.models_loaded=True", "def save_predict_results():\n\n ori_lst = []\n for i in range(1, 4):\n ori_df = pd.read_csv('Training_Model'+str(i)+'.csv')\n ori_list = ori_df['SMILES'].tolist()\n ori_lst.append(ori_list)\n frames = []\n gen_mols = []\n gen_fps = []\n for i, group in enumerate(['all', 'class3', 'prom']):\n gen_df = pd.read_csv('novel_sampled_cano_script_'+group+'_until.csv')\n gen_list = gen_df['SMILES'].tolist()\n print('Number of molecules in training for model {} is {}'.format(i+1, len(ori_lst[i])))\n over, num, smi_list = get_smi_list_overlap(ori_lst[i], gen_list)\n smi_mols = get_mols(smi_list)\n smi_fps, failed_mols = get_fingerprints(smi_mols)\n for idx in sorted(failed_mols, reverse=True):\n del smi_list[idx]\n smi_df = pd.Series(data=smi_list, name='SMILES').to_frame()\n smi_df.loc[:,'Group'] = i+1\n frames.append(smi_df)\n\n unique_df = pd.concat(frames)\n gen_smi = unique_df['SMILES'].tolist()\n gen_mols = get_mols(gen_smi)\n gen_fps, _ = get_fingerprints(gen_mols)\n unique_df['Gaps'] = predict_property('gbdt_regessor_gap_regu.joblib', gen_fps)\n unique_df['Dips'] = predict_property('gbdt_regessor_dip_reg.joblib', gen_fps)\n promising_df = unique_df.loc[(unique_df['Gaps'] <= 2.0) & (unique_df['Dips']<=3.66)]\n unique_df.to_csv('Unique_models_15epoch.csv', index=False)\n promising_df.to_csv('Promising_models_15epoch.csv', index=False)", "def generateModelData(self, params, standoffDistance, range_tof, nBins_tof, ddnXSfxn,\n dedxfxn, beamTimer, nSamples, getPDF=False, storeDTOF=False):\n beamE, eLoss, scale, s, scaleFactor = params\n e0mean = 900.0\n dataHist = np.zeros((self.x_bins, self.eD_bins))\n \n dedxForODE = lambda x, y: dedxfxn(energy=y,x=x)\n \n nLoops = int(np.ceil(nSamples / self.nEvPerLoop))\n for loopNum in range(0, nLoops):\n eZeros = np.repeat(beamE, 
self.nEvPerLoop)\n eZeros -= lognorm.rvs(s=s, loc=eLoss, scale=scale, size=self.nEvPerLoop)\n checkForBadEs = True\n while checkForBadEs:\n badIdxs = np.where(eZeros <= 0.0)[0]\n nBads = badIdxs.shape[0]\n if nBads == 0:\n checkForBadEs = False\n replacements = np.repeat(beamE, nBads) - lognorm.rvs(s=s, loc=eLoss, scale=scale, size=nBads)\n eZeros[badIdxs] = replacements\n \n \n odesolver = ode( dedxForODE ).set_integrator('dopri5').set_initial_value(eZeros)\n eD_atEachX = np.zeros(self.eD_bins)\n for idx, xEvalPoint in enumerate(self.x_binCenters):\n sol = odesolver.integrate( xEvalPoint )\n data_weights = ddnXSfxn.evaluate(sol)\n hist, edEdges = np.histogram( sol, bins=self.eD_bins,\n range=(self.eD_minRange,\n self.eD_maxRange),\n weights=data_weights)\n dataHist[idx,:] += hist\n hist, edEdges = np.histogram(sol, bins=self.eD_bins,\n range=(self.eD_minRange,\n self.eD_maxRange),\n density=False)\n eD_atEachX = np.vstack((eD_atEachX, hist))\n\n dataHist /= np.sum(dataHist*self.eD_binSize*self.x_binSize)\n e0mean = np.mean(eZeros)\n drawHist2d = (np.rint(dataHist * nSamples)).astype(int)\n tofs = []\n tofWeights = []\n eN_list = []\n eN_atEachX = np.zeros(self.eD_bins)\n# dtofs = []\n for index, weight in np.ndenumerate( drawHist2d ):\n cellLocation = self.x_binCenters[index[0]]\n effectiveDenergy = (e0mean + self.eD_binCenters[index[1]])/2\n tof_d = getTOF( masses.deuteron, effectiveDenergy, cellLocation )\n neutronDistance = (distances.tunlSSA_CsI.cellLength - cellLocation +\n standoffDistance )\n tof_n = getTOF(masses.neutron, self.eN_binCenters[index[1]],\n neutronDistance)\n zeroD_times, zeroD_weights = self.zeroDegTimeSpreader.getTimesAndWeights( self.eN_binCenters[index[1]] )\n tofs.append( tof_d + tof_n + zeroD_times )\n tofWeights.append(weight * zeroD_weights)\n eN_list.append(weight)\n# if storeDTOF:\n# dtofs.append(tof_d)\n if index[1] == self.eD_binMax:\n eN_arr = np.array(eN_list)\n eN_atEachX = np.vstack((eN_atEachX, eN_arr))\n eN_list = []\n \n\n tofData, tofBinEdges = np.histogram( tofs, bins=nBins_tof, range=range_tof,\n weights=tofWeights, density=getPDF)\n return (scaleFactor * self.beamTiming.applySpreading(tofData), \n eN_atEachX,\n eD_atEachX)", "def compare_inpaintings(root_dir,idx,sess,images_placeholder,embeddings,phase_train_placeholder):\n\n image_dir = os.path.join(root_dir,str(idx))\n original_image_path = os.path.join(image_dir,'original.jpg')\n gen_images_dir = os.path.join(image_dir,'gen')\n image_paths = []\n image_paths.append(os.path.join(image_dir,'original.jpg'))\n generated_image_paths = [os.path.join(gen_images_dir,f) for f in os.listdir(gen_images_dir) if os.path.isfile(os.path.join(gen_images_dir, f))]\n for path in generated_image_paths:\n image_paths.append(path)\n\n images = create_image_list(image_paths)\n\n # Run forward pass to calculate embeddings\n feed_dict = { images_placeholder: images, phase_train_placeholder:False }\n emb = sess.run(embeddings, feed_dict=feed_dict)\n\n nrof_images = len(image_paths)\n\n\n # Print distance matrix\n print('Distances w.r.t. 
original : {}'.format(original_image_path))\n dist_list = []\n dist_list.append(original_image_path) # Add path for DB indexing\n for i in range(1,nrof_images):\n model_name = image_paths[i].split('/')[-1].split('.')[0]\n #dist = np.sqrt(np.sum(np.square(np.subtract(emb[0,:], emb[i,:]))))\n dist = cosine(emb[0,:],emb[i,:])\n dist_list.append(dist)\n print('{} :: {}'.format(model_name.upper(),dist))\n return dist_list", "def generate_dps(wordcorpus):\r\n\r\n print('Loading model')\r\n model = KeyedVectors.load_word2vec_format(BIN_NAME, binary=True)\r\n print('Model loaded!')\r\n\r\n # Limit the corpus to words in the model\r\n wcl = list(word for word in wordcorpus if word in model.vocab)\r\n\r\n # Precompute word vectors so the loops are faster\r\n corpus = np.array([matutils.unitvec(model.word_vec(word)) for word in wcl])\r\n print('Created corpus with {} elements'.format(len(corpus)))\r\n\r\n print('Computing norms')\r\n model.init_sims(replace=True)\r\n\r\n # Save memory by deleting non-normed data\r\n syn0norm = model.syn0norm\r\n del model\r\n\r\n # Convert sys0norm to a memmap to further reduce memory\r\n print('Saving to memmap')\r\n inarr = np.memmap('inmemmap.dat', dtype=syn0norm.dtype, mode='w+', shape=syn0norm.shape)\r\n inarr[:] = syn0norm[:]\r\n outarr = np.memmap('outmemmap.dat', dtype=syn0norm.dtype, mode='w+', shape=(syn0norm.shape[0],))\r\n\r\n # Discard the array now that it's stored in a memmap\r\n del syn0norm\r\n\r\n print('Computing dot products')\r\n for c in range(0, int(inarr.shape[0]/CHUNKSIZE)):\r\n cend = min(inarr.shape[0], (c+1)*CHUNKSIZE)\r\n outarr[c*CHUNKSIZE:cend] = np.amax(np.inner(inarr[c*CHUNKSIZE:cend], corpus), axis=1)\r\n\r\n np.save(DP_NAME, outarr)\r\n\r\n del inarr\r\n del outarr\r\n gc.collect()\r\n\r\n os.remove('inmemmap.dat')\r\n os.remove('outmemmap.dat')", "def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)", "def 
__init__(self,models,extraparams=None,outputcontraction=None,\n interpolation='linear',interpolationdirection='y',\n offgrid=None):\n from operator import isMappingType\n\n if len(models)==2 and isMappingType(models[1]):\n modtype = get_model_class(models[0])\n params = np.array(models[1].values())\n params = [dict([(models[1].keys()[i],v)] for v in t) for i,t in enumerate(params.T)]\n models = [modtype(**params[i]) for m in range(len(params))]\n\n params = None\n\n for m in models:\n if params is None:\n params = m.params\n else:\n if m.params != params:\n raise ValueError('model %s does not match parameters for other models'%m)\n\n if extraparams is not None:\n self._extraparams = {}\n for n,ps in extraparams.iteritems():\n arr = np.array(ps)\n if extraparams[n].size != len(models):\n raise ValueError('too many/few extra parameters for parameter %s'%n)\n self._extraparams[n] = arr\n else:\n self._extraparams = None\n\n self._params = params\n self.models = tuple(models)\n self._extraparams = extraparams\n\n self.outputcontraction = outputcontraction\n self.interpolation = interpolation\n self.interpolationdirection = interpolationdirection\n self.offgrid = offgrid", "def create_data_model(list_of_vehicles, list_of_products):\n data = {}\n data['distance_matrix'] =[\n [\n 0, 18.5, 12.5, 10.5, 5.3, 34\n ],\n [\n 18.5, 0, 8.4, 10.9, 24.7, 20\n ],\n [\n 12.5, 8.4, 0, 4.5, 17.6, 22.2\n ],\n [\n 10.5, 10.9, 4.5, 0, 15.2, 24.6\n ],\n [\n 5.3, 24.7, 17.6, 15.2,0, 34.4\n ],\n [ 34, 20, 22.2, 24.6, 34.4, 0\n\n ],\n ]#map matrix\n #list_of_vheight = []\n #list_of_vwidth = []\n list_of_vlarge = []\n list_of_vweight = []\n for element in list_of_vehicles:\n #list_of_vheight.append(element..__trunk_dimension.__height)\n #list_of_vhwidht.append(element.__model.__trunk_dimension.__width)\n list_of_vlarge.append(element.GetModel().GetTrunkDimension().GetLarge())\n list_of_vweight.append(element.GetModel().GetTrunkDimension().GetWeight())\n\n #list_height = []\n #list_width = []\n list_large = []\n list_weight = []\n for element in list_of_products:\n #list_height.append(element.__product_dimension.__height)\n #list_width.append(element.__product_dimension.__width)\n list_large.append(element.GetProductDimension().GetLarge())\n list_weight.append(element.GetProductDimension().GetWeight())\n\n\n data['num_vehicles'] = len(list_of_vehicles) #number of vehicles\n data['depot'] = 0\n #data['vehicle_capacities_height'] = list_of_vheight\n #data['vehicle_capacities_width'] = list_of_vwidth\n data['vehicle_capacities_large'] = list_of_vlarge\n data['vehicle_capacities_weight'] = list_of_vweight\n #data['demand_height'] = list_height\n #data['demand_width'] = list_width\n data['demand_large'] = list_large\n data['demand_weight'] = list_weight\n return data", "def build_model(allidx,MAX_LENGTH,onlyArg):\n wordidx, labelidx, featuresidx, extraidx=allidx\n posidx, neridx, depidx, distanceidx, chnkidx, wikineridx, dbpedianeridx, subneridx = featuresidx\n\n main_input = Input(shape=(MAX_LENGTH,), name='main_input', dtype='int32')\n inputNodes=[main_input]\n\n w2vmodel=\"../embeddings/Domain-Word2vec.model\"\n\n embedding_matrix,EMBEDDING_DIM,vocabulary_size=prepare.wv_embedded(wordidx,w2vmodel)\n \n x = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, input_length=MAX_LENGTH, mask_zero=False)(main_input)\n numnode=int(EMBEDDING_DIM/2)\n\n # pos embedding\n inputNodes,pos_layer=layers.embedlayer(inputNodes,\"pos_input\",posidx,MAX_LENGTH)\n x=Concatenate()([x,pos_layer])\n 
numnode+=int(len(posidx)/2)\n\n # ner embedding\n inputNodes,ner_layer=layers.embedlayer(inputNodes,\"ner_input\",neridx,MAX_LENGTH)\n x=Concatenate()([x,ner_layer])\n numnode+=int(len(neridx)/2)\n\n inputNodes,wikiner_layer=layers.embedlayer(inputNodes,\"wikiner_input\",wikineridx,MAX_LENGTH)\n x=Concatenate()([x,wikiner_layer])\n numnode+=int(len(wikineridx)/2)\n\n inputNodes,dbpedianer_layer=layers.embedlayer(inputNodes,\"dbpedianer_input\",dbpedianeridx,MAX_LENGTH)\n x=Concatenate()([x,dbpedianer_layer])\n numnode+=int(len(dbpedianeridx)/2)\n\n # dep embedding\n inputNodes,dep0_layer=layers.embedlayer(inputNodes,\"dep0_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep0_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep1_layer=layers.embedlayer(inputNodes,\"dep1_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep1_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep2_layer=layers.embedlayer(inputNodes,\"dep2_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep2_layer])\n numnode+=int(len(depidx)/2)\n\n # chnk embedding\n inputNodes,lvl_layer=layers.embedlayer(inputNodes,\"lvl_input\",distanceidx,MAX_LENGTH)\n x=Concatenate()([x,lvl_layer])\n numnode+=int(len(distanceidx)/2)\n\n inputNodes,chnk_layer=layers.embedlayer(inputNodes,\"chnk_input\",chnkidx,MAX_LENGTH)\n x=Concatenate()([x,chnk_layer])\n numnode+=int(len(chnkidx)/2)\n\n # wikiclass embedding\n inputNodes,subner_layer=layers.embedlayer(inputNodes,\"subner_input\",subneridx,MAX_LENGTH)\n x=Concatenate()([x,subner_layer])\n numnode+=int(len(subneridx)/2)\n\n if onlyArg:\n neartrigger_input = Input(shape=(MAX_LENGTH,), name='neartrigger_input', dtype='int32')\n inputNodes.append(neartrigger_input)\n neartrigger_layer = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, \\\n input_length=MAX_LENGTH, mask_zero=False)(neartrigger_input)\n x=Concatenate()([x,neartrigger_layer])\n numnode+=50\n inputNodes,x,numnode=layers.extralayer(inputNodes,x,numnode,extraidx,featuresidx,MAX_LENGTH)\n\n lstm_out = Bidirectional(LSTM(numnode, dropout=0.2,return_sequences=True))(x)\n numnode=int((numnode+len(labelidx))*2/3)\n\n if onlyArg:\n lstm_out = SeqSelfAttention(attention_activation='tanh', attention_width=5)(lstm_out)\n\n lstm_out = Dropout(0.2)(lstm_out)\n out = Dense(numnode)(lstm_out)\n\n crf = CRF(len(labelidx), sparse_target=False) # CRF layer\n main_output=crf(out)\n loss=crf_loss #crf.loss_function\n acc=[crf_accuracy]\n\n model = Model(inputs=inputNodes, outputs=main_output) \n model.compile(loss=loss,optimizer=Adam(0.001),metrics=acc)\n model.summary()\n\n return model", "def export_embeddings(self):\n save_path = self.config.path_embeddings / self.model.model_name\n save_path.mkdir(parents=True, exist_ok=True)\n \n idx2ent = self.model.config.knowledge_graph.read_cache_data('idx2entity')\n idx2rel = self.model.config.knowledge_graph.read_cache_data('idx2relation')\n\n\n series_ent = pd.Series(idx2ent)\n series_rel = pd.Series(idx2rel)\n series_ent.to_pickle(save_path / \"ent_labels.pickle\")\n series_rel.to_pickle(save_path / \"rel_labels.pickle\")\n\n with open(str(save_path / \"ent_labels.tsv\"), 'w') as l_export_file:\n for label in idx2ent.values():\n l_export_file.write(label + \"\\n\")\n\n with open(str(save_path / \"rel_labels.tsv\"), 'w') as l_export_file:\n for label in idx2rel.values():\n l_export_file.write(label + \"\\n\")\n\n for parameter in self.model.parameter_list:\n all_ids = list(range(0, int(parameter.shape[0])))\n stored_name = parameter.name.split(':')[0]\n # 
import pdb; pdb.set_trace()\n\n if len(parameter.shape) == 2:\n all_embs = parameter.numpy()\n with open(str(save_path / (\"%s.tsv\" % stored_name)), 'w') as v_export_file:\n for idx in all_ids:\n v_export_file.write(\"\\t\".join([str(x) for x in all_embs[idx]]) + \"\\n\")\n\n df = pd.DataFrame(all_embs)\n df.to_pickle(save_path / (\"%s.pickle\" % stored_name))" ]
[ "0.5727781", "0.55241746", "0.55023146", "0.5344562", "0.5287414", "0.5233412", "0.5179906", "0.51727074", "0.5120181", "0.51190937", "0.5085125", "0.5083492", "0.50830466", "0.50505143", "0.5047023", "0.5040699", "0.50249517", "0.50155187", "0.50076103", "0.49986377", "0.4995664", "0.49813443", "0.49779236", "0.4977586", "0.49621275", "0.4960005", "0.49579188", "0.4955438", "0.49535397", "0.4953379" ]
0.6418641
0
Path of the directory that stores all the instances.
def instance_dir(self): return os.path.join(self.basedir, self.yml['instdir'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _InstanceDir(cls, instance_name):\n return utils.PathJoin(cls._ROOT_DIR, instance_name)", "def store_path(self):\n return path.join(env.store_home, self._store_path)", "def data_directory(self):\n\n return self.get_raw(\"data_directory\")", "def path(self):\n return self._container_dir", "def host_cache_dir(self):\n cache_dir = SpaCyModel.model_class_dir() / \"cache\"\n cache_dir.mkdir(exist_ok=True, parents=True)\n return cache_dir", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def service_directory(self) -> str:\n return pulumi.get(self, \"service_directory\")", "def config_dir(self) -> Path:\n return self._config_dir", "def data_dir(self) -> Path:\n return self._data_dir", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")", "def path(self) -> pathlib.Path:\n return DAEMON_RESOURCES_PATH / self.daemon_id", "def dirpath(self):\n return self.__edir", "def dirpath(self) -> str:\n return self._dirpath", "def config_directory(self):\n\n return self.get_raw(\"config_directory\")", "def host_cache_dir(self):\n cache_dir = Transformer.model_class_dir() / \"cache\"\n cache_dir.mkdir(exist_ok=True, parents=True)\n return cache_dir", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def get_instance_config_path():\n return join(settings.PROJECT_DIR, \"conf\", \"eoxserver.conf\")", "def Directory(self) -> str:", "def get_daemon_storage_dir(cls):\n\n return os.environ[cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY]", "def data_directories(self):\n\n return [simulation.outdir for simulation in self.simulations]", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def datadir(cls): # pylint: disable=E0213\n mod = sys.modules[cls.__module__]\n return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data')", "def homeDirectory(self):\n\t\treturn self.__homeDirectory", "def GetPath(self):\r\n\r\n return self.directory", "def confDir(self):\r\n return self._confDir", "def save_dir(self):\n return self._save_dir", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def ifaces_dir(self):\n return self.system_path(self._ifaces_dir)", "def path(self):\n return self.storage.path(self.name)" ]
[ "0.78036046", "0.7167195", "0.702", "0.6844666", "0.6840358", "0.6783134", "0.676215", "0.6758423", "0.67428887", "0.6675447", "0.6670271", "0.66667795", "0.66445255", "0.66307175", "0.66128314", "0.6605219", "0.6593088", "0.6589724", "0.6554919", "0.65276265", "0.6510622", "0.64901406", "0.6487794", "0.64626694", "0.64529943", "0.6429961", "0.64256555", "0.6407152", "0.6388761", "0.6388316" ]
0.7848447
0
Collects all successful runs and optionally parses their output.
def collect_successful_results(self, parse_fn=None): def successful_runs(verbose=False): for run in self.discover_all_runs(): finished = os.access(run.output_file_path('status'), os.F_OK) if not finished: if verbose: print("Skipping unfinished run {}/{}[{}]".format(run.experiment.name, run.instance.shortname, run.repetition)) continue if run.get_status().is_negative: if verbose: print("Skipping failed run {}/{}[{}]".format(run.experiment.name, run.instance.shortname, run.repetition)) continue yield run self.writeback_status_cache() if parse_fn: msg = "Calling 'Config.collect_successful_results()' with a parse function is deprecated and will be " \ "removed in future versions. Instead, call it without any parameters and it will return a " \ "generator of successful simexpal.base.Run objects." warnings.warn(msg, DeprecationWarning) res = [] for run in successful_runs(verbose=True): with open(run.output_file_path('out'), 'r') as f: res.append(parse_fn(run, f)) self.writeback_status_cache() return res else: return successful_runs()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_output(self):\n pass", "def collect_output(self):\n pass", "def task_parse_results():\n pass", "def __parse_success(self, fullname, results):\n match = NUMBER_PASSED_RE.match(results[0])\n if not match:\n raise ValueError(\"All passed line incorrect: '%s'\"\n % results[0])\n classname = self.__create_classname(fullname)\n nitems = int(match.group(1))\n cases = []\n for line in results[1:1+nitems]:\n match = ALLPASS_TEST_NAMES_RE.match(line)\n if not match:\n raise ValueError(\"Unexpected information line in \"\n \"all pass case: %s\" % line)\n ntests, name = int(match.group(1)), match.group(2)\n for idx in range(ntests):\n cases.append(TestCaseReport(classname, name, failure_descr=None))\n #endfor\n return cases", "def _parse_output(self, output):\n matched = False\n line_number = 0\n lines = output.strip().split(\"\\n\")\n for line in lines:\n line_number += 1\n line = line.rstrip()\n # import pdb; pdb.set_trace()\n if re.match(re.compile(r\"\\[SUCCESS\\]\"), line) and self.run_status != \"WARN\":\n self.run_status = \"SUCCESS\"\n self.run_summary = line\n self.run_status_details = self._parse_output_status_details(lines[line_number:])\n matched = True\n elif re.match(re.compile(r\"\\[FAILURE\\]\"), line):\n self.run_status = \"FAILURE\"\n self.run_summary = line\n self.run_status_details = self._parse_output_status_details(lines[line_number:])\n matched = True\n break\n elif re.match(re.compile(r\"\\[WARN\\]\"), line):\n self.run_status = \"WARN\"\n self.run_summary = line\n self.run_status_details = self._parse_output_status_details(lines[line_number:])\n matched = True\n if not matched:\n self.run_status = \"UNKNOWN\"\n self.run_summary = \"[UNKNOWN] log missing SUCCESS, FAILURE, or WARN message.\"\n\n return matched", "def send_results(self, collected_results: list):\n\n for scan in collected_results:\n raw_scan = scan.original_results\n scan_time = raw_scan.scan_info.scan_start_time.ToJsonString()\n logger.info('Scan: ' + raw_scan.tool_name + ' run at ' + scan_time)\n for issue in raw_scan.issues:\n logger.info('Issue: ' + str(issue))", "def collect(self,outfilename):\n # TODO actually gather results and check if run is successful\n if os.path.isfile(outfilename):\n self.completed=True\n else:\n self.completed=False", "def _compile_results(self):\n self.statements = stmts_from_json(self.__statement_jsons.values())\n if self.use_obtained_counts:\n self.__source_counts = get_available_source_counts(self.statements)\n self.__evidence_counts = get_available_ev_counts(self.statements)", "async def collect_final_outputs(self) -> None: # pylint: disable=too-many-branches\n self._become_current()\n\n missing_outputs = False\n assert self.step is not None\n\n did_sleep = False\n\n for pattern in sorted(self.step.output): # pylint: disable=too-many-nested-blocks\n formatted_pattern = fmt_capture(self.kwargs, pattern)\n if is_phony(pattern):\n Invocation.up_to_date[formatted_pattern] = UpToDate(self.name, self.newest_input_mtime_ns + 1)\n continue\n\n try:\n paths = glob_paths(formatted_pattern)\n if not paths:\n Logger.debug(f\"Did not make the optional output(s): {pattern}\")\n else:\n for path in paths:\n self.built_outputs.append(path)\n\n global touch_success_outputs # pylint: disable=invalid-name\n if touch_success_outputs.value:\n if not did_sleep:\n await self.done(asyncio.sleep(1.0))\n did_sleep = True\n Logger.file(f\"Touch the output: {path}\")\n Stat.touch(path)\n\n mtime_ns = Stat.stat(path).st_mtime_ns\n Invocation.up_to_date[path] = UpToDate(self.name, 
mtime_ns)\n\n if Logger.isEnabledFor(logging.DEBUG):\n if path == formatted_pattern:\n Logger.debug(f\"Has the output: {path} \" f\"time: {_datetime_from_nanoseconds(mtime_ns)}\")\n else:\n Logger.debug(\n f\"Has the output: {pattern} -> {path} \"\n f\"time: {_datetime_from_nanoseconds(mtime_ns)}\"\n )\n\n except NonOptionalException:\n self._become_current()\n Logger.error(f\"Missing the output(s): {pattern}\")\n missing_outputs = True\n break\n\n if missing_outputs:\n self.abort(\"Missing some output(s)\")", "def _get_check_run_results(\n self, commits: List[dict]) -> List[str]:\n failed_status = {'failure', 'cancelled', 'timed_out', 'action_required'}\n check_run_results = []\n for commit in commits:\n commit_ref = commit['sha']\n commit_check_run_results = get_commit_check_runs(\n self._repo_name, commit_ref, self._auth)\n if not commit_check_run_results:\n continue\n num_check_runs = commit_check_run_results['total_count']\n if num_check_runs == 0:\n check_run_results.append('none')\n continue\n status = 'passed'\n for commit_check_run_result in commit_check_run_results[\n 'check_runs']:\n conclusion = commit_check_run_result['conclusion']\n if conclusion in failed_status:\n status = 'failed'\n break\n check_run_results.append(status)\n return check_run_results", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def run(self, **kwargs):\n try:\n super().run(**kwargs)\n self.extract_nfvbench_results()\n self.__logger.info(\"NFVbench results were successfully parsed\")\n except Exception: # pylint: disable=broad-except\n self.__logger.exception(\"Cannot parse NFVbench results\")\n return self.EX_RUN_ERROR\n return self.EX_OK", "def _main(self):\n\n i = 0\n\n for i, step in enumerate(self._run_list):\n return_code = self._check_step_completed(i)\n\n if return_code == 0:\n logger.debug('Successful: \"{}\".'.format(list(step.keys())[0]))\n else:\n return_code = 1\n self._submit_step(i)\n\n # do not enter while loop for continuous monitoring\n if not self.monitor:\n break\n\n time.sleep(1)\n while return_code == 1 and self.monitor:\n time.sleep(5)\n return_code = self._check_step_completed(i)\n\n if return_code == 2:\n module, f_config = self._get_command_config(i)\n raise ExecutionError('Pipeline failed at step '\n '{} \"{}\" {}'\n .format(i, module, f_config))\n\n if i + 1 == len(self._run_list) and return_code == 0:\n logger.info('Pipeline job \"{}\" is complete.'\n .format(self._config.name))\n logger.debug('Output directory is: \"{}\"'\n .format(self._config.dirout))", "def getResults(self, cleanup=True):\n self.wait_on_job()\n stdout_str = self.ofile_string()\n stderr_str = self.efile_string()\n if cleanup:\n self.erase_files()\n return (stdout_str, stderr_str)", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def do_results(self, args):\n LeetTerminal.prompt = \"LEET> \"\n self._notified = False\n\n if self.finished_jobs:\n for job in self.finished_jobs:\n pretty_print(job)\n else:\n print(\"***No jobs have been completed.\")", "def collect_initial_outputs(self) -> None: # pylint: disable=too-many-branches\n assert self.step is not None\n missing_outputs = []\n for pattern in 
sorted(self.step.output):\n formatted_pattern = fmt_capture(self.kwargs, pattern)\n self.expanded_outputs.append(formatted_pattern)\n\n if is_phony(formatted_pattern):\n self.phony_outputs.append(formatted_pattern)\n Invocation.phony.add(formatted_pattern)\n continue\n\n try:\n paths = glob_paths(formatted_pattern)\n if not paths:\n Logger.debug(f\"Nonexistent optional output(s): {pattern}\")\n else:\n for path in paths:\n self.initial_outputs.append(path)\n if path == pattern:\n Logger.debug(f\"Existing output: {path}\")\n else:\n Logger.debug(f\"Existing output: {pattern} -> {path}\")\n except NonOptionalException:\n Logger.debug(f\"Nonexistent required output(s): {pattern}\")\n self.missing_output = formatted_pattern\n missing_outputs.append(capture2re(formatted_pattern))\n\n if self.new_persistent_actions:\n for path in self.old_persistent_outputs:\n if path in self.initial_outputs:\n continue\n\n was_reported = False\n for regexp in missing_outputs:\n if re.fullmatch(regexp, path):\n was_reported = True\n break\n\n if was_reported:\n continue\n\n if Stat.exists(path):\n Logger.debug(f\"Changed to abandon the output: {path}\")\n self.abandoned_output = path\n else:\n Logger.debug(f\"Missing the old built output: {path}\")\n self.missing_output = path\n\n Stat.forget(path)\n\n if (\n self.must_run_action\n or self.phony_outputs\n or self.missing_output is not None\n or self.abandoned_output is not None\n ):\n return\n\n for output_path in sorted(self.initial_outputs):\n if is_exists(output_path):\n continue\n output_mtime_ns = Stat.stat(output_path).st_mtime_ns\n if self.oldest_output_path is None or self.oldest_output_mtime_ns > output_mtime_ns:\n self.oldest_output_path = output_path\n self.oldest_output_mtime_ns = output_mtime_ns\n\n if Logger.isEnabledFor(logging.DEBUG) and self.oldest_output_path is not None:\n Logger.debug(\n f\"Oldest output: {self.oldest_output_path} \"\n f\"time: {_datetime_from_nanoseconds(self.oldest_output_mtime_ns)}\"\n )", "def collect(results, **kwargs):\n l = kwargs.get('logger')\n l.info(\n u'#{} Collect ADD.'.format(u'-' * 8)\n )\n\n l.info(\n u'#{} {} results from {} total items.'.format(\n u'-' * 12, len(results), sum([x['items_processed'] for x in results])\n )\n )\n \n final_result = sum([x['result'] for x in results])\n\n l.info(\n u'#{} Final result: {}.'.format(\n u'-' * 12, final_result\n )\n )\n\n return final_result", "def _checker_worker(self):\n results = {}\n for cmd in self.check_cmds:\n res = subprocess.call(cmd.split(), stdout=open('/dev/null', 'w'))\n self.log(\"'%s' finished, result: %s\" % (cmd, res))\n results[cmd] = res\n if rospy.is_shutdown():\n return\n with self._lock:\n # just add results into the data structure\n self._results.add(results)", "def check():\n \n overall_report = dict()\n\n # source code analysis\n # ====================\n # currently empty\n \n # compile\n # =======\n ret_makefile = subprocess.run([config.compiler] + config.compiler_args, # command\n stdout=subprocess.PIPE, # capture stdout\n stderr=subprocess.PIPE, # capture stderr\n universal_newlines=True) # use text mode for std* file objects\n overall_report['makefile'] = ret_makefile\n \n # runtime analysis\n # ================\n with open('compile.txt', 'r') as f:\n if 'error' not in f.read().lower(): # if compilation succeeded\n overall_report, test_case_report_list = runtime_analysis(config, overall_report)\n \n # pass this info to next tools for subsequent processing\n # ======================================================\n pp(overall_report)\n # 
results from runtime analysis\n if 'runtime_analysis_done' in overall_report:\n success_count = 0\n for report in test_case_report_list:\n if 'timeout' in report:\n util.addFinding(\"Time limit exceeded!\", 0, \"\", \"TEST_080006\")\n elif report['return_code'] != 0:\n if report['stderr_stream'] != '': # ASan/LeakSan/Stack protector probably reported something\n pass # but these findings will be added by analyze.py\n else:\n util.addFinding(\"It seems your program might have crashed.\", 0,\"\",\"TEST_100006\")\n # output_match == None means the user might have tried to print to outfile\n elif report['stdout_stream'] != '' or report['output_match'] is None:\n util.addFinding(\"A test case failed! Make sure you are not trying to print something.\",\n 0,\"\",\"TEST_100006\")\n elif not all(report['output_match']): # not all test cases passed\n util.addFinding(\"A test case failed!\", 0, \"\", \"TEST_100006\")\n else:\n success_count += 1\n\n with open('stderr.txt', 'a') as f:\n f.write(report['stderr_stream'])\n with open('stdout.txt', 'a') as f:\n f.write(report['outfile'])\n\n if success_count == len(test_case_report_list):\n util.addFinding(\"Program behaves as expected!\", 1, \"CHALLENGE_PASS\", \"TEST_900006\")\n \n util.dumpFindings()\n \n # next tools\n subprocess.run([\"./analyse.py\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n subprocess.run([\"./ai.py\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0", "def all_results(self):\n res = [(True, result) for result in self.successes]\n res.extend([(False, result) for result in self.failures])\n return res", "def __parse_document(self, results):\n fullname = self.__extract_fullname(results[0])\n if not results[1].startswith(\"-\"):\n raise ValueError(\"Invalid second line of output: '%s'. 
\"\\\n \"Expected a title underline.\"\n % text[1])\n results = results[2:] # trim off top two lines of header information\n maintests, cleanup = self.__split_on_cleanup(results)\n overall_success = not (maintests[0] == FAILURE_MARKER)\n\n if overall_success:\n testcases = self.__parse_success(fullname, maintests)\n else:\n testcases = self.__parse_failures(fullname, maintests)\n\n return testcases", "def results(self) -> ResultProcessor:\n if self.isAnalysisCompleted():\n return ResultProcessor('input')\n else:\n raise ValueError('Results were not available')", "def collect_results(self) -> None:\n ready = multiprocessing.connection.wait(\n self.waitables.keys() - [self._direct_scheduler_conn], timeout=0\n )\n\n for sentinel in ready:\n if sentinel is self._direct_scheduler_conn:\n continue\n processor = cast(DagFileProcessorProcess, self.waitables[sentinel])\n self.waitables.pop(processor.waitable_handle)\n self._processors.pop(processor.file_path)\n self._collect_results_from_processor(processor)\n\n self.log.debug(\"%s/%s DAG parsing processes running\", len(self._processors), self._parallelism)\n\n self.log.debug(\"%s file paths queued for processing\", len(self._file_path_queue))", "def runtime_analysis(config, overall_report):\n test_case_report_list = []\n \n for test_suite in config.get_test_suite():\n report = dict()\n report['stdout_stream'] = ''\n report['stderr_stream'] = ''\n report['outfile'] = ''\n\n input_for_stdin = config.get_test_suite_input_for_stdin(test_suite)\n # using Popen instead of run because I need access to the pid\n # See comment under \"except subprocess.TimeoutExpired:\"\n infile = \"xinfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n outfile = \"xoutfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n p = subprocess.Popen(['./run_jail.sh',\n config.output_filename,\n str(len(test_suite)), infile, outfile], # command\n stdout=subprocess.PIPE, # capture stdout\n stderr=subprocess.PIPE, # capture stderr\n stdin=subprocess.PIPE, # capture stdin\n universal_newlines=True, # use text mode for std* file objects\n start_new_session=True, # otherwise killing the process group will also kill the Python interpreter\n )\n\n try:\n # send test suite input\n with open(infile, \"w\") as f:\n f.write(input_for_stdin)\n (stdout_stream, stderr_stream) = p.communicate(timeout=config.timeout)\n \n report['return_code'] = p.returncode\n report['stderr_stream'] += stderr_stream\n report['stdout_stream'] += stdout_stream\n with open(outfile, \"r\") as f:\n current_outfile = f.read()\n report['outfile'] += current_outfile\n \n # check if test cases passed\n ret_output_match = config.check_for_output_match(current_outfile, test_suite)\n report['test_suite'] = test_suite\n report['output_match'] = ret_output_match\n \n except subprocess.TimeoutExpired:\n # kill the process group so that all child processes spawned by the process are also killed\n # The child need to be killed because, in addition to wasting CPU cycles,\n # it can hold stdout and then Python will wait indefinitely even if the timeout is expired\n os.killpg(os.getpgid(p.pid), signal.SIGKILL) \n report['timeout'] = True\n finally:\n test_case_report_list.append(report)\n \n overall_report['runtime_analysis_done'] = True\n\n return overall_report, test_case_report_list", "def process_results(_load_manifest, _stma_report):\n _junit_results = []\n # the first \"test\" is that the two lists should have the same number of items\n if len(_load_manifest) == len(_stma_report):\n print(\"result count test: pass\")\n else:\n 
print(\"result count test: fail\")\n\n # for the rest, each item in the load manifest equates to a test\n for _load_item in _load_manifest:\n _pass = True\n # get its associated entry from the _stma_report\n _stma_item = find_first_match(_stma_report, _load_item)\n if _stma_item is None:\n _pass = False\n print(\"test \" + str(_load_item) + \": fail due to missing stma result\")\n else:\n # verify details reported by stma\n _pass = equivalent_dicts(_load_item, _stma_item) and\\\n equivalent_dicts(_stma_item, _load_item)\n if not _pass:\n print(\"test \" + str(_load_item) + \": fail due to mismatching result\")\n print(\"test \" + str(_load_item) + \": \" + str(_pass))\n\n return _junit_results", "def load_results(self):\n self.find_benchmark_directories()\n for (benchmark, producer), result in self.results.items():\n print('Reading results for ' + benchmark + ' ' + producer)\n if not result.directory:\n print('No results found for ' + benchmark + ' ' + producer)\n else:\n print('Generating report for: ' + result.directory)\n report = Report(result.directory)\n result.reports = report.generate()", "def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! 
stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures", "def basic_process_run_results_f(f):\r\n infiles_lists, out_filepaths = parse_tmp_to_final_filepath_map_file(f)\r\n for infiles_list, out_filepath in zip(infiles_lists, out_filepaths):\r\n try:\r\n of = open(out_filepath, 'w')\r\n except IOError:\r\n raise IOError(\"Poller can't open final output file: %s\" % out_filepath +\r\n \"\\nLeaving individual jobs output.\\n Do you have write access?\")\r\n\r\n for fp in infiles_list:\r\n for line in open(fp):\r\n of.write('%s\\n' % line.strip('\\n'))\r\n of.close()\r\n # It is a good idea to have your clean_up_callback return True.\r\n # That way, if you get mixed up and pass it as check_run_complete_callback,\r\n # you'll get an error right away rather than going into an infinite loop\r\n return True" ]
[ "0.6443427", "0.6443427", "0.6302049", "0.6148188", "0.6071832", "0.6046133", "0.5980672", "0.59162134", "0.5890119", "0.58879757", "0.58761954", "0.5875161", "0.5846488", "0.5807116", "0.57725614", "0.57704043", "0.5761875", "0.57333165", "0.57006025", "0.5662879", "0.5659969", "0.5654569", "0.56464005", "0.563388", "0.5619787", "0.56147873", "0.5603549", "0.56005114", "0.55833715", "0.55818135" ]
0.7599388
0
Exports experiments based on their status.
def export_experiments(self, included_statuses=None): experiment_list = [] if included_statuses is not None: for run in self.discover_all_runs(): status = run.get_status() if status in included_statuses: experiment_list.append(( run.experiment.name, tuple(variant.name for variant in run.experiment.variation), run.instance.shortname, str(status) )) self.writeback_status_cache() return experiment_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_comparisons(self):\n print(\"Exporting comparisons:\")\n\n return", "def run(self,\n example_input : Union[str,Path,None] = None) -> EasyDict :\n outputs = []\n ok = True\n for export_config in self.export_configs :\n exporter = create_exporter(\n config=export_config,\n experiment_name=self.experiment_name,\n image_size=self.image_size,\n output_directory=(self.experiment_directory),\n )\n ok = exporter(\n predictor=self.predictor,\n class_names=self.class_names,\n example_image_path=example_input\n ) and ok\n outputs.append(str(exporter.filename))\n print('model is exported to:', ', '.join(outputs))\n # TODO specify which export is failed\n result = EasyDict({'export_status' : ok})\n return result", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def export(ctx):\n LOG.info(\"Running scout export\")", "def export_everything(self):\n orderby = self.orderby.get()\n currentregion = self.region.get()\n previoustext = self.tabs.window.statuslabel['text']\n res = tkinter.messagebox.askyesno(\n 'Export Everything',\n 'Exporting data on all AIS stations, this may take some time.')\n if res:\n outpath = tkinter.filedialog.askdirectory()\n if outpath:\n self.tabs.window.statuslabel.config(\n text='Exporting all AIS station data to - {}'.format(\n outpath),\n fg='black', bg='gold')\n self.update_idletasks()\n export.export_overview(\n self.tabs.window.aistracker,\n self.tabs.window.nmeatracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n export.export_everything(\n self.tabs.window.aistracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n self.tabs.window.statuslabel.config(\n text=previoustext, bg='light grey')\n else:\n raise ExportAborted(\n 'Export of all AIS data cancelled by user.')\n else:\n raise ExportAborted('Export of all AIS data cancelled by user.')", "def export_caliper(args):\n if args.type == 'normalise':\n clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)\n elif args.type == 'dilute':\n clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file)", "def statuses(ctx, job, page):\n\n def get_experiment_statuses():\n try:\n response = PolyaxonClient().experiment.get_statuses(\n user, project_name, _experiment, page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could get status for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Statuses for experiment `{}`.'.format(_experiment))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment))\n\n objects = list_dicts_to_tabulate(\n [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')\n for o in response['results']])\n if objects:\n Printer.print_header(\"Statuses:\")\n objects.pop('experiment', None)\n dict_tabulate(objects, is_list_dict=True)\n\n def get_experiment_job_statuses():\n try:\n response = PolyaxonClient().experiment_job.get_statuses(user,\n project_name,\n 
_experiment,\n _job,\n page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get status for job `{}`.'.format(job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Statuses for Job `{}`.'.format(_job))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No statuses found for job `{}`.'.format(_job))\n\n objects = list_dicts_to_tabulate(\n [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')\n for o in response['results']])\n if objects:\n Printer.print_header(\"Statuses:\")\n objects.pop('job', None)\n dict_tabulate(objects, is_list_dict=True)\n\n page = page or 1\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_statuses()\n else:\n get_experiment_statuses()", "def list_experiments(self):\n subfolders = self.um.list_subfolders(\"data/*/\")\n experiment_folders = self.um.list_experiments(subfolders)\n experiments = list()\n for exp in experiment_folders:\n try:\n date = self.um.timestamp_to_date(int(exp) / 1000)\n exp_class = experiment.experiment(new_experiment=False, ts=exp)\n\n if \"label\" in exp_class.metadata:\n label = exp_class.metadata[\"label\"]\n else:\n label = None\n\n exp_dict = {\"date\": date,\n \"ts\": exp,\n \"label\": label\n }\n experiments.append(exp_dict)\n except:\n print \"Skipped\"\n\n return render_template('experiments.html', user=experiments)", "def main():\n parser = get_parser()\n options = get_options(parser)\n\n # Set up the logger.\n logger = logging.getLogger(consts.MAIN)\n logger.setLevel(logging.DEBUG if options[consts.DEBUG] else logging.INFO)\n file_handler = logging.FileHandler(os.path.join(options[consts.EXPORT_DIR], 'log.txt'), mode='w')\n logger.addHandler(file_handler)\n console_handler = logging.StreamHandler()\n logger.addHandler(console_handler)\n\n # Log the options given through the command-line arguments.\n logger.info('options: {}'.format(str(options)))\n\n experiment_id = 0\n status_path = os.path.join(options[consts.EXPORT_DIR], \"status.pickle\")\n # Check if the execution is a new one or a resumption of a previous experiment.\n if not options[consts.CONTINUE]:\n # Set up a new execution.\n options_path = os.path.join(options[consts.EXPORT_DIR], 'options.pickle')\n with open(options_path, 'wb') as file:\n pickle.dump(options, file)\n best_experiment_test_score = -float('inf')\n best_experiment_id = -1\n best_epoch_num = -1\n best_config = None\n status = 'working'\n with open(status_path, 'wb') as file:\n pickle.dump([best_experiment_test_score, best_experiment_id, best_config, status], file)\n with open(os.path.join(options[consts.EXPORT_DIR], 'id'), 'w') as file:\n file.write(experiments.experiment.execution_identifier)\n else:\n # Load the old execution from the export directory.\n epoch_stamp_path = os.path.join(options[consts.EXPORT_DIR], \"epoch_stamp.pickle\")\n with open(epoch_stamp_path, 'rb') as file:\n dictionary = pickle.load(file)\n with open(status_path, 'rb') as file:\n best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status = pickle.load(file)\n with open(os.path.join(options[consts.EXPORT_DIR], 'id'), 'r') as file:\n experiments.experiment.execution_identifier = file.read()\n\n # Check if the execution is 
still in progress. This check should fail when an ended execution is resumed.\n if status == 'working':\n # Iterate through the different configurations of hyperparameters ad create an experiment for each.\n for config in iterate_configs(parser, options):\n # If this a resumed execution, check if this experiment has already had finished.\n if options[consts.CONTINUE] and experiment_id < dictionary[consts.EXPERIMENT_ID]:\n experiment_id += 1\n continue\n # If this a resumed execution and this is the experiment that was running when the last checkpoint was\n # created.\n elif options[consts.CONTINUE] and experiment_id == dictionary[consts.EXPERIMENT_ID]:\n # Log the configurations of the present experiment.\n logger.info('continuing on config: {}'.format(str(config)))\n checkpoint_dir = os.path.join(config[consts.EXPORT_DIR],\n \"checkpoints\",\n \"experiment_%09d\" % experiment_id,\n \"epoch_%09d\" % dictionary[consts.EPOCH_NUMBER])\n # Create an experiment for the configuration at hand.\n experiment = Experiment(config=config, experiment_id=experiment_id,\n load_from_directory=checkpoint_dir)\n # If this is a new experiment.\n else:\n logger.info('starting on config: {}'.format(str(config)))\n # Create an experiment for the configuration at hand.\n experiment = Experiment(config=config, experiment_id=experiment_id)\n\n # Run the present experiment.\n experiment_test_score = experiment.run()\n\n # Record the results of the experiment and compare them to the results so far.\n logger.info('Experiment {} test score: {}'.format(experiment_id, experiment_test_score))\n if experiment_test_score > best_experiment_test_score:\n best_experiment_test_score = experiment_test_score\n best_experiment_id = experiment_id\n best_epoch_num = experiment.best_epoch_number\n best_config = config\n\n # Store the best results so far in a file.\n with open(status_path, 'wb') as file:\n pickle.dump([best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status],\n file)\n experiment_id += 1\n\n # Mark the execution as over.\n status = 'ended'\n\n # Store the best results in a file.\n with open(status_path, 'wb') as file:\n pickle.dump([best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status], file)\n \n # Report the best results.\n logger.info('Execution is over. 
Best experiment test score: {}'\n '\\nBest experiment config: {}'.format(best_experiment_test_score, str(best_config)))", "def export_getInstancesByStatus( self, status ):\n return gVirtualMachineDB.getInstancesByStatus( status )", "def all_experiments():\n elo_explain_experiments()\n alpha_beta_experiments()\n mtcs_experiments()", "def experimentExport(request, experiment_id):\n json_data = ExperimentAdmin.exportToJSON(experiment_id)\n response = HttpResponse(json.dumps(json_data), content_type='application/json')\n response['Content-Disposition'] = 'attachment; filename=\\\"' + Experiment.objects.get(id=experiment_id).exp_name + '.json\\\"'\n return response", "def export(exp_data: ExportData) -> None:\n pass", "def _delete_experiments(self):\n response = self.tsp_client.fetch_experiments()\n for experiment in response.model.experiments:\n self.tsp_client.delete_experiment(experiment.UUID)\n assert response.status_code == 200", "def export_files(self):\n if self.tabs.window.serverrunning:\n tkinter.messagebox.showwarning(\n 'WARNING', 'Cannot export files whilst server is running')\n elif self.tabs.window.aistracker.messagesprocessed == 0:\n tkinter.messagebox.showwarning(\n 'WARNING', 'Nothing to export.')\n else:\n commands = {'OVERVIEW': self.export_overview,\n 'EVERYTHING': self.export_everything,\n 'CSV': self.export_csv,\n 'TSV': self.export_tsv,\n 'KML': self.export_kml,\n 'KMZ': self.export_kmz,\n 'JSON': self.export_json,\n 'VERBOSE JSON': self.export_verbose_json,\n 'GEOJSON': self.export_geojson,\n 'AIS MESSAGES (DEBUG)': self.export_debug}\n option = self.exportoptions.get()\n try:\n commands[option]()\n tkinter.messagebox.showinfo(\n 'Export Files', 'Export Successful')\n except Exception as err:\n AISLOGGER.exception('export error')\n tkinter.messagebox.showerror(type(err).__name__, str(err))", "def experiments(ctx, metrics, params, query, sort, page):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n page = page or 1\n try:\n response = PolyaxonClient().experiment_group.list_experiments(username=user,\n project_name=project_name,\n group_id=_group,\n metrics=metrics,\n params=params,\n query=query,\n sort=sort,\n page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get experiments for group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Experiments for experiment group `{}`.'.format(_group))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No experiments found for experiment group `{}`.'.format(_group))\n\n if metrics:\n objects = get_experiments_with_metrics(response)\n elif params:\n objects = get_experiments_with_params(response)\n else:\n objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))\n for o in response['results']]\n objects = list_dicts_to_tabulate(objects)\n if objects:\n Printer.print_header(\"Experiments:\")\n objects.pop('experiment_group', None)\n objects.pop('experiment_group_name', None)\n objects.pop('project_name', None)\n dict_tabulate(objects, is_list_dict=True)", "def test_export_dashboards_versioned_export(app_context, fs):\n # pylint: disable=reimported, redefined-outer-name\n import superset.cli.importexport # noqa: F811\n\n # reload to define export_dashboards correctly based on the\n # feature flags\n 
importlib.reload(superset.cli.importexport)\n\n runner = app.test_cli_runner()\n with freeze_time(\"2021-01-01T00:00:00Z\"):\n response = runner.invoke(superset.cli.importexport.export_dashboards, ())\n\n assert response.exit_code == 0\n assert Path(\"dashboard_export_20210101T000000.zip\").exists()\n\n assert is_zipfile(\"dashboard_export_20210101T000000.zip\")", "def start_continuous_export():\n pass", "def export_assets(self, asset_dir):\n return self.examples_inputter.export_assets(asset_dir)", "def exports():", "def export_applications(self):\n print('\\n=== Exporting all application data...')\n\n for application in self.client.applications:\n print('- Exporting application:', application.name)\n\n json = {\n 'id': self.get_id(application),\n 'href': application.href,\n 'name': application.name,\n 'description': application.description,\n 'status': application.status,\n 'createdAt': application.created_at.isoformat(),\n 'modifiedAt': application.modified_at.isoformat(),\n 'customData': self.get_custom_data(application),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n #'verificationEmails': [],\n }\n\n default_account_store_mapping = application.default_account_store_mapping\n default_group_store_mapping = application.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': application.default_account_store_mapping.href.split('/')[-1],\n 'href': application.default_account_store_mapping.href,\n 'type': application.default_account_store_mapping.account_store.__class__.__name__,\n 'name': application.default_account_store_mapping.account_store.name,\n 'list_index': application.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': application.default_group_store_mapping.href.split('/')[-1],\n 'href': application.default_group_store_mapping.href,\n 'type': application.default_group_store_mapping.account_store.__class__.__name__,\n 'name': application.default_group_store_mapping.account_store.name,\n 'list_index': application.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in application.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(application.tenant)\n self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def experiment(task, eid, event_type, output, metric, sort, output_fields):\n event_type = EVENT_TYPES[event_type]\n ServerManager.get()\n try:\n result = ServerManager.api.experiment_details(task, eid, event_type=event_type, metric=metric)\n prop_name_loc = {k: i for i, k in enumerate(output_fields)}\n result_df = experiment_to_df(exp=result, prop_name_loc=prop_name_loc, event_type=event_type, sort=sort)\n if 
output is None:\n click.echo(result_df)\n else:\n result_df.to_csv(output)\n except ApiException as e:\n click.echo(click.style(json.loads(e.body)['detail'], fg='red'))", "def export_workflow(args):\n if args.type == 'magnis':\n clarity_epp.export.workflow.helix_magnis(lims, args.process_id, args.output_file)\n elif args.type == 'mip':\n clarity_epp.export.workflow.helix_mip(lims, args.process_id, args.output_file)", "def describe_continuous_exports(exportIds=None, maxResults=None, nextToken=None):\n pass", "def logreg_experiments_to_deploy():\n\n # A dataframe with all the results (parameters, performance, stored models...)\n df = logreg_results_to_pandas()\n\n # Choose a few good results (maybe apply diversity filters or ensemble selection or...)\n # These decisions where informed by some plotting (see tutorial)\n # (e.g., keep number maneageble, keep most regularized amongst these with higher performance...)\n deployment_cond_1 = df.query('cv_seed < 5 and '\n 'num_present_folds == num_cv_folds and '\n 'penalty == \"l1\" and '\n 'C == 1 and '\n 'class_weight == \"auto\" and '\n 'tol == 1E-4 and '\n 'folder_size < 1 and '\n 'folder_seed == -1 and '\n 'auc_mean > 0.92')\n\n deployment_cond_2 = df.query('num_present_folds == num_cv_folds and '\n 'penalty == \"l2\" and '\n 'C == 5 and '\n 'class_weight == \"auto\" and '\n 'tol == 1E-4 and '\n 'folder_size < 1 and '\n 'folder_seed == -1 and '\n 'auc_mean > 0.93')\n\n deployers = pd.concat([deployment_cond_1, deployment_cond_2]).reset_index()\n\n return deployers", "def export_data(self):\n return self.export_all_data()", "def main(ini_path=None, overwrite_flag=False,\n tile_cols='', tile_rows='', delay=0):\n logging.info('\\nExport annual ET/ETrF/ETr/count image tiles')\n\n # Read config file\n ini = inputs.read(ini_path)\n inputs.parse_section(ini, section='INPUTS')\n inputs.parse_section(ini, section='INTERPOLATE')\n inputs.parse_section(ini, section='EXPORT')\n inputs.parse_section(ini, section=ini['INPUTS']['et_model'])\n\n if os.name == 'posix':\n shell_flag = False\n else:\n shell_flag = True\n\n # Limit tile ranges from command line\n # Eventually move to config file?\n try:\n tile_cols_list = list(utils.parse_int_set(tile_cols))\n except:\n tile_cols_list = []\n try:\n tile_rows_list = list(utils.parse_int_set(tile_rows))\n except:\n tile_rows_list = []\n\n logging.debug('\\nInitializing Earth Engine')\n ee.Initialize()\n\n # Get current running tasks\n tasks = utils.get_ee_tasks()\n\n # Get list of existing images/files\n if ini['EXPORT']['export_dest'] == 'ASSET':\n logging.debug('\\nGetting GEE asset list')\n asset_list = utils.get_ee_assets(\n ini['EXPORT']['output_ws'], shell_flag=shell_flag)\n logging.debug(asset_list)\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # logging.debug('\\nGetting cloud storage file list')\n # cloud_list = utils.get_bucket_files(\n # ini['EXPORT']['project_name'], ini['EXPORT']['output_ws'],\n # shell_flag=shell_flag)\n # # It may be necessary to remove image tile notation\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # logging.debug('\\nGetting Google drive file list')\n # gdrive_list = [\n # os.path.join(ini['EXPORT']['output_ws'], x)\n # for x in os.listdir(ini['EXPORT']['output_ws'])]\n # # It may be necessary to remove image tile notation\n # # Very large tiles may get split up automatically by EE\n # # Strip the EE tile notation data from the image list\n # # gdrive_list = list(set([\n # # re.sub('-\\d{10}-\\d{10}.tif', '.tif', x)\n # # for x in 
os.listdir(ini['EXPORT']['output_ws'])]))\n # # logging.debug(gdrive_list)\n\n # Get list of tiles that intersect the study area\n logging.debug('\\nBuilding export list')\n export_list = list(ard_tile_export_generator(\n ini['INPUTS']['study_area_path'],\n wrs2_coll=ini['INPUTS']['wrs2_coll'],\n cell_size=ini['EXPORT']['cell_size'],\n wrs2_tile_list=ini['INPUTS']['wrs2_tiles'],\n wrs2_tile_field=ini['INPUTS']['wrs2_tile_field'],\n wrs2_buffer=ini['INPUTS']['wrs2_buffer']))\n if not export_list:\n logging.error('\\nEmpty export list, exiting')\n return False\n\n # Save export list to json\n with open('export_tiles.json', 'w') as json_f:\n json.dump(export_list, json_f)\n\n\n # Process each tile separately\n logging.info('\\nImage Exports')\n for export_n, export_info in enumerate(export_list):\n tile_col = int(export_info['index'][1:4])\n tile_row = int(export_info['index'][5:8])\n if tile_cols_list and int(tile_col) not in tile_cols_list:\n logging.debug('ARD Tile: {} ({}/{}), skipping'.format(\n export_info['index'], export_n + 1, len(export_list)))\n continue\n elif tile_rows_list and int(tile_row) not in tile_rows_list:\n logging.debug('ARD Tile: {} ({}/{}), skipping'.format(\n export_info['index'], export_n + 1, len(export_list)))\n continue\n else:\n logging.info('ARD Tile: {} ({}/{})'.format(\n export_info['index'], export_n + 1, len(export_list)))\n\n logging.debug(' Shape: {}'.format(export_info['shape']))\n logging.debug(' Transform: {}'.format(export_info['geo']))\n logging.debug(' Extent: {}'.format(export_info['extent']))\n logging.debug(' MaxPixels: {}'.format(export_info['maxpixels']))\n logging.debug(' WRS2 tiles: {}'.format(\n ', '.join(export_info['wrs2_tiles'])))\n\n\n if ini['INPUTS']['et_model'] == 'EEFLUX':\n # Get the Landsat collection\n landsat_coll = landsat.get_landsat_coll(\n wrs2_tile_list=export_info['wrs2_tiles'],\n cloud_cover=ini['INPUTS']['cloud_cover'],\n start_date=ini['INTERPOLATE']['start_date'],\n end_date=ini['INTERPOLATE']['end_date'],\n landsat5_flag=ini['INPUTS']['landsat5_flag'],\n landsat7_flag=ini['INPUTS']['landsat7_flag'],\n landsat8_flag=ini['INPUTS']['landsat8_flag'],\n landsat_type='RAD')\n\n # Compute ETf for each Landsat scene\n # The 'BQA' band is also being returned by the etrf method\n def apply_et_fraction(image):\n etrf_obj = eeflux.EEFlux(ee.Image(image)).etrf\n etrf_img = ee.Image(etrf_obj.select(['etrf'], ['etf'])) \\\n .clamp(-1, 2)\n cloud_mask = landsat.landsat_bqa_cloud_mask_func(\n ee.Image(etrf_obj. 
select(['BQA'])))\n return etrf_img.updateMask(cloud_mask) \\\n .copyProperties(image, ['system:time_start'])\n scene_et_fraction_coll = ee.ImageCollection(\n landsat_coll.map(apply_et_fraction))\n\n else:\n logging.error('\\nInvalid/unsupported ET Model: {}'.format(\n ini['INPUTS']['et_model']))\n return False\n\n\n # Daily reference ET collection\n # Is the \"refet_source\" a function of the model, interpolation, or other?\n # The \"refet_type\" parameter is currently being ignored\n if ini[ini['INPUTS']['et_model']]['refet_source'] == 'GRIDMET':\n daily_et_reference_coll = ee.ImageCollection('IDAHO_EPSCOR/GRIDMET') \\\n .filterDate(ini['INPUTS']['start_date'], ini['INPUTS']['end_date']) \\\n .select(['etr'], ['et_reference'])\n elif ini[ini['INPUTS']['et_model']]['refet_source'] == 'CIMIS':\n daily_et_reference_coll = ee.ImageCollection('projects/climate-engine/cimis/daily') \\\n .filterDate(ini['INPUTS']['start_date'],\n ini['INPUTS']['end_date']) \\\n .select(['etr_asce'], ['et_reference'])\n\n # Compute composite/mosaic images for each image date\n daily_et_fraction_coll = ee.ImageCollection(interpolate.aggregate_daily(\n image_coll=scene_et_fraction_coll,\n start_date=ini['INTERPOLATE']['start_date'],\n end_date=ini['INTERPOLATE']['end_date']))\n\n # Interpolate daily ETf, multiply by daily ETr, and sum to ET\n daily_et_actual_coll = ee.ImageCollection(interpolate.interp_et_coll(\n et_reference_coll=daily_et_reference_coll,\n et_fraction_coll=daily_et_fraction_coll,\n interp_days=ini['INTERPOLATE']['interp_days'],\n interp_type=ini['INTERPOLATE']['interp_type']))\n\n # Export products\n # for product in ini['EXPORT']['products']:\n\n # logging.debug('\\n Product: {}'.format(product))\n export_id = ini['EXPORT']['export_id_fmt'].format(\n model=ini['INPUTS']['et_model'].lower(),\n # product=product.lower(),\n study_area=ini['INPUTS']['study_area_name'],\n index=export_info['index'],\n start=ini['INPUTS']['start_date'],\n end=ini['INPUTS']['end_date'],\n export=ini['EXPORT']['export_dest'].lower())\n export_id = export_id.replace('-', '')\n logging.debug(' Export ID: {}'.format(export_id))\n\n # if product == 'scene_id':\n # # Export the scene list CSV to Google Drive\n # if ini['EXPORT']['export_dest'] == 'GDRIVE':\n # export_path = os.path.join(\n # ini['EXPORT']['output_ws'], export_id + '.csv')\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # export_path = '{}/{}/{}'.format(\n # ini['EXPORT']['output_ws'], product, export_id + '.csv')\n # if ini['EXPORT']['export_dest'] == 'CLOUD':\n # # Write each product to a separate folder\n # export_path = '{}/{}/{}'.format(\n # ini['EXPORT']['output_ws'], product, export_id + '.tif')\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # export_path = os.path.join(\n # ini['EXPORT']['output_ws'], export_id + '.tif')\n if ini['EXPORT']['export_dest'] == 'ASSET':\n # Write each product to a separate folder\n export_path = '{}/{}'.format(\n ini['EXPORT']['output_ws'], export_id)\n else:\n logging.warning(' Unsupported product type, skipping')\n continue\n logging.debug(' Export folder: {}'.format(\n os.path.dirname(export_path)))\n logging.debug(' Export file: {}'.format(\n os.path.basename(export_path)))\n\n if overwrite_flag:\n if export_id in tasks.keys():\n logging.debug(' Task already submitted, cancelling')\n ee.data.cancelTask(tasks[export_id])\n\n # This is intentionally not an \"elif\" so that a task can be\n # cancelled and an existing image/file/asset can be removed\n if (ini['EXPORT']['export_dest'] == 'ASSET' and\n export_path 
in asset_list):\n logging.debug(' Asset already exists')\n subprocess.check_output(\n ['earthengine', 'rm', export_path],\n shell=shell_flag)\n # Files in cloud storage are easily overwritten\n # so it is unneccesary to manually remove them\n # # This would remove an existing file\n # subprocess.call(['gsutil', 'rm', export_path])\n # if (ini['EXPORT']['export_dest'] == 'CLOUD' and\n # export_path in cloud_list):\n # logging.debug(' Export image already exists')\n # # Files in cloud storage are easily overwritten\n # # so it is unneccesary to manually remove them\n # # # This would remove an existing file\n # # subprocess.check_output(['gsutil', 'rm', export_path])\n # elif (ini['EXPORT']['export_dest'] == 'GDRIVE' and\n # export_path in gdrive_list):\n # logging.debug(' Export image already exists, removing')\n # os.remove(export_path)\n # # Remove automatically generated image tiles\n # # for f in glob.glob(export_path.replace('.tif', '*.tif')):\n # # os.remove(f)\n else:\n if export_id in tasks.keys():\n logging.debug(' Task already submitted, skipping')\n continue\n if (ini['EXPORT']['export_dest'] == 'ASSET' and\n export_path in asset_list):\n logging.debug(' Asset already exists, skipping')\n continue\n # elif (ini['EXPORT']['export_dest'] == 'CLOUD' and\n # export_path in cloud_list):\n # logging.debug(' Export file already exists, skipping')\n # continue\n # elif (ini['EXPORT']['export_dest'] == 'GDRIVE' and\n # os.path.isfile(export_path)):\n # logging.debug(' Export file already exists, skipping')\n # continue\n\n # Compute target product\n # if product == 'scene_id':\n # def scene_id_extract(image):\n # return ee.Feature(None).setMulti({\n # 'SCENE_ID': ee.String(image.get('SCENE_ID'))})\n # scene_id_coll = ee.FeatureCollection(\n # scene_et_fraction_coll.map(scene_id_extract)).sort('SCENE_ID')\n\n output_images = []\n for product_i, product in enumerate(ini['EXPORT']['products']):\n logging.debug(' Product: {}'.format(product))\n if product == 'et_actual':\n # Sum daily ET to total ET\n output_images.append(\n ee.Image(daily_et_actual_coll.sum()).toFloat())\n elif product == 'et_reference':\n # Sum daily reference ET to total reference ET\n output_images.append(\n ee.Image(daily_et_reference_coll.sum()).toFloat())\n elif product == 'et_fraction':\n # Compute mean ETf (ET / ETr)\n output_images.append(\n ee.Image(daily_et_actual_coll.sum()) \\\n .divide(ee.Image(daily_et_reference_coll.sum())).toFloat())\n elif product == 'count':\n # Filter count date range to same period as reference ET\n output_images.append(ee.Image(\n daily_et_fraction_coll.filterDate(\n ini['INPUTS']['start_dt'],\n ini['INPUTS']['end_dt'] + datetime.timedelta(days=1)).count())\\\n .toUint8())\n\n # DEADEEF - Consider saving other input parameters\n # CLOUD_COVER_LAND, number of interpolation days, ?\n output_image = ee.Image(ee.Image(output_images) \\\n .rename(ini['EXPORT']['products']) \\\n .setMulti({\n 'system:time_start': ini['INPUTS']['start_date'],\n 'index': export_info['index']}))\n # print(output_image.get('system:time_start').getInfo())\n # input('ENTER')\n\n # Build export tasks\n # if product == 'scene_id':\n # if ini['EXPORT']['export_dest'] == 'CLOUD':\n # task = ee.batch.Export.table.toCloudStorage(\n # scene_id_coll,\n # description=export_id,\n # bucket=ini['EXPORT']['bucket_name'],\n # fileNamePrefix='{}/{}/{}'.format(\n # ini['EXPORT']['bucket_folder'], product, export_id),\n # fileFormat='CSV')\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # # Export the scene list CSV to Google 
Drive\n # task = ee.batch.Export.table.toDrive(\n # scene_id_coll,\n # description=export_id,\n # folder=os.path.basename(ini['EXPORT']['output_ws']),\n # fileNamePrefix=export_id,\n # fileFormat='CSV')\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # # Export the image to cloud storage\n # task = ee.batch.Export.image.toCloudStorage(\n # output_image,\n # description=export_id,\n # bucket=ini['EXPORT']['bucket_name'],\n # fileNamePrefix='{}/{}/{}'.format(\n # ini['EXPORT']['bucket_folder'], product, export_id),\n # dimensions=export_info['shape'],\n # crs=export_info['crs'],\n # crsTransform=export_info['geo'],\n # # shardSize=,\n # # fileDimensions=,\n # maxPixels=export_info['maxpixels'])\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # # Export the images to your Google Drive\n # task = ee.batch.Export.image.toDrive(\n # output_image,\n # description=export_id,\n # folder=os.path.basename(ini['EXPORT']['output_ws']),\n # fileNamePrefix=export_id,\n # dimensions=export_info['shape'],\n # crs=export_info['crs'],\n # crsTransform=export_info['geo'],\n # maxPixels=export_info['maxpixels'])\n if ini['EXPORT']['export_dest'] == 'ASSET':\n # Export the image to cloud storage\n task = ee.batch.Export.image.toAsset(\n output_image,\n description=export_id,\n assetId='{}/{}'.format(ini['EXPORT']['output_ws'], export_id),\n # pyramidingPolicy='mean',\n dimensions=export_info['shape'],\n crs=export_info['crs'],\n crsTransform=export_info['geo'],\n maxPixels=export_info['maxpixels'])\n else:\n logging.debug(' Export task not built, skipping')\n # continue\n\n # Try to start the export task a few times\n logging.debug(' Starting export task')\n for i in range(1, 10):\n try:\n task.start()\n break\n except Exception as e:\n logging.error(\n ' Error: {}\\n Retrying ({}/10)'.format(e, i))\n time.sleep(i ** 2)\n i += 1\n # logging.debug(' Active: {}'.format(task.active()))\n # logging.debug(' Status: {}'.format(task.status()))\n\n if delay and delay > 0:\n time.sleep(delay)\n elif delay and delay == -1:\n input('ENTER')", "def export_to(short_name):\r\n (app, owner, n_tasks, n_task_runs,\r\n overall_progress, last_activity) = app_by_shortname(short_name)\r\n title = app_title(app, gettext(\"Export\"))\r\n loading_text = gettext(\"Exporting data..., this may take a while\")\r\n\r\n try:\r\n require.app.read(app)\r\n except HTTPException:\r\n if app.hidden:\r\n raise abort(403)\r\n else: # pragma: no cover\r\n raise\r\n\r\n def respond():\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n app=app,\r\n owner=owner)\r\n\r\n def gen_json(table):\r\n n = db.session.query(table)\\\r\n .filter_by(app_id=app.id).count()\r\n sep = \", \"\r\n yield \"[\"\r\n for i, tr in enumerate(db.session.query(table)\r\n .filter_by(app_id=app.id).yield_per(1), 1):\r\n item = json.dumps(tr.dictize())\r\n if (i == n):\r\n sep = \"\"\r\n yield item + sep\r\n yield \"]\"\r\n\r\n def format_csv_properly(row):\r\n keys = sorted(row.keys())\r\n values = []\r\n for k in keys:\r\n values.append(row[k])\r\n return values\r\n\r\n\r\n def handle_task(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def handle_task_run(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def get_csv(out, writer, table, handle_row):\r\n 
for tr in db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .yield_per(1):\r\n handle_row(writer, tr)\r\n yield out.getvalue()\r\n\r\n def respond_json(ty):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n try:\r\n table = tables[ty]\r\n except KeyError:\r\n return abort(404)\r\n return Response(gen_json(table), mimetype='application/json')\r\n\r\n def create_ckan_datastore(ckan, table, package_id):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n new_resource = ckan.resource_create(name=table,\r\n package_id=package_id)\r\n ckan.datastore_create(name=table,\r\n resource_id=new_resource['result']['id'])\r\n ckan.datastore_upsert(name=table,\r\n records=gen_json(tables[table]),\r\n resource_id=new_resource['result']['id'])\r\n\r\n def respond_ckan(ty):\r\n # First check if there is a package (dataset) in CKAN\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n msg_1 = gettext(\"Data exported to \")\r\n msg = msg_1 + \"%s ...\" % current_app.config['CKAN_URL']\r\n ckan = Ckan(url=current_app.config['CKAN_URL'],\r\n api_key=current_user.ckan_api)\r\n app_url = url_for('.details', short_name=app.short_name, _external=True)\r\n\r\n try:\r\n package, e = ckan.package_exists(name=app.short_name)\r\n if e:\r\n raise e\r\n if package:\r\n # Update the package\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_update(app=app, user=owner, url=app_url,\r\n resources=package['resources'])\r\n\r\n ckan.package = package\r\n resource_found = False\r\n for r in package['resources']:\r\n if r['name'] == ty:\r\n ckan.datastore_delete(name=ty, resource_id=r['id'])\r\n ckan.datastore_create(name=ty, resource_id=r['id'])\r\n ckan.datastore_upsert(name=ty,\r\n records=gen_json(tables[ty]),\r\n resource_id=r['id'])\r\n resource_found = True\r\n break\r\n if not resource_found:\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n else:\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_create(app=app, user=owner, url=app_url)\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n #new_resource = ckan.resource_create(name=ty,\r\n # package_id=package['id'])\r\n #ckan.datastore_create(name=ty,\r\n # resource_id=new_resource['result']['id'])\r\n #ckan.datastore_upsert(name=ty,\r\n # records=gen_json(tables[ty]),\r\n # resource_id=new_resource['result']['id'])\r\n flash(msg, 'success')\r\n return respond()\r\n except requests.exceptions.ConnectionError:\r\n msg = \"CKAN server seems to be down, try again layer or contact the CKAN admins\"\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n except Exception as inst:\r\n if len(inst.args) == 3:\r\n t, msg, status_code = inst.args\r\n msg = (\"Error: %s with status code: %s\" % (t, status_code))\r\n else: # pragma: no cover\r\n msg = (\"Error: %s\" % inst.args[0])\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n finally:\r\n return respond()\r\n\r\n def respond_csv(ty):\r\n # Export Task(/Runs) to CSV\r\n types = {\r\n \"task\": (\r\n model.task.Task, handle_task,\r\n (lambda x: True),\r\n gettext(\r\n \"Oops, the application does not have tasks to \\\r\n export, if you are the owner add some tasks\")),\r\n \"task_run\": (\r\n model.task_run.TaskRun, handle_task_run,\r\n (lambda x: type(x.info) == dict),\r\n gettext(\r\n \"Oops, there are no Task Runs yet to export, invite \\\r\n some users to participate\"))}\r\n try:\r\n table, handle_row, test, msg = types[ty]\r\n except KeyError:\r\n return 
abort(404)\r\n\r\n out = StringIO()\r\n writer = UnicodeWriter(out)\r\n t = db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .first()\r\n if t is not None:\r\n if test(t):\r\n writer.writerow(sorted(t.info.keys()))\r\n\r\n return Response(get_csv(out, writer, table, handle_row),\r\n mimetype='text/csv')\r\n else:\r\n flash(msg, 'info')\r\n return respond()\r\n\r\n export_formats = [\"json\", \"csv\"]\r\n if current_user.is_authenticated():\r\n if current_user.ckan_api:\r\n export_formats.append('ckan')\r\n\r\n ty = request.args.get('type')\r\n fmt = request.args.get('format')\r\n if not (fmt and ty):\r\n if len(request.args) >= 1:\r\n abort(404)\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n ckan_name=current_app.config.get('CKAN_NAME'),\r\n app=app,\r\n owner=owner)\r\n if fmt not in export_formats:\r\n abort(415)\r\n return {\"json\": respond_json, \"csv\": respond_csv, 'ckan': respond_ckan}[fmt](ty)", "def is_export(self, t, final_time, nb_iterations):\n if final_time is not None:\n nb_its_between_exports = self.nb_iterations_between_exports\n if nb_its_between_exports is None:\n # export at the end\n return t >= final_time\n else:\n # export every N iterations\n return nb_iterations % nb_its_between_exports == 0\n else:\n # if steady state, export\n return True", "def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)" ]
[ "0.5686498", "0.5624677", "0.5623435", "0.5498768", "0.546386", "0.5412859", "0.53924197", "0.5347752", "0.53329897", "0.53323716", "0.52957964", "0.5271474", "0.5201375", "0.5200578", "0.5200364", "0.5186541", "0.5160601", "0.51597124", "0.51319706", "0.5105871", "0.5100903", "0.5094342", "0.5089738", "0.50822717", "0.5061362", "0.5018982", "0.5017211", "0.50123286", "0.5004871", "0.4989085" ]
0.67712706
0
devbuilds only have a source directory instead of a repo and clone directory
def source_dir(self): assert self.revision.is_dev_build rev = self._get_dev_build_suffix() return os.path.join(self._cfg.basedir, 'develop', self.name + rev)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fork(args):\n subprocess.check_call([\"git\", \"config\", \"--global\",\n \"--add\", \"safe.directory\", args.src])\n head = subprocess.check_output([\"git\", \"rev-parse\", args.rev], cwd=args.src).strip()\n obj_dir = subprocess.check_output([\"git\", \"rev-parse\", \"--git-path\", \"objects\"],\n cwd=args.src)\n obj_dir = os.path.join(args.src, obj_dir.decode())\n\n # Create an empty git repository. Native clone is too slow because the\n # typical gerrit source repo has a huge number of refs and git has to\n # inspect all of them. This approach lets us ignore all of that to only\n # use the rev we were asked to build.\n os.mkdir(\"/build/%s\" %(args.project))\n os.chdir(\"/build/%s\" %(args.project))\n subprocess.check_call([\"git\", \"init\", \"-q\"])\n\n # Setup alternates so we can see all the objects in the source repo\n with open(\".git/objects/info/alternates\", \"w\") as F:\n F.write(obj_dir)\n F.write(\"\\n\")\n\n # Create a branch using the only remote HEAD we care about\n subprocess.check_call([\"git\", \"checkout\", \"-q\", \"-b\", \"build\", \"--no-progress\", head])\n subprocess.check_call([\"git\", \"--no-pager\", \"log\", \"--oneline\", \"-n1\"])\n\n if args.project == \"kernel\":\n copy(\"%s/.config\" %(args.src), \"/build/%s\" %(args.project))\n\n args.src = \"/build/%s\" %(args.project)\n args.rev = head", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def _copy_sources():\n shutil.rmtree(SRC_DIR_LOCAL, ignore_errors=True)\n os.mkdir(SRC_DIR_LOCAL)\n\n shutil.copy(os.path.join(SRC_DIR_REPO, 'LICENSE.txt'), SRC_DIR_LOCAL)\n shutil.copy(os.path.join(SRC_DIR_REPO, 'z3.pc.cmake.in'), SRC_DIR_LOCAL)\n shutil.copy(os.path.join(SRC_DIR_REPO, 'CMakeLists.txt'), SRC_DIR_LOCAL)\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'cmake'), os.path.join(SRC_DIR_LOCAL, 'cmake'))\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'scripts'), os.path.join(SRC_DIR_LOCAL, 'scripts'))\n\n # Copy in src, but avoid recursion\n def ignore_python_setup_files(src, _):\n if os.path.normpath(src).endswith('api/python'):\n return ['core', 'dist', 'MANIFEST', 'MANIFEST.in', 'setup.py', 'z3_solver.egg-info']\n return []\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'src'), os.path.join(SRC_DIR_LOCAL, 'src'),\n ignore=ignore_python_setup_files)", "def stage_dev():\n _setup_env()\n\n if not 'stage' in _config:\n abort('Could not find \"stage\" in config file')\n\n # Make sure cdn exists\n exists(dirname(env.cdn_path), required=True)\n\n # Build version\n build()\n\n # Copy to local CDN repository\n cdn_path = join(env.cdn_path, 'dev')\n clean(cdn_path)\n\n for r in _config['stage']:\n static.copy(_config, [{\n \"src\": r['src'],\n \"dst\": cdn_path, \"regex\": r['regex']}])\n\n # Create zip file in local CDN repository\n _make_zip(join(cdn_path, '%(name)s.zip' % _config))", "def source_repo(\n vcs, vcs_commands, repo_base_dir, target_repo, source_repo_branch, source_test_file_content,\n target_test_file_name, target_test_file_source_content, source_test_file_name, target_repo_branch,\n source_repo_name, source_repo_is_related):\n path = repo_base_dir.join(source_repo_name)\n os.makedirs(path.strpath)\n if source_repo_is_related:\n if vcs == 'bzr':\n path = path.join(source_repo_branch)\n subprocess.check_call(\n vcs_commands['clone'] + [target_repo.strpath, path.strpath])\n 
if vcs != 'bzr':\n subprocess.check_call(\n vcs_commands['branch'] + [source_repo_branch], cwd=path.strpath)\n else:\n subprocess.check_call(\n vcs_commands['init'] + [path.strpath])\n subprocess.check_call(\n vcs_commands['branch'] + [source_repo_branch], cwd=path.strpath)\n if vcs == 'bzr':\n path = path.join(source_repo_branch)\n if 'config' in vcs_commands:\n for commands in vcs_commands['config']:\n subprocess.check_call(commands, cwd=path.strpath)\n source_test_file = path.join(source_test_file_name)\n source_test_file.ensure()\n source_test_file.open('w').write(source_test_file_content)\n path.join(target_test_file_name).open(\n 'w').write(target_test_file_source_content)\n subprocess.check_call(vcs_commands['add'], cwd=path.strpath)\n subprocess.check_call(vcs_commands['commit'], cwd=path.strpath)\n if vcs == 'git':\n subprocess.check_call(vcs_commands['bare'], cwd=path.strpath)\n\n return path", "def source(conf, cwd=None):\n if not cwd:\n cwd = \".\"\n\n if conf.endswith(\".git\"):\n proc = subprocess.Popen(\n [\"git\", \"clone\", conf, \".\"],\n cwd=cwd, stdout=sys.stdout, stderr=sys.stderr)\n proc.communicate()\n\n else:\n download(conf)\n\n # pkg = os.path.join(cwd, \"requirements.in\")\n # if os.path.exists(pkg):\n # proc = subprocess.Popen(\n # [\"pip-compile\", pkg], stdout=sys.stdout, stderr=sys.stderr)\n # proc.communicate()\n\n pkg = os.path.join(cwd, \"requirements.txt\")\n if os.path.exists(pkg):\n proc = subprocess.Popen(\n [\"pip\", \"install\", \"--exists-action\", \"i\", \"-r\", pkg],\n stdout=sys.stdout, stderr=sys.stderr)\n proc.communicate()", "def test_not_github(self):\n project_src_path = 'project-src'\n os.environ['PROJECT_SRC_PATH'] = project_src_path\n generic_ci_env = platform_config.BasePlatformConfig()\n self.assertEqual(generic_ci_env.project_src_path, project_src_path)", "def install_source():\n from .project import sudo_project, git_repository, git_root\n\n with sudo():\n git.install()\n\n with sudo_project() as username:\n path = git_root()\n debian.mkdir(path, owner=username, group=username)\n with cd(path):\n repository = git_repository()\n path, cloned = git.clone(repository['url'], branch=repository['branch'])\n if cloned is None:\n abort('Failed to install source, aborting!')\n\n return cloned", "def update_source():\n\n require('environment', provided_by=env.environments)\n with cd(env.code_root):\n sudo('git pull', user=env.deploy_user)\n sudo('git checkout %(branch)s' % env, user=env.deploy_user)", "def _merge_source(self):\n\n with Dir(self.source_path()):\n # gather source repo short sha for audit trail\n rc, out, err = exectools.cmd_gather([\"git\", \"rev-parse\", \"--short\", \"HEAD\"])\n self.source_sha = out.strip()\n rc, out, err = exectools.cmd_gather([\"git\", \"rev-parse\", \"HEAD\"])\n self.full_source_sha = out.strip()\n\n rc, out, err = exectools.cmd_gather([\"git\", \"remote\", \"get-url\", \"origin\"])\n out = out.strip()\n self.source_url = out.replace(':', '/').replace('.git', '').replace('git@', 'https://')\n\n # See if the config is telling us a file other than \"Dockerfile\" defines the\n # distgit image content.\n if self.config.content.source.dockerfile is not Missing:\n dockerfile_name = self.config.content.source.dockerfile\n else:\n dockerfile_name = \"Dockerfile\"\n\n # The path to the source Dockerfile we are reconciling against\n source_dockerfile_path = os.path.join(self.source_path(), dockerfile_name)\n\n # Clean up any files not special to the distgit repo\n for ent in os.listdir(\".\"):\n\n # Do not delete 
anything that is hidden\n # protects .oit, .gitignore, others\n if ent.startswith(\".\"):\n continue\n\n # Skip special files that aren't hidden\n if ent in [\"additional-tags\"]:\n continue\n\n # Otherwise, clean up the entry\n if os.path.isfile(ent):\n os.remove(ent)\n else:\n shutil.rmtree(ent)\n\n # Copy all files and overwrite where necessary\n recursive_overwrite(self.source_path(), self.distgit_dir)\n\n if dockerfile_name != \"Dockerfile\":\n # Does a non-distgit Dockerfile already exist from copying source; remove if so\n if os.path.isfile(\"Dockerfile\"):\n os.remove(\"Dockerfile\")\n\n # Rename our distgit source Dockerfile appropriately\n os.rename(dockerfile_name, \"Dockerfile\")\n\n # Clean up any extraneous Dockerfile.* that might be distractions (e.g. Dockerfile.centos)\n for ent in os.listdir(\".\"):\n if ent.startswith(\"Dockerfile.\"):\n os.remove(ent)\n\n notify_owner = False\n\n # In a previous implementation, we tracked a single file in .oit/Dockerfile.source.last\n # which provided a reference for the last time a Dockerfile was reconciled. If\n # we reconciled a file that did not match the Dockerfile.source.last, we would send\n # an email the Dockerfile owner that a fundamentally new reconciliation had taken place.\n # There was a problem with this approach:\n # During a sprint, we might have multiple build streams running side-by-side.\n # e.g. builds from a master branch and builds from a stage branch. If the\n # Dockerfile in these two branches happened to differ, we would notify the\n # owner as we toggled back and forth between the two versions for the alternating\n # builds. Instead, we now keep around an history of all past reconciled files.\n\n source_dockerfile_hash = hashlib.sha256(open(source_dockerfile_path, 'rb').read()).hexdigest()\n\n if not os.path.isdir(\".oit/reconciled\"):\n os.mkdir(\".oit/reconciled\")\n\n dockerfile_already_reconciled_path = '.oit/reconciled/{}.Dockerfile'.format(source_dockerfile_hash)\n\n # If the file does not exist, the source file has not been reconciled before.\n if not os.path.isfile(dockerfile_already_reconciled_path):\n # Something has changed about the file in source control\n notify_owner = True\n # Record that we've reconciled against this source file so that we do not notify the owner again.\n shutil.copy(source_dockerfile_path, dockerfile_already_reconciled_path)\n\n # Leave a record for external processes that owners will need to notified.\n\n if notify_owner:\n with Dir(self.source_path()):\n author_email = None\n err = None\n rc, sha, err = exectools.cmd_gather('git log -n 1 --pretty=format:%H {}'.format(dockerfile_name))\n if rc == 0:\n rc, ae, err = exectools.cmd_gather('git show -s --pretty=format:%ae {}'.format(sha))\n if rc == 0:\n if ae.lower().endswith('@redhat.com'):\n self.logger.info('Last Dockerfile commiter: {}'.format(ae))\n author_email = ae\n else:\n err = 'Last commiter email found, but is not @redhat.com address: {}'.format(ae)\n if err:\n self.logger.info('Unable to get author email for last {} commit: {}'.format(dockerfile_name, err))\n\n owners = []\n if self.config.owners is not Missing and isinstance(self.config.owners, list):\n owners = list(self.config.owners)\n if author_email:\n owners.append(author_email)\n sub_path = self.config.content.source.path\n if not sub_path:\n source_dockerfile_subpath = dockerfile_name\n else:\n source_dockerfile_subpath = \"{}/{}\".format(sub_path, dockerfile_name)\n self.runtime.add_record(\"dockerfile_notify\", distgit=self.metadata.qualified_name, 
image=self.config.name,\n dockerfile=os.path.abspath(\"Dockerfile\"), owners=','.join(owners),\n source_alias=self.config.content.source.get('alias', None),\n source_dockerfile_subpath=source_dockerfile_subpath)", "def update_source(self):\n cwd = None\n if os.path.exists(self.path):\n cwd = self.path\n cmd = 'git fetch && git reset --hard origin/master'\n else:\n cmd = 'git clone %s %s' % (self.repo_url, self.path)\n Command(cmd, cwd=cwd)", "def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n extra_opts='--exclude=\".DS_Store\"',\n )", "def develop():\n# Install package in development mode\n sh('python setup.py develop')", "def main(repo):\n print(subprocess.call(['make', 'setup']))\n with Docker('doppins') as docker:\n print(docker.run('git clone {repo} cloned'.format(repo=repo)).out)", "def build(session: nox.Session) -> None:\n\n dist_dir = DIR.joinpath(\"dist\")\n if dist_dir.exists():\n shutil.rmtree(dist_dir)\n\n session.install(\".[dev]\")\n session.run(\"flit\", \"build\")", "def test_idea_missing_sources(self):\n self._idea_test(['testprojects/src/java/org/pantsbuild/testproject/missing_sources'])", "def copy_source():\n shutil.copytree(\"src\", os.path.join(BUILD_DIR, \"src\"))\n for file in os.listdir(\".\"):\n if os.path.isfile(file):\n shutil.copyfile(file, os.path.join(BUILD_DIR, file))", "def non_git_repo(init_source_repo):\n _, parent_dir, _ = init_source_repo\n\n # Create\n non_git_dir_path = create_dir(\n full_path=os.path.join(tempfile.gettempdir(), \"non-git-repo\"),\n on_conflict=\"replace\",\n )\n\n yield non_git_dir_path\n\n # Delete the non-git repo\n delete_dir(non_git_dir_path)", "def main(no_dev: bool):\n is_dev = not no_dev\n rewrite_pyproject(is_dev)\n if is_dev:\n make_dev_pyproject()", "def test_clone_repository_into_other_directory(koan, assert_cloned_repo_exists_in_other_directory):\n koan.shell('')", "def test_correct_host_repo_path(self, _, __):\n image_repo_path = '/src/repo_dir'\n with tempfile.TemporaryDirectory() as tmp_dir, mock.patch(\n 'build_specified_commit.detect_main_repo',\n return_value=('inferred_url', image_repo_path)):\n builder = self._create_builder(tmp_dir)\n builder.build_image_and_checkout_src()\n\n self.assertEqual(os.path.basename(builder.host_repo_path),\n os.path.basename(image_repo_path))", "def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))", "def grunt_build():\n local('cd {{ project_name }} && grunt build')", "def test_build_dir(self):\n build_dir = local.path(str(CFG['build_dir']))\n self.assertTrue(build_dir.exists())", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def clone_into_project(git_repo_name):\n repo_dir = git_dir + \"/%s.git\" % git_repo_name\n with cd(remote_dir):\n run('rm -rf myproject')\n run(\"git clone %s %s\" % (repo_dir, project_name))\n run(\"echo 'MY_ENV=\\\"prod\\\"' > %s/%s/site_settings.py\" % (project_name,project_name))\n update_conf_file()", "def development():\n env.branch = 'development'", "def setup_srcdir():\n script = \"\"\"\n mkdir -p {srcdir}\n cd {srcdir}\n git init\n 
git remote add origin {repo}\n git fetch origin\n git checkout {tag}\"\"\".\\\n format(srcdir=SRCDIR, repo=REPO, tag=TAG)\n rc = os.system(\"/bin/bash -x -e -c '{script}'\".format(script=script))\n if rc != 0:\n shutil.rmtree(SRCDIR)\n raise Exception(\"Unable to create srcdir.\")", "def process_develop_setup():\n if 'develop' in sys.argv and os.path.exists('build'):\n # Remove `build` directory created by a regular installation\n shutil.rmtree('build')\n elif 'develop' not in sys.argv and os.path.exists('gfootball_engine'):\n # If `pip install .` is called after development mode,\n # remove the 'fonts' directory copied by a `develop` setup\n copied_fonts = 'third_party/gfootball_engine/fonts'\n if os.path.exists(copied_fonts):\n shutil.rmtree(copied_fonts)\n # Remove .so files (.pyd on Windows)\n for empty_lib in glob.glob(\"brainball_cpp_engine*\"):\n os.remove(empty_lib)\n # Finally, remove symlink to the gfootball_engine directory\n if not os.path.exists('gfootball_engine'):\n return\n if os.path.islink('gfootball_engine'):\n if platform.system() == 'Windows':\n os.remove('gfootball_engine')\n else:\n os.unlink('gfootball_engine')\n else:\n shutil.rmtree('gfootball_engine')", "def bootstrap_development_distribution(project_name: str, dest_dir: Path):\n src_dir = Path(__file__).parent.parent.absolute()\n print(f\"Bootstrap: {src_dir} -> {dest_dir}\")\n shutil.copytree(\n src_dir,\n dest_dir,\n ignore=shutil.ignore_patterns(\n project_name.lower(),\n \".git\",\n \"build\",\n \"dist\",\n \"docs\",\n \".pytest_cache\",\n \".eggs\",\n \"templates\",\n \"__pycache__\",\n ),\n )" ]
[ "0.6243398", "0.6228782", "0.6211728", "0.62041354", "0.6192375", "0.61694247", "0.6141966", "0.60571873", "0.60399204", "0.6031721", "0.6004611", "0.5989618", "0.5989434", "0.59862226", "0.5955796", "0.59555477", "0.5943259", "0.5915231", "0.5878496", "0.58773404", "0.5875513", "0.58680755", "0.58635485", "0.58176565", "0.5809047", "0.57987475", "0.5759426", "0.5752679", "0.57469666", "0.5743412" ]
0.67784536
0
Calculates the correlation coefficients between columns. Displays them in descending order of their absolute values.
def correlation(data, method, caption): columns = list(data) coefficients = data.astype(float).corr(method=method) results = [] for i in range(len(columns)): for j in range(i + 1, len(columns)): coefficient = coefficients[columns[i]][columns[j]] results.append(( abs(coefficient), coefficient, columns[i] + ' x ' + columns[j])) print('# ' + caption + ', ' + method) for result in reversed(sorted(results)): abs_coefficient, coefficient, columns_pair = result print (coefficient, columns_pair)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def correlate_columns(matrix):\n return np.dot(matrix.T, matrix) / (la.norm(matrix) ** 2)", "def get_correlation(df):\n frame_correlation = df.corr()\n return frame_correlation", "def _calculate_correlation(self, anomaly):\n if self.silence_level <= 1:\n print(\"Calculating partial correlation matrix at zero lag from \"\n \"anomaly values...\")\n\n # Calculate the correlation matrix, cast to float64 for precise\n # calculation of inverse matrix.\n C = np.corrcoef(anomaly.transpose()).astype(\"float64\")\n\n # Calculate the inverse correlation matrix\n if np.linalg.det(C) != 0.0:\n C_inv = np.linalg.inv(C)\n else:\n C_inv = np.linalg.pinv(C)\n\n # Clean up\n del C\n\n # Get the diagonal of the inverse correlation matrix\n diag = C_inv.diagonal()[:]\n\n # Calculate matrix of normalizations\n norm = np.sqrt(abs(np.outer(diag, diag)))\n\n return - C_inv / norm", "def correlation(row):\n return row['correlation']", "def calculate_correlation_coefficient(column1: pd.Series, column2: pd.Series) -> np.float64:\n\n corr = column1.corr(column2)\n return corr", "def show_correlations(regressors, features, targets):\r\n \r\n def mae(v1, v2):\r\n #\"\"\"Return the MAE (mean absolute error) of v1 & v2.\"\"\"\r\n return mean(abs(v1 - v2))\r\n\t\r\n from sklearn.metrics import matthews_corrcoef\r\n from scipy.stats import pearsonr\r\n\t\r\n for regressor in regressors:\r\n regressor['preds'] = regressor['regressor'].predict(features)\r\n \r\n print('=============== MAE Comparison =================')\r\n for regressor in regressors:\r\n print('{} : {}'.format(regressor['name'], mae(regressor['preds'], targets)))\r\n \r\n print(\"=============== Pearson's Correlation Comparison =================\")\r\n for regressor in regressors:\r\n print('{} : {}'.format(regressor['name'], pearsonr(regressor['preds'], targets)))", "def compute_correlations(struc_df, option, gamma, alpha):\n n_states = len(np.unique(struc_df.objnum))\n nodes = network.temp_node_info()\n adjacency = network.adjacency_mat(nodes)\n L = compute_limit_matrix(0.5, adjacency, n_states)\n L_vector = L.flatten()\n M = learn_sr(struc_df, gamma, alpha)\n M = M[2, 6]\n M_vector = M.flatten()\n\n if option == \"norm\":\n print(\"Norm of L - M: \")\n print(la.norm(L_vector - M_vector, np.inf))\n\n if option == \"correlation\":\n print(\"Correlation of L, M: \")\n print(np.dot(L_vector, M_vector) /\n (la.norm(L_vector) * la.norm(M_vector)))", "def determine_correlation(var1,var2):\n v1 = np.array(var1)\n v2 = np.array(var2)\n mat = np.c_[(v1,v2)]# np.vstack((v1,v2)) #\n corr = np.corrcoef(mat.T)\n return corr[0][1]", "def corrcoef(self):\n return self.cov / self.std / self.std[:, None]", "def correlation(self) -> List[float]:\n self.pearson_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"pearson\")\n self.spearman_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"spearman\")\n return [self.pearson_corr, self.spearman_corr]", "def plot_correlations(data):\n\n from matplotlib import cm\n \n cols = data.columns.tolist()\n fig = plt.figure(figsize=(12,12))\n ax = fig.add_subplot(111)\n \n # Plot absolute value of pairwise correlations since we don't\n # particularly care about the direction of the relationship,\n # just the strength of it\n cax = ax.matshow(data.corr().abs(), cmap=cm.YlOrRd)\n \n fig.colorbar(cax)\n ax.set_xticks(np.arange(len(cols)))\n ax.set_yticks(np.arange(len(cols)))\n ax.set_xticklabels(cols)\n ax.set_yticklabels(cols)", "def corrcoef(self):\r\n return 
np.corrcoef(self.input.data)", "def calculate_correlation(data):\n pass", "def get_top_correlations(dataframe,columns,frame_type='spark'):\n if frame_type == 'spark':\n import math\n correlation_list = []\n correlations_finished = [] #hold correlatons done to prevent repitition\n for i, col_i in enumerate(columns):\n for j, col_j in enumerate(columns):\n if col_i+col_j not in correlations_finished: # don't repeat\n columns = [col_i,col_j]\n correlation = dataframe.stat.corr(col_i,col_j)\n if math.isnan(correlation):\n correlation=0.0\n correlation_list.append({\n 'columns': columns,\n 'correlation': correlation,\n 'correlation_abs':math.fabs(correlation),\n })\n # print({\n # 'columns': columns,\n # 'correlation': correlation,\n # 'correlation_abs':math.fabs(correlation),\n # })\n correlations_finished.append(col_i+col_j)\n #sort the list so highest correlations are first\n correlation_list = sorted(correlation_list, key=lambda x: x['correlation_abs'], reverse=True)\n return correlation_list\n else:\n pass", "def correlation(data):\n return corrcoef(np.transpose(np.reshape(data, ((data.shape[0] * data.shape[1]), data.shape[2]))))", "def Corr(x,y):\n \n cocoeff1 = np.empty((y.shape[1],y.shape[2]))\n cocoeff2 = np.empty((y.shape[1],y.shape[2]))\n for i in xrange(y.shape[1]):\n for j in xrange(y.shape[2]):\n cocoeff1[i,j],cocoeff2[i,j] = sts.pearsonr(x[:,i,j],y[:,i,j])\n \n print 'Completed: Correlation calculations!'\n \n return cocoeff1, cocoeff2", "def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue", "def corr_list(self):\n c = self.df.corr().abs()\n s = c.unstack()\n so = s.sort_values(ascending=False)\n i = int(len(so) ** (1/2))\n charts = so[i:]\n charts = charts[::2]\n if len(charts) > 3:\n charts = charts[:3]\n return charts.index, charts.values", "def matthews_corr(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif b == n or c == n:\n # only one (non-diagonal) cell is non-zero\n return -0.5\n elif p1 == n or p2 == n or q1 == n or q2 == n:\n # one row or column is zero, another non-zero\n return 0.0\n\n return _div(self.covar(), sqrt(p1 * q1 * p2 * q2))", "def _compute_corr(fmap):\n fmap = fmap.view(fmap.size(0), fmap.size(1), -1)\n fmap = nn.functional.normalize(fmap, dim=2, eps=1e-08)\n corr = torch.bmm(fmap.permute(0, 2, 1), fmap)\n return corr.view(corr.size(0), -1)", "def plot_corr_matrix(df):\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr().abs(), fignum=f.number)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16)", "def _pearson_correlation_coeff(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.rvalue", "def corr_coeff(self) -> float:\n correlation_coefficient = np.corrcoef(self.true, self.predicted)[0, 1]\n return float(correlation_coefficient)", "def cross_correlation(values1, values2, lags=100):\n lags, corr, line, x = pl.xcorr( values1, values2, maxlags=lags, usevlines=False, marker=None)\n return lags, corr", "def correlation(C):\n\n if type(C) is not np.ndarray:\n raise TypeError('C must be a numpy.ndarray')\n if len(C.shape) < 2 
or C.shape[0] is not C.shape[1]:\n raise ValueError('C must be a 2D square matrix')\n return C / np.sqrt(np.outer(np.diagonal(C), np.diagonal(C)))", "def pairwise_corr(df1, df2):\n res = []\n for i in range(df2.shape[1]):\n res.append(df1.corrwith(df2.ix[:, i]))\n res = pd.concat(res, axis=1)\n res.columns = df2.columns\n return res", "def fast_corr(df, col_name):\n\n if not isinstance(df, pd.DataFrame):\n raise TypeError(\"The type of the input data must be dataframe.\")\n\n if not isinstance(col_name, list):\n raise TypeError(\"The col_name must be list.\")\n\n if all(isinstance(item, str) for item in col_name) is False and all(\n isinstance(item, int) for item in col_name) is False:\n raise ValueError(\n \"The col_name must be a list of strings or a list of integers.\")\n\n if len(col_name) < 2:\n raise ValueError(\n \"At least two columns must be selected for correlation analysis.\")\n\n if all(isinstance(item, str) for item in col_name) is True and all(\n elem in df.columns.to_list() for elem in col_name) is False:\n raise ValueError(\"The column names were not found.\")\n\n if all(isinstance(item, int) for item in col_name) is True and max(\n col_name) > (df.shape[1] - 1):\n raise ValueError(\"The column indexes were out of range.\")\n\n if all(isinstance(item, str) for item in col_name):\n data = df.loc[:, col_name]\n else:\n data = df.iloc[:, col_name]\n\n data2 = data._get_numeric_data()\n rm_n = data.shape[1] - data2.shape[1]\n print(\"Removed\", rm_n, \"non-numberical columns from your selected columns\")\n\n sns.set(style=\"white\")\n corr = data2.corr()\n mask = np.triu(np.ones_like(corr, dtype=np.bool))\n f, ax = plt.subplots(figsize=(9, 11))\n ax.set_title('Correlation Matrix', size=20)\n ax.tick_params(axis='x', labelsize=15)\n ax.tick_params(axis='y', labelsize=15)\n\n cmap = sns.diverging_palette(220, 20, as_cmap=True)\n p = sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n p.set_yticklabels(p.get_yticklabels(), rotation=360)\n return p", "def correlation_matrix(self):\n correlation_matrix = self.model.covariance.copy()\n sigmaD = np.sqrt(np.diag(correlation_matrix))\n for ii in range(correlation_matrix.shape[0]):\n for jj in range(correlation_matrix.shape[1]):\n correlation_matrix[ii, jj] /= sigmaD[ii] * sigmaD[jj]\n return correlation_matrix", "def mp_corr(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif b == n or c == n:\n # only one (non-diagonal) cell is non-zero\n return -0.5\n\n return _div(2 * self.covar(), p1 * q1 + p2 * q2)", "def corr(self):\n pass" ]
[ "0.684586", "0.6635547", "0.6557074", "0.65286785", "0.6519738", "0.63619393", "0.63094735", "0.6283914", "0.62756026", "0.627133", "0.62701637", "0.62639886", "0.6246803", "0.6214507", "0.620774", "0.6157019", "0.6151323", "0.6137466", "0.61353064", "0.6128442", "0.61145526", "0.6088707", "0.6083887", "0.60820204", "0.6067601", "0.6057047", "0.6049308", "0.6044414", "0.60371536", "0.6007488" ]
0.76043904
0
Generate new UUIDs for all rows in a table
def assign_uuids( model: Any, session: Session, batch_size: int = DEFAULT_BATCH_SIZE ) -> None: bind = op.get_bind() table_name = model.__tablename__ count = session.query(model).count() # silently skip if the table is empty (suitable for db initialization) if count == 0: return start_time = time.time() print(f"\nAdding uuids for `{table_name}`...") # Use dialect specific native SQL queries if possible for dialect, sql in uuid_by_dialect.items(): if isinstance(bind.dialect, dialect): op.execute( f"UPDATE {dialect().identifier_preparer.quote(table_name)} SET uuid = {sql}" ) print(f"Done. Assigned {count} uuids in {time.time() - start_time:.3f}s.\n") return for obj in paginated_update( session.query(model), lambda current, total: print( f" uuid assigned to {current} out of {total}", end="\r" ), batch_size=batch_size, ): obj.uuid = uuid4 print(f"Done. Assigned {count} uuids in {time.time() - start_time:.3f}s.\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_uuid():\r\n yield uuid.uuid4()", "def _generate_uuid(self):\n\n return uuid.uuid4()", "def generate_uuids():\n uuid_start = str(uuid())\n while uuid_start.startswith(\"zzzzzzzz\"):\n uuid_start = str(uuid())\n uuid_end = list(deepcopy(uuid_start))\n \n char_pool = list(string.digits) + \\\n list(string.ascii_uppercase) + \\\n list(string.ascii_lowercase) \n # print(f\"char_pool: {char_pool}\")\n substitute_char = ''\n i = 0\n while i < 8:\n char_from_start_uuid = uuid_start[i]\n if char_from_start_uuid == \"z\":\n i += 1\n continue\n else:\n next_index_in_pool = char_pool.index(char_from_start_uuid) + 1\n substitute_char = char_pool[next_index_in_pool]\n break\n uuid_end[i] = substitute_char\n uuid_end = ''.join(uuid_end)\n print(f\"generated uuids: {uuid_start}, {uuid_end}\")\n return uuid_start, str(uuid_end)", "def generate_random_id(table):\n characters = [['!', '@', '#', '$', '%', '^', '&', '*'], [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]]\n characters.append(list(string.ascii_uppercase))\n characters.append(list(string.ascii_lowercase))\n generated = ''\n is_unique = False\n id_table = []\n for element in table:\n id_table.append(element)\n\n while not is_unique:\n is_unique = True\n for i in range(2):\n generated += str(characters[0][random.randint(0, len(characters[0])-1)])\n generated += str(characters[1][random.randint(0, len(characters[1])-1)])\n generated += str(characters[2][random.randint(0, len(characters[2])-1)])\n generated += str(characters[3][random.randint(0, len(characters[3])-1)])\n if generated in id_table:\n is_unique = False\n\n return generated", "def test_uuid():\n for _ in range(1000):\n uuid = uuid_generator()\n assert len(uuid) == 36\n assert uuid.count('-') == 4", "def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')", "def UUIDGen():\n\trandGen = random.Random()\n\trandGen.seed()\n\thashGen = sha.new(randStr512(randGen))\n\twhile 1:\n\t\thashGen.update(randStr512(randGen))\n\t\thashed = hashGen.digest()\n\t\tyield '%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x' % (\n\t\t\tord(hashed[0]), ord(hashed[1]), ord(hashed[2]), ord(hashed[3]),\n\t\t\tord(hashed[4]), ord(hashed[5]),\n\t\t\tord(hashed[6]) & 0x0F | 0x40, ord(hashed[7]),\n\t\t\tord(hashed[8]) & 0x3F | 0x80, ord(hashed[9]),\n\t\t\tord(hashed[10]), ord(hashed[11]),\n\t\t\tord(hashed[12]), ord(hashed[13]), ord(hashed[14]), ord(hashed[15]) )", "def __random_table_name(self):\n return 'tmp_%s_%s' % (self._xid(), ''.join(\n random.choice('abcdefghijklmnopqrstuvwxyz')\n for _ in range(8)\n ))", "def id_generator():\r\n new_id = uuid.uuid4()\r\n return new_id.hex", "def row_uuids(self) -> list:\n return self.__row_uuids", "def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)", "def _fix_uuids(self, instance):\n for atom in instance.atoms():\n if isinstance(atom.value, uuid.UUID):\n setattr(instance, atom.name, str(atom.value))\n\n return instance", "def generate_uuid():\n return uuid.uuid4().hex", "def generate_uuid():\n return uuid.uuid4()", "def generate_uuid():\n return uuid.uuid4()", "def uuid(self, column, nullable=False, length=36):\n self._last_column = self.table.add_column(\n column, \"uuid\", nullable=nullable, length=length\n )\n return self", "def uuid():\n from dallinger.experiment import Experiment\n\n click.echo(Experiment.make_uuid())", "def get_all_customer_ids_from_table(table):\n\n # your code", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str( uuid.uuid4() )", "def 
_NewUUIDString ():\n if __HaveUUID:\n return uuid.uuid1().urn\n return '%s:%08.8x' % (time.strftime('%Y%m%d%H%M%S'), random.randint(0, 0xFFFFFFFF))", "def gen_uuid():\n return str(uuid.uuid4())", "def generate_table(self, rows):\n ...", "def _generate_uuid():\n return str(uuid.uuid4())", "def new_uid():\n return str(uuid.uuid1())[:30]", "def uuid(seed):\n return uuid4().get_hex()", "def _rs() -> str:\n return uuid.uuid4().hex", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def uuid( *args ):\n t = long( time.time() * 1000 )\n r = long( random.random()*100000000000000000L )\n try:\n a = socket.gethostbyname( socket.gethostname() )\n except:\n # if we can't get a network address, just imagine one\n a = random.random()*100000000000000000L\n data = str(t)+' '+str(r)+' '+str(a)+' '+str(args)\n data = hashlib.md5(data).hexdigest()\n return data", "def uuid(self, value):\n self.unique_id = UUID(str(value)).hex" ]
[ "0.6252362", "0.6069835", "0.60602546", "0.6031163", "0.5899798", "0.58836985", "0.58688223", "0.58331877", "0.5799059", "0.5787476", "0.57632875", "0.5743339", "0.5730291", "0.57265776", "0.57037145", "0.5674246", "0.5665568", "0.5646195", "0.56417024", "0.56417024", "0.56398696", "0.5622136", "0.55994016", "0.5598553", "0.559701", "0.5567847", "0.5565922", "0.5550518", "0.5545763", "0.55451685" ]
0.65638924
0
If param == 0, sets turn angle to default value. Converts current position angle from radians to degrees. Converts negative angles to positive. Continues to turn left until the current distance to the goal is greater than the previous distance, meaning that the goal has been passed.
def left(self, param): global estop_flag, move_state #If input angle is zero, set angle to default if param: angle = param else: angle = riu.default_angle signal.alarm(0) #Disable timer interrupt for the duration of the movement #safely grab current yaw with self.move_state_lock: current_yaw = (math.degrees(move_state['yaw']) + 360) % 360 #Set goal to yaw+angle. Add 360 then mod to account for negative angles but avoid going over 360 goal = (current_yaw + angle) % 360 half_goal = (current_yaw + angle/2) % 360 if self.angle_lock: if goal >= 315 and goal < 45: goal = self.zeroed_angle elif goal >= 45 and goal < 135: goal = self.zeroed_angle + 90 elif goal >= 135 and goal < 225: goal = self.zeroed_angle + 180 elif goal >= 225 and goal < 315: goal = self.zeroed_angle + 270 goal = goal % 360 half_goal = (current_yaw + angle/2) % 360 halfway_flag = False #used to flag if we've already sent out a halfway message #Anonymous function that calculates the current counterclockwise distance to the goal chkdist = lambda pos, goal: round(goal - pos + 360 * (goal < pos), 1) #Gets current distance and initially sets previous distance = distance distance = chkdist(current_yaw, goal) prev_dist = distance """Continues to move while absolute distance is not within angular_error and counterclockwise distance is not increasing. NOTE: absolute distance is the shortest distance in either direction, while counterclockwise distance is the distance using only counterclockwise movement. The angular_error condition was added because the movements tended to end within the first few cycles due to some float error. With the error condition, the movement can only end when inside at least the general area of the goal.""" while distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error: if estop_flag: self.publisher.publish(Mover.stop_msg) else: #Construct and publish left turn message twist_msg = Twist() twist_msg.angular.z = riu.turn_rate self.publisher.publish(twist_msg) #If distance to goal is less than half the initial distance, publish the half done message if distance <= half_goal and not halfway_flag: halfway_flag = True self.status_pub.publish(String("half")) #Update current position with self.move_state_lock: current_yaw = (math.degrees(move_state['yaw']) + 360) % 360 #Set previous distance, then update distance based on new position prev_dist = distance distance = chkdist(current_yaw, goal) rospy.sleep(.2) #After loop exit, publish stop message and send done message to cmd_queue self.publisher.publish(Mover.stop_msg) self.status_pub.publish(String("done")) signal.alarm(Mover.ready_message_interval) #Restart timer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def right(self, param):\n\t\tglobal estop_flag, move_state\n\t\t#If input angle is zero, set angle to default\n\t\tif param:\n\t\t\tangle = param\n\t\telse:\n\t\t\tangle = riu.default_angle\n\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\t#safely grab current yaw\n\t\twith self.move_state_lock:\n\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t#Set goal to yaw+angle. Add 360 then mod to account for negative angles but avoid going over 360\n\t\tgoal = (current_yaw - angle + 360) % 360\n\t\tif self.angle_lock:\n\t\t\tif goal >= 315 and goal < 45:\n\t\t\t\tgoal = self.zeroed_angle\n\t\t\telif goal >= 45 and goal < 135:\n\t\t\t\tgoal = self.zeroed_angle + 90\n\t\t\telif goal >= 135 and goal < 225:\n\t\t\t\tgoal = self.zeroed_angle + 180\n\t\t\telif goal >= 225 and goal < 315:\n\t\t\t\tgoal = self.zeroed_angle + 270\n\t\tgoal = goal % 360\n\t\thalf_goal = (current_yaw - angle/2 + 360) % 360\n\t\thalfway_flag = False #used to flag if we've already sent out a halfway message\n\t\t#Anonymous function that calculates the current clockwise distance to the goal\n\t\tchkdist = lambda pos, goal: round(pos - goal + 360 * (goal > pos), 1)\n\t\t#Gets current distance and initially sets previous distance = distance\n\t\tdistance = chkdist(current_yaw, goal)\n\t\tprev_dist = distance\n\t\t\"\"\"Continues to move while absolute distance is not within angular_error and clockwise\n\t\tdistance is not increasing. NOTE: absolute distance is the shortest distance in either direction,\n\t\twhile clockwise distance is the distance using only clockwise movement.\n\t\tThe angular_error condition was added because the movements tended to end within the first few \n\t\tcycles due to some float error. With the error condition, the movement can only end when inside\n\t\tat least the general area of the goal.\"\"\"\n\t\twhile distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error:\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#Build and publish right turn message\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance to goal is less than half the initial distance, publish the half done message\n\t\t\t\tif distance <= half_goal and not halfway_flag:\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#Update current position\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t\t\t#Update previous distance, then update distance based on current position\n\t\t\t\tprev_dist = distance\n\t\t\t\tdistance = chkdist(current_yaw, goal)\n\t\t\trospy.sleep(.2)\n\t\t#After loop end, send stop message and send done message to cmd_queue\t\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval) #Restart timer", "def changeDir(turn, angle):\n # Converts each argument to the corrent type\n turn = str(turn)\n angle = int(angle)\n if turn == 'L': # If Left, set the negative of the angle, and divide by 90 to get 3/2/1/0\n return int(-angle / 90)\n elif turn == 'R':\n return int(angle / 90) # If Left, set the negative of the angle, and divide by 90 to get 3/2/1/0", "def turn_to_pivot(self, goal_pivot):\n\n\t\tgoal_pivot = self.check_pivot_bounds(goal_pivot)\n\n\t\tturn_angle = goal_pivot - self.current_pivot # determines direction to 
turn\n\t\tprint(\"Turning {} degrees..\".format(turn_angle))\n\n\t\trospy.sleep(1)\n\n\t\tif turn_angle < -self.min_pivot_tolerance:\n\t\t\tself.turn_left(goal_pivot) # start turning left\n\t\telif turn_angle > self.min_pivot_tolerance:\n\t\t\tself.turn_right(goal_pivot) # start turning right\n\t\telse:\n\t\t\tprint(\"Turn angle is zero, canceling turn request..\")\n\t\t\treturn # don't turn if angle is 0", "def translate_angle_with_imu(self, goal_angle):\n\t\t_turn_val = self.no_turn_val # initializes turn to not turn\n\n\t\tprint(\"Angle to translate: {}\".format(goal_angle))\n\n\t\tif goal_angle > 0:\n\t\t\tprint(\"Turning right..\")\n\t\t\t_turn_val = self.turn_right_val # value to turn right\n\t\telif goal_angle < 0:\n\t\t\tprint(\"Turning left..\")\n\t\t\t_turn_val = self.turn_left_val # value to turn left\n\n\t\tturn_angle = 0\n\t\tlast_angle = self.get_jackal_rot().jackal_rot # get angle from IMU (in radians)\n\n\t\t# while abs(turn_angle) < abs(goal_angle) and not self.at_flag and not rospy.is_shutdown():\n\t\twhile abs(turn_angle) < abs(radians(goal_angle)) and not self.at_flag and not rospy.is_shutdown():\n\n\t\t\t# self.cmd_vel.publish(move_cmd)\n\n\t\t\t# print(\"Current angle: {}, Current pivot: {}\".format(self.last_angle, self.current_pivot))\n\n\t\t\tself.articulator_pub.publish(_turn_val)\n\n\t\t\trospy.sleep(1.0/self.rate)\n\n\t\t\tcurr_angle = self.get_jackal_rot().jackal_rot\n\t\t\tdelta_angle = self.normalize_angle(curr_angle - last_angle)\n\t\t\tturn_angle += delta_angle\n\t\t\tlast_angle = curr_angle\n\n\t\t\tif delta_angle == 0.0:\n\t\t\t\t# print(\"Delta angle is 0, breaking out of turning loop..\")\n\t\t\t\tbreak\n\n\t\tself.articulator_pub.publish(self.no_turn_val) # stop turning once goal angle is reached.\n\n\t\t# if self.emergency_stop:\n\t\t# \tprint(\"Emergency stop from RF remote received, stopping turning routine..\")\n\n\t\treturn", "def turn(self, angle):\n self.logger.debug(\"turn \" + str(angle))", "def _go_around(self, angle, dist):\n ignore = Obstacle.IS_SONAR\n if self.avoid_targets is True:\n ignore |= Obstacle.TAG_TARGET\n elif self.avoid_home is True:\n # Need to ignore both for this because target tags are likely to\n # be in view inside the home nest.\n ignore |= Obstacle.TAG_TARGET | Obstacle.TAG_HOME\n\n cur_heading = self.swarmie.get_odom_location().get_pose().theta\n turn_result = self.swarmie.set_heading(\n cur_heading + angle,\n ignore=ignore,\n throw=False\n )\n drive_result = self.swarmie.drive(dist,\n ignore=Obstacle.SONAR_BLOCK,\n throw=False)\n\n return turn_result, drive_result", "def go_to_angle(user_theta):\n global rate\n theta_new = user_theta - theta\n if theta_new > 0:\n # Left\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = 0.4\n pub.publish(speed)\n rate.sleep()\n else:\n # Take a Right\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = - 0.4\n pub.publish(speed)\n rate.sleep()\n speed.linear.x = 0\n speed.angular.z = 0\n pub.publish(speed)", "def adjust(self, turnDeg): \n if abs(turnDeg) > self.MAX_TURN_PER_CYCLE:\n if (turnDeg < 0):\n self.cmdRotateTo(-self.MAX_TURN_PER_CYCLE)\n else:\n self.cmdRotateTo(self.MAX_TURN_PER_CYCLE)\n else:\n self.cmdRotateTo(turnDeg)", "def turn_degrees(self, degrees_to_turn, turn_speed_sp):\n if degrees_to_turn > 0:\n degrees_through = degrees_to_turn * 4.4375\n self.left_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=-degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n 
self.right_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n if degrees_to_turn < 0:\n degrees_through = degrees_to_turn * 4.4375\n self.left_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=-degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.right_motor.run_to_rel_pos(speed_sp=-turn_speed_sp,\n position_sp=degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)", "def gravity_turn(mission):\n vessel = mission.conn.space_center.active_vessel\n\n apoapsis = vessel.orbit.apoapsis_altitude\n altitude = vessel.flight().mean_altitude\n apo_time = vessel.orbit.time_to_apoapsis\n per_time = vessel.orbit.time_to_periapsis\n target_altitude = mission.parameters.get('target_altitude', 100000)\n turn_end_alt = mission.parameters.get('turn_end_alt', target_altitude * 0.6)\n turn_start_alt = mission.parameters.get('turn_start_alt', 1000)\n min_pitch = mission.parameters.get('min_pitch', 10)\n target_apt = mission.parameters.get('target_apt', 40)\n max_autostage = mission.parameters.get('max_autostage', 0)\n\n if mission.current_step[\"first_call\"]:\n mission.parameters[\"pid\"] = PID(0.2, 0.01, 0.1, 0.1, 1)\n\n if apoapsis > target_altitude:\n del mission.parameters[\"pid\"]\n vessel.control.throttle = 0\n mission.next('coast_to_space')\n return\n\n if altitude > vessel.orbit.body.atmosphere_depth:\n mission.next('burn_to_apo')\n return\n\n if vessel.flight().static_pressure < 100:\n target_apt = 60.0\n mission.parameters[\"target_apt\"] = target_apt\n\n if len(find_all_fairings(vessel)) > 0 and not vessel.available_thrust:\n drop_fairings(vessel)\n\n auto_stage(vessel, max_autostage)\n\n frac_den = turn_end_alt - turn_start_alt\n frac_num = altitude - turn_start_alt\n turn_angle = 90 * frac_num / frac_den\n target_pitch = max(min_pitch, 90 - turn_angle)\n vessel.auto_pilot.target_pitch_and_heading(target_pitch, 90)\n mission.parameters[\"target_pitch\"] = target_pitch\n\n if per_time < apo_time:\n new_thr = 1\n else:\n new_thr = mission.parameters[\"pid\"].seek(target_apt, apo_time, mission.ut())\n\n vessel.control.throttle = new_thr", "def cutDownAngle_def(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(min(raySortie, diff.norm - rayInter), 20.)\n position += diff\n return goTo(state,position)", "def my_turn_in_place(robot, angle, speed):\n\t# ####\n\t# TODO: Implement your version of a rotating in place function using the\n\t# robot.drive_wheels() function.\n\t# ####\n\tnormalizedAngle = angle % 360\n\tturnLeft = normalizedAngle <= 180\n\tinnerAngle = normalizedAngle if turnLeft else 360 - normalizedAngle\n\n\tdist = get_distance_between_wheels() * math.pi * (innerAngle/360.0)\n\ttimeToWait = dist / (speed * 1.0)\n\t\n\tturnLeftTransformation = -1 if turnLeft else 1\n\trobot.drive_wheels(turnLeftTransformation * speed, -1 * turnLeftTransformation * speed, duration=timeToWait)\n\t# time.sleep(timeToWait)\n\trobot.drive_wheels(0, 0)\n\trobot.stop_all_motors()", "def cutDownAngle_gk(state, raySortie):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = raySortie\n position += diff\n return goTo(state,position)", "def adjust_starting_position(self, direction):\n\n direction = 1 if direction in [\"left\", 1] else -1\n\n self.angle = direction * 25\n self.distance = 12\n self.drive_thread.reset()\n\n while 
self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = 0\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = direction * -25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)", "def turn_round(self, agent, param):\n return agent.turn(param, self.bet_hist, self.pot)", "def turned(self,angle: \"radians to turn\") -> Position:\n return Position(self.x, self.y, self.facing + angle)", "def my_go_to_pose1(robot, x, y, angle_z):\n # Assuming positive y is to the robot's left and positive x is the direction the robot is already facing\n hypotenuse = numpy.sqrt(x*x + y*y)\n angle_offset_of_target_point = numpy.arcsin(y/hypotenuse)*180/math.pi\n my_turn_in_place(robot, angle_offset_of_target_point , 30)\n my_drive_straight(robot, hypotenuse, 50)\n my_turn_in_place(robot, angle_z-angle_offset_of_target_point, 30)\n time.sleep(1)", "def u_turn(self, direction, diameter_in):\n \n# pdb.set_trace()\n # Calculate radius of turn for the inside wheel.\n r_in = diameter_in / 2\n\n # Outside radius is 20 inches from inside radius.\n r_out = r_in + MuleBot.WHEEL_BASE_LENGTH\n \n # Outside travel distance\n travel = r_out * 3.14159\n travel_revolutions = travel / MuleBot.CIRCUM_IN\n \n r_ratio = r_out / r_in\n #r_ratio_half = r_ratio / 2\n\n speed_multiplier = MuleBot.MAX_RPM / r_ratio\n\n outside_rpm = r_ratio * speed_multiplier\n inside_rpm = speed_multiplier\n \n \n # \n # minutes at outside_rpm\n minutes = travel_revolutions / outside_rpm\n seconds = minutes * MuleBot.SECONDS_PER_MINUTE\n \n # Something isn't quite perfect.\n if direction == 'left':\n if diameter_in < 25:\n seconds -= 1\n else:\n seconds -= 2\n else:\n if diameter_in < 25:\n seconds += 1\n else:\n seconds += 2\n\n if direction == 'left':\n v_l = self.rpm_to_rps(inside_rpm)\n v_r = self.rpm_to_rps(outside_rpm)\n else:\n v_r = self.rpm_to_rps(inside_rpm)\n v_l = self.rpm_to_rps(outside_rpm)\n\n #print(\"2inside: rpm: \", inside_rpm)\n #print(\"2outside: rpm: \", outside_rpm)\n \n #print(\"2.1: v_l: \", v_l)\n #print(\"2.1: v_r: \", v_r)\n\n # Set wheel drive rates.\n self.set_wheel_drive_rates(v_l, v_r)\n\n # Sleep during the turn.\n time.sleep(seconds)\n\n # Stop\n self.stop()\n \n # Move forward 24 inches.\n self.forward(24)", "def TurnRobot(r, i, hdg_tgt, precision):\n # Continue refining the angle until we're under the req. 
precision\n while abs(CalculateCompassDifference(i.GetHeading(), hdg_tgt)) > precision:\n delta = CalculateCompassDifference(hdg_tgt, i.GetHeading())\n\n if delta > 0:\n # Clockwise turn\n r.driveDirect(1, -1)\n\n # Wait until the turn has finished.\n while abs(CalculateCompassDifference(hdg_tgt, i.GetHeading())) > precision:\n time.sleep(0.001)\n elif delta < 0:\n # Counter-clockwise turn\n r.driveDirect(-1, 1)\n\n # Wait until the turn has finished.\n while abs(CalculateCompassDifference(hdg_tgt, i.GetHeading())) > precision:\n time.sleep(0.001)\n \n # Stop and regauge\n r.stop()\n time.sleep(0.25)\n \n return CalculateCompassDifference(hdg_tgt, i.GetHeading())", "def left(self, angle: Degrees):\n prev = self.angle\n self.angle = self.angle - angle\n if self.angle < 0:\n self.angle += 360.0", "def navToPose(goal):\n #compute angle required to make straight-line move to desired pose\n global xPosition\n global yPosition\n global theta\n #capture desired x and y positions\n desiredY = goal.pose.position.y\n desiredX = goal.pose.position.x\n #capture desired angle\n quat = goal.pose.orientation\n q = [quat.x, quat.y, quat.z, quat.w]\n roll, pitch, yaw = euler_from_quaternion(q)\n desiredT = yaw * (180.0/math.pi)\n #compute distance to target\n distance = math.sqrt(math.pow((desiredX - xPosition), 2) + math.pow((desiredY - yPosition), 2))\n adjustedX = goal.pose.position.x - xPosition\n adjustedY = goal.pose.position.y - yPosition\n print goal.pose.position.x, goal.pose.position.y\n print xPosition, yPosition\n print adjustedX, adjustedY\n #compute initial turn amount\n initialTurn = (math.atan2(adjustedY, adjustedX) * (180 / math.pi)) - theta\n\n print \"moving from (\" + str(xPosition) + \", \" + str(yPosition) + \") @ \" + str(theta) + \" degrees\"\n print \"moving to (\" + str(desiredX) + \", \" + str(desiredY) + \") @ \" + str(desiredT) + \" degrees\"\n print \"distance: \" + str(distance) + \", initial turn: \" + str(initialTurn)\n rotateDegrees(initialTurn)\n driveSmooth(0.25, distance)\n rospy.sleep(2)\n finalTurn = desiredT - theta\n rotateDegrees(finalTurn)", "def cutDownAngle(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(raySortie, diff.norm - rayInter)\n position += diff\n return goTo(state,position)", "def turn(self, angular_distance, speed=0.5):\n while (self._last_odom_msg == None):\n rospy.sleep(1.0)\n start = copy.deepcopy(self._last_odom_msg.pose.pose.orientation)\n curr_yaw = self.quaternion_to_yaw(start)\n rate = rospy.Rate(10)\n direction = -1 if (angular_distance < 0) else 1\n angular_distance = angular_distance % (2 * math.pi)\n goal_angle = curr_yaw + angular_distance\n goalPos = self.rad_to_coor(goal_angle)\n # TODO: CONDITION should check if the robot has rotated the desired amount\n # TODO: Be sure to handle the case where the desired amount is negative!\n curPos = self.rad_to_coor(curr_yaw) #self.quaternion_to_yaw(self._last_odom_msg.pose.pose.orientation)\n while not self.reached_goal_state(curPos, goalPos):#distance_to_goal(curr_yaw, goal_yaw, direction) > 0:\n # TODO: you will probably need to do some math in this loop to check the CONDITION\n self.move(0, direction * speed)\n curr_yaw = self.quaternion_to_yaw(self._last_odom_msg.pose.pose.orientation)\n curPos = self.rad_to_coor(curr_yaw)\n rate.sleep()", "def do_left_turn(robot_name):\n global current_direction_index\n\n current_direction_index -= 1\n if current_direction_index < 0:\n current_direction_index = 3\n\n return True, ' > '+robot_name+' 
turned left.'", "def rotate_waypoint(self, direction: str, argument: int):\n if direction == \"R\":\n angle = radians(argument)\n else:\n angle = -1 * radians(argument)\n y = self.waypoint_vector[0]\n x = self.waypoint_vector[1]\n self.waypoint_vector[0] = int(round(x * sin(angle) + y * cos(angle)))\n self.waypoint_vector[1] = int(round(x * cos(angle) - y * sin(angle)))", "def turn(self, tank_angle, target_angle):\n angle_diff = periodic_difference_of_angles(tank_angle, target_angle)\n if ((angle_diff + 2 * math.pi) % 2\n * math.pi >= math.pi and abs(angle_diff) > MIN_ANGLE_DIF):\n self.tank.stop_moving()\n self.tank.turn_left()\n elif ((angle_diff + 2 * math.pi) % 2 * math.pi\n < math.pi and abs(angle_diff) > MIN_ANGLE_DIF):\n self.tank.stop_moving()\n self.tank.turn_right()", "def abs_angle(self, angle):\n steps = int(angle / 360 * self.steps_per_rev)\n steps -= self.current_position % self.steps_per_rev\n self.steps(steps)", "def turn(robot, alpha=0.524): # 0.524 rad = 30 degrees\n\n journey = Journey(robot, angle=alpha)\n journey.start()\n robot.position.turn(alpha)\n sleep(0.5)", "def randomWalk(t, turns, distance=20):\n for x in range(turns):\n if x % 2 == 0:\n t.left(random.randint(-180, 180))\n else:\n t.right(random.randint(-180, 180))\n t.forward(random.randint(1,distance))\n sleep(10)", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1" ]
[ "0.7021173", "0.70137686", "0.67029786", "0.65796685", "0.63092864", "0.61473304", "0.6123353", "0.61182714", "0.61117864", "0.60715616", "0.60414594", "0.60361344", "0.59939015", "0.5975596", "0.59737754", "0.5938437", "0.59375", "0.59341085", "0.59288204", "0.5903714", "0.58730096", "0.5866315", "0.58418113", "0.58407307", "0.58113104", "0.5796257", "0.5794379", "0.5788609", "0.57843125", "0.57717776" ]
0.7418729
0
If param == 0, sets turn angle to the default value. Converts current position angle from radians to degrees. Converts negative angles to positive. Continues to turn left until the current distance to the goal is greater than the previous distance, meaning that the goal has been passed.
def right(self, param): global estop_flag, move_state #If input angle is zero, set angle to default if param: angle = param else: angle = riu.default_angle signal.alarm(0) #Disable timer interrupt for the duration of the movement #safely grab current yaw with self.move_state_lock: current_yaw = (math.degrees(move_state['yaw']) + 360) % 360 #Set goal to yaw+angle. Add 360 then mod to account for negative angles but avoid going over 360 goal = (current_yaw - angle + 360) % 360 if self.angle_lock: if goal >= 315 and goal < 45: goal = self.zeroed_angle elif goal >= 45 and goal < 135: goal = self.zeroed_angle + 90 elif goal >= 135 and goal < 225: goal = self.zeroed_angle + 180 elif goal >= 225 and goal < 315: goal = self.zeroed_angle + 270 goal = goal % 360 half_goal = (current_yaw - angle/2 + 360) % 360 halfway_flag = False #used to flag if we've already sent out a halfway message #Anonymous function that calculates the current clockwise distance to the goal chkdist = lambda pos, goal: round(pos - goal + 360 * (goal > pos), 1) #Gets current distance and initially sets previous distance = distance distance = chkdist(current_yaw, goal) prev_dist = distance """Continues to move while absolute distance is not within angular_error and clockwise distance is not increasing. NOTE: absolute distance is the shortest distance in either direction, while clockwise distance is the distance using only clockwise movement. The angular_error condition was added because the movements tended to end within the first few cycles due to some float error. With the error condition, the movement can only end when inside at least the general area of the goal.""" while distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error: if estop_flag: self.publisher.publish(Mover.stop_msg) else: #Build and publish right turn message twist_msg = Twist() twist_msg.angular.z = -1 * riu.turn_rate self.publisher.publish(twist_msg) #If distance to goal is less than half the initial distance, publish the half done message if distance <= half_goal and not halfway_flag: halfway_flag = True self.status_pub.publish(String("half")) #Update current position with self.move_state_lock: current_yaw = (math.degrees(move_state['yaw']) + 360) % 360 #Update previous distance, then update distance based on current position prev_dist = distance distance = chkdist(current_yaw, goal) rospy.sleep(.2) #After loop end, send stop message and send done message to cmd_queue self.publisher.publish(Mover.stop_msg) self.status_pub.publish(String("done")) signal.alarm(Mover.ready_message_interval) #Restart timer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def left(self, param):\n\t\tglobal estop_flag, move_state\n\t\t#If input angle is zero, set angle to default\n\t\tif param:\n\t\t\tangle = param\n\t\telse:\n\t\t\tangle = riu.default_angle\n\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\t#safely grab current yaw\n\t\twith self.move_state_lock:\n\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t#Set goal to yaw+angle. Add 360 then mod to account for negative angles but avoid going over 360\n\t\tgoal = (current_yaw + angle) % 360\n\t\thalf_goal = (current_yaw + angle/2) % 360\n\t\tif self.angle_lock:\n\t\t\tif goal >= 315 and goal < 45:\n\t\t\t\tgoal = self.zeroed_angle\n\t\t\telif goal >= 45 and goal < 135:\n\t\t\t\tgoal = self.zeroed_angle + 90\n\t\t\telif goal >= 135 and goal < 225:\n\t\t\t\tgoal = self.zeroed_angle + 180\n\t\t\telif goal >= 225 and goal < 315:\n\t\t\t\tgoal = self.zeroed_angle + 270\n\t\tgoal = goal % 360\n\t\thalf_goal = (current_yaw + angle/2) % 360\n\t\thalfway_flag = False #used to flag if we've already sent out a halfway message\n\t\t#Anonymous function that calculates the current counterclockwise distance to the goal\n\t\tchkdist = lambda pos, goal: round(goal - pos + 360 * (goal < pos), 1)\n\t\t#Gets current distance and initially sets previous distance = distance\n\t\tdistance = chkdist(current_yaw, goal)\n\t\tprev_dist = distance\n\t\t\"\"\"Continues to move while absolute distance is not within angular_error and counterclockwise\n\t\tdistance is not increasing. NOTE: absolute distance is the shortest distance in either direction,\n\t\twhile counterclockwise distance is the distance using only counterclockwise movement.\n\t\tThe angular_error condition was added because the movements tended to end within the first few \n\t\tcycles due to some float error. 
With the error condition, the movement can only end when inside\n\t\tat least the general area of the goal.\"\"\"\n\t\twhile distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error:\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#Construct and publish left turn message\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\ttwist_msg.angular.z = riu.turn_rate\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance to goal is less than half the initial distance, publish the half done message\n\t\t\t\tif distance <= half_goal and not halfway_flag:\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#Update current position\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t\t\t#Set previous distance, then update distance based on new position\n\t\t\t\tprev_dist = distance\n\t\t\t\tdistance = chkdist(current_yaw, goal)\n\t\t\trospy.sleep(.2)\n\t\t#After loop exit, publish stop message and send done message to cmd_queue\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval) #Restart timer", "def changeDir(turn, angle):\n # Converts each argument to the corrent type\n turn = str(turn)\n angle = int(angle)\n if turn == 'L': # If Left, set the negative of the angle, and divide by 90 to get 3/2/1/0\n return int(-angle / 90)\n elif turn == 'R':\n return int(angle / 90) # If Left, set the negative of the angle, and divide by 90 to get 3/2/1/0", "def turn_to_pivot(self, goal_pivot):\n\n\t\tgoal_pivot = self.check_pivot_bounds(goal_pivot)\n\n\t\tturn_angle = goal_pivot - self.current_pivot # determines direction to turn\n\t\tprint(\"Turning {} degrees..\".format(turn_angle))\n\n\t\trospy.sleep(1)\n\n\t\tif turn_angle < -self.min_pivot_tolerance:\n\t\t\tself.turn_left(goal_pivot) # start turning left\n\t\telif turn_angle > self.min_pivot_tolerance:\n\t\t\tself.turn_right(goal_pivot) # start turning right\n\t\telse:\n\t\t\tprint(\"Turn angle is zero, canceling turn request..\")\n\t\t\treturn # don't turn if angle is 0", "def translate_angle_with_imu(self, goal_angle):\n\t\t_turn_val = self.no_turn_val # initializes turn to not turn\n\n\t\tprint(\"Angle to translate: {}\".format(goal_angle))\n\n\t\tif goal_angle > 0:\n\t\t\tprint(\"Turning right..\")\n\t\t\t_turn_val = self.turn_right_val # value to turn right\n\t\telif goal_angle < 0:\n\t\t\tprint(\"Turning left..\")\n\t\t\t_turn_val = self.turn_left_val # value to turn left\n\n\t\tturn_angle = 0\n\t\tlast_angle = self.get_jackal_rot().jackal_rot # get angle from IMU (in radians)\n\n\t\t# while abs(turn_angle) < abs(goal_angle) and not self.at_flag and not rospy.is_shutdown():\n\t\twhile abs(turn_angle) < abs(radians(goal_angle)) and not self.at_flag and not rospy.is_shutdown():\n\n\t\t\t# self.cmd_vel.publish(move_cmd)\n\n\t\t\t# print(\"Current angle: {}, Current pivot: {}\".format(self.last_angle, self.current_pivot))\n\n\t\t\tself.articulator_pub.publish(_turn_val)\n\n\t\t\trospy.sleep(1.0/self.rate)\n\n\t\t\tcurr_angle = self.get_jackal_rot().jackal_rot\n\t\t\tdelta_angle = self.normalize_angle(curr_angle - last_angle)\n\t\t\tturn_angle += delta_angle\n\t\t\tlast_angle = curr_angle\n\n\t\t\tif delta_angle == 0.0:\n\t\t\t\t# print(\"Delta angle is 0, breaking out of turning loop..\")\n\t\t\t\tbreak\n\n\t\tself.articulator_pub.publish(self.no_turn_val) # stop turning once goal angle is reached.\n\n\t\t# if 
self.emergency_stop:\n\t\t# \tprint(\"Emergency stop from RF remote received, stopping turning routine..\")\n\n\t\treturn", "def turn(self, angle):\n self.logger.debug(\"turn \" + str(angle))", "def _go_around(self, angle, dist):\n ignore = Obstacle.IS_SONAR\n if self.avoid_targets is True:\n ignore |= Obstacle.TAG_TARGET\n elif self.avoid_home is True:\n # Need to ignore both for this because target tags are likely to\n # be in view inside the home nest.\n ignore |= Obstacle.TAG_TARGET | Obstacle.TAG_HOME\n\n cur_heading = self.swarmie.get_odom_location().get_pose().theta\n turn_result = self.swarmie.set_heading(\n cur_heading + angle,\n ignore=ignore,\n throw=False\n )\n drive_result = self.swarmie.drive(dist,\n ignore=Obstacle.SONAR_BLOCK,\n throw=False)\n\n return turn_result, drive_result", "def go_to_angle(user_theta):\n global rate\n theta_new = user_theta - theta\n if theta_new > 0:\n # Left\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = 0.4\n pub.publish(speed)\n rate.sleep()\n else:\n # Take a Right\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = - 0.4\n pub.publish(speed)\n rate.sleep()\n speed.linear.x = 0\n speed.angular.z = 0\n pub.publish(speed)", "def adjust(self, turnDeg): \n if abs(turnDeg) > self.MAX_TURN_PER_CYCLE:\n if (turnDeg < 0):\n self.cmdRotateTo(-self.MAX_TURN_PER_CYCLE)\n else:\n self.cmdRotateTo(self.MAX_TURN_PER_CYCLE)\n else:\n self.cmdRotateTo(turnDeg)", "def turn_degrees(self, degrees_to_turn, turn_speed_sp):\n if degrees_to_turn > 0:\n degrees_through = degrees_to_turn * 4.4375\n self.left_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=-degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.right_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n if degrees_to_turn < 0:\n degrees_through = degrees_to_turn * 4.4375\n self.left_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=-degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.right_motor.run_to_rel_pos(speed_sp=-turn_speed_sp,\n position_sp=degrees_through,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)", "def gravity_turn(mission):\n vessel = mission.conn.space_center.active_vessel\n\n apoapsis = vessel.orbit.apoapsis_altitude\n altitude = vessel.flight().mean_altitude\n apo_time = vessel.orbit.time_to_apoapsis\n per_time = vessel.orbit.time_to_periapsis\n target_altitude = mission.parameters.get('target_altitude', 100000)\n turn_end_alt = mission.parameters.get('turn_end_alt', target_altitude * 0.6)\n turn_start_alt = mission.parameters.get('turn_start_alt', 1000)\n min_pitch = mission.parameters.get('min_pitch', 10)\n target_apt = mission.parameters.get('target_apt', 40)\n max_autostage = mission.parameters.get('max_autostage', 0)\n\n if mission.current_step[\"first_call\"]:\n mission.parameters[\"pid\"] = PID(0.2, 0.01, 0.1, 0.1, 1)\n\n if apoapsis > target_altitude:\n del mission.parameters[\"pid\"]\n vessel.control.throttle = 0\n mission.next('coast_to_space')\n return\n\n if altitude > vessel.orbit.body.atmosphere_depth:\n mission.next('burn_to_apo')\n return\n\n if vessel.flight().static_pressure < 100:\n target_apt = 60.0\n mission.parameters[\"target_apt\"] = target_apt\n\n if len(find_all_fairings(vessel)) > 0 and not vessel.available_thrust:\n drop_fairings(vessel)\n\n auto_stage(vessel, max_autostage)\n\n 
frac_den = turn_end_alt - turn_start_alt\n frac_num = altitude - turn_start_alt\n turn_angle = 90 * frac_num / frac_den\n target_pitch = max(min_pitch, 90 - turn_angle)\n vessel.auto_pilot.target_pitch_and_heading(target_pitch, 90)\n mission.parameters[\"target_pitch\"] = target_pitch\n\n if per_time < apo_time:\n new_thr = 1\n else:\n new_thr = mission.parameters[\"pid\"].seek(target_apt, apo_time, mission.ut())\n\n vessel.control.throttle = new_thr", "def cutDownAngle_def(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(min(raySortie, diff.norm - rayInter), 20.)\n position += diff\n return goTo(state,position)", "def my_turn_in_place(robot, angle, speed):\n\t# ####\n\t# TODO: Implement your version of a rotating in place function using the\n\t# robot.drive_wheels() function.\n\t# ####\n\tnormalizedAngle = angle % 360\n\tturnLeft = normalizedAngle <= 180\n\tinnerAngle = normalizedAngle if turnLeft else 360 - normalizedAngle\n\n\tdist = get_distance_between_wheels() * math.pi * (innerAngle/360.0)\n\ttimeToWait = dist / (speed * 1.0)\n\t\n\tturnLeftTransformation = -1 if turnLeft else 1\n\trobot.drive_wheels(turnLeftTransformation * speed, -1 * turnLeftTransformation * speed, duration=timeToWait)\n\t# time.sleep(timeToWait)\n\trobot.drive_wheels(0, 0)\n\trobot.stop_all_motors()", "def cutDownAngle_gk(state, raySortie):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = raySortie\n position += diff\n return goTo(state,position)", "def adjust_starting_position(self, direction):\n\n direction = 1 if direction in [\"left\", 1] else -1\n\n self.angle = direction * 25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = 0\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = direction * -25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)", "def turn_round(self, agent, param):\n return agent.turn(param, self.bet_hist, self.pot)", "def turned(self,angle: \"radians to turn\") -> Position:\n return Position(self.x, self.y, self.facing + angle)", "def my_go_to_pose1(robot, x, y, angle_z):\n # Assuming positive y is to the robot's left and positive x is the direction the robot is already facing\n hypotenuse = numpy.sqrt(x*x + y*y)\n angle_offset_of_target_point = numpy.arcsin(y/hypotenuse)*180/math.pi\n my_turn_in_place(robot, angle_offset_of_target_point , 30)\n my_drive_straight(robot, hypotenuse, 50)\n my_turn_in_place(robot, angle_z-angle_offset_of_target_point, 30)\n time.sleep(1)", "def u_turn(self, direction, diameter_in):\n \n# pdb.set_trace()\n # Calculate radius of turn for the inside wheel.\n r_in = diameter_in / 2\n\n # Outside radius is 20 inches from inside radius.\n r_out = r_in + MuleBot.WHEEL_BASE_LENGTH\n \n # Outside travel distance\n travel = r_out * 3.14159\n travel_revolutions = travel / MuleBot.CIRCUM_IN\n \n r_ratio = r_out / r_in\n #r_ratio_half = r_ratio / 2\n\n speed_multiplier = MuleBot.MAX_RPM / r_ratio\n\n outside_rpm = r_ratio * speed_multiplier\n inside_rpm = speed_multiplier\n \n \n # \n # minutes at outside_rpm\n minutes = travel_revolutions / outside_rpm\n seconds = minutes * MuleBot.SECONDS_PER_MINUTE\n \n # Something isn't quite perfect.\n if direction == 'left':\n if diameter_in < 25:\n seconds -= 1\n else:\n seconds -= 2\n 
else:\n if diameter_in < 25:\n seconds += 1\n else:\n seconds += 2\n\n if direction == 'left':\n v_l = self.rpm_to_rps(inside_rpm)\n v_r = self.rpm_to_rps(outside_rpm)\n else:\n v_r = self.rpm_to_rps(inside_rpm)\n v_l = self.rpm_to_rps(outside_rpm)\n\n #print(\"2inside: rpm: \", inside_rpm)\n #print(\"2outside: rpm: \", outside_rpm)\n \n #print(\"2.1: v_l: \", v_l)\n #print(\"2.1: v_r: \", v_r)\n\n # Set wheel drive rates.\n self.set_wheel_drive_rates(v_l, v_r)\n\n # Sleep during the turn.\n time.sleep(seconds)\n\n # Stop\n self.stop()\n \n # Move forward 24 inches.\n self.forward(24)", "def TurnRobot(r, i, hdg_tgt, precision):\n # Continue refining the angle until we're under the req. precision\n while abs(CalculateCompassDifference(i.GetHeading(), hdg_tgt)) > precision:\n delta = CalculateCompassDifference(hdg_tgt, i.GetHeading())\n\n if delta > 0:\n # Clockwise turn\n r.driveDirect(1, -1)\n\n # Wait until the turn has finished.\n while abs(CalculateCompassDifference(hdg_tgt, i.GetHeading())) > precision:\n time.sleep(0.001)\n elif delta < 0:\n # Counter-clockwise turn\n r.driveDirect(-1, 1)\n\n # Wait until the turn has finished.\n while abs(CalculateCompassDifference(hdg_tgt, i.GetHeading())) > precision:\n time.sleep(0.001)\n \n # Stop and regauge\n r.stop()\n time.sleep(0.25)\n \n return CalculateCompassDifference(hdg_tgt, i.GetHeading())", "def left(self, angle: Degrees):\n prev = self.angle\n self.angle = self.angle - angle\n if self.angle < 0:\n self.angle += 360.0", "def navToPose(goal):\n #compute angle required to make straight-line move to desired pose\n global xPosition\n global yPosition\n global theta\n #capture desired x and y positions\n desiredY = goal.pose.position.y\n desiredX = goal.pose.position.x\n #capture desired angle\n quat = goal.pose.orientation\n q = [quat.x, quat.y, quat.z, quat.w]\n roll, pitch, yaw = euler_from_quaternion(q)\n desiredT = yaw * (180.0/math.pi)\n #compute distance to target\n distance = math.sqrt(math.pow((desiredX - xPosition), 2) + math.pow((desiredY - yPosition), 2))\n adjustedX = goal.pose.position.x - xPosition\n adjustedY = goal.pose.position.y - yPosition\n print goal.pose.position.x, goal.pose.position.y\n print xPosition, yPosition\n print adjustedX, adjustedY\n #compute initial turn amount\n initialTurn = (math.atan2(adjustedY, adjustedX) * (180 / math.pi)) - theta\n\n print \"moving from (\" + str(xPosition) + \", \" + str(yPosition) + \") @ \" + str(theta) + \" degrees\"\n print \"moving to (\" + str(desiredX) + \", \" + str(desiredY) + \") @ \" + str(desiredT) + \" degrees\"\n print \"distance: \" + str(distance) + \", initial turn: \" + str(initialTurn)\n rotateDegrees(initialTurn)\n driveSmooth(0.25, distance)\n rospy.sleep(2)\n finalTurn = desiredT - theta\n rotateDegrees(finalTurn)", "def cutDownAngle(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(raySortie, diff.norm - rayInter)\n position += diff\n return goTo(state,position)", "def do_left_turn(robot_name):\n global current_direction_index\n\n current_direction_index -= 1\n if current_direction_index < 0:\n current_direction_index = 3\n\n return True, ' > '+robot_name+' turned left.'", "def turn(self, angular_distance, speed=0.5):\n while (self._last_odom_msg == None):\n rospy.sleep(1.0)\n start = copy.deepcopy(self._last_odom_msg.pose.pose.orientation)\n curr_yaw = self.quaternion_to_yaw(start)\n rate = rospy.Rate(10)\n direction = -1 if (angular_distance < 0) else 1\n angular_distance = angular_distance % (2 
* math.pi)\n goal_angle = curr_yaw + angular_distance\n goalPos = self.rad_to_coor(goal_angle)\n # TODO: CONDITION should check if the robot has rotated the desired amount\n # TODO: Be sure to handle the case where the desired amount is negative!\n curPos = self.rad_to_coor(curr_yaw) #self.quaternion_to_yaw(self._last_odom_msg.pose.pose.orientation)\n while not self.reached_goal_state(curPos, goalPos):#distance_to_goal(curr_yaw, goal_yaw, direction) > 0:\n # TODO: you will probably need to do some math in this loop to check the CONDITION\n self.move(0, direction * speed)\n curr_yaw = self.quaternion_to_yaw(self._last_odom_msg.pose.pose.orientation)\n curPos = self.rad_to_coor(curr_yaw)\n rate.sleep()", "def rotate_waypoint(self, direction: str, argument: int):\n if direction == \"R\":\n angle = radians(argument)\n else:\n angle = -1 * radians(argument)\n y = self.waypoint_vector[0]\n x = self.waypoint_vector[1]\n self.waypoint_vector[0] = int(round(x * sin(angle) + y * cos(angle)))\n self.waypoint_vector[1] = int(round(x * cos(angle) - y * sin(angle)))", "def turn(self, tank_angle, target_angle):\n angle_diff = periodic_difference_of_angles(tank_angle, target_angle)\n if ((angle_diff + 2 * math.pi) % 2\n * math.pi >= math.pi and abs(angle_diff) > MIN_ANGLE_DIF):\n self.tank.stop_moving()\n self.tank.turn_left()\n elif ((angle_diff + 2 * math.pi) % 2 * math.pi\n < math.pi and abs(angle_diff) > MIN_ANGLE_DIF):\n self.tank.stop_moving()\n self.tank.turn_right()", "def abs_angle(self, angle):\n steps = int(angle / 360 * self.steps_per_rev)\n steps -= self.current_position % self.steps_per_rev\n self.steps(steps)", "def turn(robot, alpha=0.524): # 0.524 rad = 30 degrees\n\n journey = Journey(robot, angle=alpha)\n journey.start()\n robot.position.turn(alpha)\n sleep(0.5)", "def randomWalk(t, turns, distance=20):\n for x in range(turns):\n if x % 2 == 0:\n t.left(random.randint(-180, 180))\n else:\n t.right(random.randint(-180, 180))\n t.forward(random.randint(1,distance))\n sleep(10)", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1" ]
[ "0.7420052", "0.70118314", "0.67038393", "0.6579593", "0.6309404", "0.6147523", "0.6123622", "0.61167604", "0.6110599", "0.6072676", "0.6042288", "0.60363406", "0.59952015", "0.59768206", "0.5973986", "0.59382665", "0.59378344", "0.59349597", "0.5929142", "0.59034985", "0.5872981", "0.5867371", "0.5842684", "0.58425194", "0.5810758", "0.5797589", "0.57944334", "0.57896066", "0.5786857", "0.5773941" ]
0.7022378
1
Calls linear_move. If no parameter is given, defaults to default_dist.
def forward(self, param): if param: self.linear_move(param * .3048) else: self.linear_move(riu.default_dist * .3048)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_move(self, initial_position, final_position):\n if any(initial_position - final_position):\n # The desired position is not the actual position (would make a 'divide by zero' error otherwise)\n\n # Compute directional vector\n dir_vector = final_position - initial_position\n\n # Divide directional vector as a series of vector of norm 10um\n step_vector = 15 * dir_vector/np.linalg.norm(dir_vector)\n\n # Number of sub-directional vector to make\n nb_step = np.linalg.norm(dir_vector) / 15.\n\n # Moving the arm\n for step in range(1, int(nb_step)+1):\n intermediate_position = step * self.inv_mat * step_vector\n self.arm.absolute_move_group(self.inv_mat*initial_position + intermediate_position, [0, 1, 2])\n time.sleep(0.1)\n\n # make final move to desired position\n self.arm.absolute_move_group(self.inv_mat*final_position, [0, 1, 2])\n pass", "def linear_move(self, dist):\n\t\tglobal estop_flag, move_state\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\thalfway_flag = False\n\t\t\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\tcurrent_x = start_x\n\t\tcurrent_y = start_y\n\t\tcurrent_z = start_z\n\t\t#While the distance travelled is less than target distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the emergency stop flag is set, if so, break the current loop and reset velocity\t\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#If the distance goal is negative, move backward\n\t\t\t\tif dist < 0:\n\t\t\t\t\t#Send negative velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\t#Send positive velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\t\t\t\n\t\t#previously had while, finally block -> illegal syntax in python. 
Just moved to outside loop.\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)", "def _move(self, linear, angluar):\n vel_msg = Twist()\n\t\t# Linear velocity in the x-axis.\n vel_msg.linear.x = linear\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n\t\t# Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = angluar\n self.velocity_publisher.publish(vel_msg)\n self.rate.sleep()", "def linear_track(self, dist):\n\t\tglobal estop_flag, move_state\n\n\t\t#Disable timer interrupt, reset halfway flag, set target distance\n\t\tsignal.alarm(0) \n\t\thalfway_flag = False\n\n\t\t#Set starting position\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\t#Set current position initially to start position\n\t\tcurrent_x, current_y, current_z = start_x, start_y, start_z\n\t\t#Check if the distance travelled is greater than the goal distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the estop flag is set, if so, kill movement\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\tif dist < 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)", "def move(x,y):\r\n pass", "def move(source, dest, speed=0):\n norm = normalise(source, dest)\n new_pos = (source[0] + norm[0], source[1] + norm[1])\n return new_pos", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def move(self, twist: Optional[Twist] = None):\n if twist is None:\n left = right = 0\n self.navigation_goal = None\n else:\n linear = np.clip(twist.linear.x, -1, 1)\n angular = np.clip(twist.angular.x, -1, 1)\n left, right = (linear - angular) / 2, (linear + angular) / 2\n # # always 
give a robot the full velocity at least on one side\n # if (greater := max(abs(left), abs(right))) > 0:\n # left, right = left / greater, right / greater\n\n self.locomotion_lock.acquire()\n self.v_left = SPEEDUP * left\n self.v_right = SPEEDUP * right\n self.locomotion_lock.release()", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def move_straight(robot, dist):\n journey = Journey(robot, distance=dist)\n journey.start()\n robot.position.move(dist)\n sleep(0.5)", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def do_move(self, rel=True):\n cmd = self.MGMSG_MOT_MOVE_ABSOLUTE\n if rel:\n cmd = self.MGMSG_MOT_MOVE_RELATIVE\n self.__send_short(cmd, self.__chan, 0x00)", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def backward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(-1 * param * .3048)\n\t\telse:\n\t\t\tself.linear_move(-1 * riu.default_dist * .3048)", "def move(self, coordinates, direction):\n pass", "def move(*args, absolute: bool=True, componentOffset: bool=True, componentSpace: bool=True,\n constrainAlongNormal: bool=True, deletePriorHistory: bool=True, localSpace: bool=True,\n moveX: bool=True, moveXY: bool=True, moveXYZ: bool=True, moveXZ: bool=True, moveY:\n bool=True, moveYZ: bool=True, moveZ: bool=True, objectSpace: bool=True, orientJoint:\n AnyStr=\"\", parameter: bool=True, preserveChildPosition: bool=False,\n preserveGeometryPosition: bool=False, preserveUV: bool=False, reflection: bool=True,\n reflectionAboutBBox: bool=True, reflectionAboutOrigin: bool=True, reflectionAboutX:\n bool=True, reflectionAboutY: bool=True, reflectionAboutZ: bool=True,\n reflectionTolerance: float=0.0, relative: bool=True, rotatePivotRelative: bool=True,\n scalePivotRelative: bool=True, secondaryAxisOrient: AnyStr=\"\", symNegative: bool=True,\n worldSpace: bool=True, worldSpaceDistance: bool=True, xformConstraint: AnyStr=\"\",\n **kwargs)->None:\n pass", "def drive(self, distance, linear_speed):\n current_pose = [self.px, self.py, self.pth]\n \tinitial_pose = current_pose\n # final pose is distance to be moved by the robot in the x direction\n \tdistance_traveled = 0\n \ttolerance = 0.01\n\n self.send_speed(linear_speed, 0.0)\n \twhile abs(distance-distance_traveled) > tolerance:\n current_pose = [self.px, self.py, self.pth]\n distance_traveled = math.sqrt((current_pose[0]-initial_pose[0])*(current_pose[0]-initial_pose[0])+(current_pose[1]-initial_pose[1])*(current_pose[1]-initial_pose[1]))\n #print(final_pose[0]-current_pose[0])\n \tself.send_speed(0.0,0.0)", "def DoMove(position, move):\n return position - move", "def move(self, move):\n raise NotImplementedError()", "def move(self, linear_speed, angular_speed):\n twist = Twist()\n twist.linear.x = linear_speed\n twist.angular.z = angular_speed\n self.pub.publish(twist)", "def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()", "async def move_to(self, distance: float, speed: float) -> None:\n time = self.time_for_distance(distance, speed)\n await self.rmotor.run(-speed, time)\n await self.lmotor.run(speed, time)", "def _move(self, dx, dy):\n pass # must override in subclass", "def move(self, x, y):\n self.x = x\n self.y = y\n self.call('move', x, y)", "def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() 
* self.drag # apply drag to direction", "def move(): #py:move\n RUR._move_()", "def movePoseTotal(self, pose=MyPose(), linear=False):\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(self.ns+\"/panda_link0\", \"map\", now, rospy.Duration(4.0))\n (pos, rot) = self.listener.lookupTransform(self.ns+\"/panda_link0\", \"map\", now)\n except: # ExtrapolationException:\n self.syncTime.publish(std_msg.Bool(True))\n time.sleep(0.5)\n now = rospy.Time.now()\n self.listener.waitForTransform(self.ns+\"/panda_link0\", \"map\", now, rospy.Duration(4.0))\n (pos, rot) = self.listener.lookupTransform(self.ns+\"/panda_link0\", \"map\", now)\n \n poseRel = MyPose(tuple(pos), tuple(rot))\n poseRel = pose-poseRel\n \n if linear:\n self.movePoseLin(poseRel)\n else:\n self.movePose(poseRel)", "def move_global(self, xyz):\n self.position += xyz", "def choose_move(self):\n return 0", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)" ]
[ "0.6693261", "0.66415036", "0.65412503", "0.6132586", "0.6122272", "0.60313886", "0.5981754", "0.59595865", "0.59388167", "0.58818024", "0.5879753", "0.58745044", "0.58463347", "0.5828148", "0.5824566", "0.58189654", "0.57764554", "0.5741105", "0.5720051", "0.5718488", "0.5692794", "0.5692753", "0.56545836", "0.5592447", "0.5591775", "0.5579371", "0.5560207", "0.55597234", "0.555166", "0.55274487" ]
0.7202606
0
Checks the tracking variable updated by the tracker callback. If no correction is needed, sends a linear twist message. If correction is needed, sends a left or right angular twist as appropriate. Acquires a lock on the move state to update its position. Checks for estop every cycle. Disables ready messages for the duration of the movement. Stops moving once the absolute distance from the start equals the given distance.
def linear_track(self, dist): global estop_flag, move_state #Disable timer interrupt, reset halfway flag, set target distance signal.alarm(0) halfway_flag = False #Set starting position with self.move_state_lock: start_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z'] #Set current position initially to start position current_x, current_y, current_z = start_x, start_y, start_z #Check if the distance travelled is greater than the goal distance while math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist): #Check if the estop flag is set, if so, kill movement if estop_flag: self.publisher.publish(Mover.stop_msg) else: twist_msg = Twist() if dist < 0: if self.correction == riu.no_correction: twist_msg.linear.x = -1 * riu.move_rate else: twist_msg.linear.x = -1 * riu.move_rate/2 if self.correction == "left": twist_msg.angular.z = -1 * riu.turn_rate/2 elif self.correction == "right": twist_msg.angular.z = riu.turn_rate/2 #If distance goal is positive, move forward elif dist > 0: if self.correction == riu.no_correction: twist_msg.linear.x = riu.move_rate else: twist_msg.linear.x = riu.move_rate/2 if self.correction == "left": twist_msg.angular.z = riu.turn_rate/2 elif self.correction == "right": twist_msg.angular.z = -1 * riu.turn_rate/2 self.publisher.publish(twist_msg) #Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication if (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2 and not halfway_flag): halfway_flag = True self.status_pub.publish(String("half")) #update current_x, current_y, and current_z (using local variables to be thread safe) with self.move_state_lock: current_x = move_state['x'] current_y = move_state['y'] current_z = move_state['z'] rospy.sleep(.2) self.publisher.publish(Mover.stop_msg) self.status_pub.publish(String("done")) signal.alarm(Mover.ready_message_interval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_move(self, dist):\n\t\tglobal estop_flag, move_state\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\thalfway_flag = False\n\t\t\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\tcurrent_x = start_x\n\t\tcurrent_y = start_y\n\t\tcurrent_z = start_z\n\t\t#While the distance travelled is less than target distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the emergency stop flag is set, if so, break the current loop and reset velocity\t\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#If the distance goal is negative, move backward\n\t\t\t\tif dist < 0:\n\t\t\t\t\t#Send negative velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\t#Send positive velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\t\t\t\n\t\t#previously had while, finally block -> illegal syntax in python. 
Just moved to outside loop.\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)", "def __check_move(self):\n move = self.communications.get_move()\n if move is not None and move in self.bot.movements:\n self.communications.set_status(\"Moving Bot {}\".format(move))\n self.make_move(move)\n\n self.communications.send_proximity_data(self.proximity_sensors.read_sensors())", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def wait_untel_pos_eq(target_pos):\n global joints\n TriggerSimualtion()\n err = abs(np.array(target_pos) - np.array(joints))\n global err_old\n global position_geted\n while (err != err_old).all() or not position_geted:\n global err_old\n global position_geted\n global joints\n err_old = err\n TriggerSimualtion()\n #sleep(0.1)\n position_geted=False", "def checkMotion(self):\n res = 0\n while(self.ser.inWaiting() > 0):\n res = self.ser.readline().strip()\n\n try:\n if self.state == 1 and time.time() - self.last_move > self.config[\"keep_on_time\"]:\n self.execOff()\n\n if res == \"1\":\n self.last_move = time.time()\n\n if res == \"1\" and self.state == 0:\n self.execOn()\n except Exception as e:\n self.logger.error(e)", "def drive(self, distance, linear_speed):\n current_pose = [self.px, self.py, self.pth]\n \tinitial_pose = current_pose\n # final pose is distance to be moved by the robot in the x direction\n \tdistance_traveled = 0\n \ttolerance = 0.01\n\n self.send_speed(linear_speed, 0.0)\n \twhile abs(distance-distance_traveled) > tolerance:\n current_pose = [self.px, self.py, self.pth]\n distance_traveled = math.sqrt((current_pose[0]-initial_pose[0])*(current_pose[0]-initial_pose[0])+(current_pose[1]-initial_pose[1])*(current_pose[1]-initial_pose[1]))\n #print(final_pose[0]-current_pose[0])\n \tself.send_speed(0.0,0.0)", "async def movement_tick(self):\n self.movement_progress += self.sub.power.get_power(\"engines\")\n threshold = get_square(self.x, self.y).difficulty()\n if \"blessing\" in self.sub.upgrades.keywords:\n # Bound difficulty above by four (normal waters)\n threshold = min(4, threshold)\n if self.movement_progress >= threshold:\n self.movement_progress -= threshold\n direction = self.direction # Direction can change as result of movement.\n message = await self.move()\n move_status = (\n f\"Moved **{self.sub.name()}** in direction **{direction.upper()}**!\\n\"\n f\"**{self.sub.name()}** is now at position 
**{self.get_position()}**.\"\n )\n\n # Do all the puzzles stuff.\n await self.sub.puzzles.movement_tick()\n\n # Cancel trades, if necessary.\n trade_messages = self.sub.inventory.timeout_trade()\n\n # Finally, return our movement.\n if message:\n return f\"{message}\\n{move_status}\", trade_messages\n return move_status, trade_messages\n return None, {}", "def update_position(steps):\n\n global position_x, position_y\n new_x = position_x\n new_y = position_y\n\n if directions[current_direction_index] == 'forward':\n new_y = new_y + steps\n elif directions[current_direction_index] == 'right':\n new_x = new_x + steps\n elif directions[current_direction_index] == 'back':\n new_y = new_y - steps\n elif directions[current_direction_index] == 'left':\n new_x = new_x - steps\n\n if is_position_allowed(new_x, new_y):\n position_x = new_x\n position_y = new_y\n return True\n return False", "def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)", "def check_wheel_move_before_feedback(data, 
**_):\n # Get tuple of wheel times and positions within 100ms of feedback\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=data[\"feedback_times\"] - 0.05,\n end=data[\"feedback_times\"] + 0.05,\n )\n metric = np.zeros_like(data[\"feedback_times\"])\n # For each trial find the displacement\n for i, trial in enumerate(traces):\n pos = trial[1]\n if pos.size > 1:\n metric[i] = pos[-1] - pos[0]\n\n # except no-go trials\n metric[data[\"choice\"] == 0] = np.nan # NaN = trial ignored for this check\n nans = np.isnan(metric)\n passed = np.zeros_like(metric) * np.nan\n\n passed[~nans] = (metric[~nans] != 0).astype(float)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed", "def left(self, param):\n\t\tglobal estop_flag, move_state\n\t\t#If input angle is zero, set angle to default\n\t\tif param:\n\t\t\tangle = param\n\t\telse:\n\t\t\tangle = riu.default_angle\n\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\t#safely grab current yaw\n\t\twith self.move_state_lock:\n\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t#Set goal to yaw+angle. Add 360 then mod to account for negative angles but avoid going over 360\n\t\tgoal = (current_yaw + angle) % 360\n\t\thalf_goal = (current_yaw + angle/2) % 360\n\t\tif self.angle_lock:\n\t\t\tif goal >= 315 and goal < 45:\n\t\t\t\tgoal = self.zeroed_angle\n\t\t\telif goal >= 45 and goal < 135:\n\t\t\t\tgoal = self.zeroed_angle + 90\n\t\t\telif goal >= 135 and goal < 225:\n\t\t\t\tgoal = self.zeroed_angle + 180\n\t\t\telif goal >= 225 and goal < 315:\n\t\t\t\tgoal = self.zeroed_angle + 270\n\t\tgoal = goal % 360\n\t\thalf_goal = (current_yaw + angle/2) % 360\n\t\thalfway_flag = False #used to flag if we've already sent out a halfway message\n\t\t#Anonymous function that calculates the current counterclockwise distance to the goal\n\t\tchkdist = lambda pos, goal: round(goal - pos + 360 * (goal < pos), 1)\n\t\t#Gets current distance and initially sets previous distance = distance\n\t\tdistance = chkdist(current_yaw, goal)\n\t\tprev_dist = distance\n\t\t\"\"\"Continues to move while absolute distance is not within angular_error and counterclockwise\n\t\tdistance is not increasing. NOTE: absolute distance is the shortest distance in either direction,\n\t\twhile counterclockwise distance is the distance using only counterclockwise movement.\n\t\tThe angular_error condition was added because the movements tended to end within the first few \n\t\tcycles due to some float error. 
With the error condition, the movement can only end when inside\n\t\tat least the general area of the goal.\"\"\"\n\t\twhile distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error:\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#Construct and publish left turn message\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\ttwist_msg.angular.z = riu.turn_rate\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance to goal is less than half the initial distance, publish the half done message\n\t\t\t\tif distance <= half_goal and not halfway_flag:\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#Update current position\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t\t\t#Set previous distance, then update distance based on new position\n\t\t\t\tprev_dist = distance\n\t\t\t\tdistance = chkdist(current_yaw, goal)\n\t\t\trospy.sleep(.2)\n\t\t#After loop exit, publish stop message and send done message to cmd_queue\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval) #Restart timer", "def _distanceCheck(self):\n\n # Catches the occasional polling error that occurs with the ultrasonic distance sensor\n try:\n # 3 point averager to smooth out distance data\n dist = self.u.distance\n sleep(0.05)\n dist += self.u.distance\n sleep(0.05)\n dist += self.u.distance\n dist = dist/3\n\n #print(\"Distance check reading: {0:1.3f}\".format(dist))\n\n if( dist <= self.detectDist ):\n if( self.birdHere == 0 ):\n self.statusWrite(\"in\")\n self.birdHere = 1\n\n else:\n if( self.birdHere == 1 ):\n self.statusWrite(\"out\")\n self.birdHere = 0\n\n except RuntimeError:\n pass", "def adjust_starting_position(self, direction):\n\n direction = 1 if direction in [\"left\", 1] else -1\n\n self.angle = direction * 25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = 0\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = direction * -25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)", "def wait_focus_move(self):\n\n import time\n\n T0 = time.time()\n\n finished = False\n aborted = False\n\n while not finished:\n T1 = time.time()\n the_status = self.status\n\n if not the_status['F_move'] or the_status['FD_endStop']:\n finished = True\n\n if 1e3 * (T1 - T0) > self.move_timeout_ms:\n finished = True\n aborted = True\n\n if aborted:\n print('wait_focus_move timed out')\n elif self.verbose:\n print('move complete in {}s'.format(T1 - T0))", "def right(self, param):\n\t\tglobal estop_flag, move_state\n\t\t#If input angle is zero, set angle to default\n\t\tif param:\n\t\t\tangle = param\n\t\telse:\n\t\t\tangle = riu.default_angle\n\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\t#safely grab current yaw\n\t\twith self.move_state_lock:\n\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t#Set goal to yaw+angle. 
Add 360 then mod to account for negative angles but avoid going over 360\n\t\tgoal = (current_yaw - angle + 360) % 360\n\t\tif self.angle_lock:\n\t\t\tif goal >= 315 and goal < 45:\n\t\t\t\tgoal = self.zeroed_angle\n\t\t\telif goal >= 45 and goal < 135:\n\t\t\t\tgoal = self.zeroed_angle + 90\n\t\t\telif goal >= 135 and goal < 225:\n\t\t\t\tgoal = self.zeroed_angle + 180\n\t\t\telif goal >= 225 and goal < 315:\n\t\t\t\tgoal = self.zeroed_angle + 270\n\t\tgoal = goal % 360\n\t\thalf_goal = (current_yaw - angle/2 + 360) % 360\n\t\thalfway_flag = False #used to flag if we've already sent out a halfway message\n\t\t#Anonymous function that calculates the current clockwise distance to the goal\n\t\tchkdist = lambda pos, goal: round(pos - goal + 360 * (goal > pos), 1)\n\t\t#Gets current distance and initially sets previous distance = distance\n\t\tdistance = chkdist(current_yaw, goal)\n\t\tprev_dist = distance\n\t\t\"\"\"Continues to move while absolute distance is not within angular_error and clockwise\n\t\tdistance is not increasing. NOTE: absolute distance is the shortest distance in either direction,\n\t\twhile clockwise distance is the distance using only clockwise movement.\n\t\tThe angular_error condition was added because the movements tended to end within the first few \n\t\tcycles due to some float error. With the error condition, the movement can only end when inside\n\t\tat least the general area of the goal.\"\"\"\n\t\twhile distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error:\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#Build and publish right turn message\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance to goal is less than half the initial distance, publish the half done message\n\t\t\t\tif distance <= half_goal and not halfway_flag:\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#Update current position\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t\t\t#Update previous distance, then update distance based on current position\n\t\t\t\tprev_dist = distance\n\t\t\t\tdistance = chkdist(current_yaw, goal)\n\t\t\trospy.sleep(.2)\n\t\t#After loop end, send stop message and send done message to cmd_queue\t\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval) #Restart timer", "def wait_for_fixation_start(self):\n\t\t\n\t\t# # # # #\n\t\t# EyeTribe method\n\n\t\tif self.eventdetection == 'native':\n\t\t\t\n\t\t\t# print warning, since EyeTribe does not have a fixation start\n\t\t\t# detection built into their API (only ending)\n\t\t\t\n\t\t\tprint(\"WARNING! 
'native' event detection has been selected, \\\n\t\t\t\tbut EyeTribe does not offer fixation detection; \\\n\t\t\t\tPyGaze algorithm will be used\")\n\t\t\t\n\t\t\t\n\t\t# # # # #\n\t\t# PyGaze method\n\t\t\n\t\t# function assumes a 'fixation' has started when gaze position\n\t\t# remains reasonably stable for self.fixtimetresh\n\t\t\n\t\t# get starting position\n\t\tspos = self.sample()\n\t\twhile not self.is_valid_sample(spos):\n\t\t\tspos = self.sample()\n\t\t\n\t\t# get starting time\n\t\tt0 = clock.get_time()\n\n\t\t# wait for reasonably stable position\n\t\tmoving = True\n\t\twhile moving:\n\t\t\t# get new sample\n\t\t\tnpos = self.sample()\n\t\t\t# check if sample is valid\n\t\t\tif self.is_valid_sample(npos):\n\t\t\t\t# check if new sample is too far from starting position\n\t\t\t\tif (npos[0]-spos[0])**2 + (npos[1]-spos[1])**2 > self.pxfixtresh**2: # Pythagoras\n\t\t\t\t\t# if not, reset starting position and time\n\t\t\t\t\tspos = copy.copy(npos)\n\t\t\t\t\tt0 = clock.get_time()\n\t\t\t\t# if new sample is close to starting sample\n\t\t\t\telse:\n\t\t\t\t\t# get timestamp\n\t\t\t\t\tt1 = clock.get_time()\n\t\t\t\t\t# check if fixation time threshold has been surpassed\n\t\t\t\t\tif t1 - t0 >= self.fixtimetresh:\n\t\t\t\t\t\t# return time and starting position\n\t\t\t\t\t\treturn t1, spos", "def handle_start_stop(self, req):\n\n # if turtlebot is moving, stop it\n if self.is_moving:\n self.is_moving = False\n\n\t r = rospy.Rate(2)\n\n cmd_msg = Twist()\n cmd_msg.linear.x = NO_SPEED\n cmd_msg.angular.z = NO_SPEED\n self.cmd_pub.publish(cmd_msg)\n\n r.sleep()\n\n return TriggerResponse(True, \"Robot safely stopped.\")\n\n # if turtlebot is not moving, start it\n else:\n self.is_moving = True\n self.state.reinitialize()\n\n return TriggerResponse(True, \"Robot safely started.\")", "def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - 
start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()", "def waypoints_cb(self, msg):\n t = time.time()\n waypoints = msg.waypoints\n num_wp = len(waypoints)\n\n if self.base_waypoints and self.next_waypoint is not None:\n # Normally we assume that waypoint list doesn't change (or, at least, not\n # in the position where the car is located). If that happens, just handle it.\n if not self.is_same_waypoint(self.base_waypoints[self.next_waypoint],\n waypoints[self.next_waypoint]):\n self.next_waypoint = None # We can't assume previous knowledge of waypoint\n self.base_waypoints = None # Just for debugging. Will be updated later\n rospy.logwarn(\"Base waypoint list changed\")\n else:\n # No change. We could probably return here.\n pass\n\n \"\"\"\n # -- Uncomment for debugging\n # Stamp waypoint index in PoseStamped and TwistStamped headers of internal messages\n for idx in range(len(waypoints)):\n waypoints[idx].pose.header.seq = idx\n waypoints[idx].twist.header.seq = idx\n \"\"\"\n\n self.base_wp_orig_v = [self.get_waypoint_velocity(waypoints, idx) for idx in range(num_wp)]\n\n if debugging and not self.base_waypoints:\n dist = self.distance(waypoints, 0, num_wp-1)\n rospy.loginfo(\"Received: %d waypoints, %.1f m, %.1f m/wp in t=%f\", num_wp, dist, dist/num_wp, time.time()-t)\n\n self.base_waypoints = waypoints\n\n if self.unsubscribe_base_wp:\n self.base_wp_sub.unregister()", "def update_action(self, status, x, y, dist_in):\n print(\"CURRENT STATE:\", self.status)\n print(\"DISTANCE IN IN: \", dist_in)\n\n # Startup state\n if status == 'STARTUP':\n # Determine the states needed for our path\n self.calculate_path()\n\n # Wait for user input to start\n raw_input('Press Enter to continue...')\n self.next_state()\n\n # Drive forward state\n elif status == 'FORWARD':\n # If the current apriltag in view is either the smallbot's right or left tag\n # and the current apriltag's x position is within the camera bounds\n if (x < Constants.MAX_CAM_X_BOUND and x > Constants.MIN_CAM_X_BOUND) and \\\n (self.current_tag == self.left_tag or self.current_tag == self.right_tag):\n self.current_action = \"drive\"\n else:\n # Reset current action for 1 iteration to avoid data overlap\n self.current_action = 'none'\n print(\"DONE DRIVING STRAIGHT---------------------------------\")\n self.next_state()\n\n # Drive backwards state\n elif status == 'BACKWARDS':\n # If the current apriltag in view is either the smallbot's right or left tag\n # and as long as the apriltag's x position is less than or equal to the max camera bound\n if (x <= Constants.MAX_CAM_X_BOUND) and \\\n (self.current_tag == self.left_tag or self.current_tag == self.right_tag):\n self.current_action = \"drivebackwards\"\n else:\n self.next_state()\n\n # Turn right 
state\n elif status == 'TURN_RIGHT':\n # Keep turning right while the smallbot is not done achieving its goal angle aka 90 deg\n if self.is_done_turning() != 'done_turning':\n self.current_action = 'turnright'\n else:\n # Reset current action for 1 iteration to avoid data overlap\n self.current_action = 'none'\n # Capture the current apriltag's distance from the camera after turn\n self.dist_after_turn = dist_in\n print(\"CAPTURED DIST: \", self.dist_after_turn)\n self.next_state()\n\n # Turn left state\n elif status == 'TURN_LEFT':\n print(\"INSIDE TURN LEFT\")\n # Keep turning left while the smallbot is not done achieving its goal angle aka -90 deg\n if self.is_done_turning() != 'done_turning':\n self.current_action = 'turnleft'\n else:\n # Reset current action for 1 iteration to avoid data overlap\n self.current_action = 'none'\n # Capture the current apriltag's distance from the camera after turn\n self.dist_after_turn = dist_in\n print(\"CAPTURED DIST: \", self.dist_after_turn)\n self.next_state()\n\n # Creep forward state\n elif status == 'CREEP_FORWARD':\n print(\"current Y VAL AT Y: \", y)\n # If it sees the back apriltag then keep going straight for the defined TRAVEL_DIST\n if (dist_in < self.dist_after_turn + Constants.FWD_TRAVEL_DIST) and self.current_tag == self.back_tag:\n self.current_action = \"drive\"\n print(\"INSIDE IF STATMT----CURRENT Y VAL: \", y)\n else:\n print(\"----------GONE TO NEXT STATE----------\")\n self.next_state()\n\n # Creep backwards state\n elif status == 'CREEP_BACKWARD':\n print(\"current Y VAL AT Y: \", y)\n # If it sees the back apriltag then keep going backwards for the defined\n # TRAVEL_DIST times the number of times it creeped forward\n if (dist_in > self.dist_after_turn - (\n Constants.FWD_TRAVEL_DIST * self.times_driven_forward)) and self.current_tag == self.back_tag:\n self.current_action = \"drivebackwards\"\n print(\"INSIDE IF STATMT----CURRENT Y VAL: \", y)\n else:\n print(\"----------GONE TO NEXT STATE----------\")\n self.next_state()\n\n # Halt state\n elif status == 'HALT':\n # First stop\n self.current_action = 'stop'\n # Then go to next state\n self.next_state()\n\n # Dump state\n elif status == 'DUMP':\n self.current_action = 'dump'\n self.next_state()\n\n # Stop state\n elif status == 'STOP':\n self.current_action = 'stop'\n\n # Default state\n else:\n self.current_action = 'none'", "def __wait_for_move(self, verbose=False):\r\n res = self.__wait_for([(2, 1), (2, 2), (2, 3)], verbose=verbose)\r\n if res[1] == 2 and verbose:\r\n # message id 2 : Stopped\r\n self.print_msg(\"The motor has been stopped.\")\r\n if res[1] == 3:\r\n # message id 3 : LimitUpdated - reached rev limit\r\n self.print_msg(\"Warning: the motor has reached its revolution limit.\")\r\n self.print_msg(\" - current position: {:0.4f}mm.\"\r\n .format(self.get_position()))\r\n\r\n return res", "def handleUpdateTimer(self):\n self.mustRun(task = self.position,\n ret_signal = self.positionUpdate)", "def start(self):\n global trackWidth\n trackWidth = self.getTrackWidth()\n print(\"track width = \" + str(trackWidth))\n #motors.moveForward(0,2)\n initTheta = self.getTheta(trackWidth)\n motors.pivot(\"left\", 30, 0.25) #spin left for 1 second\n print(\"Moved\")\n newTheta = self.getTheta(trackWidth)\n #Checks if the robot is pointed even further of course or not, corrects for whichever\n if newTheta < initTheta:\n while self.getTheta(trackWidth) >=rads: #Spins while the robot is pointed more than 0.122 rads from straight\n motors.pivot(\"left\", 30, 0.25) #spin left for 
0.25 second\n elif newTheta > initTheta:\n while self.getTheta(trackWidth) >= rads:\n motors.pivot(\"right\", 30, 0.25) #spin right for 0.25 second", "def move(self, pos, relative=False, wait=0, update=False,\n check_limits=True, check_start=3, check_end=True,\n check_problems=True, dial=False, elog=False, silent=False):\n # Check input\n if not self._usable_number(pos):\n errmsg = \"Recieved invalid pos {0} for motor {1} (pv {2})... aborting.\"\n logprint(errmsg.format(pos, self.name, self.pvname), print_screen=True)\n return False\n\n # Apply relative and dial\n here = self.wm()\n if dial and update:\n dial_offset = self.get_par(\"offset\")\n if relative:\n pos += here\n elif dial:\n pos += dial_offset\n if not self.within_limits(pos, pypslog=True):\n return False\n\n # Log move intention\n logmsg = \"moving {0} (pv {1}) to {2}, previous position: {3}\"\n logprint(logmsg.format(self.name, self.pvname, pos, here))\n \n if update and not silent:\n txt = \"Initial position: {}\"\n if dial:\n print txt.format(self.wm_string_dial()) \n else:\n print txt.format(self.wm_string())\n\n # Set up dmov monitor to look for transition 1 -> 0 if applicable\n if check_start:\n self._monitor_move_start(here)\n\n # The important part\n self._move(pos)\n readback = self.get_pvobj(\"readback\")\n\n # Check that we started: wait on dmov 1 -> 0 monitor if hasn't happened\n # If dmov is not available, wait for rbv to move outside of mres\n if check_start:\n if self._usable_number(check_start):\n did_start = self._wait_move_start(check_start)\n else:\n did_start = self._wait_move_start()\n if not did_start:\n self.stop()\n logmsg = \"motor {0} (pv {1}) failed to start\"\n logprint(logmsg.format(self.name, self.pvname), print_screen=True)\n return False\n\n # Watch for problems\n if check_problems:\n self._add_wait_cb(self.check_stall)\n\n # We have to wait if elog\n if elog and not (wait or update):\n wait = True\n\n # We're done if we aren't waiting\n if not (wait or update):\n return True\n\n # Interpret wait timeout\n wait_timeout = -1\n if wait:\n if self._usable_number(wait):\n wait_timeout = wait\n\n # Wait/interrupt block\n if wait or update:\n if update:\n if dial:\n display_offset = dial_offset\n else:\n display_offset = 0\n show_pos = self._update_cb(wait_timeout, display_offset)\n else:\n show_pos = lambda e=None: None\n with CallbackContext(readback, show_pos):\n try:\n if wait_timeout <= 0:\n motion_time = self.estimatedTimeForMotion(abs(here-pos))\n if motion_time is None:\n wait_ok = self.wait(60)\n else:\n wait_ok = self.wait(max(motion_time * 2.0, 60))\n else:\n wait_ok = self.wait(timeout=wait_timeout)\n except KeyboardInterrupt:\n print \"\\rCtrl+c pressed, stopping motor.\"\n return self._move_cleanup(False, elog, here, pos)\n except Exception: # Handle other exceptions cleanly before raise\n self._move_cleanup(False, elog, here, pos)\n show_pos()\n traceback.print_exc()\n raise\n show_pos()\n if not wait_ok:\n return self._move_cleanup(False, elog, here, pos)\n\n # Check that we made it\n if check_end and not self.at_pos(pos):\n logmsg = \"Motor {0} (pv {1}) reached {2} instead of desired pos {3}\"\n logprint(logmsg.format(self.name, self.pvname, self.wm(), pos),\n print_screen=True)\n return self._move_cleanup(False, elog, here, pos)\n\n # If everything went ok, return True\n return self._move_cleanup(True, elog, here, pos)", "def _update_cb(self, timeout, offset=0):\n if timeout <= 0:\n timeout = float(\"inf\")\n start = time.time()\n is_done = [False]\n def cb(e=None):\n if e is None and 
not is_done[0]:\n now = time.time()\n if now-start > timeout:\n is_done[0] = True\n blutil.notice(\"motor position: {1:{0}}\".format(self._prec(), self.wm() - offset))\n return cb", "def _detect_stop(func):\n def wrapper(*args,**kwargs):\n self = args[0]\n self.episode_length -= 1\n if self.episode_length <=0:\n \"\"\"if the episode is end\"\"\"\n self.end = True\n else:\n if self.adsorption:\n \"\"\"just stop moving and wait until the end of episode\"\"\"\n self.state = self.previous_state\n else:\n func(*args,**kwargs)\n self._detect_obstacles()\n\n # func(*args,**kwargs)\n # self._detect_obstacles()\n # if self.adsorption:\n # \"\"\"if this step update is invalid, the point will rebond\"\"\"\n # self.state = self.previous_state\n\n if self.distance <= 0.02:\n \"\"\"if the point reached the boundary around the goal, let it stop and reset the punishment(self.reward)\"\"\"\n self.end = True\n self.reward = 0\n if self.state[0] <0 or self.state[0] > 10 or self.state[1] <0 or self.state[1] > 10:\n # self.end = True\n self.reward = -800\n return np.array(self.state), self.reward, self.end, self.distance\n return wrapper", "def ismoving(self):\n return not self.get_par(\"done_moving\")", "def cb_move(self, event):\n if not self.move_timer.IsRunning():\n self.move_timer.StartOnce(2000)", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1", "def _monitor_move_start(self, start_pos):\n self._move_started = threading.Event()\n queue = Queue.Queue()\n\n dmov = self.get_pvobj(\"done_moving\")\n if dmov.isinitialized:\n def cb(e=None):\n if e is None:\n if not dmov.value:\n self._move_started.set()\n dmov.del_monitor_callback(queue.get())\n id = dmov.add_monitor_callback(cb)\n else:\n rbv = self.get_pvobj(\"readback\")\n res = self.get_par(\"resolution\")\n low = start_pos - res\n high = start_pos + res\n def cb(e=None):\n if e is None:\n if not low < rbv.value < high:\n self._move_started.set()\n rbv.del_monitor_callback(queue.get())\n id = rbv.add_monitor_callback(cb)\n\n queue.put(id)" ]
[ "0.6857079", "0.6112145", "0.5854988", "0.5841018", "0.58113366", "0.57477736", "0.5714076", "0.57138836", "0.5696555", "0.56912374", "0.5585474", "0.5583985", "0.557774", "0.5564837", "0.5528463", "0.55114955", "0.5441816", "0.54202133", "0.5392405", "0.53795177", "0.5377141", "0.5353219", "0.5347171", "0.5336205", "0.5328628", "0.5324685", "0.53244466", "0.5323229", "0.53041995", "0.5302407" ]
0.71479756
0
Moves the robot a distance equal to dist. Checks for estop on each iteration. Publishes a Done message after completion and a Half message when the current distance is equal to half of the goal distance.
def linear_move(self, dist): global estop_flag, move_state signal.alarm(0) #Disable timer interrupt for the duration of the movement halfway_flag = False with self.move_state_lock: start_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z'] current_x = start_x current_y = start_y current_z = start_z #While the distance travelled is less than target distance while math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist): #Check if the emergency stop flag is set, if so, break the current loop and reset velocity if estop_flag: self.publisher.publish(Mover.stop_msg) else: #If the distance goal is negative, move backward if dist < 0: #Send negative velocity twist_msg = Twist() twist_msg.linear.x = -1 * riu.move_rate self.publisher.publish(twist_msg) #If distance goal is positive, move forward elif dist > 0: #Send positive velocity twist_msg = Twist() twist_msg.linear.x = riu.move_rate self.publisher.publish(twist_msg) #Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication if (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2 and not halfway_flag): halfway_flag = True self.status_pub.publish(String("half")) #update current_x, current_y, and current_z (using local variables to be thread safe) with self.move_state_lock: current_x = move_state['x'] current_y = move_state['y'] current_z = move_state['z'] rospy.sleep(.2) #previously had while, finally block -> illegal syntax in python. Just moved to outside loop. self.publisher.publish(Mover.stop_msg) self.status_pub.publish(String("done")) signal.alarm(Mover.ready_message_interval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drive(self, distance, tolerance=0.0, tolerance_step=0.5,\n max_attempts=10, avoid_targets=True, avoid_home=False,\n use_waypoints=True):\n self.cur_loc = self.swarmie.get_odom_location()\n start = self.cur_loc.get_pose()\n\n goal = Point()\n goal.x = start.x + distance * math.cos(start.theta)\n goal.y = start.y + distance * math.sin(start.theta)\n\n return self.drive_to(\n goal,\n tolerance=tolerance,\n tolerance_step=tolerance_step,\n max_attempts=max_attempts,\n avoid_targets=avoid_targets,\n avoid_home=avoid_home,\n use_waypoints=use_waypoints\n )", "def move_straight(robot, dist):\n journey = Journey(robot, distance=dist)\n journey.start()\n robot.position.move(dist)\n sleep(0.5)", "def linear_track(self, dist):\n\t\tglobal estop_flag, move_state\n\n\t\t#Disable timer interrupt, reset halfway flag, set target distance\n\t\tsignal.alarm(0) \n\t\thalfway_flag = False\n\n\t\t#Set starting position\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\t#Set current position initially to start position\n\t\tcurrent_x, current_y, current_z = start_x, start_y, start_z\n\t\t#Check if the distance travelled is greater than the goal distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the estop flag is set, if so, kill movement\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\tif dist < 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)", "def drive(self, distance, linear_speed):\n current_pose = [self.px, self.py, self.pth]\n \tinitial_pose = current_pose\n # final pose is distance to be moved by the robot in the x direction\n \tdistance_traveled = 0\n \ttolerance = 0.01\n\n self.send_speed(linear_speed, 0.0)\n \twhile abs(distance-distance_traveled) > tolerance:\n current_pose = [self.px, self.py, self.pth]\n 
distance_traveled = math.sqrt((current_pose[0]-initial_pose[0])*(current_pose[0]-initial_pose[0])+(current_pose[1]-initial_pose[1])*(current_pose[1]-initial_pose[1]))\n #print(final_pose[0]-current_pose[0])\n \tself.send_speed(0.0,0.0)", "def run(self, distance=1):\n\t\tforwards = Twist()\n\t\tforwards.linear.x = .1\n\t\tstop_command = Twist()\n\t\twhile not rospy.is_shutdown():\n\t\t\tforwards.angular.z = self.doMath(distance)\n\t\t\tself.vizualize_wall()\n\t\t\tself.publisher.publish(forwards)\n\t\t\tself.r.sleep()", "def move2goal(self):\n \n global points, point, point_old, distance_tolerance, trigger, start\n\n goal_pose = Pose()\n\n # Get the input from the user.\n goal_pose.x = points[point][0] # float(input(\"Set your x goal: \"))\n goal_pose.y = points[point][1] # float(input(\"Set your y goal: \"))\n\n vel_msg = Twist()\n\n data = [['nameservers','panel'], ['nameservers','panel']]\n\n file_name2 = \"/home/kmro/wr_ws/src/zad2_package/short_distances/distances-p%d\" % point\n short_distances = open(file_name2, \"w\")\n \n file_name1 = \"/home/kmro/wr_ws/src/zad2_package/distances/distances-p%d\" % point\n all_distances_file = open(file_name1, \"w\")\n\n val = \"dx%d\\t\\t\" % (point-1) + \"dy%d\\t\\t\" % (point-1) + \"dx%d\\t\\t\" % point + \"dy%d\\n\" % point \n short_distances.write(str(val))\n\n val = \"dx\\t\\tdy\"\n for i in range(22):\n val = val + \"\\t\\tdx%d\\t\\t\" % i + \"dy%d\" % i \n all_distances_file.write(str(val))\n\n while self.euclidean_distance(goal_pose) >= distance_tolerance:\n\n # Porportional controller.\n # https://en.wikipedia.org/wiki/Proportional_control\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = self.linear_vel(goal_pose, vel_mult)\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = self.angular_vel(goal_pose, rot_mult)\n\n # Publishing our vel_msg\n self.velocity_publisher.publish(vel_msg)\n\n # Print results to files\n if point_old != point:\n print(\"point = \", point)\n point_old = point\n if point > 0:\n val = \"{:.3f}\\t\".format(points[point-1][0] - self.pose.x)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point-1][1] - self.pose.y)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point][0] - self.pose.x)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point][1] - self.pose.y)\n short_distances.write(str(val))\n # print(val, end=' ')\n if trigger == True:\n smallest_distances.append(((points[point-1][0] - self.pose.x)**2 + (points[point-1][1] - self.pose.y)**2)**0.5)\n trigger = False\n short_distances.write(\"\\n\")\n\n val = \"{:.3f}\\t\".format(goal_pose.x - self.pose.x)\n all_distances_file.write(str(val))\n val = \"{:.3f}\\t\".format(goal_pose.y - self.pose.y)\n all_distances_file.write(str(val))\n for i in range(1,len(points)):\n val = \"{:.3f}\\t\".format(points[i-1][0] - self.pose.x)\n all_distances_file.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[i-1][1] - self.pose.y)\n all_distances_file.write(str(val))\n # print(val, end=' ')\n all_distances_file.write(\"\\n\")\n\n # Publish at the desired rate.\n self.rate.sleep()\n \n short_distances.close()\n all_distances_file.close()\n\n # If it was not the last goal, then move to the second one\n if point < len(points) - 1:\n trigger = True\n point = point + 1\n goal_pose.x = points[point][0]\n goal_pose.y = 
points[point][1]\n vel_msg.linear.x = self.linear_vel(goal_pose, vel_mult)\n vel_msg.angular.z = self.angular_vel(goal_pose, rot_mult)\n self.move2goal()\n # Stopping our robot after the movement is over.\n else:\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0\n suma = 0\n i = 0\n for j in smallest_distances:\n print(\"p%d: \" % i , \"%.3f error\" % j)\n i = i + 1\n print(\"error_sum(22) = %.3f\" % sum(smallest_distances))\n end = time.time()\n print(\"Elapsed time: \", end - start)\n exit()\n \n point = point + 1\n self.velocity_publisher.publish(vel_msg)\n\n # If we press control + C, the node will stop.\n rospy.spin()", "def publish_moved_distance(self):\n rospy.spin()\n \n \n \n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()", "def cozmo_drive_straight(robot, dist, speed):\n robot.drive_straight(distance_mm(dist), speed_mmps(speed)).wait_for_completed()", "def move(self, distance):\n self._go(distance)", "def cozmo_drive_straight(robot, dist, speed):\n\trobot.drive_straight(distance_mm(dist), speed_mmps(speed)).wait_for_completed()", "def wait_until_summit_moved_distance(self, distance):\n rate = rospy.Rate(5)\n while self._summit_mved_distance.data < distance:\n rate.sleep()\n rospy.loginfo(\"Summit has moved disatance=\"+str(self._summit_mved_distance.data))", "def cruise(self):\n while self.dist() > self.SAFE_STOP_DIST:\n time.sleep(.2)\n self.fwd()\n self.stop()", "def drive_to(self, goal, tolerance=0.0, tolerance_step=0.5,\n max_attempts=10, avoid_targets=True, avoid_home=False,\n use_waypoints=True, start_location=None,\n distance_threshold=None):\n print('\\nRequest received')\n self.fail_count = 0\n self.tolerance = tolerance\n\n self.avoid_targets = avoid_targets\n if avoid_targets is True and avoid_home is True:\n avoid_home = False\n self.avoid_home = avoid_home\n\n current_ignore = Obstacle.IS_SONAR\n if self.avoid_targets is True:\n current_ignore |= Obstacle.TAG_TARGET\n elif self.avoid_home is True:\n current_ignore |= Obstacle.TAG_HOME\n\n self.goal.x = goal.x\n self.goal.y = goal.y\n\n self.cur_loc = self.swarmie.get_odom_location()\n self.current_state = Planner.STATE_IDLE\n self.prev_state = Planner.STATE_IDLE\n\n while (not self.cur_loc.at_goal(self.goal,\n Planner.DISTANCE_OK + self.tolerance)\n and self.fail_count < max_attempts):\n\n\n if use_waypoints is True:\n # get new plan and try to drive to first point in it\n point = self._get_next_waypoint(tolerance_step)\n else:\n point = goal\n\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_DRIVE\n # Turn to approximate goal heading while ignoring sonar and tags\n # helps to prevent rover from trying to jump around obstacles\n # before it even starts along its new path\n self.result = self._face_point(\n point,\n ignore=current_ignore ^ Obstacle.IS_SONAR\n )\n\n if self.result == MoveResult.SUCCESS:\n self.result = self.swarmie.drive_to(\n point,\n ignore=Obstacle.SONAR_BLOCK,\n throw=False\n )\n\n if self.result == MoveResult.SUCCESS:\n # Success, we got to our waypoint, or got ourselves out of\n # whatever pickle we were just in.\n # Just get a new plan and drive to next point\n self.fail_count = 0\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_IDLE\n print('Successfully drove to first point in nav plan.')\n\n # otherwise, something went wrong or we found home\n elif self.result == MoveResult.OBSTACLE_HOME:\n self.set_home_locations()\n\n # get around the home tag obstacle\n count = 0\n\n # Give the rover 3 tries to avoid 
any tags nearby before\n # getting a new nav plan. MoveResult.OBSTACLE_SONAR takes\n # priority in the driver code, so it should be safe to continue\n # this loop if the MoveResult is just an OBSTACLE_HOME\n # self.fail_count may exceed limit here, but I'll let it go\n while count < 3 and self.result == MoveResult.OBSTACLE_HOME:\n print('\\nObstacle: Found Home.')\n count += 1\n self.fail_count += 1\n\n detections = self.swarmie.get_latest_targets().detections\n inside_home = self.is_inside_home_ring(detections)\n if inside_home:\n print('\\nGetting out of the home ring!!')\n angle, dist = self.get_angle_and_dist_to_escape_home(\n detections\n )\n self.swarmie.turn(\n angle,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION\n )\n self.result = self.swarmie.drive(\n dist,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION\n )\n\n if self.avoid_home is False:\n # turn back around\n self.swarmie.turn(\n math.pi,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION\n )\n print('Obstacle: Found Home.')\n return MoveResult.OBSTACLE_HOME\n else:\n if self.avoid_home is False:\n print('Obstacle: Found Home.')\n return MoveResult.OBSTACLE_HOME\n\n self.result = self._avoid_tag(id=256,\n ignore=current_ignore)\n\n elif self.result == MoveResult.OBSTACLE_TAG:\n # get around the tag obstacle\n count = 0\n\n # Give the rover 3 tries to avoid any tags nearby before\n # getting a new nav plan. MoveResult.OBSTACLE_SONAR takes\n # priority in the driver code, so it should be safe to continue\n # this loop if the MoveResult is just an OBSTACLE_TAG\n # self.fail_count may exceed limit here, but I'll let it go\n while count < 3 and self.result == MoveResult.OBSTACLE_TAG:\n print('\\nObstacle: Found a Tag.')\n\n if self.avoid_targets is False:\n if not self.sees_home_tag():\n return self.result\n\n count += 1\n self.fail_count += 1\n\n self.result = self._avoid_tag(id=0,\n ignore=current_ignore)\n\n elif self.result == MoveResult.OBSTACLE_SONAR:\n # Check for home and tag obstacles just to be safe, because\n # sonar MoveResults take priority, and would mask a home or\n # target tag in view.\n obstacle = self.swarmie.get_obstacle_condition()\n\n if (obstacle & Obstacle.TAG_HOME == Obstacle.TAG_HOME and\n self.avoid_home is False):\n self.set_home_locations()\n return MoveResult.OBSTACLE_HOME\n\n if (obstacle & Obstacle.TAG_TARGET == Obstacle.TAG_TARGET and\n self.avoid_targets is False):\n return MoveResult.OBSTACLE_TAG\n\n # get around the sonar obstacle\n self.fail_count += 1\n\n print('\\nObstacle: Sonar.')\n left_blocked, center_blocked, right_blocked = \\\n self._check_sonar_obstacles()\n\n if (not left_blocked and\n not center_blocked and not right_blocked):\n print('\\nFake sonar obstacle??')\n pass # 'fake' obstacle?\n\n elif not left_blocked and center_blocked and right_blocked:\n print('Left looks clear, turning left.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_LEFT\n self._go_around(math.pi / 4, 0.7)\n # self.swarmie.drive_to(point, throw=False)\n\n elif left_blocked and center_blocked and not right_blocked:\n print('Right looks clear, turning right.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_RIGHT\n self._go_around(-math.pi / 4, 0.7)\n # self.swarmie.drive_to(point, throw=False)\n\n elif left_blocked and not center_blocked and not right_blocked:\n print('Only left blocked, turning a little right.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_RIGHT\n self._go_around(-math.pi / 6, 0.6)\n # 
self.swarmie.drive_to(point, throw=False)\n\n elif not left_blocked and not center_blocked and right_blocked:\n print('Only right blocked, turning a little left.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_LEFT\n self._go_around(math.pi / 6, 0.6)\n # self.swarmie.drive_to(point, throw=False)\n\n else:\n print('Neither left or right look clear.')\n\n # Only back up if we're far enough away from home for it\n # to be safe. Don't want to back up into the nest!\n if self._is_safe_to_back_up():\n print('Backing up.')\n self.swarmie.drive(\n -0.3,\n ignore=Obstacle.IS_SONAR,\n throw=False\n )\n\n if (self.current_state == Planner.STATE_AVOID_RIGHT or\n self.prev_state == Planner.STATE_AVOID_RIGHT):\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_RIGHT\n self.clear(-math.pi / 4, ignore=current_ignore,\n reset_heading=False)\n self._go_around(-math.pi / 4, 0.75)\n\n else:\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_LEFT\n self.clear(math.pi / 4, ignore=current_ignore,\n reset_heading=False)\n self._go_around(math.pi / 4, 0.75)\n\n elif self.result == MoveResult.PATH_FAIL:\n # shit, hope we can back up if this ever happens\n self.fail_count += 1\n\n print('\\nPath Failure. Backing up.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_REVERSE\n self.swarmie.drive(\n -0.5,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION,\n throw=False\n )\n\n self.cur_loc = self.swarmie.get_odom_location()\n\n if self.fail_count >= max_attempts:\n print('Failed to drive to goal {} times.'.format(\n max_attempts)\n )\n raise PathException(MoveResult.PATH_FAIL)\n\n if start_location is not None:\n current_loc = self.cur_loc.get_pose()\n dist = math.sqrt((start_location.x - current_loc.x) ** 2\n + (start_location.y - current_loc.y) ** 2)\n if dist > distance_threshold:\n raise PathException(MoveResult.PATH_FAIL)\n\n print('Successfully executed nav plan.')\n return MoveResult.SUCCESS", "def sendAgentMovement(self, degree, distance):\r\n print \"SEND & WAIT: AgentMovement\"\r\n waitForFullExec(self, self.sender.sendAgentMovement(degree, distance))", "def go_forward(self, distance, speed=0.1):\n while (self._last_odom_msg == None):\n\t rospy.sleep(1.0)\n start = copy.deepcopy(self._last_odom_msg.pose.pose.position)\n rate = rospy.Rate(10)\n while self.distance_fn(self._last_odom_msg.pose.pose.position, start) < math.fabs(distance):\n direction = -1 if distance < 0 else 1\n self.move(direction * speed, 0)\n rate.sleep()", "def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - 
self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)", "def move(self, distance: int, direction: float, max_steering=np.pi / 2):\n if direction > max_steering:\n direction = max_steering\n if direction < -max_steering:\n direction = -max_steering\n\n if distance < 0.0:\n distance = 0.0\n\n self.total_distance_covered += distance\n\n self.theta = (self.theta + direction) % (2.0 * np.pi)\n self.x = self.x + (np.cos(self.theta) * distance)\n self.y = self.y + (np.sin(self.theta) * distance)", "def move(self, direction, distance):\r\n distance = float(distance)\r\n distance = int(round(distance*100))\r\n return self.send_command('%s %s' % (direction, distance))", "def _go(self, distance):\n ende = self._position + self._orient * distance\n self._goto(ende)", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1", "def move_distance(self, distance, speed=1.0):\n distance = random.normalvariate(distance, self.standard_deviation)\n\n start_point = self.get_position()\n traveled_distance = 0\n while traveled_distance < distance:\n self.forward(speed)\n current_point = self.get_position()\n traveled_distance = math.sqrt(\n math.pow((start_point[0] - current_point[0]), 2) + math.pow((start_point[1] - current_point[1]), 2))\n self.stop()", "def publish_goal(self):\n goal_publisher = rospy.Publisher(self.goal_distance_topic, Float64, queue_size=10)\n while not rospy.is_shutdown() and (goal_publisher.get_num_connections() == 0):\n rospy.sleep(1)\n msg = Float64()\n msg.data = self.distance\n goal_publisher.publish(msg)", "def driveSmooth(speed, 
distance):\n global pose\n\n initialX = pose.pose.position.x\n initialY = pose.pose.position.y\n atTarget = False\n rampSpeed = 0.0\n sleepTime = 0.05\n rampPercentage = 0.3\n step = speed / ((rampPercentage * (distance / speed)) / sleepTime)\n print \"Step size: \" + str(step)\n while (not atTarget and not rospy.is_shutdown()):\n currentX = pose.pose.position.x\n currentY = pose.pose.position.y\n currentDistance = math.sqrt(math.pow((currentX - initialX), 2) + math.pow((currentY - initialY), 2))\n if (currentDistance >= distance):\n atTarget = True\n sendMoveMsg(0, 0)\n else:\n if ((distance - currentDistance) <= distance * rampPercentage and rampSpeed >= 0):\n rampSpeed -= step\n sendMoveMsg(rampSpeed, 0)\n elif ((distance - currentDistance) >= distance * (1.0 - rampPercentage) and rampSpeed <= speed):\n rampSpeed += step\n sendMoveMsg(rampSpeed, 0)\n else:\n sendMoveMsg(speed, 0)\n rospy.sleep(sleepTime)", "def move_right(self, dist):\r\n self.send_command_without_response(f'right {dist}')", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def move_dolly(self, distance: int, direction: int, time: int = None):\n\n self.__do_action(self.motor.move(direction, distance, time))", "def _go_around(self, angle, dist):\n ignore = Obstacle.IS_SONAR\n if self.avoid_targets is True:\n ignore |= Obstacle.TAG_TARGET\n elif self.avoid_home is True:\n # Need to ignore both for this because target tags are likely to\n # be in view inside the home nest.\n ignore |= Obstacle.TAG_TARGET | Obstacle.TAG_HOME\n\n cur_heading = self.swarmie.get_odom_location().get_pose().theta\n turn_result = self.swarmie.set_heading(\n cur_heading + angle,\n ignore=ignore,\n throw=False\n )\n drive_result = self.swarmie.drive(dist,\n ignore=Obstacle.SONAR_BLOCK,\n throw=False)\n\n return turn_result, drive_result", "def driveStraight(self, speed, distance):\n origin = copy.deepcopy(self._current) #hint: use this\n\n\n q = [origin.orientation.x,\n origin.orientation.y,\n origin.orientation.z,\n origin.orientation.w] # quaternion nonsense\n\n xOrigin=self._current.position.x\n yOrigin=self._current.position.y\n atTarget=False\n\n move_msg=Twist()\n move_msg.linear.x=speed\n move_msg.angular.z=0\n\n stop_msg=Twist()\n stop_msg.linear.x=0\n stop_msg.linear.z=0\n\n currentDistance=0\n #for extra credit ramp speed from 0 to speed and from speed to 1/4 speed when past half way\n vel=0\n\n while(not atTarget and not rospy.is_shutdown()):\n if(currentDistance>=distance):\n print('driveStraight: stoped')\n atTarget=True\n self._vel_pub.publish(stop_msg)\n else:\n print('driveStraight: moving')\n origin=copy.deepcopy(self._current)\n xCurrent=self._current.position.x\n yCurrent=self._current.position.y\n currentDistance=math.sqrt(math.pow((xCurrent-xOrigin),2)+math.pow((yCurrent-yOrigin),2))\n self._vel_pub.publish(move_msg)\n print('current x: '+str(xCurrent)+'current y: '+str(yCurrent)+'origin x: '+str(xOrigin)+'origin y:'+str(yOrigin))\n print('\\n distance: '+str(currentDistance))\n # rospy.sleep(.15)", "def right(self, param):\n\t\tglobal estop_flag, move_state\n\t\t#If input angle is zero, set angle to default\n\t\tif param:\n\t\t\tangle = param\n\t\telse:\n\t\t\tangle = riu.default_angle\n\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\t#safely grab current yaw\n\t\twith self.move_state_lock:\n\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t#Set goal to yaw+angle. 
Add 360 then mod to account for negative angles but avoid going over 360\n\t\tgoal = (current_yaw - angle + 360) % 360\n\t\tif self.angle_lock:\n\t\t\tif goal >= 315 and goal < 45:\n\t\t\t\tgoal = self.zeroed_angle\n\t\t\telif goal >= 45 and goal < 135:\n\t\t\t\tgoal = self.zeroed_angle + 90\n\t\t\telif goal >= 135 and goal < 225:\n\t\t\t\tgoal = self.zeroed_angle + 180\n\t\t\telif goal >= 225 and goal < 315:\n\t\t\t\tgoal = self.zeroed_angle + 270\n\t\tgoal = goal % 360\n\t\thalf_goal = (current_yaw - angle/2 + 360) % 360\n\t\thalfway_flag = False #used to flag if we've already sent out a halfway message\n\t\t#Anonymous function that calculates the current clockwise distance to the goal\n\t\tchkdist = lambda pos, goal: round(pos - goal + 360 * (goal > pos), 1)\n\t\t#Gets current distance and initially sets previous distance = distance\n\t\tdistance = chkdist(current_yaw, goal)\n\t\tprev_dist = distance\n\t\t\"\"\"Continues to move while absolute distance is not within angular_error and clockwise\n\t\tdistance is not increasing. NOTE: absolute distance is the shortest distance in either direction,\n\t\twhile clockwise distance is the distance using only clockwise movement.\n\t\tThe angular_error condition was added because the movements tended to end within the first few \n\t\tcycles due to some float error. With the error condition, the movement can only end when inside\n\t\tat least the general area of the goal.\"\"\"\n\t\twhile distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error:\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#Build and publish right turn message\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance to goal is less than half the initial distance, publish the half done message\n\t\t\t\tif distance <= half_goal and not halfway_flag:\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#Update current position\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t\t\t#Update previous distance, then update distance based on current position\n\t\t\t\tprev_dist = distance\n\t\t\t\tdistance = chkdist(current_yaw, goal)\n\t\t\trospy.sleep(.2)\n\t\t#After loop end, send stop message and send done message to cmd_queue\t\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval) #Restart timer", "def driveStraight(speed, distance):\n global pose\n\n initialX = pose.pose.position.x\n initialY = pose.pose.position.y\n\n atTarget = False\n while (not atTarget and not rospy.is_shutdown()):\n currentX = pose.pose.position.x\n currentY = pose.pose.position.y\n currentDistance = math.sqrt(math.pow((currentX - initialX), 2) + math.pow((currentY - initialY), 2))\n if (currentDistance >= distance):\n atTarget = True\n sendMoveMsg(0, 0)\n else:\n sendMoveMsg(speed, 0)\n rospy.sleep(0.15)" ]
[ "0.64928085", "0.6398207", "0.63323027", "0.63294876", "0.62530607", "0.61761117", "0.6161925", "0.6121459", "0.60939133", "0.6092826", "0.60628915", "0.6010287", "0.5977998", "0.5970592", "0.5965475", "0.59644395", "0.5960455", "0.5934109", "0.5906981", "0.5897367", "0.5876129", "0.58391607", "0.58306247", "0.57950425", "0.5780067", "0.57594544", "0.5734706", "0.57331914", "0.5721887", "0.5700933" ]
0.708726
0
Clean the distributed compute pipeline.
def cleanup(): dist.destroy_process_group()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_up(self):\n dist.destroy_process_group()", "def cleanup(self):\n with hide(\"output\", \"warnings\", \"running\"):\n self.stop_all()\n self._execute_standard(\"rm -rf {model_repo}\".format(model_repo=MODEL_REPO))\n self._execute_root(\"docker rmi --force $(docker images -q)\", warn_only=True)\n self._execute_root(\"docker network rm clipper_nw\", warn_only=True)", "def dist_cleanup():\n dist.destroy_process_group()", "def cleanup_infrastructure_compute(config, context):\n log.info(\"### Cleaning up infrastructure ###\")\n admin = context.getAdministrationService()\n for datacenter in admin.listDatacenters():\n cleanup_infrastructure_storage(config, datacenter)\n cleanup_infrastructure_network(config, datacenter)\n # This will remove the datacenter and all hypervisors\n # (if they don't contain deplopyed VMs)\n log.info(\"Removing datacenter %s...\" % datacenter.getName())\n datacenter.delete()", "def deinit_distributed(args: dict):\n\n if is_distributed(args):\n dist.destroy_process_group()", "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def cleanup(self):\n if self.cleanup_allowed:\n shutil.rmtree(self.out_dir)\n self.train_df, self.valid_df, self.test_df = None, None, None", "def clean(c):\n clean_docker(c)\n clean_repo(c)", "def clean(self):\n self._raw_execute(\"clean\", {\"job_id\": self.job_id})", "def cleanup(self):\n self.GP.cleanup()", "def cleanup_processor(self):\n pass", "def finalize(self):\n if not self.inputs.clean_workdir:\n return\n cleaned_calcs = []\n for calculation in self.ctx.calculations:\n try:\n # noinspection PyProtectedMember\n calculation.outputs.remote_folder._clean()\n cleaned_calcs.append(calculation)\n except ValueError as ex:\n self.logger.warning(\"Exception caught while cleaning remote folders: {}\".format(ex))\n if cleaned_calcs:\n self.report('Cleaned remote folders of calculations: {}'.format(' '.join(map(str, cleaned_calcs))))", "def server_clean(self):\n # Kill any doas servers running on the hosts\n self.kill()\n # Clean up any files that exist on the hosts\n self.clean_files()", "def clean_master():", "def clean(all):\n docker_clean(all)", "def clean_azml_workspace(ctx):\n\n ws = get_workspace(config)\n\n # remove compute clusters\n for _, compute in ws.compute_targets.items():\n if not compute.provisioning_state == \"Deleting\":\n print(f\"Deleting {compute.name}\")\n compute.delete()", "async def clean(self, ctx):\n pass", "def clean():\n clean_files()", "def cleanup(self):\n self.msgmap.clear()\n self.droppedmsgs.clear()\n self.chan.stop_receiving_messages()\n\n # TODO: enable\n #self.cmdMap.clear()\n #self.cmdCliSubmitQueue.clear()\n #self.cmdSvrComputeQueue.clear()\n #self.droppedCommands.clear()\n #self.ch.stop_receiving_commands()", "def CleanUp(self):\n self.cmd.CleanUp()", "def network_cleanup(self, args):\n pass", "def _cleanup(self):\n\n self.netIns = []\n self.netOuts = []\n self.Gradients = [None]*self.size", "def cleanUp(self):\n self.isConnected=False\n self.spawnProc=None", "def cleanup():", "def cleanup(self):\n logging.debug(\"cleanup called\")\n self.delete_networks()\n self.delete_machines()", "def remove_compute(compute_targets):\n for name, ct in compute_targets.items():\n compute_targets[name].delete()", "def _final_cleanup(self):\n # Clean up and remove the temporary gisdbase\n self._cleanup()\n # Remove resource directories\n if \"error\" in self.run_state or \"terminated\" in self.run_state:\n self.storage_interface.remove_resources()", "def cleanup():\n cmd='docker rmi --force 
$(docker images -a -q)'\n bash_command(\"Deleting all images\", cmd)", "def shutdown(self):\n # Delete build source packages only if user requested no cache\n if self._save_storage_cache:\n logger.debug(\"Requested to save workflow sources, skipping cleanup.\")\n else:\n for package in self._build_packages:\n blob = self.bucket.blob(package)\n if blob.exists():\n logger.debug(\"Deleting blob %s\" % package)\n blob.delete()\n\n # perform additional steps on shutdown if necessary\n super().shutdown()", "def clean_all(self):\n for p in ['process_manager.py', 'mongo']:\n cmd = (\"ps aux | grep %s | grep -v grep | awk '{ print $2 }'\"\n \" | xargs kill -s 9\") % p\n self._ssh(cmd, use_pwd=False)" ]
[ "0.71976703", "0.6800443", "0.6734573", "0.64912385", "0.63518906", "0.63412476", "0.62536824", "0.6208296", "0.6204782", "0.61962974", "0.6179033", "0.617475", "0.61652684", "0.61405635", "0.61350846", "0.6127614", "0.60969156", "0.6074719", "0.6072437", "0.6032038", "0.60145396", "0.60046256", "0.5993718", "0.5985793", "0.5976468", "0.5960002", "0.5955069", "0.59533256", "0.5943152", "0.5938708" ]
0.68549407
1
Reset all OATH data. This action will delete all accounts and restore factory settings for the OATH application on the YubiKey.
def reset(ctx, force): force or click.confirm( "WARNING! This will delete all stored OATH accounts and restore factory " "settings. Proceed?", abort=True, err=True, ) session = ctx.obj["session"] click.echo("Resetting OATH data...") old_id = session.device_id session.reset() keys = ctx.obj["oath_keys"] if old_id in keys: del keys[old_id] keys.write() logger.info("Deleted remembered access key") click.echo("Success! All OATH accounts have been deleted from the YubiKey.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def hard_reset(self) -> None:\n os.system('rm -fr \"$HOME/.daf/\"')", "def reset(self):\n for provider in self.providers.values():\n provider.reset()\n\n for observation in self.observations.values():\n observation.reset()", "def reset(self):\n # Clear mutable data, but leave the immutables intact\n self.train_data = {}\n self.val_data = {}\n self.test_data = {}\n self.model_files = []\n self.custom_data = {}\n # Remove all the physical assets\n for item in os.scandir(self.root_path):\n os.remove(item.path)\n # Reserialize\n self.serialize()", "async def admin_reset(self, ctx: commands.Context):\n await self.config.clear_all()\n await self.initialize_internals()\n await ctx.send('Global team management factory reset complete.')", "def _soft_reset(self):\n self._reset_specific_envs(self.episodes_done)\n self._update_other_info()", "def clearAllSettings(self) -> None:\n ...", "def resetData(self):\n self.currentHoursLeft = self.maxHoursLeft\n self.currentRound = self.currentRound + 1\n # reset empire data\n for empireID, myEmpire in self.empires.iteritems():\n myEmpire.resetData()\n myEmpire.resetRoundData()\n \n # reset system data\n for systemID, mySystem in self.systems.iteritems():\n mySystem.setWarpConnections()\n \n # reset ship data\n for shipID, myShip in self.ships.iteritems():\n myShip.resetData()\n \n # reset regiment data\n for regimentID, myRegiment in self.regiments.iteritems():\n myRegiment.resetData()\n \n # reset ship orders\n for shipID, myShip in self.ships.iteritems():\n myShip.setAvailSystems()\n \n # reset regiment orders\n for regimentID, myRegiment in self.regiments.iteritems():\n myRegiment.setMyPotentialOrders()\n \n # set intel reports\n for systemID, mySystem in self.systems.iteritems():\n mySystem.setIntelReports()\n \n # set empire stats\n self.setEmpireStats()", "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def clear_data_base():\n\n\tcommand = 'rm object_models/*.json'\n\tos.system(command)\n\tprint(\"data base cleared\")", "def reset(self):\r\n # TODO: have reset flag such that it forces all the bottom changes\r\n self.pwm_freq = self._default[\"pwm_freq\"]\r\n self.gate_logic = self._default[\"gate_logic\"]\r\n self.max_pwm = self._default[\"max_pwm\"]\r\n self.lase_on_power_up = self._default[\"lase_on_power_up\"]\r\n\r\n self.mode = self._default[\"mode\"]\r\n self.lase = self._default[\"lase\"]\r\n self.percent = self._default[\"percent\"] # in percent\r", "def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None", "def reset_all_requests(self):\n 
self._send_request(\"/reset\")", "def reset(self):\n self.manager.delete_all()\n for name, val in DEFAULT_SETTINGS.items():\n val['name'] = name\n val['default_value'] = val['value']\n self.manager.from_dict(val)", "def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()", "def reset(self):\n self.data = self._defaults", "def reset(self):\n self.fscore_history = []", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def reset(self):\n self.registry = {}", "def reset(self):\n self.files = []\n self.regions = []\n self.headers = {}\n self.radial_data = []\n self.histogram_data = []\n self.p2p_data = []\n self.ptable = None", "def reset(yes):\n ok = yes or confirm('Do you really want to destroy all your data? (y/n) ')\n if not ok:\n return\n\n dbpath = os.path.realpath(os.path.expanduser(config.dbserver.file))\n\n # user must be able to access and write the databse file to remove it\n if os.path.isfile(dbpath) and os.access(dbpath, os.W_OK):\n if dbserver.get_status() == 'running':\n if config.dbserver.multi_user:\n sys.exit('The oq dbserver must be stopped '\n 'before proceeding')\n else:\n pid = logs.dbcmd('getpid')\n os.kill(pid, signal.SIGTERM)\n time.sleep(.5) # give time to stop\n assert dbserver.get_status() == 'not-running'\n print('dbserver stopped')\n try:\n os.remove(dbpath)\n print('Removed %s' % dbpath)\n except OSError as exc:\n print(exc, file=sys.stderr)\n\n # fast way of removing everything\n purge_all(fast=True) # datastore of the current user", "def clear_orc8r():\n print('#### Clearing swagger database from Orc8r ####')\n subprocess.check_call(['./run.py --clear-db'], shell=True, cwd=orc8_docker_path)\n print(\n '#### Remember you may need to delete '\n 'gateway certs from the AGW and FEG ####',\n )", "async def _reset_all_settings(self, ctx):\n await self._reset_settings(ctx)\n await self._reset_memberships(ctx)\n await self._reset_games(ctx)\n await self._reset_cooldowns(ctx)", "def reset(self):\n self._setupObjects()", "def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def clear(self) -> None:\n self._REGISTERED_ENVS.clear()\n self._manifests = []\n self._sync = True", "def clear_all(self):\n self._data = {}\n self.uncache()\n self.dirty = True\n self.shipping_method = None\n self.payment_method = None\n self.customer_comment = \"\"" ]
[ "0.8467011", "0.65540415", "0.63808334", "0.6317176", "0.6243645", "0.62291396", "0.6131616", "0.6092173", "0.6083494", "0.6074787", "0.6063735", "0.6057817", "0.6048862", "0.6033866", "0.60303414", "0.6019029", "0.601429", "0.5997502", "0.5983064", "0.59774274", "0.59740347", "0.59664077", "0.59662217", "0.5952756", "0.5949686", "0.591343", "0.5910454", "0.59067124", "0.58966374", "0.58931184" ]
0.8467517
0
Manage and use OATH accounts.
def accounts():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def accounts():\n pass", "def open_account():\n print(\"\\n\")\n print(messages.open_account)\n u_id = pyip.inputInt(\"Id: \", greaterThan=0)\n name = pyip.inputCustom(raiseNameError, prompt=\"Name: \")\n address = pyip.inputCustom(raiseAddressError, prompt=\"Address: \")\n email = pyip.inputEmail(\"Email: \")\n balance = pyip.inputInt(\"Balance: \", min=0)\n password = pyip.inputPassword(\"Password: \")\n\n user_data = [u_id, name, address, balance, email, password]\n result = BankOperationsBackend.open_account(user_data)\n\n start_again() if result else BankOperationsUi.open_account()", "def acctLogin(self):\n self.acctObj.email = \"[email protected]\"\n self.password = \"default\"\n self._displayName = \"defaultUser\"\n return True", "def simple_banking_management_oop():\n bank = Bank('My_bank') # Initiate bank\n\n # Create users, choose between private and company, return user directly if needed\n ricky = bank.register_user('private', 'Ricky', 'Wysocki', 222222)\n bank.register_user('company', 'E_will_inc', 666666)\n bank.register_user('private', 'Paul', 'Mcbeth', 111111)\n bank.register_user('private', 'Page', 'Pierce', 121212)\n bank.register_user('private', 'Super', 'Man', 123456)\n bank.register_user('private', 'Ricky', 'Wysocki', 221122)\n\n # Search for user no match -> returns no match\n user = bank.search_user('Rikki', 'Whysolucky', 222222)\n print(user)\n\n # Search for user more than one match -> returns prompt to specify search and details about results\n user = bank.search_user('Ricky', 'Wysocki')\n print(user)\n\n # Search for user one match -> Returns user object\n user = bank.search_user('E_will_inc')\n print(user)\n\n # Same search works with different args for both private and company -> return user\n company_user = bank.search_user(666666)\n print(company_user)\n\n # Register an account, specify which type -> None\n ricky.register_account('savings')\n ricky.register_account('salary')\n\n # Deposit to specified account or access directly from account\n ricky.deposit('savings', 100)\n ricky.accounts['savings'].deposit(100)\n ricky.deposit('salary', 20)\n\n # Make a withdrawal if sufficient funds\n ricky.withdraw('savings', 50)\n\n # Prints an exception with explanation\n ricky.withdraw('salary', 30)\n ricky.accounts['salary'].withdraw(30)", "def HandleAccounts(self, result):\n self.logger.debug('Checking for changes to user accounts.')\n configured_users = self.utils.GetConfiguredUsers()\n enable_oslogin = self._GetEnableOsLoginValue(result)\n enable_two_factor = self._GetEnableTwoFactorValue(result)\n if enable_oslogin:\n desired_users = {}\n self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)\n else:\n desired_users = self._GetAccountsData(result)\n self.oslogin.UpdateOsLogin(False)\n remove_users = sorted(set(configured_users) - set(desired_users.keys()))\n self._UpdateUsers(desired_users)\n self._RemoveUsers(remove_users)\n self.utils.SetConfiguredUsers(desired_users.keys())", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n 
self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def account(request):\r\n # if auth fails, it'll raise an HTTPForbidden exception\r\n with ReqAuthorize(request):\r\n user = UserMgr.get(username=request.user.username)\r\n\r\n return {\r\n 'user': user,\r\n 'username': user.username,\r\n }", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def user():", "def withdraw_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.withdraw_money(credentials)\n start_again() if result else BankOperationsUi.withdraw_money()", "def save_accounts(account):\n account.save_account()", "def save_accounts(account):\n account.save_account()", "def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "def __init__(self,account,username, password):\n self.account = account\n self.username = username\n self.password = password", "def account():\n return render_template('user/account.html')", "def __init__(self,account,username, password):\n self.user_name = username\n self.password = password\n self.account = account", "def _generate_accounts(self):\n accounts = []\n auth_url = 'http://{}:5000/v3/'.format(self.host)\n\n for tenant, network in self.tenants:\n account = RwcalYang.CloudAccount.from_dict({\n 'name': 'rift.auto.openstack',\n 'account_type': 'openstack',\n 'openstack': {\n 'key': self.user or self._DEFAULT_USERNAME,\n 'secret': self._DEFAULT_PASSWORD,\n 'auth_url': auth_url,\n 'tenant': tenant,\n 'mgmt_network': network}})\n\n accounts.append(account)\n\n return accounts", "def new_account(cursor: Cursor, owner: Owner) -> ResultSet[Optional[Password]]:\n username = owner_name(owner)\n results = ResultSet[Optional[Password]]()\n results.add(pgsql.add_user(cursor, username), True)\n if isinstance(owner, Member):\n results.extend(sync_member_roles(cursor, owner))\n elif isinstance(owner, Society):\n results.extend(sync_society_roles(cursor, owner))\n return results", "def deposit_money():\n print(\"\\n\")\n 
print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.deposit_money(credentials)\n start_again() if result else BankOperationsUi.deposit_money()", "def list_accounts(self):\n pass", "def home_edituser():\n\tpass", "def make_logged_in_owner(self):\n response = self.client.post('', self.credentials, follow=True)", "def __init__(self,account_name, username, password):\n self.account_name = account_name\n self.username = username\n self.password = password", "def impersonate_user(self, username, password):", "def open_account():\n # TODO: refactor this endpoint be secure\n # HINT: this route should hash the password before it is saved\n holder = request.json.get(\"holder\")\n account = Account.query.filter_by(holder=holder).first()\n if account:\n return jsonify({\"error\": \"Account already exists\"})\n account = Account(holder=holder)\n db.session.add(account)\n db.session.commit()\n return (\n jsonify(\n {\"message\": f\"An account for {account.holder} has been created\"}\n ),\n 201,\n )", "def test_duo_account_get(self):\n pass", "def test_duo_account_list(self):\n pass", "def manage():\n if current_user.is_agency:\n form = ManageAgencyUserAccountForm(user=current_user)\n else:\n form = ManageUserAccountForm(user=current_user)\n\n if request.method == \"POST\":\n if form.validate_on_submit():\n update_openrecords_user(form)\n redirect(url_for(\"auth.manage\"))\n else:\n flash(\"Account cannot be updated.\", category=\"danger\")\n return render_template(\"auth/manage_account.html\", form=form)\n else:\n form.autofill()\n\n return render_template(\n \"auth/manage_account.html\", form=form, is_agency=current_user.is_agency\n )", "def __init__(self,Account,username,password):\n self.Account = Account\n self.username = username\n self.password = password", "def account():\n\n return render_template('account_page.html', title='Account')" ]
[ "0.68266904", "0.65521026", "0.6182628", "0.60371274", "0.6025271", "0.6007449", "0.5981175", "0.5968835", "0.59599483", "0.59120464", "0.58873284", "0.58873284", "0.58796024", "0.58731365", "0.5872677", "0.5826017", "0.582573", "0.58219486", "0.5776078", "0.57698965", "0.57683146", "0.5764205", "0.57629573", "0.574437", "0.5734462", "0.5710443", "0.5708966", "0.57068104", "0.5695461", "0.56843007" ]
0.7143794
0
Generate codes. Generate codes from OATH accounts stored on the YubiKey. Provide a query string to match one or more specific accounts. Accounts of type HOTP, or those that require touch, require a single match to be triggered.
def code(ctx, show_hidden, query, single, password, remember): _init_session(ctx, password, remember) session = ctx.obj["session"] entries = session.calculate_all() creds = _search(entries.keys(), query, show_hidden) if len(creds) == 1: cred = creds[0] code = entries[cred] if cred.touch_required: prompt_for_touch() try: if cred.oath_type == OATH_TYPE.HOTP: with prompt_timeout(): # HOTP might require touch, we don't know. # Assume yes after 500ms. code = session.calculate_code(cred) elif code is None: code = session.calculate_code(cred) except ApduError as e: if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED: raise CliFail("Touch account timed out!") entries[cred] = code elif single and len(creds) > 1: _error_multiple_hits(ctx, creds) elif single and len(creds) == 0: raise CliFail("No matching account found.") if single and creds: if is_steam(cred): click.echo(calculate_steam(session, cred)) else: click.echo(code.value) else: outputs = [] for cred in sorted(creds): code = entries[cred] if code: if is_steam(cred): code = calculate_steam(session, cred) else: code = code.value elif cred.touch_required: code = "[Requires Touch]" elif cred.oath_type == OATH_TYPE.HOTP: code = "[HOTP Account]" else: code = "" outputs.append((_string_id(cred), code)) longest_name = max(len(n) for (n, c) in outputs) if outputs else 0 longest_code = max(len(c) for (n, c) in outputs) if outputs else 0 format_str = "{:<%d} {:>%d}" % (longest_name, longest_code) for name, result in outputs: click.echo(format_str.format(name, result))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def code(ctx, show_hidden, query, single):\n\n ensure_validated(ctx)\n\n controller = ctx.obj['controller']\n creds = [(cr, c)\n for (cr, c) in controller.calculate_all()\n if show_hidden or not cr.is_hidden\n ]\n\n creds = _search(creds, query)\n\n if len(creds) == 1:\n cred, code = creds[0]\n if cred.touch:\n prompt_for_touch()\n try:\n if cred.oath_type == OATH_TYPE.HOTP:\n # HOTP might require touch, we don't know.\n # Assume yes after 500ms.\n hotp_touch_timer = Timer(0.500, prompt_for_touch)\n hotp_touch_timer.start()\n creds = [(cred, controller.calculate(cred))]\n hotp_touch_timer.cancel()\n elif code is None:\n creds = [(cred, controller.calculate(cred))]\n except APDUError as e:\n if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:\n ctx.fail('Touch credential timed out!')\n\n elif single:\n _error_multiple_hits(ctx, [cr for cr, c in creds])\n\n if single:\n click.echo(creds[0][1].value)\n else:\n creds.sort()\n\n outputs = [\n (\n cr.printable_key,\n c.value if c\n else '[Touch Credential]' if cr.touch\n else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP\n else ''\n ) for (cr, c) in creds\n ]\n\n longest_name = max(len(n) for (n, c) in outputs) if outputs else 0\n longest_code = max(len(c) for (n, c) in outputs) if outputs else 0\n format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code)\n\n for name, result in outputs:\n click.echo(format_str.format(name, result))", "def generate_code_endpoint():\n resp = MessagingResponse()\n body = request.values.get('From', None)\n local_number = \"0\" + body[3:]\n parent = db.session.query(ParentModel).filter_by(mobile_number=local_number)\n parent_id = parent.first().as_dict()[\"id\"]\n if parent.count() == 1:\n # here we generate a code for them and add it to the Db\n code = randint(1000, 9999)\n available_code = db.session.query(OTPModel).filter_by(code=code)\n\n # find a new code if it's not unique\n while available_code.count() != 0:\n code = randint(1000, 9999)\n available_code = db.session.query(OTPModel).filter_by(code=code)\n\n current_code_location = db.session.query(OTPModel).filter_by(parent_id=parent_id)\n if current_code_location.count() == 1:\n db.session.delete(current_code_location.first())\n db.session.commit()\n\n otp = OTPModel()\n data = {\"code\": code, \"parent_id\": parent_id}\n valid = otp.load(data)\n\n if valid:\n db.session.add(otp)\n db.session.commit()\n resp.message(\"Your AuthOut code is \" + str(code) + \".\")\n else:\n resp.message(\"Error creating AuthOut code. 
Please try again.\")\n else:\n resp.message(\"You've messaged the Admin verification system for AuthOut, if this was intended \"\n \"please contact an admin to register yourself in the system.\")\n\n return str(resp)", "def generate_payu_hash_android(data):\n hash_keys = ('txnid', 'amount', 'productinfo', 'firstname', 'email', 'udf1', 'udf2', 'udf3', 'udf4', 'udf5')\n\n hashes = {}\n pkey = config('PAYU_KEY')\n salt = config('PAYU_SALT')\n\n value = pkey\n for key in hash_keys:\n value += \"{}{}\".format('|', data.get(key, ''))\n\n value += \"{}{}\".format('||||||', salt)\n hashes['payment_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameMerchantCodes = 'get_merchant_ibibo_codes'\n value = pkey + '|' + cmnNameMerchantCodes + '|default|' + salt\n hashes['get_merchant_ibibo_codes_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnMobileSdk = 'vas_for_mobile_sdk'\n value = pkey + '|' + cmnMobileSdk + '|default|' + salt\n hashes['vas_for_mobile_sdk_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnEmiAmountAccordingToInterest = 'getEmiAmountAccordingToInterest'\n value = pkey + '|' + cmnEmiAmountAccordingToInterest + '|' + str(data.get('amount', '')) + '|' + salt\n hashes['emi_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnPaymentRelatedDetailsForMobileSdk1 = 'payment_related_details_for_mobile_sdk'\n value = pkey + '|' + cmnPaymentRelatedDetailsForMobileSdk1 + '|default|' + salt\n hashes['payment_related_details_for_mobile_sdk_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnVerifyPayment = 'verify_payment'\n value = pkey + '|' + cmnVerifyPayment + '|' + data.get('txnid', '') + '|' + salt\n hashes['verify_payment_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('user_credentials'):\n cmnNameDeleteCard = 'delete_user_card'\n value = pkey + '|' + cmnNameDeleteCard + '|' + data['user_credentials'] + '|' + salt\n hashes['delete_user_card_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameGetUserCard = 'get_user_cards'\n value = pkey + '|' + cmnNameGetUserCard + '|' + data['user_credentials'] + '|' + salt\n hashes['get_user_cards_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameEditUserCard = 'edit_user_card'\n value = pkey + '|' + cmnNameEditUserCard + '|' + data['user_credentials'] + '|' + salt\n hashes['edit_user_card_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameSaveUserCard = 'save_user_card'\n value = pkey + '|' + cmnNameSaveUserCard + '|' + data['user_credentials'] + '|' + salt\n hashes['save_user_card_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnPaymentRelatedDetailsForMobileSdk = 'payment_related_details_for_mobile_sdk'\n value = pkey + '|' + cmnPaymentRelatedDetailsForMobileSdk + '|' + data['user_credentials'] + '|' + salt\n hashes['payment_related_details_for_mobile_sdk_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('udf3'):\n cmnSend_Sms = 'send_sms'\n value = pkey + '|' + cmnSend_Sms + '|' + data['udf3'] + '|' + salt\n hashes['send_sms_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('offerKey'):\n cmnCheckOfferStatus = 'check_offer_status'\n value = pkey + '|' + cmnCheckOfferStatus + '|' + data['offerKey'] + '|' + salt\n hashes['check_offer_status_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('cardBin'):\n cmnCheckIsDomestic = 'check_isDomestic'\n value = pkey + '|' + cmnCheckIsDomestic + '|' + 
data['cardBin'] + '|' + salt\n hashes['check_isDomestic_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n return hashes", "def search_for_oath_code(hsm, key_handle, nonce, aead, counter, user_code, interval=30):\n key_handle = pyhsm.util.input_validate_key_handle(key_handle)\n nonce = pyhsm.util.input_validate_nonce(nonce, pad = False)\n aead = pyhsm.util.input_validate_aead(aead)\n \"\"\" counter will not be used since we validate against time instead of counter \"\"\"\n counter = pyhsm.util.input_validate_int(counter, 'counter')\n user_code = pyhsm.util.input_validate_int(user_code, 'user_code')\n hsm.load_temp_key(nonce, key_handle, aead)\n\n timecounter = timecode(datetime.datetime.now(), interval)\n secret = struct.pack(\"> Q\", timecounter)\n hmac_result = hsm.hmac_sha1(pyhsm.defines.YSM_TEMP_KEY_HANDLE, secret).get_hash()\n this_code = truncate(hmac_result)\n if this_code == user_code:\n return timecounter \n return None", "def search_for_oath_code(hsm, key_handle, nonce, aead, counter, user_code, look_ahead=1):\n key_handle = pyhsm.util.input_validate_key_handle(key_handle)\n nonce = pyhsm.util.input_validate_nonce(nonce, pad = False)\n aead = pyhsm.util.input_validate_aead(aead)\n counter = pyhsm.util.input_validate_int(counter, 'counter')\n user_code = pyhsm.util.input_validate_int(user_code, 'user_code')\n hsm.load_temp_key(nonce, key_handle, aead)\n # User might have produced codes never sent to us, so we support trying look_ahead\n # codes to see if we find the user's current code.\n for j in xrange(look_ahead):\n this_counter = counter + j\n secret = struct.pack(\"> Q\", this_counter)\n hmac_result = hsm.hmac_sha1(pyhsm.defines.YSM_TEMP_KEY_HANDLE, secret).get_hash()\n this_code = truncate(hmac_result)\n if this_code == user_code:\n return this_counter + 1\n return None", "def test_generate_access_code(self):\n prepared_uri = properties.APP_URI.replace(':', '%3A').replace('/', '%2F')\n url = account.generate_access_code(test=True)\n\n self.assertTrue(properties.SCOPES.replace(' ', '+') in url)\n self.assertTrue(prepared_uri in url)\n self.assertTrue(properties.APP_ID in url)", "def request_two_factor_code(self):\n code = ''\n while not code:\n code = input('Enter 2FA code: ')\n return code", "def generate_access_code(user_data):\n\n try:\n user = User.objects.get(cn=user_data['cn'])\n return user.code\n except User.DoesNotExist:\n user = User(cn=user_data['cn'])\n\n event_id = os.environ.get('EVENTBRITE_EVENTID')\n ticket_id = os.environ.get('EVENTBRITE_TICKETID')\n\n # now = datetime.datetime.utcnow()\n # end_time = now + datetime.timedelta(minutes=30) # valid for 30 min\n # end_time = end_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n access_code = hexlify(os.urandom(3)).decode()\n\n req = eventbrite.post('/events/' + str(event_id) + '/access_codes/', {\n 'access_code.code': access_code,\n 'access_code.ticket_ids': [ticket_id],\n 'access_code.quantity_available': 1, # can only use once\n # 'access_code.end_date': end_time\n })\n\n if 'code' in req:\n user.code = req['code']\n user.save()\n return user.code\n else:\n return None", "def main():\n import sys\n\n def log(message):\n print(message)\n\n def print_usage():\n log('usage: %s <api username> <api password> pay <customber_mobile> <amount> <reason>' % sys.argv[0])\n log('usage: %s <api username> <api password> transfer <customber_mobile> <amount> <reason>' % sys.argv[0])\n log(' %s <api username> <api password> balance ' % sys.argv[0])\n\n if len(sys.argv) > 4 and sys.argv[3] == 'transfer':\n username, password, 
account, amount = sys.argv[1], sys.argv[2], sys.argv[4], sys.argv[5]\n yo = YoPayments(username, password)\n\n if len(sys.argv) > 6:\n reason = sys.argv[6]\n log(yo.transfer(amount, account, reason))\n else:\n log(yo.transfer(amount, account))\n elif len(sys.argv) > 4 and sys.argv[3] == 'pay':\n username, password, account, amount = sys.argv[1], sys.argv[2], sys.argv[4], sys.argv[5]\n yo = YoPayments(username, password)\n\n if len(sys.argv) > 6:\n reason = sys.argv[6]\n log(yo.send_money(amount, account, reason))\n else:\n log(yo.send_money(amount, account))\n elif len(sys.argv) > 3 and sys.argv[3] == 'balance':\n username, password = sys.argv[1], sys.argv[2]\n yo_payments = YoPayments(username, password)\n log(yo_payments.check_balance())\n else:\n print_usage()\n sys.exit(1)\n\n sys.exit(0)", "def gen_CRM(call_text, response_text):\n pass", "def generate_otp(email):\n\tprint \"generate_otp\"\n\totp_key = pyotp.random_base32()\n\ttotp = pyotp.TOTP(otp_key)\n\n\t# Data for generating user specific QR code\n\tqrcode_data = totp.provisioning_uri(email)\n\tprint \"otp_key = \", otp_key\n\tprint \"qrcode_data = \", qrcode_data\n\n\treturn (otp_key, qrcode_data)", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def generate_code(self):\n code = ''.join(\n random.choices(string.ascii_lowercase + string.digits, k=5))\n self.code = '{}{}'.format(self.user.id, code)", "def createOTP():\n\t code = []\n\t for i in range(6):\n\t\t code.append(random.randint(0,9))\n\t return \"\".join(str(code) for c in code)", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def make_ticket_code(prefix, code_dict):\r\n while (True): # continue until we find a unique code\r\n letters = random.sample(_CHAR_LIST, 4) # generate 4 random letters\r\n code = prefix + ''.join(letters) # turn letters to string\r\n if not code_dict.has_key(code): # code is unique\r\n return code", "def orcids() -> st.SearchStrategy[str]:\n\n def make_orcid(digits: str) -> str:\n digits = digits[:-1] + orcid_checksum(digits)\n return \"https://orcid.org/\" + \"-\".join(\n digits[i : i + 4] for i in range(0, 16, 4)\n )\n\n return st.text(alphabet=\"0123456789\", min_size=16, max_size=16).map(make_orcid)", "def otp_generate(request):\n phone = request.GET.get('phone', None)\n otp = UserAuth(phone).generate_otp()\n return Response(\n {\n 'success': True,\n 'phone': phone,\n 'otp': otp\n }\n )", "def test_get_user_bypass_codes(self):\n response = self.client.get_user_bypass_codes(\"DU012345678901234567\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v1/users/DU012345678901234567/bypass_codes\")\n 
self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def generateMatchClause(code, type, i):\n match = WOQLQuery().woql_and(\n WOQLQuery().idgen(\"doc:\" + type, [code], \"v:ID_\"+str(i)),\n WOQLQuery().cast(code, \"xsd:string\", \"v:Label_\"+ str(i))\n #WOQLQuery().idgen(\"doc:\" + type, [{\"@value\": code, \"@type\": \"xsd:string\"}], \"v:ID_\"+str(i)),\n #WOQLQuery().cast({\"@value\": code, \"@type\": \"xsd:string\"}, \"xsd:string\", \"v:Label_\"+ str(i))\n )\n return match", "def calculate_auth_code(self, data) -> str:\n return (\n hmac.new(\n bytes(self.config.get(VENE_PAYMENTS_BAMBORA_API_SECRET), \"latin-1\"),\n msg=bytes(data, \"latin-1\"),\n digestmod=hashlib.sha256,\n )\n .hexdigest()\n .upper()\n )", "def search_general(abe, q):\n def process(row):\n (name, code3) = row\n return { 'name': name + ' (' + code3 + ')',\n 'uri': 'chain/' + str(name) }\n ret = map(process, abe.store.selectall(\"\"\"\n SELECT chain_name, chain_code3\n FROM chain\n WHERE UPPER(chain_name) LIKE '%' || ? || '%'\n OR UPPER(chain_code3) LIKE '%' || ? || '%'\n \"\"\", (q.upper(), q.upper())))\n return ret", "def gen_info(middle_db_input):\n\n query = \"\"\n query_add = \"\"\n # Queries will be build from several part: \"select\" statement, followed by\n # what should be selected and what tables to select from. The last part is\n # extended using the information from the webpage request.\n\n # Two types of query body:\n # first to be used for selection by any accession number;\n query_t0 = \"from locus l, cds c, accession a where a.locus_id=l.id and c.locus_id=l.id and \"\n # second to be used in all other queries, as only primary (latest) accession\n # number will be displayed on the webpage.\n query_t1 = query_t0 + \"a.latest_version='T' and \"\n\n # Columns to be selected from the respective tables.\n locus = \"l.whole_seq as locus_sequence, l.chr_location, l.locus_name, l.chr_name\"\n cds = \"c.gene_name, c.product_name, c.product_id, c.seq_location, c.whole_seq, c.translation, c.complement\"\n accession = \"a.accession_num\"\n # Columns to be selected, when the user selects a cytogenic location.\n cyt_loc_cds = \"c.gene_name, c.product_name, c.product_id\"\n cyt_loc = \"l.chr_location\"\n \n # Query construction \n search = middle_db_input[\"name\"]\n # Type 0 (gene identifier) and 1 (product name) contain information on a\n # single element, hence no information repeats would be present in the\n # output; therefore just one query is generated\n if middle_db_input[\"type\"]==0:\n query = \"select \" + accession + \", \" + locus + \", \" + cds + \" \" + query_t1 + \"c.gene_name\" + \"=\" + \"'\"+search+\"'\"\n elif middle_db_input[\"type\"]==1:\n query = \"select \" + accession + \", \" + locus + \", \" + cds + \" \" + query_t1 + \"c.product_name\" + \"=\" + \"'\"+search+\"'\"\n # Type 2 (locus accession number) and 3 (cytogenic location) could have\n # multiple elements - multiple CDS or multiple loci and CDS, respetively).\n # Using one query would lead to information repeats. 
Using two queries \n # avoids unnecesary repetitions.\n elif middle_db_input[\"type\"]==2:\n query = \"select \" + locus + \" \" + query_t0 + \"a.accession_num\" + \"=\" + \"'\"+search+\"'\"\n query_add = \"select \" + cds + \" \" + query_t0 + \"a.accession_num\" + \"=\" + \"'\"+search+\"'\"\n elif middle_db_input[\"type\"]==3:\n query = \"select \" + cyt_loc_cds + \" \" + query_t1+ \"l.chr_location\" + \" like \" + \"'\"+search+\"%\"+\"'\"\n query_add = \"select \" + accession + \", \" + cyt_loc + \" \" + query_t1+ \"l.chr_location\" + \" like \" + \"'\"+search+\"%\"+\"'\"\n elif middle_db_input[\"type\"]==4:\n search2 = middle_db_input[\"product_id\"]\n query = \"select \" + accession + \", \" + locus + \", \" + cds + \" \" + query_t1 + \"c.product_id\" + \"=\" + \"'\"+search2+\"'\"\n \n\n db = pymysql.connect(db='0a002', user='0a002', passwd='0a002', host='hope', port=3306, cursorclass = pymysql.cursors.DictCursor)\n \n # Creating output from cursors depending on the query type.\n db_middle_output = [middle_db_input]\n if middle_db_input[\"type\"]==0 or middle_db_input[\"type\"]==1:\n cursor = db.cursor()\n q = cursor.execute(query)\n data = cursor.fetchall()\n db_middle_output += data\n elif middle_db_input[\"type\"]==2 or middle_db_input[\"type\"]==3:\n cursor1 = db.cursor()\n cursor2 = db.cursor()\n q1 = cursor1.execute(query)\n q2 = cursor2.execute(query_add)\n unit1 = cursor1.fetchall()\n unit2 = cursor2.fetchall()\n db_middle_output =db_middle_output + list(unit1) + list(unit2)\n # output includes the input dictionary for convenience of the front end.\n elif middle_db_input[\"type\"]==4:\n cursor = db.cursor()\n q = cursor.execute(query)\n data = cursor.fetchall()\n db_middle_output += data\n \n\n return(db_middle_output)", "def auth_request(phenny, input):\n admins = phenny.config.admins\n pattern = '(' + '|'.join([re.escape(x) for x in admins]) + ')'\n matches = re.findall(pattern, input)\n for x in matches:\n phenny.msg('NickServ', 'ACC ' + x)", "def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def generate_authentication_code(user):\n\n salt = 'd9!1l@39#c3'\n\n expire_timestamp = time.time() + EXPIRE_TIME_LIMIT\n # Make a string which depends on restaurant id\n # Same encoding mechanism will be used in seerpod hardware\n\n composite_string = \"%s%s%s\" % (user.id, user.password, salt)\n\n str_hex = hashlib.md5(composite_string).hexdigest()\n decoded_str = str(user.owner_email_id) + str(user.id) + \"_\" + str(expire_timestamp) + \"_\" + str_hex\n\n # Encoded string will be a multiple line string, if it is greater\n # than maximum bin size of 76. 
Browser strips the newline character\n # in the url.\n encoded = base64.encodestring(decoded_str).strip().replace('\\n', '')\n return encoded", "def get_steamguard_code_manual(email_address = ''):\n code = input('get_steamguard_code[' + email_address + '] --->')\n return code", "def request_access_code(self, id_card, name_surname, access_type, email_address, days):\n my_request = AccessRequest(id_card, name_surname, access_type, email_address, days)\n code = my_request.access_code\n my_request.store_request()\n return code", "def main(argv):\n try: \n opts, args = getopt.getopt(argv, \"hg:d\", [\"help\", \"grammar=\"])\n except getopt.GetoptError:\n usage()\n sys.exit(2) \n\n accounts = get_account_info(type)\n with open('accounts.csv', 'w') as output:\n csvwriter = csv.writer(output)\n for id, cn, mail in accounts:\n csvwriter.writerow([id, cn, mail])" ]
[ "0.5709946", "0.54599285", "0.52223974", "0.5158887", "0.511579", "0.49772036", "0.4926266", "0.48736855", "0.482959", "0.48240364", "0.47595084", "0.47581586", "0.47494113", "0.47327673", "0.47232696", "0.47091013", "0.47038928", "0.46369225", "0.4620117", "0.46148378", "0.46135148", "0.45901537", "0.45852196", "0.45488998", "0.45394194", "0.4535973", "0.4535259", "0.4534841", "0.4509637", "0.45069143" ]
0.60336894
0
Rename an account (requires YubiKey 5.3 or later). \b QUERY a query to match a single account (as shown in "list")
def rename(ctx, query, name, force, password, remember): _init_session(ctx, password, remember) session = ctx.obj["session"] creds = session.list_credentials() hits = _search(creds, query, True) if len(hits) == 0: click.echo("No matches, nothing to be done.") elif len(hits) == 1: cred = hits[0] if ":" in name: issuer, name = name.split(":", 1) else: issuer = None new_id = _format_cred_id(issuer, name, cred.oath_type, cred.period) if any(cred.id == new_id for cred in creds): raise CliFail( f"Another account with ID {new_id.decode()} " "already exists on this YubiKey." ) if force or ( click.confirm( f"Rename account: {_string_id(cred)} ?", default=False, err=True, ) ): session.rename_credential(cred.id, name, issuer) click.echo(f"Renamed {_string_id(cred)} to {new_id.decode()}.") else: click.echo("Rename aborted by user.") else: _error_multiple_hits(ctx, hits)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_name(change_account):\n change_data(change_account, changed_data='name')", "def userRenamed(self, oldname, newname):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"renamed\", oldname=oldname, newname=newname)", "def change_username(self, accountid, oldusername, newusername):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&accountid=' + accountid\n payload = {'oldusername': oldusername, 'newusername': newusername}\n url = SECURE_API_URL + \"raas/v1/account/changeusername\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)", "def rename(self,newName):\n self.userName = newName", "def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()", "def changeName(self, userId, newName):\n\t\turi = \"{}/users/{}\".format(tt_base_uri, userId)\n\t\turi_args = {\"name\":newName}\n\t\tr = requests.put(uri, json=uri_args, cookies={\"PLAY_SESSION\":self.play_session, \"__uvt\":\"\"})\n\t\tprint(\"change name: status code:\", r.status_code)", "def rename_cmd(args):\n cmd = commands.Rename([args.args[0], 'NEW'])\n return cmd", "def set_username(self, accountid, newusername):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&accountid=' + accountid\n payload = {'newusername': newusername}\n url = SECURE_API_URL + \"raas/v1/account/setusername\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)", "async def change_username(self, new_username: str, password: str):\n\n data = {\"username\": f\"{new_username}\", \"password\": f\"{password}\"}\n ee = await self.request.request(url=f'https://auth.roblox.com/v2/username', method='post', data=data)\n return ee", "def rename():\n database.ask(mode='single')\n F = database.check(single=True)\n res = askItems([['Name',database.names[0]]],\n caption = 'Rename variable')\n if res:\n name = res['Name']\n export({name:F})\n database.forget()\n database.set(name)", "def rename(oldname, newname):", "def change_name(self, address, name):\n with self.connect() as c:\n cur = c.cursor()\n cur.execute(\"UPDATE AddressBook SET name = '{}' WHERE address = '{}'\".format(name, address))\n return True", "def set_username(old_name, new_name):\n if not validate_username(new_name):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(new_name):\n return \"käyttäjänimi on jo käytössä\"\n sql = \"UPDATE users \" \\\n \"SET username=:new \" \\\n \"WHERE username=:old\"\n db.session.execute(sql, {\"new\": new_name, \"old\": old_name})\n db.session.commit()\n return \"ok\"", "def updateUsername(self, username):\n\t\turl = \"https://habitica.com/api/v3/user/auth/update-username\"\n\t\tpayload = {\"username\": username}\n\t\treturn(putUrl(url, self.credentials, payload))", "def change_surname(change_account):\n change_data(change_account, changed_data='surname')", "def updateName( user, login, name, sid, postfix=0 ):\n try:\n print \"Trying to update name with login_name=\", login\n user.first_name = name\n newlogin = login\n #strip the username of any special characters, including spaces\n \n if postfix:\n newlogin=\"%s%03d\" % ( login, postfix )\n user.username = newlogin\n user.save()\n except Exception, e:\n print \"Couldn't update name, rolling back\", e\n transaction.savepoint_rollback(sid)\n updateName( user, login, name, sid, postfix+1 )", "def setName(self, newName):\n self.__username = newName", "def 
change_nick(self, before, after):\n userdata = self.users[irc.strings.lower(before)]\n self.del_user(before)\n self.add_user(after, userdata)", "def change_username():\n if request.method == 'POST':\n username = get_username()\n new_username = request.form['change_username']\n user_id = get_id_from_username(username)\n #TODO: Error handling on database writes lol\n change_username_from_id(user_id, new_username )\n return redirect(url_for('users.account_page', username=new_username))", "def rename(self, serial, name):\n api_page = \"/configuration/object/ap_rename\"\n url = \"{}{}?{}&UIDARUBA={}\".format(\n self.base_url,\n api_page,\n self.config_path,\n self.uidaruba)\n\n obj_dict = {'serial-num': serial, 'new-name': name}\n obj_json = json.loads(json.dumps(obj_dict))\n\n resp = self.post(url, obj_json)\n\n print(resp.status_code)\n print(resp.text)", "def alias(alias, new_alias):\n s = db.Series.alias_lookup(alias)\n s.alias = new_alias\n try:\n db.session.commit()\n except:\n db.session.rollback()\n else:\n output.chapter('Changing alias \"{}\" to \"{}\"'.format(alias, new_alias))", "def change_username(self, name):\n self.username = name", "async def botname(ctx, *, new_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n bot_member = discord.utils.find(lambda m: m.id == amor_manager.user.id, ctx.message.server.members)\n await amor_manager.change_nickname(bot_member, new_name)", "def set_real_name(user: User, real_name: str=\"\") -> Result:\n current, *rest = user.pw_gecos.split(\",\")\n if current == real_name:\n return Result(State.unchanged)\n command([\"/usr/bin/chfn\", \"--full-name\", real_name, user.pw_name])\n user.pw_gecos = \",\".join([real_name, *rest])\n return Result(State.success)", "def re_name(name,new_name):\n\n try:\n os.rename(config_tools.full_dest+name,config_tools.full_dest+new_name)\n except OSError:\n print(f\"Не удалось переименовать {name}\")\n else:\n print(f\"{name} успешно переименновавано в {new_name}\")", "def client_name_updated(query):\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")", "def edit_user_name(self, dto):\n user_id = dto[\"user_id\"]\n user_pin = dto[\"pin\"]\n new_user_name = dto[\"new_user_name\"]\n user = self._find_user_by_id_and_pin(user_id, user_pin)\n self.validate_user_name(new_user_name)\n user[\"user_name\"] = new_user_name\n self._user_dao.save_user(user)", "async def nick(self, ctx, *, nickname):\n if len(nickname) > 32:\n await ctx.send(\"Nickname must be 32 characters or fewer\")\n return\n await ctx.me.edit(nick=nickname)\n await ctx.send(f\"Nickname changed to {nickname}\")", "def update_cloud_account_name(cls, body: AwsCloudAccountUpdateName) -> Dict:\n\t\tpass", "def nickname(self, new_nickname):\r\n self.set({\"nickname\": new_nickname})" ]
[ "0.680213", "0.61724126", "0.61534506", "0.5984996", "0.58834755", "0.5820864", "0.5812739", "0.580092", "0.57732266", "0.57656634", "0.5690298", "0.56819504", "0.56756616", "0.5665553", "0.5652086", "0.56036377", "0.55914325", "0.55752826", "0.5561572", "0.5548004", "0.549205", "0.5490809", "0.5485646", "0.5463472", "0.5463193", "0.5458541", "0.5451794", "0.5444121", "0.5431858", "0.5428363" ]
0.78993976
0
Returns True if link_id is in a valid format.
def isLinkIdFormatValid(link_id): if linkable.LINK_ID_REGEX.match(link_id): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_item_link(self, item):\n if len(item.link) > 255:\n raise ValueError(\"item.link length too long.\")\n\n return True", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def is_id(string):\n regex = re.compile('[0-9a-f]{32}\\Z', re.I)\n if bool(regex.match(string)):\n return True\n\n return False", "def _validate_identifier(self, identifier):\n for c in identifier:\n if c not in string.letters + string.digits + '_':\n return False\n return True", "def verify(link: str\n ) -> bool:\n \n # Ignore any /live/ or /av/ articles as they aren't proper articles\n if any([path in link for path in (\"/live/\", \"/sport1/\", \"/av/\")]):\n return False\n \n # Ensure the link corresponds with a valid BBC News article.\n return any([link.startswith(prefix) for prefix in BBC_URLS])", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def is_valid_passport_id(passport_id: int) -> bool:\n return len(passport_id) == 9 and passport_id.isnumeric()", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def _validate_bbg_id(x):\n return len(x) == 12 and x[:3] == 'BBG' and str.isalnum(x[3:11]) and sum(map(\n lambda u: u in ['A', 'E', 'I', 'O', 'U'], x[3:11])) == 0 and str.isdigit(x[11])", "def _block_id_is_guid(self, name):\r\n return len(name) == 32 and re.search(r'[^0-9A-Fa-f]', name) is None", "def _is_validation(video_id):\n hasher = md5()\n hasher.update(bytes(video_id, 'utf-8'))\n first = hasher.hexdigest()[0]\n return first in ['0', '1']", "def is_valid_listings(link):\n if link.has_attr(\"href\") and link.attrs[\"href\"].startswith(LISTING_PREFIX):\n return True\n return False", "def check_link(self, link):\n false_links = [\"wikipedia:\", \"w:\", \"wikitionary:\", \"wikt:\", \"wikinews:\",\n \"n:\", \"wikibooks:\", \"b:\", \"wikiquote:\", \"q:\", \"wikisource:\",\n \"s:\", \"wikispecies:\", \"species:\", \"wikiversity\", \"v:\", \n \"wikivoyage:\", \"voy:\", \"wikimedia:\", \"foundation:\", \"wmf:\", \n \"commonds:\", \"c:\", \"chapter:\", \"metawikipedia:\", \"meta:\", \n \"m:\", \"incubator:\", \"outreach:\", \"mw:\", \"mediazilla:\", \n \"bugzilla:\", \"testwiki:\", 
\"wikitech:\", \"wikidata:\", \"d:\",\n \"phabricator:\", \"phab:\", \"talk:\", \"user talk:\", \"file:\", \n \"user:\", \"template:\", \"category:\", \"file talk:\", \n \"category talk:\", \"image:\", \"media:\", \"special:\", \n \"help:\", \"portal:\", \"portal talk:\", \"\\#\"]\n is_bad = any(false_link in link.lower() for false_link in false_links)\n if is_bad or link[0] == \":\":\n return False\n else:\n return True", "def check_id(id):\n id = id.strip()\n \n if id and id.isdigit(): # id must only be a number\n return id\n else:\n return None", "def identify_id(id: str) -> bool:\n return validate_handle(id)", "def card_link(link):\n try:\n link = int(link)\n except ValueError:\n raise exceptions.LinkRatingInvalid()\n\n if link not in range(1, 9):\n raise exceptions.LinkRatingInvalid()", "def is_order_id_valid(self):\n \n if not self.order_id:\n self.error_message = jsonify({'status':'error', 'message': 'orderId parameter missing'})\n return False\n if not re.match('^[a-f0-9]{32}$', self.order_id):\n self.error_message = jsonify({'status': 'error', 'message': 'orderId must be set to (hex) UUID'})\n return False\n return True", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def is_chain_id(v) -> bool:\n try:\n _validate(v, prefixes=[b'Net'])\n except (ValueError, TypeError):\n return False\n return True", "def is_valid_node_id(val):\n if not val:\n return False\n if not isinstance(val, bytes) and not isinstance(val, bytearray):\n return False\n\n length = len(val)\n if length != SHA1_BIN_LEN and length != SHA2_BIN_LEN and \\\n length != SHA3_BIN_LEN:\n return False\n\n return True", "def _validate_type(self):\n if self._type != \"link\":\n raise securesystemslib.exceptions.FormatError(\n \"Invalid Link: field `_type` must be set to 'link', got: {}\"\n .format(self._type))", "def vet_pdb_id(pdbid):\n if len(pdbid) < 4 or not \\\n pdbid.isalnum() or not \\\n re.match(r'^[0-9][A-Za-z0-9]{3}$', pdbid):\n return False\n return True", "def is_to_public_id(self) -> bool:\n return PublicId.is_valid_str(self.to)", "def checkValidId(self, id, prep_id = False):\n # RRD docs say that limit on vnames is 255 characters and that\n # A-Za-z0-9_ are the valid characters. Zenoss reserves - for it's own\n # use. Limiting to 200 instead just to leave room for whatever.\n # http://oss.oetiker.ch/rrdtool/doc/rrdgraph_data.en.html\n if len(id) > 200:\n return 'GraphPoint names can not be longer than 200 characters.'\n allowed = set(string.ascii_letters + string.digits + '_')\n attempted = set(id)\n if not attempted.issubset(allowed):\n return 'Only letters, digits and underscores are allowed' + \\\n ' in GraphPoint names.'\n return ZenModelRM.checkValidId(self, id, prep_id)", "def validate_identifier(self, identifier):\n pass", "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. 
ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])", "def check_link(feed):\n # see if this is youtube link\n if feed['link'].count('youtube.com') and 'embed' in feed and feed['embed']:\n y = re.findall('youtube\\.com/embed/(.+)', feed['embed'])\n if y:\n # format correct youtube link\n feed['link'] = 'http://youtu.be/{0}'.format(y[0])\n return True\n\n return False", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])" ]
[ "0.6823712", "0.6606774", "0.65906495", "0.63503116", "0.63053745", "0.62947255", "0.62471926", "0.6207654", "0.6194306", "0.6191876", "0.6190582", "0.60711575", "0.60548055", "0.6014753", "0.5996904", "0.59781694", "0.5946499", "0.59353083", "0.59190065", "0.5917687", "0.59085196", "0.5894455", "0.58926666", "0.5878567", "0.58709157", "0.58569556", "0.5849952", "0.58237934", "0.5809061", "0.58060557" ]
0.90292734
0
Returns True if scope_path is in a valid format.
def isScopePathFormatValid(scope_path): if linkable.SCOPE_PATH_REGEX.match(scope_path): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path_validate(path):\n # functionality to be added later\n return path", "def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)", "def ValidatePath(self, root_path: str) -> bool:\n if 'silver' in root_path:\n return True\n\n return False", "def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n path = path + os.sep\n if path[0:len(root)] == root:\n return True\n return False", "def validate_path(self, path):\n return True # Allow anything in path, even spaces\n # pattern = r'(/?[a-zA-Z_][a-zA-Z0-9_]*)+$' # require start with letter\n # pattern = r'(/?[a-zA-Z0-9_]*)+$' # allow start with number\n pattern = r'^([^ ]+)$' # allow anything except spaces\n if path == '' or re.match(pattern, path):\n return\n raise ValueError(\"Invalid path (spaces not allowed):\\n'%s'\" % path)", "def is_valid_file(self, file_path):\n return True", "def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False", "def is_valid(path):\n return (\n bool(path)\n and os.path.isabs(path)\n and os.path.exists(path)\n and (not is_apple() or path.endswith(\".dylib\"))\n )", "def _validate_path(self, path: str, is_file: bool) -> bool:\n is_valid_path = True\n if is_file and not os.path.isfile(path):\n is_valid_path = False\n elif not is_file and not os.path.isdir(path):\n is_valid_path = False\n if is_valid_path:\n logging.info('github_source_interceptor: Located path: ' + path)\n else:\n logging.error('github_source_interceptor: Could not locate path: ' + path)\n\n return is_valid_path", "def validate_scope(scope_required, scope_token):\n if scope_required:\n service, function, actions = scope_required.split(':')\n\n if (service != scope_token['type'] and scope_token['type'] != '*') or \\\n (function != scope_token['name'] and scope_token['name'] != '*') or \\\n (actions not in scope_token['actions'] and '*' not in scope_token['actions']):\n raise Unauthorized('Scope not allowed!')", "def validate_scope(self, scope, *args, **kwargs):\n raise NotImplementedError('Subclasses must implement this method.')", "def isValidPath(self, Path, wantDirectory=None, wantFile=None):\n return natlinkstatus.isValidPath(Path, wantDirectory=wantDirectory, wantFile=wantFile)", "def is_path_constraint_name(global_name):\n return '.path:' in global_name", "def is_log_path_valid(self):\n if self.log_paths:\n return self.path in self.log_paths\n else:\n # If .log_paths is empty, just assume all paths are legal\n return True", "def _validate_format(format_type):\n if format_type not in GeopandasWriter.formats:\n raise ValueError('Unsupported file format.')\n\n return True", "def _allow_scopes(self, scopes):\n self.ensure_one()\n if not scopes:\n return True\n provided_scopes = set(self.scope.split())\n resource_scopes = set(scopes)\n return resource_scopes.issubset(provided_scopes)", "def validate_path(validation_context, path):\n\n return _validate_path(validation_context, path)", "def _is_file_valid(name: str) -> bool:\n return not name.startswith(\".\")", "def is_path(self, s):\n return True", "def validate_url(path):\n parsed = urlparse(path)\n return bool(parsed.scheme) and bool(parsed.netloc)", "def is_valid_path(self, path: []) -> bool:\n if not path:\n return True\n\n if len(path) == 1:\n return self.contains_vertex(path[0])\n\n i = 0\n j = 1\n while j < len(path):\n if path[j] not in self.adj_list[path[i]]:\n return 
False\n else:\n i += 1\n j += 1\n\n return True", "def validate_short_path(short_path):", "def is_valid(self, identifier, lint_context):\n\n scope_plugin = lint_context['plugins']['scope']\n explicity = scope_plugin.get_explicity_of_scope_visibility(identifier)\n\n is_valid = (scope_plugin.is_function_identifier(identifier) or\n explicity is not ExplicityOfScopeVisibility.IMPLICIT)\n\n if not is_valid:\n self._make_description(identifier, scope_plugin)\n\n return is_valid", "def VerifyScopeForSearch(scope):\n if not re.match('^[^/]+/[^/]+$', scope):\n raise gcloud_exceptions.InvalidArgumentException(\n '--scope', 'A valid scope should be: projects/{PROJECT_ID}, '\n 'projects/{PROJECT_NUMBER}, folders/{FOLDER_NUMBER} or '\n 'organizations/{ORGANIZATION_NUMBER}.')", "def is_valid(self, scopes=None):\n return not self.is_expired() and self.allow_scopes(scopes)", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def allow_scopes(self, scopes):\n if not scopes:\n return True\n\n provided_scopes = set(self.scope.split())\n resource_scopes = set(scopes)\n\n return resource_scopes.issubset(provided_scopes)", "def is_gcs_path(path):\n return GCS_REGEX.match(path)", "def is_valid(self, scopes=None):\n self.ensure_one()\n return not self.has_expired() and self._allow_scopes(scopes)", "def IsValidPath(path):\n path = path.lower()\n if any(path.endswith(extension) for extension in EXCLUDED_EXTENSIONS):\n return False\n\n segments = path.split('/')\n filename = segments[-1]\n if filename.startswith('.') or filename in EXCLUDED_FILENAMES:\n return False\n\n dirs = segments[:-1]\n # allow META-INF/services at the root to support ServiceLoader\n if dirs[:2] == ['meta-inf', 'services']:\n return True\n\n return not any(dir in EXCLUDED_DIRECTORIES for dir in dirs)" ]
[ "0.62401545", "0.6233845", "0.61344105", "0.61142206", "0.6085311", "0.6056564", "0.6019026", "0.5993043", "0.59646183", "0.5931616", "0.59268624", "0.57996225", "0.5798255", "0.5786526", "0.5785276", "0.5777391", "0.57569396", "0.5749316", "0.57310104", "0.57060814", "0.5705232", "0.5679877", "0.56705856", "0.5662623", "0.5643738", "0.559087", "0.55612296", "0.55598766", "0.5556902", "0.55495316" ]
0.90448207
0
Fetch latest crease xml from animation publish folder...
def _crease_XML_latest_publish(self, tk, templateFile = '', id = '', shotNum = ''): debug(app = self.app, method = '_crease_XML_latest_publish', message = 'Looking for crease xml now...', verbose = False) getCreaseXMLPublishFolder = tk.paths_from_template( templateFile, {'Step' : 'Anm', 'id' : id, 'Shot' : shotNum} ) debug(app = self.app, method = '_crease_XML_latest_publish', message = 'getCreaseXMLPublishFolder: %s' % getCreaseXMLPublishFolder, verbose = False) if getCreaseXMLPublishFolder: getCreaseXMLVersion = os.listdir(getCreaseXMLPublishFolder[0]) debug(app = self.app, method = '_crease_XML_latest_publish', message = 'getCreaseXMLVersion: %s' % getCreaseXMLVersion, verbose = False) ## now find the highest version folder number highestCreaseXMLVersion = r'%s' % max(getCreaseXMLVersion) debug(app = self.app, method = '_crease_XML_latest_publish', message = 'highestCreaseXMLVersion...%s' % highestCreaseXMLVersion, verbose = False) highestCreaseXMLVersion = os.path.join(getCreaseXMLPublishFolder[0], highestCreaseXMLVersion) highestCreaseXMLVersion = highestCreaseXMLVersion.replace('\\', '/') ## Return latest crease xml publish file... return highestCreaseXMLVersion if highestCreaseXMLVersion else ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fetchAnimPublish(self, tk, templateFile = '', fields = '', id = '', shotNum = '', inprogressBar = '', filteredPublish = ''):\r\n\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'Fetching latest caches now....', verbose = False)\r\n\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'Template....%s' % templateFile, verbose = False)\r\n\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'id....%s' % id, verbose = False)\r\n\r\n\t\t## Get all the publishes from shotgun now..\r\n\t\tif self.app.get_setting('shotStep') == 'Blocking':\r\n\t\t\tgetAnimVersionFolders = tk.paths_from_template(templateFile, {'Step' : 'Blck', 'id' : id, 'Shot' : shotNum})\r\n\t\telif self.app.get_setting('shotStep') == 'Animation':\r\n\t\t\tgetAnimVersionFolders = tk.paths_from_template(templateFile, {'Step' : 'Anm', 'id' : id, 'Shot' : shotNum})\r\n\r\n\t\tif getAnimVersionFolders:\r\n\t\t\t## now find the highest version folder number\r\n\t\t\thighestVersionFolder = r'%s' % max(getAnimVersionFolders)\r\n\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'highestVersionFolder...%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\t\thighestVersionFolder = highestVersionFolder.replace('\\\\', '/')\r\n\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'highestVersionFolder replaced \\\\ with /...\\n%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\t\tversionNumber = highestVersionFolder.split('/')[-1]\r\n\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'versionNumber: %s' % versionNumber, verbose = False)\r\n\r\n\t\t\tgetCacheFiles = os.listdir(highestVersionFolder)\r\n\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'getCacheFiles...\\n%s' % getCacheFiles, verbose = False)\r\n\r\n\r\n\t\t\t##################################################################################################################\r\n\t\t\t## GPU CACHE LOADER\r\n\t\t\t# if 'publish/gpu' in highestVersionFolder:\r\n\t\t\t# if filteredPublish == 'Fetch GPU Publish' or filteredPublish == 'Fetch All Publishes':\r\n\t\t\t#\r\n\t\t\t# debug(app = self.app, method = '_fetchAnimPublish', message = 'Loading gpu cache files now...', verbose = False)\r\n\t\t\t# ## Build the group if it doesn't already exist\r\n\t\t\t# if not self._buildGroup('GPU_CACHES_hrc', versionNumber):\r\n\t\t\t# if versionNumber >= self.staticVersionNumber:\r\n\t\t\t# ## Now process the caches\r\n\t\t\t# for each in getCacheFiles:\r\n\t\t\t# nodeName = each.split('_')[0]\r\n\t\t\t# if not cmds.objExists(nodeName):\r\n\t\t\t# gpuNode = '%s/%s' % (highestVersionFolder, each)\r\n\t\t\t# debug(app = self.app, method = '_fetchAnimPublish', message = 'gpuNode...\\n%s' % gpuNode, verbose = False)\r\n\t\t\t#\r\n\t\t\t# cmds.createNode('gpuCache', n = nodeName)\r\n\t\t\t# debug(app = self.app, method = '_fetchAnimPublish', message = 'gpuNode... created..', verbose = False)\r\n\t\t\t#\r\n\t\t\t# cmds.rename(\"transform1\", \"%s_gpu\" % nodeName)\r\n\t\t\t# debug(app = self.app, method = '_fetchAnimPublish', message = 'Rename for %s sucessful...' 
% nodeName, verbose = False)\r\n\t\t\t#\r\n\t\t\t# try:\r\n\t\t\t# cmds.setAttr('%s.cacheFileName' % nodeName, gpuNode, type = \"string\")\r\n\t\t\t# cmds.setAttr(\"%s.cacheGeomPath\" % nodeName, \"|\", type = \"string\")\r\n\t\t\t# cmds.parent(cmds.listRelatives(nodeName, parent = True)[0], 'GPU_CACHES_hrc')\r\n\t\t\t# except RuntimeError:\r\n\t\t\t# cmds.warning('FAILED TO SET GPU PATHS FOR %s CORRECTLY PLEASE CHECK YOUR PUBLISH!' % nodeName)\r\n\t\t\t#\r\n\t\t\t# debug(app = self.app, method = '_fetchAnimPublish', message = 'GPU cache %s succesfully loaded and parented...' % nodeName, verbose = False)\r\n\t\t\t# else:\r\n\t\t\t# cmds.warning(\"FAILED: %s already exists in the scene\" % nodeName)\r\n\t\t\t# else:\r\n\t\t\t# debug(app = self.app, method = '_fetchAnimPublish', message = 'GPU Caches older than current publish version, no gpus were export on publish for this version thus we are skipping GPU import', verbose = False)\r\n\t\t\t# pass\r\n\t\t\t# else:\r\n\t\t\t# debug(app = self.app, method = '_fetchAnimPublish', message = 'GPU_CACHES_hrc ALREADY SETUP SKIPPING....', verbose = False)\r\n\t\t\t# pass\r\n\r\n\t\t\t##################################################################################################################\r\n\t\t\t## STATIC CACHE LOADER\r\n\t\t\tif 'publish/alembic_static' in highestVersionFolder:\r\n\t\t\t\tif filteredPublish == 'Fetch Static Publish' or filteredPublish == 'Fetch All Publishes':\r\n\r\n\t\t\t\t\thrc = 'ABC_STATIC_CACHES_hrc'\r\n\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'Loading alembic_static cache files now...', verbose = False)\r\n\r\n\t\t\t\t\t## Build the group if it doesn't already exist\r\n\t\t\t\t\tproceedFetch = True\r\n\t\t\t\t\tif cmds.objExists(hrc):\r\n\t\t\t\t\t\tproceedFetch = cmds.confirmDialog(title = 'Fetch Static Publish', message = '\"%s\" already exist! Press OK to re-fetch a latest publish.' % hrc, button = ['OK', 'Cancel'], defaultButton = 'OK', cancelButton = 'Cancel', dismissString = 'Cancel')\r\n\t\t\t\t\t\tproceedFetch = True if proceedFetch == 'OK' else False\r\n\r\n\t\t\t\t\t## Now process the caches\r\n\t\t\t\t\tif proceedFetch:\r\n\t\t\t\t\t\tif cmds.objExists(hrc):\r\n\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tcmds.delete(hrc)\r\n\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\tcmds.warning('Failed to delete \"%s\"...' % hrc)\r\n\t\t\t\t\t\t\t\tproceedFetch = False\r\n\r\n\t\t\t\t\t\tif not cmds.objExists(hrc):\r\n\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tself._buildGroup(hrc, versionNumber)\r\n\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\tcmds.warning('Failed to create \"%s\"...' % hrc)\r\n\t\t\t\t\t\t\t\tproceedFetch = False\r\n\r\n\t\t\t\t\t\tif proceedFetch:\r\n\t\t\t\t\t\t\tfor each in getCacheFiles:\r\n\t\t\t\t\t\t\t\tabcNode = '%s/%s' % (highestVersionFolder, each)\r\n\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'abcNode %s' % abcNode, verbose = False)\r\n\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tcmds.AbcImport(abcNode, reparent = \"|%s\" % hrc, setToStartFrame = True)#, createIfNotFound = True, removeIfNoUpdate = True)\r\n\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'StaticCache: %s import FAILED!' % abcNode, verbose = False)\r\n\t\t\t\t\t\t\t\t\tinprogressBar.close()\r\n\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = '\"%s\" imported successfully...' 
% hrc, verbose = False)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'FAILED TO SETUP \"%s\", PLEASE CHECK WITH YOUR SUPERVISOR!!!' % hrc, verbose = False)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = '\"%s\" ALREADY SETUP SKIPPING...' % hrc, verbose = False)\r\n\r\n\t\t\t##################################################################################################################\r\n\t\t\t## ANIMATED CACHE LOADER\r\n\t\t\telif 'publish/alembic_anim' in highestVersionFolder:\r\n\t\t\t\tif filteredPublish == 'Fetch Anim Publish' or filteredPublish == 'Fetch All Publishes':\r\n\r\n\t\t\t\t\thrc = 'ABC_ANIM_CACHES_hrc'\r\n\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'Loading alembic_anim cache files now...', verbose = False)\r\n\r\n\t\t\t\t\t## Build the group if it doesn't already exist\r\n\t\t\t\t\tproceedFetch = True\r\n\t\t\t\t\tif cmds.objExists(hrc):\r\n\t\t\t\t\t\tproceedFetch = cmds.confirmDialog(title = 'Fetch Anim Publish', message = '\"%s\" already exist! Press OK to re-fetch a latest publish.' % hrc, button = ['OK', 'Cancel'], defaultButton = 'OK', cancelButton = 'Cancel', dismissString = 'Cancel')\r\n\t\t\t\t\t\tproceedFetch = True if proceedFetch == 'OK' else False\r\n\r\n\t\t\t\t\t## Now process the caches\r\n\t\t\t\t\tif proceedFetch:\r\n\t\t\t\t\t\tif cmds.objExists(hrc):\r\n\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tcmds.delete(hrc)\r\n\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\tcmds.warning('Failed to delete \"%s\"...' % hrc)\r\n\t\t\t\t\t\t\t\tproceedFetch = False\r\n\r\n\t\t\t\t\t\tif not cmds.objExists(hrc):\r\n\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tself._buildGroup(hrc, versionNumber)\r\n\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\tcmds.warning('Failed to create \"%s\"...' % hrc)\r\n\t\t\t\t\t\t\t\tproceedFetch = False\r\n\r\n\t\t\t\t\t\tif proceedFetch:\r\n\t\t\t\t\t\t\tfor each in getCacheFiles:\r\n\t\t\t\t\t\t\t\tabcNode = '%s/%s' % (highestVersionFolder, each)\r\n\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'abcNode %s' % abcNode, verbose = False)\r\n\t\t\t\t\t\t\t\tcmds.AbcImport(abcNode, reparent = \"|%s\" % hrc, setToStartFrame = True)\r\n\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = '\"%s\" imported successfully...' % hrc, verbose = False)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'FAILED TO SETUP \"%s\", PLEASE CHECK WITH YOUR SUPERVISOR!!!' % hrc, verbose = False)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = '\"%s\" ALREADY SETUP SKIPPING...' % hrc, verbose = False)\r\n\r\n\t\t\t##################################################################################################################\r\n\t\t\t## CAMERA LOADER\r\n\t\t\telif 'publish/cam' in highestVersionFolder:\r\n\t\t\t\tif filteredPublish == 'Fetch Camera Publish' or filteredPublish == 'Fetch All Publishes':\r\n\r\n\t\t\t\t\thrc = 'BAKE_CAM_hrc'\r\n\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'Loading camera files now...', verbose = False)\r\n\r\n\t\t\t\t\t## Build the group if it doesn't already exist\r\n\t\t\t\t\tproceedFetch = True\r\n\t\t\t\t\tif cmds.objExists(hrc):\r\n\t\t\t\t\t\tproceedFetch = cmds.confirmDialog(title = 'Fetch Camera Publish', message = '\"%s\" already exist! Press OK to re-fetch a latest publish.' 
% hrc, button = ['OK', 'Cancel'], defaultButton = 'OK', cancelButton = 'Cancel', dismissString = 'Cancel')\r\n\t\t\t\t\t\tproceedFetch = True if proceedFetch == 'OK' else False\r\n\r\n\t\t\t\t\t## Now process the caches\r\n\t\t\t\t\tif proceedFetch:\r\n\t\t\t\t\t\tif cmds.objExists(hrc):\r\n\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tcmds.delete(hrc)\r\n\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\tcmds.warning('Failed to delete \"%s\"...' % hrc)\r\n\t\t\t\t\t\t\t\tproceedFetch = False\r\n\r\n\t\t\t\t\t\tif proceedFetch:\r\n\t\t\t\t\t\t\tfor each in getCacheFiles:\r\n\t\t\t\t\t\t\t\tcamNode = '%s/%s' % (highestVersionFolder, each)\r\n\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'camera %s' % camNode, verbose = False)\r\n\t\t\t\t\t\t\t\tcmds.file(camNode, i = True)\r\n\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'Camera imported successfully...', verbose = False)\r\n\t\t\t\t\t\t\t\tfor each in cmds.listRelatives(hrc, children = True):\r\n\t\t\t\t\t\t\t\t\tchannels = ['tx', 'ty', 'tz', 'rx', ' ry', 'rz', 'sx', 'sy', 'sz']\r\n\t\t\t\t\t\t\t\t\tfor eachChan in channels:\r\n\t\t\t\t\t\t\t\t\t\tcmds.setAttr('%s.%s' %(each, eachChan), lock = True)\r\n\t\t\t\t\t\t\t\t\t\tif not cmds.objExists('%s.version' % hrc):\r\n\t\t\t\t\t\t\t\t\t\t\tcmds.addAttr(('%s' % hrc), ln='version', dt='string')\r\n\t\t\t\t\t\t\t\t\t\t\tcmds.setAttr('%s.version' % hrc, versionNumber, type='string')\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'FAILED TO SETUP \"%s\", PLEASE CHECK WITH YOUR SUPERVISOR!!!' % hrc, verbose = False)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = '\"%s\" ALREADY SETUP SKIPPING...' % hrc, verbose = False)\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\t\telse:\r\n\t\t\tdebug(app = self.app, method = '_fetchAnimPublish', message = 'No Versions found for %s...' % templateFile, verbose = False)", "def extractAnimationsFromXar( strFilename ):\n print( \"INF: extractAnimationFromXar: parsing '%s'\" % strFilename );\n allAnims = dict();\n xar = xml.dom.minidom.parse( strFilename );\n choregrapheNode = xar.childNodes[0]; # first is \"ChoregrapheProject\"\n strXarVersion = choregrapheNode.getAttribute( \"xar_version\" );\n print( \"strXarVersion: %s\" % strXarVersion );\n #~ print( domNodeToString( choregrapheNode ) );\n # look for root box\n for node in choregrapheNode.childNodes:\n if( node.nodeType != xml.dom.minidom.Node.TEXT_NODE and node.hasAttribute( \"name\" ) ):\n if( node.getAttribute( \"name\" ) == \"root\" ):\n break;\n else:\n return False;\n rootNode = node;\n #~ print( domNodeToString( rootNode ) );\n listNodesBox = findElementByName( rootNode, \"Box\" ); # find all elements with a specific name, and return them in an array\n print( \"listNodesBox found: %d\" % len( listNodesBox ) );\n #~ print( domNodeToString( listNodesBox[8] ) );\n for node in listNodesBox:\n strAnimationName = node.getAttribute( \"name\" );\n strAnimationName = strAnimationName.replace( \" \", \"_\" );\n listTimeline = findElementByName( node, \"Timeline\" );\n #~ print( domNodeToString( listTimeline[0] ) );\n listNames = [];\n listTimes = [];\n listPositions = []; \n for timeline in listTimeline:\n if( len(listTimeline) > 1 ):\n print( \"ERR: more than one timeline in a box: not handled case! 
(strAnimationName:%s)\" % strAnimationName );\n return;\n #~ print( str( timeline.attributes ) );\n #~ print( domNodeToString( timeline ) );\n #~ print( domAttributesToString( timeline ) );\n nFps = int( timeline.getAttribute( \"fps\" ) );\n #~ print( \"fps: %d\" % nFps );\n listActuator = findElementByName( timeline, \"ActuatorCurve\" );\n for actuator in listActuator:\n strActuatorName = str(actuator.getAttribute( \"actuator\" )); # str => remove unicode\n listNames.append( strActuatorName );\n listKey = findElementByName( actuator, \"Key\" );\n keyTimes = [];\n keyPositions = [];\n if( len(listKey) < 1 ):\n print( \"WRN: extractAnimationFromXar: in the box %s, the joint %s is used but no keys are defined for it, removing it from the used joint list...\" % ( strAnimationName, strActuatorName ) );\n del listNames[-1];\n continue;\n for key in listKey:\n rKeyNumber = float( key.getAttribute( \"frame\" ) );\n rKeyVal = float( key.getAttribute( \"value\" ) ) * math.pi/180;\n keyTimes.append( rKeyNumber / nFps );\n listTangent = findElementByName( actuator, \"Tangent\" );\n if( len( listTangent ) == 0 ):\n keyPositions.append( rKeyVal ); # no splines there\n else:\n keyPositions.append( [rKeyVal] ); # prepare for appending spline info\n for tangent in listTangent:\n #~ print( domAttributesToString( tangent ) );\n strInterpType=tangent.getAttribute( \"interpType\" );\n strSide=tangent.getAttribute( \"strSide\" );\n rAbscissaParam=float( tangent.getAttribute( \"abscissaParam\" ) )/nFps;\n rOrdinateParam=float( tangent.getAttribute( \"ordinateParam\" ) ) * math.pi/180;\n if( strInterpType == \"linear\" ):\n keyPositions[-1].append( [1,rAbscissaParam,rOrdinateParam] ); # todo, validate parameters!\n elif( strInterpType == \"bezier\" ):\n keyPositions[-1].append( [2,rAbscissaParam,rOrdinateParam] ); # todo, validate parameters!\n else:\n print( \"ERR: extractAnimationFromXar: this type isn't handled: '%s'\" % strInterpType );\n listTimes.append( keyTimes );\n listPositions.append( keyPositions );\n # for actuator\n allAnims[strAnimationName] = [listNames,listTimes,listPositions];\n # for timeline \n # for node\n print( \"INF: extractAnimationFromXar: exiting with %d anim(s)\" % len(allAnims) );\n return allAnims;", "def _fetchFXPublish(self, tk, templateFile = '', fields = '', id = '', shotNum = '', inprogressBar = ''):\r\n\r\n\t\t## First clean up any existing caches and fluids\r\n\t\tself._removeFX()\r\n\r\n\t\t## CHECK FOR FX PUBLISHES NOW\r\n\t\tgetFXVersionFolders = tk.paths_from_template(templateFile, {'Step' : 'FX', 'id' : id, 'Shot' : shotNum})\r\n\t\t## FX PUBLISH FLUID CONTAINERS\r\n\t\tif getFXVersionFolders:\r\n\t\t\t## now find the highest version folder number\r\n\t\t\thighestVersionFolder = r'%s' % max(getFXVersionFolders)\r\n\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'highestVersionFolder...%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\t\thighestVersionFolder = highestVersionFolder.replace('\\\\', '/')\r\n\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'highestVersionFolder replaced \\\\ with /...\\n%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\t\tversionNumber = highestVersionFolder.split('/')[-1]\r\n\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'versionNumber: %s' % versionNumber, verbose = False)\r\n\r\n\t\t\tgetCacheFiles = os.listdir(highestVersionFolder)\r\n\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'getCacheFiles...\\n%s' % getCacheFiles, verbose = 
False)\r\n\t\t\t##################################################################################################################\r\n\t\t\t## FX FLUID TEXTURE CONTAINER CACHES\r\n\t\t\tif 'publish/fx' in highestVersionFolder:\r\n\t\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'Loading fx cache files now...', verbose = False)\r\n\r\n\t\t\t\t## Build the group if it doesn't already exist\r\n\t\t\t\tself._buildGroup('FX_CACHES_hrc', versionNumber)\r\n\r\n\t\t\t\tif not cmds.objExists('fluids_hrc'):\r\n\t\t\t\t\tdebug(None, method = '_fetchFXPublish', message = 'FETCHING FLUID TEXTURE CONTAINERS NOW!', verbose = False)\r\n\r\n\t\t\t\t\tif getCacheFiles: ## A PUBLISH EXISTS\r\n\t\t\t\t\t\t## IMPORT FLUIDS_HRC FROM MB FILE\r\n\t\t\t\t\t\tfor each in getCacheFiles:\r\n\t\t\t\t\t\t\tif each.endswith('.mb'):\r\n\t\t\t\t\t\t\t\tfluidsNode = '%s/%s' % (highestVersionFolder, each)\r\n\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'fluidsNode: %s.' % fluidsNode, verbose = False)\r\n\t\t\t\t\t\t\t\t## Import the fluids_hrc group mb file now...\r\n\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tcmds.file(fluidsNode, i = True)\r\n\t\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'Fluids_hrc.mb imported successfully.. fluidsNode:%s.' % fluidsNode, verbose = False)\r\n\r\n\t\t\t\t\t\t\t\t\t## Now assign the fluid presets again! Or the caches DO NOT WORK!!!\r\n\t\t\t\t\t\t\t\t\t## Apply foam preset\r\n\t\t\t\t\t\t\t\t\tpathToFoamPreset = '%s%s' % (CONST.OCEANTEXTURE_PRESETPATH, CONST.OCEAN_FOAMTEXTURE_PRESET)\r\n\t\t\t\t\t\t\t\t\tmel.eval( 'applyPresetToNode \"%s\" \"\" \"\" \"%s\" 1;' %(CONST.FOAM_FLUID_SHAPENODE, pathToFoamPreset) )\r\n\t\t\t\t\t\t\t\t\tdebug(None, method = '_fetchFXPublish', message = 'Mel preset applied: %s' % pathToFoamPreset, verbose = False)\r\n\r\n\t\t\t\t\t\t\t\t\t## Apply wake preset\r\n\t\t\t\t\t\t\t\t\tpathToWakePreset = '%s%s' % (CONST.OCEANTEXTURE_PRESETPATH, CONST.OCEAN_WAKETEXTURE_PRESET)\r\n\t\t\t\t\t\t\t\t\tmel.eval( 'applyPresetToNode \"%s\" \"\" \"\" \"%s\" 1;' %(CONST.WAKE_FLUID_SHAPENODE, pathToWakePreset) )\r\n\t\t\t\t\t\t\t\t\tdebug(None, method = '_fetchFXPublish', message = 'Mel preset applied: %s' % pathToWakePreset, verbose = False)\r\n\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\tcmds.warning('Failed to load FX file, file is corrupt.')\r\n\r\n\t\t\t\t\t\t### NOW ATTACH THE CACHE TO THE FLUID TEXTURES!\r\n\t\t\t\t\t\t### Changed export to single file altered this to accommodate the single file exported.\r\n\t\t\t\t\t\tdebug(None, method = '_fetchFXPublish', message = 'ATTACHING CACHES NOW...', verbose = False)\r\n\t\t\t\t\t\tfor each in getCacheFiles:## THERE SHOULD ONLY BE ONE NOW!??\r\n\t\t\t\t\t\t\tif each.endswith('.xml'):\r\n\t\t\t\t\t\t\t\tcachePath = '%s/%s' % (highestVersionFolder, each)\r\n\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'cachePath:%s' % cachePath, verbose = False)\r\n\t\t\t\t\t\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'getCacheFiles each: %s' % each, verbose = False)\r\n\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t#mel.eval(\"doImportFluidCacheFile(\\\"%s\\\", {}, {\\\"%s\\\"}, {});\" % (cachePath, CONST.FOAM_FLUID_SHAPENODE))\r\n\t\t\t\t\t\t\t\t\tfluidCaches.rebuild_cache_from_xml(cachePath)\r\n\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\tcmds.warning('Failed to connect cache %s' % cachePath)\r\n\t\t\t\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'Fluid caches imported successfully...', verbose = 
False)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcmds.warning('THERE ARE NO FLUID CONTAINERS PUBLISHED FROM FX FOR THIS SHOT! Please see your cg supervisor now...')\r\n\t\t\t\telse:\r\n\t\t\t\t\tdebug(app = self.app, method = '_fetchFXPublish', message = 'fluids_hrc ALREADY IN SCENE SKIPPING....', verbose = False)\r\n\t\telse:\r\n\t\t\tcmds.confirmDialog(title = 'FX PUBLISH', message = \"No FX publishes found. Please confirm with the FX artists whether this shot requires any FX publish and if not, proceed to rebuild and move the ocean manually as long as it covers the camera view!\", button = 'OK')", "def _get_latest_content(self):\n if self._modified is None:\n self._load_content()\n return\n\n # check if data updated\n statinfo = os.stat(self._path)\n if statinfo.st_mtime > self._modified:\n self._load_content()", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def get_single_xml_metadata(_oid):\n record = Metadata.objects.get_or_404(pk=_oid)\n\n json_rec = json.loads(record.to_json())\n\n d_fmt = '%Y-%m-%d'\n\n d_fmt1 = '%Y-%m-%dT%H:%M:%SZ'\n\n try:\n #start/end date might not exist yet\n if record.start_date is not None:\n json_rec['start_date'] = record.start_date.isoformat() + '.000Z'\n if record.end_date is not None:\n json_rec['end_date'] = record.end_date.isoformat() + '.000Z'\n if record.first_pub_date is not None:\n json_rec['first_pub_date'] = record.first_pub_date.strftime(d_fmt)\n if record.md_pub_date is not None:\n json_rec['md_pub_date'] = record.md_pub_date.strftime(d_fmt1)\n\n except AttributeError:\n # if we get an attribute error, continue; any other error will still\n # cause the program to fail\n pass\n\n json_rec['last_mod_date'] = record.last_mod_date.strftime(d_fmt1)\n\n\n # for XSLT, need something inside of each <item> in this generic XML\n _enclose_word = lambda k: {'word': k}\n _enclose_words = lambda words: map(_enclose_word, words)\n\n json_rec['thematic_keywords'] = _enclose_words(\n json_rec['thematic_keywords'])\n\n json_rec['place_keywords'] = _enclose_words(json_rec['place_keywords'])\n\n json_rec['data_format'] = _enclose_words(json_rec['data_format'])\n\n json_rec['topic_category'] = _enclose_words(json_rec['topic_category'])\n\n _enclose_url = lambda url: {'url': url}\n\n json_rec['online'] = map(_enclose_url, json_rec['online'])\n\n if record.md_pub_date is not None:\n json_rec['download_url'] = \\\n app.config['ATTACHMENT_DOWNLOAD_BASE_URL'] + str(record.id)\n\n xml_str = dicttoxml(dict(record=json_rec)) # , attr_type=False)\n\n return Response(xml_str, 200, mimetype='application/xml')", "def latestEntriesRss():\n now = datetime.now()\n latestEntries = session.query(Pokemon).order_by(desc(Pokemon.date_entered))\\\n .limit(20)\n rss = render_template('rss.xml', lastBuildDate=now, entries=latestEntries)\n response = make_response(rss)\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def __local_rs(soup):\n return __get_local_g1_news(soup)", "def __local_rs(soup):\n return __get_local_g1_news(soup)", "def load_xml(pmid, sleep=None):\r\n link = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&retmode=xml&id=%s\" % str(pmid) \r\n page = requests.get(link) \r\n tree = html.fromstring(page.content)\r\n if sleep is not None:\r\n time.sleep(sleep)\r\n return tree", "def load_xml(pmid, sleep=None):\n link = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&retmode=xml&id=%s\" % 
str(pmid) + \"&api_key=\" + API_KEY\n print(link)\n page = requests.get(link)\n tree = html.fromstring(page.content)\n if sleep is not None:\n time.sleep(sleep)\n return tree", "def get_current_file(self):\n#-----------on attend la fin de creation du fichier Nexus\n \n while self._ismoving():\n self.logger.debug(\"DataRecorder creat Nexus file\") \n time.sleep(1.0)\n return self.dp.currentFiles[0]", "def download_archive(self):\n\n def time_convert(structure):\n \"\"\"\n :param structure: tuple representation of time\n :return: GitHub archive time\n \"\"\"\n \n \n join_number_to_zero = lambda number: (\"\" if number > 9 else \"0\") + str(number)\n\n return \"%s-%s-%s-%s\" % (\n structure.tm_year, join_number_to_zero(structure.tm_mon), join_number_to_zero(structure.tm_mday),\n structure.tm_hour)\n\n current_time = self.get_time()\n self.logger.debug(__name__ + \": \" + \"current time: \" + str(gmtime(current_time)))\n\n difference = -25200\n #timezone difference in seconds between GMT and west coast of USA\n\n downloading_time = int(timegm(self.config[\"last_connection_time\"])) + 3600\n self.logger.debug(__name__ + \": \" + \"downloading time: \" + str(gmtime(downloading_time)))\n\n if downloading_time > current_time - 7200:\n self.logger.info(__name__ + \": \" + \"unable to download file (time limiting).\")\n return\n\n downloading_time += difference\n\n json_file_name = self.download_file(time_convert(gmtime(downloading_time)))\n\n self.config[\"last_connection_time\"] = gmtime(downloading_time - difference)\n self.logger.debug(__name__ + \": \" + \"last_connection_time: \" + str(self.config[\"last_connection_time\"]))\n\n return json_file_name", "def get_cghub_xml(dirname, analysis_id, logger=default_logger):\n\n os.system(\"cgquery analysis_id=%s -a -o %s/metadata.xml\" %(analysis_id, dirname))\n return \"%s/metadata.xml\" %(dirname)", "def __local_sc(soup):\n return __get_local_g1_news(soup)", "def getOldEpisodes(config, rss, chan, namespaces):\n # Indicates items are to be added. Needed to know whether or not to\n # manually add namespaces. Yes, it is wonky. A side effect of the way\n # ElementTree adds namespaces.\n itemsAdded = False\n # Return value for the old episode elements which can be empty\n # if no old episodes exist\n items = None\n # Return value for the first year of publication as indicated by the\n # `pubDate` on the earliest episode. Used for generating the copyright\n # string. 
Can be empty if no old episodes exist.\n firstYear = None\n\n xmlFilepath = config['xmlFilepath']\n\n if os.path.isfile(xmlFilepath):\n # Load and strip the XML\n with open(xmlFilepath, 'r') as f:\n xmlStr = ''\n for line in f:\n # strip leading and trailing whitespace so minidom can prettify\n # without adding extraenous new lines\n xmlStr += line.lstrip().rstrip()\n\n # Parse the XML\n rssPrev = ET.ElementTree()\n\n try:\n rssPrev = ET.ElementTree(ET.fromstring(xmlStr))\n except:\n logger.fatal(\"Unable to parse \\'\" + xmlFilepath + \"\\'\")\n exit(1)\n\n # Find all the items and append them to the new tree\n items = rssPrev.getroot().findall('channel/item', namespaces)\n\n # Append found items and add appropriate namespaces\n if items:\n # Indicate items are to be added\n itemsAdded = True\n\n # Items do not carry an Atom namespace element, so add it manually\n rss.set(\"xmlns:atom\", \"http://www.w3.org/2005/Atom\")\n\n # Find the earliest `lastBuildDate` to determine copyright\n pubDates = rssPrev.getroot().findall('channel/item/pubDate',\n namespaces)\n\n for pubDate in pubDates:\n # Parse out the year\n year = re.findall(r\" \\d{4} \", pubDate.text)[0].lstrip().rstrip()\n\n # Set the year if empty or lower\n if not firstYear:\n firstYear = year\n else:\n if int(year) < int(firstYear):\n firstYear = year\n\n # No items were added, then add all namespace attributes manually.\n if not itemsAdded:\n for prefix, uri in namespaces.iteritems():\n rss.set(\"xmlns:\" + prefix, uri)\n\n return items, firstYear", "def __local_rn(soup):\n return __get_local_g1_news(soup)", "def __local_rn(soup):\n return __get_local_g1_news(soup)", "def extract_release_data(self, filename):\r\n\r\n impl = getDOMImplementation()\r\n\r\n doc = impl.createDocument(None, \"release\", None)\r\n top_element = doc.documentElement\r\n \r\n for action in self._actions:\r\n data = action.extract_release_data()\r\n if data:\r\n project_node = doc.createElement('project')\r\n for attr in data:\r\n project_node.setAttribute(attr, data[attr])\r\n top_element.appendChild(project_node)\r\n \r\n f_file = open(filename, 'w+')\r\n f_file.write(doc.toprettyxml(indent=\" \"))\r\n f_file.close()", "def __local_ms(soup):\n return __get_local_g1_news(soup)", "def __local_ms(soup):\n return __get_local_g1_news(soup)", "def load_mta_archived_feed(feed='gtfs', timestamp='2014-09-17-09-31'):\n import requests\n\n return requests.get(\"https://datamine-history.s3.amazonaws.com/{0}-{1}\".format(feed, timestamp))", "async def get_last_xkcd(session):\n url = \"https://xkcd.com/rss.xml\"\n async with session.get(url) as response:\n content = await response.text()\n parsed = feedparser.parse(content)\n\n title = re.search('title=\"(.*?)\"', parsed.entries[0].description)\n img_url = re.search('src=\"(.*?)\"', parsed.entries[0].description)\n\n embed = discord.Embed()\n\n embed.title = parsed.entries[0].title\n embed.description = html.unescape(title.group(1))\n embed.set_image(url=img_url.group(1).strip())\n embed.set_footer(text=parsed.entries[0].link)\n\n return embed", "def cli():\n fire.Fire(fetch_rss_file)", "def scrap(date=None):\n \n global htmlclient, bloomberg\n response = requests.get(bloomberg+\"/archive/news/\"+date, headers=htmlclient)\n if response.status_code==200:\n soup = BeautifulSoup(response.text)\n \n # Getting the urls to all the posts from the archive at the given date\n for story_list in soup.findAll(\"ul\", attrs={\"class\":\"stories\"}):\n storylinks = [ story.a.get(\"href\") for story in story_list.findAll(\"li\") 
]\n \n # Function call to store the link contents\n if storylinks:\n for story in storylinks:\n store_it(date, story) \n return 1\n # Set flag to -1 when there is no posts available(i.e. status code is 408) in the archive or\n # it already stored all the articles from bloomberg\n else: \n return -1", "def get_url_and_parse():\n \n # TODO: dynamic site_code\n #global site_code\n #\n #if not site_code:\n # site_code = get_site_code(city_name)\n\n urllib.request.urlretrieve(\n \"https://dd.weather.gc.ca/citypage_weather/xml/AB/s0000661_e.xml\", \"s0000661_e.xml\")\n tree = ET.parse(\"s0000661_e.xml\")\n return tree.getroot()", "def download_presentation(epObject, uc):\r\n fileDict = make_file_dict()\r\n fileDict = populate_file_dict(epObject, uc, fileDict)\r\n now = str(datetime.datetime.now().hour) + \\\r\n str(datetime.datetime.now().minute) + \\\r\n str(datetime.datetime.now().second)\r\n directoryName = epObject.Name.replace(\" \", \"\") + \"_presentation_\" + now\r\n os.mkdir(directoryName)\r\n os.chdir(directoryName)\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(fileDict['pageUrls'][0]).read())\r\n temp.seek(0)\r\n update_page(temp, fileDict, \"index.html\", index=True)\r\n temp.close()\r\n os.mkdir(\"Pages\")\r\n os.chdir(\"Pages\")\r\n for (pageUrl, pageFileName) in zip(fileDict['pageUrls'][1:], \r\n fileDict['pageFileNames'][1:]):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(pageUrl).read())\r\n update_page(temp, fileDict, pageFileName)\r\n temp.close()\r\n os.chdir(\"../\")\r\n os.mkdir(\"Content\")\r\n os.chdir(\"Content\")\r\n for (fileUrl, fileId) in zip(fileDict['fileUrls'], fileDict['fileIds']):\r\n fileName = eportfolio.get_ep_object_properties(uc, fileId).\\\r\n FileName.strip()\r\n urllib.request.urlretrieve(fileUrl, fileName)\r\n os.chdir(\"../\")\r\n os.mkdir(\"Formatting\")\r\n os.chdir(\"Formatting\")\r\n for (cssUrl, cssFileName) in zip(fileDict['cssUrls'],\r\n fileDict['cssFileNames']):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(cssUrl).read())\r\n temp.seek(0)\r\n update_css_file(cssUrl, temp, cssFileName)\r\n temp.close()\r\n for imgUrl in fileDict['imgUrls']:\r\n fileName = imgUrl[imgUrl.rfind(\"/\"): ]\r\n if fileName.find(\"?\") > 0:\r\n fileName = fileName[: fileName.find(\"?\")]\r\n urllib.request.urlretrieve(imgUrl, fileName)\r\n os.chdir(\"../\")\r\n print(str(fileDict))\r\n return fileDict", "def parse(url):\n s_url = url.split(\"/\")\n ns = [e for e in s_url if \"canonical-\" in e][0].split(\"-\")[-1]\n f = \".\".join(s_url[-1].split(\".\")[:-1])\n\n textgroup, work, donotcate = tuple(f.split(\".\"))\n lang = f.split(\"-\")[-1][0:3]\n urn = \"urn:cts:{0}:{1}\".format(ns, f)\n\n target = \"output/canonical-{namespace}/data/{group}/{work}/{file}\".format(namespace=ns, group=textgroup, work=work, file=s_url[-1])\n\n \"\"\"\n Downloading the resource \n \"\"\"\n # Download the resource\n response = requests.get(url)\n # Ensure there was no errors\n response.raise_for_status()\n\n # Get the file name by splitting the url\n filename = url.split(\"/\")[-1]\n\n \"\"\" \n Caching the resource \n \"\"\"\n\n os.makedirs(\"cache\", exist_ok=True)\n # Save the original response\n with open(\"cache/original-\"+filename, 'w') as f:\n # Don't forget to write the reponse.text and not response itself\n read_data = f.write(response.text)\n\n # Open it and parse it\n with open(\"cache/original-\"+filename) as f:\n # We use the etree.parse property\n parsed = etree.parse(f, parser)\n\n return 
lang, urn, target, parsed", "def fetchContent(self):\n print 'fetching page by its path: '+ self.path\n uri = '%s?path=%s' % (self.client.MakeContentFeedUri(), self.path)\n # get the content feed\n feed = self.client.GetContentFeed(uri=uri)\n # take out the content\n self.entry = feed.get_webpages()[0]", "def save_publish():\n import mop\n\n path = cmds.file(query=True, location=True)\n work_dir = os.path.dirname(path)\n publish_dir = os.path.join(work_dir, \"release\")\n\n highest_publish = None\n highest_version = -1\n\n for f in os.listdir(publish_dir):\n ext = os.path.splitext(f)[-1]\n if ext == \".ma\":\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(f)\n if match:\n version = int(match.group(\"version\"))\n if version > highest_version:\n highest_version = version\n highest_publish = f\n\n new_path = mop.increment_version(os.path.join(publish_dir, highest_publish))\n cmds.file(rename=new_path)\n cmds.file(save=True, force=True)" ]
[ "0.6206035", "0.55234224", "0.52666616", "0.5235553", "0.52091956", "0.5206474", "0.52010465", "0.51887625", "0.51887625", "0.5178505", "0.51600707", "0.51256144", "0.5122521", "0.50846535", "0.50757486", "0.5039742", "0.5022523", "0.5022523", "0.5006762", "0.4999023", "0.4999023", "0.49819782", "0.49736205", "0.4965643", "0.49513444", "0.49358705", "0.4935829", "0.49291444", "0.49152857", "0.49143884" ]
0.63504636
0
Exposing a tool to help push the ocean into the right location based off the FX published fluid containers fluids_hrc
def _setOceanLocation(self): ## If the fluids_hrc exists if cmds.objExists('fluids_hrc'): if cmds.objExists('ocean_srf'): cmds.connectAttr('fluids_hrc.translateX', 'ocean_srf.translateX', f = True) cmds.connectAttr('fluids_hrc.translateZ', 'ocean_srf.translateZ', f = True) else: cmds.warning('MISSING ocean_srf node from scene....') if cmds.objExists('oceanPreviewPlane_prv'): cmds.connectAttr('fluids_hrc.translateX', 'oceanPreviewPlane_prv.translateX', f = True) cmds.connectAttr('fluids_hrc.translateZ', 'oceanPreviewPlane_prv.translateZ', f = True) else: cmds.warning('MISSING oceanPreviewPlane_prv node from scene....') else: cmds.warning('NO fluids_hrc FOUND! Can not move the ocean into final position. PLEASE CHECK FX PUBLISH NOW!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n\n fab_list = get_fabric_list(SANNAV_IP_ADDRESS, SANNAV_FOS_USERNAME, SANNAV_FOS_PASSWORD)\n\n # Print all known facts about the fabrics and the switches\n # Comment out this print statement if this code will be used to generate\n # an Ansible Tower inventory.\n print(json.dumps(fab_list))\n\n # This section of code formats the results to be in a format acceptable to Ansible Tower (awx).\n # To use it, unblock the following block of code and comment out the preceeding print statement.\n\n _ = \"\"\"\n toAwx = {'_meta': {'hostvars': {}}}\n\n for fabric in fab_list[\"Fabrics\"]:\n toAwx[fabric[\"name\"]] = { 'hosts': []}\n for switch in fabric[\"Switches\"]:\n toAwx[fabric[\"name\"]]['hosts'].append(switch['ipAddress'])\n print(json.dumps(toAwx));\n \"\"\"", "def _connectWakeAndFoamToOcean(self, tk, templateFile = '', id = '', shotNum = '', inprogressBar = ''):\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'Connecting fluid textures to ocean shader....', verbose = False)\r\n\r\n\t\t####################################################\r\n\t\t## Straight up connection if no interactive is found.\r\n\t\tif cmds.objExists(CONST.OCEANDISPSHADER) and cmds.objExists(CONST.WAKE_FLUID_SHAPENODE) and cmds.objExists(CONST.FOAM_FLUID_SHAPENODE):\r\n\t\t\ttry:\r\n\t\t\t\tcmds.connectAttr(\"%s.outAlpha\" % CONST.WAKE_FLUID_SHAPENODE, \"%s.waveHeightOffset\" % CONST.OCEANDISPSHADER, force = True)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\t\ttry:\r\n\t\t\t\tcmds.connectAttr(\"%s.outAlpha\" % CONST.FOAM_FLUID_SHAPENODE, \"%s.foamOffset\" % CONST.OCEANDISPSHADER, force = True)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\t###########################################\r\n\t\t####### INTERACTIVE STUFFF ################\r\n\t\t### Now check for interactive caches and blend these\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'Looking for interactive anim fluids now...', verbose = False)\r\n\t\tinprogressBar.updateProgress(percent = 76, doingWhat = 'Looking for interactive caches..')\r\n\r\n\t\tgetAnimVersionFolders = tk.paths_from_template(templateFile, {'Step' : 'Anm', 'id' : id, 'Shot' : shotNum})\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'getAnimVersionFolders: %s' % getAnimVersionFolders, verbose = False)\r\n\r\n\t\t## now find the highest version folder number\r\n\t\thighestVersionFolder = r'%s' % max(getAnimVersionFolders)\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'highestVersionFolder...%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\thighestVersionFolder = highestVersionFolder.replace('\\\\', '/')\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'highestVersionFolder replaced \\\\ with /...\\n%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\tversionNumber = highestVersionFolder.split('/')[-1]\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'versionNumber: %s' % versionNumber, verbose = False)\r\n\r\n\t\tlistCacheFiles = os.listdir(highestVersionFolder)\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'listCacheFiles...\\n%s' % listCacheFiles, verbose = False)\r\n\r\n\t\tinteractiveCaches = {}\r\n\t\tfor each in listCacheFiles:\r\n\t\t\tif each.endswith('.xml'): ## the ocean shader preset saved out\r\n\t\t\t\tif CONST.WAKE_FLUID_SHAPENODE in each:\r\n\t\t\t\t\tinteractiveCaches[CONST.WAKE_FLUID_SHAPENODE] = '%s/%s' % (highestVersionFolder, 
each)\r\n\t\t\t\telse:\r\n\t\t\t\t\tinteractiveCaches[CONST.FOAM_FLUID_SHAPENODE] = '%s/%s' % (highestVersionFolder, each)\r\n\r\n\t\tif interactiveCaches:\r\n\t\t\tfluidCaches.mergeFluidCaches(interactiveFoamXML = interactiveCaches[CONST.FOAM_FLUID_SHAPENODE], interactiveWakeXML = interactiveCaches[CONST.WAKE_FLUID_SHAPENODE])\r\n\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'Ocean connected....', verbose = False)", "def main():\n parser = cmdLineParse()\n inps = parser.parse_args()\n gf = asf.load_inventory(inps.inventory)\n\n if inps.template:\n print(f\"Reading from template file: {inps.template}...\")\n inputDict = dice.read_yaml_template(inps.template)\n else:\n inputDict = {\n \"topsinsar\": {\n \"sensorname\": \"SENTINEL1\",\n \"reference\": {\"safe\": \"\"},\n \"secondary\": {\"safe\": \"\"},\n }\n }\n\n intdir = \"int-{0}-{1}\".format(inps.reference, inps.secondary)\n if not os.path.isdir(intdir):\n os.mkdir(intdir)\n os.chdir(intdir)\n\n reference_urls = asf.get_slc_urls(gf, inps.reference, inps.path)\n secondary_urls = asf.get_slc_urls(gf, inps.secondary, inps.path)\n downloadList = reference_urls + secondary_urls\n inps.reference_scenes = [os.path.basename(x) for x in reference_urls]\n inps.secondary_scenes = [os.path.basename(x) for x in secondary_urls]\n\n if inps.poeorb:\n try:\n frame = os.path.basename(inps.reference_scenes[0])\n downloadList.append(asf.get_orbit_url(frame))\n frame = os.path.basename(inps.secondary_scenes[0])\n downloadList.append(asf.get_orbit_url(frame))\n except Exception as e:\n print(\"Trouble downloading POEORB... maybe scene is too recent?\")\n print(\"Falling back to using header orbits\")\n print(e)\n inps.poeorb = False\n pass\n\n # Update input dictionary with argparse inputs\n inputDict[\"topsinsar\"][\"reference\"][\"safe\"] = inps.reference_scenes\n inputDict[\"topsinsar\"][\"reference\"][\"output directory\"] = \"referencedir\"\n inputDict[\"topsinsar\"][\"secondary\"][\"safe\"] = inps.secondary_scenes\n inputDict[\"topsinsar\"][\"secondary\"][\"output directory\"] = \"secondarydir\"\n # Optional inputs\n # swaths, poeorb, dem, roi, gbox, alooks, rlooks, filtstrength\n if inps.swaths:\n inputDict[\"topsinsar\"][\"swaths\"] = inps.swaths\n if inps.dem:\n inputDict[\"topsinsar\"][\"demfilename\"] = inps.dem\n if inps.roi:\n inputDict[\"topsinsar\"][\"regionofinterest\"] = inps.roi\n if inps.gbox:\n inputDict[\"topsinsar\"][\"geocodeboundingbox\"] = inps.gbox\n if inps.filtstrength:\n inputDict[\"topsinsar\"][\"filterstrength\"] = inps.filtstrength\n if inps.alooks:\n inputDict[\"topsinsar\"][\"azimuthlooks\"] = inps.alooks\n if inps.rlooks:\n inputDict[\"topsinsar\"][\"rangelooks\"] = inps.rlooks\n print(inputDict)\n xml = dice.dict2xml(inputDict)\n dice.write_xml(xml)\n # Create a download file\n asf.write_download_urls(downloadList)\n print(f\"Generated download-links.txt and topsApp.xml in {intdir}\")", "def main():\n create_fc(\"layers/WAPR.lyr\",\n \"TravelerInfo.gdb/HighwayAlerts\")", "def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = 
['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()", "def PropagatePhotons(tray, name,\n GCDFile,\n If=lambda f:True ,\n RandomService = None,\n KeepIndividualMaps = False,\n HybridMode = False,\n IgnoreMuons = False,\n IgnoreCascades = False,\n UseGPUs = False,\n UseAllCPUCores = False,\n KeepSlicedMCTree = False,\n IceModel = \"spice_3.2\",\n CascadeService = None,\n IceModelLocation = None,\n UseCascadeExtension = True,\n UseGeant4=False, \n CrossoverEnergyEM=None, \n CrossoverEnergyHadron=None, \n UnshadowedFraction = 1.0, #changed 2014-10-16 to IC86 nominal preset, IC79 used 0.9\n DOMOversizeFactor=5.0,\n HoleIceParameterization=expandvars(\"$I3_SRC/ice-models/resources/models/angsens/as.h2-50cm\"),\n InputMCTree=\"I3MCTree\",\n UseI3PropagatorService = False,\n OutputPESeriesMapName=\"I3MCPESeriesMap\",\n OutputPhotonSeriesName=None,\n):\n\n from I3Tray import I3Units\n from icecube import icetray, dataclasses, dataio\n from icecube import phys_services, sim_services\n\n from icecube import clsim, photonics_service\n from os import listdir\n from os.path import isdir\n\n \n if IgnoreMuons and not HybridMode:\n raise RuntimeError(\"Can currently only ignore muons in hybrid mode\")\n\n clsimIceModel = None\n if IceModelLocation is None:\n IceModelLocation = expandvars(\"$I3_BUILD/ice-models/resources/models\")\n if isinstance(IceModel, clsim.I3CLSimMediumProperties):\n if HybridMode:\n raise RuntimeError(\"Cannot use custom ice models in hybrid mode\")\n clsimIceModel = IceModel\n elif IceModel == \"Spice1\":\n clsimIceModel = expandvars(IceModelLocation+\"/spice_1\")\n elif IceModel == \"SpiceMie\":\n clsimIceModel = expandvars(IceModelLocation+\"/spice_mie\")\n elif IceModel == \"SpiceLea\":\n clsimIceModel = expandvars(IceModelLocation+\"/spice_lea\")\n else:\n for d in listdir(IceModelLocation):\n if 
isdir(expandvars(IceModelLocation+\"/\"+d)) and IceModel.lower() == d.lower():\n clsimIceModel = expandvars(IceModelLocation+\"/\"+d)\n break\n if not clsimIceModel:\n raise RuntimeError(\"Unknown ice model: %s\", IceModel)\n\n if HybridMode and IceModel not in (\"Spice1\",\"SpiceMie\"):\n raise RuntimeError(\"Can only use Spice1 and SpiceMie in hybrid mode. photon tables do not support ice anisotropy at this time.\")\n\n if (not IgnoreCascades) and HybridMode:\n if CascadeService is None:\n logging.warning(\"*** no cascades tables provided. Loading tables for\", IceModel)\n \n # If we can see CVMFS, we'll get the splines from there.\n # Note : when available, switch icecube.wisc.edu for icecube.opensciencegrid.org\n UseSplinesFromCVMFS = os.path.isdir(\"/cvmfs/icecube.opensciencegrid.org/data/photon-tables/splines\")\n\n if(UseSplinesFromCVMFS):\n TablePath=\"/cvmfs/icecube.opensciencegrid.org/data/photon-tables/splines\"\n else:\n TablePath=\"/data/sim/sim-new/spline-tables\" \n \n logging.info(\"Using splines from CVMFS: \", UseSplinesFromCVMFS)\n\n # Work out which splines to use based on ice model preferences\n if(HoleIceParameterization==expandvars('$I3_SRC/ice-models/resources/models/angsens/as.h2-50cm')):\n CascadeModel=IceModel\n elif(HoleIceParameterization==expandvars('$I3_SRC/ice-models/resources/models/angsens/as.nominal')):\n if IceModel==\"SpiceMie\":\n CascadeModel=\"SpiceMieNoHoleIce\"\n else:\n raise RuntimeError(\"No no-hole-ice spline for %s\", IceModel)\n else: \n raise RuntimeError(\"No spline for %s with hole ice param %s\", IceModel, HoleIceParameterization)\n\n cascade_service = LoadCascadeTables(IceModel=CascadeModel, TablePath=TablePath)\n else:\n cascade_service = CascadeService\n else:\n cascade_service = None\n\n if HybridMode:\n if OutputPhotonSeriesName is not None:\n raise RuntimeError(\"saving photons is not supported in hybrid mode\")\n if UseGeant4:\n raise RuntimeError(\"Geant4 not supported in hybrid mode\")\n if ((CrossoverEnergyEM is not None) or (CrossoverEnergyHadron is not None)):\n raise RuntimeError(\"CrossoverEnergyEM or CrossoverEnergyHadron not supported in hybrid mode\")\n\n # split the MCTree into a cascade-only and a track-only version\n tray.AddModule(\"I3MCTreeHybridSimulationSplitter\", name+\"_splitMCTree\",\n InputMCTreeName=InputMCTree,\n OutputMCTreeNameTracks=InputMCTree+\"Tracks\",\n OutputMCTreeNameCascades=InputMCTree+\"Cascades\")\n \n tray.AddModule(\"I3TauSanitizer\", name+\"_sanitize_taus\",\n InputMCTreeName = InputMCTree+\"Tracks\",\n OutputMCTreeName = InputMCTree+\"Tracks\") # overwrite the input\n\n if not IgnoreMuons:\n if UseGPUs:\n DoNotParallelize=False\n else:\n DoNotParallelize=not UseAllCPUCores\n threading.Thread(target=resetTasksetThreads,args=(os.getpid(),)).start()\n logging.debug('tasksetInUse = ',tasksetInUse())\n logging.debug('DoNotParallelize = ',DoNotParallelize)\n\n # simulate tracks (with clsim)\n tray.AddSegment(clsim.I3CLSimMakeHits, name+\"_makeCLSimHits\",\n PhotonSeriesName = None,\n MCTreeName = InputMCTree+\"Tracks\",\n OutputMCTreeName = InputMCTree+\"Tracks_sliced\",\n MCPESeriesName = OutputPESeriesMapName + \"Tracks\",\n UseI3PropagatorService = UseI3PropagatorService,\n RandomService = RandomService,\n UnshadowedFraction=UnshadowedFraction,\n DoNotParallelize=DoNotParallelize,\n UseGeant4=False, # never use this with Geant4!\n UseGPUs=UseGPUs,\n UseCPUs=not UseGPUs,\n IceModelLocation=clsimIceModel,\n DOMOversizeFactor=DOMOversizeFactor,\n UseCascadeExtension=UseCascadeExtension,\n 
GCDFile=GCDFile,\n DisableTilt=True)\n\n tray.AddModule(\"Delete\", name+\"_cleanup_clsim_sliced_MCTree\",\n Keys = [InputMCTree+\"Tracks_sliced\"])\n\n if not IgnoreCascades:\n tray.AddModule(\"I3PhotonicsHitMaker\", name+\"_hitsFromTheTable\",\n CascadeService = cascade_service,\n TrackService = None, # tracks are handled by clsim\n UnshadowedFraction = UnshadowedFraction,\n Input = InputMCTree+\"Cascades\",\n Output = OutputPESeriesMapName + \"Cascades\",\n RandomService = RandomService\n )\n\n MCPEsToCombine = []\n if not IgnoreMuons:\n MCPEsToCombine.append(OutputPESeriesMapName + \"Tracks\")\n if not IgnoreCascades:\n MCPEsToCombine.append(OutputPESeriesMapName + \"Cascades\")\n\n # combine the resulting I3MCHitSeriesMaps\n tray.AddModule(\"I3CombineMCPE\", name+\"_combine_pes\",\n InputResponses = MCPEsToCombine,\n OutputResponse = OutputPESeriesMapName)\n\n if not KeepIndividualMaps:\n # delete the original maps and the split I3MCTrees\n tray.AddModule(\"Delete\", name+\"_cleanup_peseriesmaps\",\n Keys = MCPEsToCombine)\n\n tray.AddModule(\"Delete\", name+\"_cleanup_MCTree\",\n Keys=[InputMCTree+\"Tracks\", InputMCTree+\"Cascades\"])\n\n else:\n # non-hybrid clsim-only simulation\n # If we're using Geant4, we do NOT want the taus to be dark.\n if not UseGeant4:\n tray.AddModule(\"I3TauSanitizer\", name+\"_sanitize_taus\",\n InputMCTreeName = InputMCTree,\n OutputMCTreeName = InputMCTree) # overwrite the input\n\n if UseGPUs:\n DoNotParallelize=False\n else:\n DoNotParallelize=not UseAllCPUCores\n threading.Thread(target=resetTasksetThreads,args=(os.getpid(),)).start()\n logging.debug('tasksetInUse = %s' % tasksetInUse())\n logging.debug('DoNotParallelize = %s' % DoNotParallelize)\n\n # simulate tracks (with clsim)\n tray.AddSegment(clsim.I3CLSimMakeHits, name+\"_makeCLSimHits\",\n PhotonSeriesName = OutputPhotonSeriesName,\n MCTreeName = InputMCTree,\n MCPESeriesName = OutputPESeriesMapName,\n UseI3PropagatorService = UseI3PropagatorService,\n RandomService = RandomService,\n UnshadowedFraction = UnshadowedFraction,\n DoNotParallelize = DoNotParallelize,\n UseGeant4=UseGeant4,\n CrossoverEnergyEM=CrossoverEnergyEM,\n CrossoverEnergyHadron=CrossoverEnergyHadron,\n UseGPUs=UseGPUs,\n UseCPUs=not UseGPUs,\n DOMOversizeFactor=DOMOversizeFactor,\n HoleIceParameterization=HoleIceParameterization,\n IceModelLocation=clsimIceModel,\n GCDFile=GCDFile,\n UseCascadeExtension=UseCascadeExtension)", "def __init__(self, sw_path, json_path):\n Topo.__init__(self)\n\n # Directory where this file / script is located\"\n selfPath = os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))) # script directory\n\n # Initialize a service helper for Quagga with default options\n quaggaSvc = QuaggaService(autoStop=False)\n\n # Path configurations for mounts\n quaggaBaseConfigPath = selfPath + '/configs/'\n\n # List of Quagga host configs\n quaggaHosts = {}\n #quaggaHosts['r1'] = (QuaggaHost(name='r1', ip='172.0.1.1/16', loIP='10.0.1.1/24'))\n quaggaHosts['r2'] = QuaggaHost(name='r2', ip='172.0.2.1/16', loIP='10.0.2.1/24')\n quaggaHosts['r3'] = QuaggaHost(name='r3', ip='172.0.3.1/16', loIP='10.0.3.1/24')\n quaggaHosts['r4'] = QuaggaHost(name='r4', ip='172.0.4.1/16', loIP='10.0.4.1/24')\n quaggaHosts['r5'] = QuaggaHost(name='r5', ip='172.0.5.1/16', loIP='10.0.5.1/24')\n #quaggaHosts['r6'] = (QuaggaHost(name='r6', ip='172.0.6.1/16', loIP='10.0.6.1/24'))\n\n\n # Add the switch for the SWIFTED router\n p4_switch = self.addSwitch('s1', dpid='1', sw_path=sw_path, json_path=json_path, 
thrift_port=_THRIFT_BASE_PORT)\n\n\n # Setup each Quagga router, add a link between it and the IXP fabric\n for name, host in quaggaHosts.iteritems():\n\n # Create an instance of a host, called a quaggaContainer\n quaggaContainer = self.addHost(name=host.name,\n ip=host.ip,\n hostname=host.name,\n privateLogDir=True,\n privateRunDir=True,\n inMountNamespace=True,\n inPIDNamespace=True,\n inUTSNamespace=True)\n\n # Add a loopback interface with an IP in router's announced range\n self.addNodeLoopbackIntf(node=host.name, ip=host.loIP)\n\n # Configure and setup the Quagga service for this node\n quaggaSvcConfig = \\\n {'quaggaConfigPath': quaggaBaseConfigPath + host.name}\n self.addNodeService(node=host.name, service=quaggaSvc,\n nodeConfig=quaggaSvcConfig)\n\n r6 = self.addHost(name='r6',\n ip='172.0.6.1/16',\n hostname='r6',\n privateLogDir=True,\n privateRunDir=True,\n inMountNamespace=True,\n inPIDNamespace=True,\n inUTSNamespace=True)\n\n\n r1 = self.addHost(name='r1',\n ip='172.0.1.1/16',\n hostname='r1',\n privateLogDir=True,\n privateRunDir=True,\n inMountNamespace=True,\n inPIDNamespace=True,\n inUTSNamespace=True)\n\n # Attach the quaggaContainer to the IXP Fabric Switch\n self.addLink('r1', p4_switch, intfName1=\"s1\", intfName2='r1-p4switch')\n self.addLink('r2', p4_switch, intfName1=\"s1\", intfName2='r2-p4switch')\n self.addLink('r3', p4_switch, intfName1=\"s1\", intfName2='r3-p4switch')\n self.addLink('r4', p4_switch, intfName1=\"s1\", intfName2='r4-p4switch')", "def main():\n st.sidebar.title(\"Controlling\")\n st.markdown(\n \"\"\"\n# Bewegungsdaten verschiedener Datenquellen - Social Distancing\nResulate von politischen Maßnamen sowie andere Faktoren die sich auf die Anzahl der Infektionen auswirken.\n\"\"\"\n )\n\n select_block_container_style()\n\n # Map with data from uber | EXAMPLE FROM STREAMLIT\n place1 = load_data(100000)\n\n hour = st.slider(\"Hour to look at\", 0, 23)\n\n place1 = place1[place1[DATE_TIME].dt.hour == hour]\n\n st.subheader(\"Geo data between %i:00 and %i:00\" % (hour, (hour + 1) % 24))\n midpoint = (np.average(place1[\"lat\"]), np.average(place1[\"lon\"]))\n\n st.write(pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state={\n \"latitude\": midpoint[0],\n \"longitude\": midpoint[1],\n \"zoom\": 11,\n \"pitch\": 50,\n },\n layers=[\n pdk.Layer(\n \"HexagonLayer\",\n data=place1,\n get_position=[\"lon\", \"lat\"],\n radius=100,\n elevation_scale=4,\n elevation_range=[0, 1000],\n pickable=True,\n extruded=True,\n ),\n ],\n ))\n\n # My preliminary idea of an API for generating a grid\n with Grid(\"1 1 1\", color=COLOR, background_color=BACKGROUND_COLOR) as grid:\n grid.cell(\n class_=\"a\",\n grid_column_start=2,\n grid_column_end=3,\n grid_row_start=1,\n grid_row_end=2,\n ).markdown(\"# Hier vielleicht plots oder Tabellen oder einfach nur Text.\")\n grid.cell(\"b\", 2, 3, 2, 3).text(\"The cell to the left is a dataframe\")\n grid.cell(\"c\", 3, 4, 2, 3).text(\"The cell to the left is a textframe\")\n grid.cell(\"d\", 1, 2, 1, 3).dataframe(get_dataframe())\n grid.cell(\"e\", 3, 4, 1, 2).markdown(\n \"Try changing the **block container style** in the sidebar!\"\n )\n grid.cell(\"f\", 1, 3, 3, 4).text(\n \"The cell to the right is a matplotlib svg image\"\n )\n grid.cell(\"g\", 3, 4, 3, 4).pyplot(get_matplotlib_plt())\n\n st.plotly_chart(get_plotly_subplots())", "def main(starttime, hstart, hstop, cfg):\n\n if cfg.target is tools.Target.ICONOEM or cfg.target is tools.Target.ICONART:\n\n logging.info('ICON chemistry data for 
IC/BC')\n\n # Wait for meteo to finish first\n tools.check_job_completion(cfg.log_finished_dir,\"meteo\")\n\n tools.create_dir(cfg.icon_input_oae, \"online emissions input\")\n tools.create_dir(cfg.icon_input_icbc, \"icon_input_icbc\")\n tools.create_dir(cfg.icon_input_icbc_processed, \"icon_input_icbc_processed\")\n\n starttime_real = starttime + timedelta(hours = hstart)\n\n #-----------------------------------------------------\n # Remap chemistry initial conditions\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"ic_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"ic_chem\")\n\n # Write remap_chem namelist\n in_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc+'_dry.nc')\n in_grid_filename = in_filename\n out_grid_filename = os.path.join(cfg.input_root_grid,cfg.dynamics_grid_filename)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap_chem'])) as input_file:\n to_write = input_file.read()\n output_nml = os.path.join(cfg.icon_work, 'icontools_remap_chem_ic.namelist')\n with open(output_nml, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_filename=in_filename,\n out_filename=out_filename,\n in_grid_filename=in_grid_filename,\n out_grid_filename=out_grid_filename)\n outf.write(to_write)\n\n # Write remapfields namelist\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_ic'])) as input_file:\n to_write = input_file.read()\n output_fields = os.path.join(cfg.icon_work, 'icontools_remapfields_chem_ic.namelist')\n with open(output_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n # Write run script (remap_ic.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_ic_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_ic.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 'remap_chem_ic.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped initial conditions with icontools\")\n\n os.remove(output_nml)\n os.remove(output_fields)\n os.remove(output_run)\n\n # Transform initial data from dry to wet mixing ratios\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=out_filename,output='temp_file_01.nc')\n cdo.selvar(\"LNSP\",input=out_filename,output='temp_file_03.nc')\n os.remove(out_filename)\n # Rename variable to match ICON internal name with CDO:\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc)\n cdo.chname(\"CH4w\",\"CH4\",input='temp_file_01.nc',output='temp_file_02.nc')\n cdo.merge(input='temp_file_02.nc temp_file_03.nc',output=out_filename)\n\n os.remove('temp_file_01.nc')\n os.remove('temp_file_02.nc')\n os.remove('temp_file_03.nc')\n \n\n\n #-----------------------------------------------------\n # Remap chem LBC\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"lbc_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"lbc_chem\")\n\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_lbc'])) as input_file:\n to_write = 
input_file.read()\n output_nml_fields = os.path.join(cfg.icon_work, 'icontools_remapfields_chem_lbc.namelist')\n with open(output_nml_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n # Write remap_lbc namelist\n in_grid_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n in_filename = os.path.join(cfg.input_root_chem,time.strftime(cfg.chem_nameformat)+'.grb')\n out_grid_filename = os.path.join(cfg.icon_input_grid,cfg.lateral_boundary_grid)\n out_filename = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap'])) as input_file:\n to_write = input_file.read()\n output_nml_lbc = os.path.join(cfg.icon_work, 'icontools_remap_chem_lbc.namelist')\n with open(output_nml_lbc, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_grid_filename=in_grid_filename,\n in_filename=in_filename,\n out_grid_filename=out_grid_filename,\n out_filename=out_filename)\n outf.write(to_write)\n\n # Write run script (remap_chem_lbc.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_lbc_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_lbc.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 'remap_chem_lbc.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped boundary conditions at {} with icontools\".format(time))\n\n os.remove(output_nml_lbc)\n os.remove(output_run)\n\n os.remove(output_nml_fields)\n\n\n #-----------------------------------------------------\n # Merge chem files with meteo files using cdo\n #-----------------------------------------------------\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n chem_file = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n meteo_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n var_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_var.nc')\n transform_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_transform.nc')\n name_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_name.nc')\n processed_file = os.path.join(cfg.icon_input_icbc_processed, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n\n # Select variable with CDO\n cdo.selvar(\"CH4\",\"QV\",input=chem_file,output=var_file)\n # Transform to wet-mixing ratios with CDO\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=var_file,output=transform_file)\n # Rename variable to match ICON internal name with CDO:\n cdo.chname(\"CH4w\",\"oem_tracer_1\",input=transform_file,output=name_file)\n # Merge with CDO\n cdo.merge(input=name_file+' '+meteo_file,output=processed_file)\n\n # Delete temporary files\n os.remove(chem_file)\n os.remove(var_file)\n os.remove(transform_file)\n os.remove(name_file)\n\n logging.info(\"Merged chem variables to file {}\".format(meteo_file))\n\n\n\n # If COSMO (and not ICON):\n else:\n inv_to_process = []\n if cfg.target is tools.Target.COSMOGHG:\n try:\n CAMS = dict(fullname = \"CAMS\",\n nickname = 
\"cams\",\n executable = \"cams4int2cosmo\",\n indir = cfg.cams_dir_orig,\n outdir = cfg.cams_dir_proc,\n param = cfg.cams_parameters)\n inv_to_process.append(CAMS)\n except AttributeError:\n pass\n try:\n CT = dict(fullname = \"CarbonTracker\",\n nickname = \"ct\",\n executable = \"ctnoaa4int2cosmo\",\n indir = cfg.ct_dir_orig,\n outdir = cfg.ct_dir_proc,\n param = cfg.ct_parameters)\n inv_to_process.append(CT)\n except AttributeError:\n pass\n elif cfg.target is tools.Target.COSMOART:\n try:\n MOZART = dict(fullname = 'MOZART',\n nickname = 'mozart',\n executable = 'mozart2int2lm',\n indir = cfg.mozart_file_orig,\n outdir = cfg.mozart_dir_proc,\n param = [{'inc' : cfg.mozart_inc,\n 'suffix' : cfg.mozart_prefix}])\n inv_to_process.append(MOZART)\n except AttributeError:\n pass\n else:\n # Unknown target\n raise RuntimeError(\"Unknown target: {}\".format(cfg.target))\n\n # TO DO \n #MOZART = dict(fullname=\"MOZART\", nickname=\"mozart\",executable=\"cams4int2cosmo\")\n \n logging.info(\"Processing \" + \", \".join([i[\"fullname\"] for i in inv_to_process])+\" data\")\n\n scratch_path = os.path.join(cfg.int2lm_input,'icbc')\n tools.create_dir(scratch_path, \"icbc input\")\n\n for inv in inv_to_process:\n logging.info(inv[\"fullname\"]+\" files\")\n tools.create_dir(inv[\"outdir\"], \"processed \" + inv[\"fullname\"])\n #process_inv(starttime,hstart,hstop,increment,inv,cfg)\n \n for p in inv[\"param\"]:\n inc = p[\"inc\"]\n for time in tools.iter_hours(starttime, hstart, hstop, inc):\n logging.info(time)\n\n filename = os.path.join(inv[\"outdir\"],p[\"suffix\"]+\"_\"+time.strftime(\"%Y%m%d%H\")+\".nc\")\n if not os.path.exists(filename):\n logging.info(filename)\n try:\n to_call = getattr(tools, inv[\"executable\"])\n to_call.main(time,inv[\"indir\"],inv[\"outdir\"],p)\n except:\n logging.error(\"Preprocessing \"+inv[\"fullname\"] + \" data failed\")\n raise\n\n # copy to (temporary) run input directory\n tools.copy_file(filename, scratch_path)\n\n logging.info(\"OK\")", "def startFluidinfo():\n sudo('start fluidinfo-api')\n sudo('/etc/init.d/haproxy start')\n sudo('/etc/init.d/nginx start')", "def main():\n\n # Define the path to the config file, and read it.\n conf_file = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'conf', 'config.toml')\n configuration = read_config_file(conf_file)\n\n # Set our variables based on what is read from the config file.\n rs_url = configuration['platform']['url']\n api_key = configuration['platform']['api_key']\n client_id = configuration['platform']['client_id']\n\n # Specify ID for group that you would like to move your hosts to.\n group_id = 0 # UPDATE THIS WITH YOUR DESIRED GROUP ID\n\n # Move the hosts to the new group.\n print(f\"Moving host(s) to new group({group_id}).\")\n print()\n\n # Call the function to move your hosts\n successful = move_hosts_to_new_group(rs_url, api_key, client_id, group_id)\n\n if successful:\n print(\"The move was successful.\")\n\n else:\n print(\"The attempted move was not successful.\")", "def geocube():", "def main() -> None:\r\n world_dirs = volume_utils.get_world_dirs()\r\n world_names = volume_utils.get_world_names(world_dirs = world_dirs)\r\n\r\n world = volume_utils.pick_world(world_names = world_names)\r\n print(f'You picked {world_names[world]}.')\r\n\r\n try:\r\n volume_name = input(\"Enter a name for your world volume: \")\r\n except Exception:\r\n print(\"Naming the volume 'dockerized_world' by default.\")\r\n volume_name = 'dockerized_world'\r\n\r\n 
volume_utils.make_volume(world_dirs = world_dirs, picked_world = world, volume_name = volume_name)", "def deploy():", "def main():\n pass\n # service = getServiceInstant()\n # # insertFile(service, 'dynamip.conf')\n # file_id = getFileIdFromName(service, 'dynamip.conf')\n # metadata = updateFile(service, file_id, 'dynamip.conf')\n # print(metadata)\n # file = open('metadata_of_updated_dynamip_file', 'w')\n # json.dump(metadata, file)\n # file.write(file)\n # results = service.files().list(maxResults=10).execute()\n # items = results.get('items', [])\n # if not items:\n # print('No files found.')\n # else:\n # print('Files:')\n # for item in items:\n # try:\n # print('{0} ({1})'.format(item['title'], item['id']))\n # except Exception, e:\n # print(e)", "def main():\n ############################################################################\n # Docker setup #\n ############################################################################\n ip = {\"ROBOT_IP\": ROBOT_IPS[fab_conf[\"target\"].as_str()]}\n compose_up(DOCKER_COMPOSE_PATHS[\"driver\"], check_output=True, env_vars=ip)\n log.debug(\"Driver services are running.\")\n\n ############################################################################\n # Load fabrication data #\n ############################################################################\n fab_json_path = fab_conf[\"paths\"][\"fab_data_path\"].as_path()\n clay_bullets = load_bullets(fab_json_path)\n\n log.info(\"Fabrication data read from: {}\".format(fab_json_path))\n log.info(\"{} items in clay_bullets.\".format(len(clay_bullets)))\n\n pick_station_json = fab_conf[\"paths\"][\"pick_conf_path\"].as_path()\n with pick_station_json.open(mode=\"r\") as fp:\n pick_station_data = json.load(fp)\n pick_station = PickStation.from_data(pick_station_data)\n\n ############################################################################\n # Create Ros Client #\n ############################################################################\n ros = RosClient()\n\n ############################################################################\n # Create ABB Client #\n ############################################################################\n abb = AbbClient(ros)\n abb.run()\n log.debug(\"Connected to ROS\")\n\n check_reconnect(\n abb,\n driver_container_name=DRIVER_CONTAINER_NAME,\n timeout_ping=fab_conf[\"docker\"][\"timeout_ping\"].get(),\n wait_after_up=fab_conf[\"docker\"][\"sleep_after_up\"].get(),\n )\n\n ############################################################################\n # setup in_progress JSON #\n ############################################################################\n if not fab_conf[\"skip_progress_file\"]:\n json_progress_identifier = \"IN_PROGRESS-\"\n\n if fab_json_path.name.startswith(json_progress_identifier):\n in_progress_json = fab_json_path\n else:\n in_progress_json = fab_json_path.with_name(\n json_progress_identifier + fab_json_path.name\n )\n\n ############################################################################\n # Fabrication loop #\n ############################################################################\n\n to_place = setup_fab_data(clay_bullets)\n\n if not questionary.confirm(\"Ready to start program?\").ask():\n log.critical(\"Program exited because user didn't confirm start.\")\n print(\"Exiting.\")\n sys.exit()\n\n # Set speed, accel, tool, wobj and move to start pos\n pre_procedure(abb)\n\n for bullet in to_place:\n bullet.placed = None\n bullet.cycle_time = None\n\n for i, bullet in enumerate(to_place):\n 
current_bullet_desc = \"Bullet {:03}/{:03} with id {}.\".format(\n i, len(to_place) - 1, bullet.bullet_id\n )\n\n abb.send(PrintText(current_bullet_desc))\n log.info(current_bullet_desc)\n\n pick_frame = pick_station.get_next_frame(bullet)\n\n # Pick bullet\n pick_future = pick_bullet(abb, pick_frame)\n\n # Place bullet\n place_future = place_bullet(abb, bullet)\n\n bullet.placed = 1 # set placed to temporary value to mark it as \"placed\"\n\n # Write progress to json while waiting for robot\n if not fab_conf[\"skip_progress_file\"].get():\n with in_progress_json.open(mode=\"w\") as fp:\n json.dump(clay_bullets, fp, cls=ClayBulletEncoder)\n log.debug(\"Wrote clay_bullets to {}\".format(in_progress_json.name))\n\n # This blocks until cycle is finished\n cycle_time = pick_future.result() + place_future.result()\n\n bullet.cycle_time = cycle_time\n log.debug(\"Cycle time was {}\".format(bullet.cycle_time))\n bullet.placed = time.time()\n log.debug(\"Time placed was {}\".format(bullet.placed))\n\n ############################################################################\n # Shutdown procedure #\n ############################################################################\n\n # Write progress of last run of loop\n if not fab_conf[\"skip_progress_file\"].get():\n with in_progress_json.open(mode=\"w\") as fp:\n json.dump(clay_bullets, fp, cls=ClayBulletEncoder)\n log.debug(\"Wrote clay_bullets to {}\".format(in_progress_json.name))\n\n if (\n len([bullet for bullet in clay_bullets if bullet.placed is None]) == 0\n and not fab_conf[\"skip_progress_file\"].get()\n ):\n done_file_name = fab_json_path.name.replace(json_progress_identifier, \"\")\n done_json = fab_conf[\"paths\"][\"json_dir\"].as_path() / \"00_done\" / done_file_name\n\n in_progress_json.rename(done_json)\n\n with done_json.open(mode=\"w\") as fp:\n json.dump(clay_bullets, fp, cls=ClayBulletEncoder)\n\n log.debug(\"Saved placed bullets to 00_Done.\")\n elif not fab_conf[\"skip_progress_file\"].get():\n log.debug(\n \"Bullets without placed timestamp still present, keeping {}\".format(\n in_progress_json.name\n )\n )\n\n log.info(\"Finished program with {} bullets.\".format(len(to_place)))\n\n post_procedure(abb)", "def deployFluidinfo(deploymentPath, revision):\n homePath = os.path.join('/home', env.user)\n revisionPath = os.path.join(deploymentPath, revision)\n sudo('mkdir -p %s' % revisionPath)\n put('fluidinfo.tar.bz2', homePath)\n filePath = os.path.join(homePath, 'fluidinfo.tar.bz2')\n sudo('cp %s %s' % (filePath, revisionPath))\n\n with cd(revisionPath):\n sudo('chown -R fluidinfo %s' % revisionPath)\n sudo('chown fluidinfo fluidinfo.tar.bz2')\n sudo('tar jxvf fluidinfo.tar.bz2', user='fluidinfo')\n sudo('mkdir -p var/log var/log/trace var/run var/tmp',\n user='fluidinfo')\n\n with cd(os.path.join(revisionPath, 'fluidinfo')):\n sudo('virtualenv .', user='fluidinfo')\n sudo('./bin/pip install --use-mirrors '\n '--download-cache=/var/lib/fluidinfo/source-dependencies '\n '-r requirements.txt', user='fluidinfo')\n\n ## Copy configuration files\n\n serverName = os.path.basename(deploymentPath)\n templateData = {'deployment-path': deploymentPath,\n 'server-name': serverName}\n fileCopies = [\n ('fluidinfo/fluidinfo-api.conf.template', '../fluidinfo-api.conf'),\n ('cron/postgres-crontab.template', '../scripts/postgres-crontab'),\n ('cron/fluidinfo-crontab.template', '../scripts/fluidinfo-crontab'),\n\n ('cron/backup-postgresql.sh.template',\n '../scripts/backup-postgresql.sh'),\n\n ('cron/metrics.sh', '../scripts/metrics.sh'),\n 
('cron/time-fluidinfo.py', '../scripts/time-fluidinfo.py'),\n ('cron/solr-optimize.sh', '../scripts/solr-optimize.sh')\n\n # TODO: Copy configuration files for nginx, haproxy, logrotate and\n # upstart, these require service restarts if files have changed.\n ]\n\n with cd(os.path.join(revisionPath, 'fluidinfo')):\n sudo('mkdir ../scripts')\n\n for origin, destination in fileCopies:\n specificFilename = os.path.join('deployment', serverName, origin)\n defaultFilename = os.path.join('deployment', 'default', origin)\n origin = (specificFilename\n if os.path.exists(specificFilename)\n else defaultFilename)\n\n sudo('cp {origin} {destination}'.format(**locals()))\n\n for key, value in templateData.iteritems():\n value = value.replace('.', r'\\.').replace('/', '\\/')\n expression = r's/{{ %s }}/%s/g' % (key, value)\n sudo(\"sed -i -e '%s' %s\" % (expression, destination))\n\n sudo('chmod +x ../scripts/backup-postgresql.sh')\n sudo('crontab -u postgres ../scripts/postgres-crontab')\n sudo('crontab -u fluidinfo ../scripts/fluidinfo-crontab')", "def main():\n pods = openshift_object.get_running_pods()\n me = openshift_object.get_self()\n routes = openshift_object.get_routes()\n nodes = openshift_object.get_nodes()\n pvc = openshift_object.get_pvcs()\n pv = openshift_object.get_pv()\n project = openshift_object.get_projects()\n return jsonify({\n \"pods\": pods,\n \"me\": me,\n \"routes\": routes, \n \"nodes\":nodes,\n \"pvcs\":pvc,\n \"pv\":pv,\n \"projects\":project})", "def main(host='10.84.109.148', port=8086):\n user = \"\"\n password = \"\"\n dbname = \"ruuvi1\"\n dbuser = \"\"\n dbuser_password = \"\"\n client = InfluxDBClient(host, port, user, password, dbname)\n for mac in roovi_macs:\n query = \"select last(humidity),temperature, time from ruuvi_measurements where mac = \"+\"\\'\"+mac+\"\\'\" #filter temp, humidity data across all ruuvitags\n result = client.query(query)\n cpu_points = list(result.get_points(measurement='ruuvi_measurements')) #convert datatype to list\n for points in cpu_points:\n val= getfwi(points.get('time'),points.get('last'),points.get('temperature'))\n new_dic[roovi_locs.get(mac)]=val #store FWI index acc to location\n print(\" \\n\\n\\tLocation |\\t FWI\")\n pprint (new_dic)", "def main():\n\n with open(\".auth_token\", mode=\"r\") as tokenfile:\n authtoken = tokenfile.read().strip()\n\n # Initialize connection to Archivist\n aconn = Archivist(\n \"https://soak-0-avid.engineering-k8s-stage-2.dev.wild.jitsuin.io\",\n auth=authtoken,\n )\n # Get all assets with required attributes and properties\n props = {\"confirmation_status\": \"CONFIRMED\"}\n attrs = {\"arc_display_type\": \"Traffic light\"}\n for event in aconn.events.list(asset_id=\"assets/-\", props=props, attrs=attrs):\n print(\"event\", event)\n\n # alternatively one could pull the list and cache locally...\n events = aconn.events.list(asset_id=\"assets/-\", props=props, attrs=attrs)\n for event in events:\n print(\"event\", event)", "def start(args, config):\n print('Starts an HPC fleet: \"{}\"'.format(args))", "def main():\n\n # Handling arguments\n args = get_args()\n all_clusters = args.all_clusters\n all_datacenters = args.all_datacenters\n all_hosts = args.all_hosts\n clusters = []\n if args.clusters:\n clusters = args.clusters\n debug = args.debug\n allow_fqdn = args.allow_fqdn\n datacenters = []\n if args.datacenters:\n datacenters = args.datacenters\n hosts = []\n if args.hosts:\n hosts = args.hosts\n host_configure_agent = args.host_configure_agent\n hosts_file = None\n if args.hosts_file:\n 
hosts_file = args.hosts_file\n hv_username = None\n if args.hv_username:\n hv_username = args.hv_username\n hv_password = None\n if args.hv_password:\n hv_password = args.hv_password\n hv_management_network = None\n if args.hv_management_network:\n hv_management_network = args.hv_management_network\n hv_data_network = None\n if args.hv_data_network:\n hv_data_network = args.hv_data_network\n hv_vm_network = None\n if args.hv_vm_network:\n hv_vm_network = args.hv_vm_network\n hv_mc_network = None\n if args.hv_mc_network:\n hv_mc_network = args.hv_mc_network\n log_file = None\n if args.logfile:\n log_file = args.logfile\n nuage_enterprise = args.nuage_enterprise\n nuage_host = args.nuage_host\n nuage_port = args.nuage_port\n nuage_password = None\n if args.nuage_password:\n nuage_password = args.nuage_password\n nuage_username = args.nuage_username\n nuage_vrs_ovf = None\n if args.nuage_vrs_ovf:\n nuage_vrs_ovf = args.nuage_vrs_ovf\n nosslcheck = args.nosslcheck\n verbose = args.verbose\n vcenter_host = args.vcenter_host\n vcenter_name = vcenter_host\n if args.vcenter_name:\n vcenter_name = args.vcenter_name\n vcenter_https_port = args.vcenter_https_port\n vcenter_http_port = args.vcenter_http_port\n vcenter_password = None\n if args.vcenter_password:\n vcenter_password = args.vcenter_password\n vcenter_username = args.vcenter_username\n\n # Logging settings\n if debug:\n log_level = logging.DEBUG\n elif verbose:\n log_level = logging.INFO\n else:\n log_level = logging.WARNING\n\n logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)\n logger = logging.getLogger(__name__)\n\n # Input checking\n if not all_datacenters and len(datacenters) < 1:\n logger.critical('Not all datacenters have to be present in the Nuage Deployment tool (--all-datacenters option NOT enabled), but also no datacenters specified (at least one --datacenter)')\n return 1\n if not all_clusters and len(clusters) < 1:\n logger.critical('Not all clusters have to be present in the Nuage Deployment tool (--all-clusters option NOT enabled), but also no clusters specified (at least one --cluster)')\n return 1\n if not all_hosts and len(hosts) < 1 and not hosts_file:\n logger.critical('Not all hosts have to be present in the Nuage Deployment tool (--all-hosts option NOT enabled), but also no hosts specified (at least one --host or specify a file with the host information via --hosts-file)')\n return 1\n if all_datacenters and len(datacenters) > 0:\n logger.warning('You enabled all datacenters and added individual datacenter options, --all-datacenters takes precendence and overwrites the specified datacenters.')\n datacenters = []\n if all_clusters and len(clusters) > 0:\n logger.warning('You enabled all clusters and added individual cluster options, --all-clusters takes precendence and overwrites the specified clusters.')\n clusters = []\n if all_hosts and len(hosts) > 0 and not hosts_file:\n logger.warning('You enabled all hosts and added individual hosts options, --all-hosts takes precendence and overwrites the specified hosts.')\n hosts = []\n elif all_hosts and len(hosts) < 1 and hosts_file:\n logger.warning('You enabled all hosts and provided a hosts file, the hosts file takes precendence over the --all-hosts flag and this flag will be ignored.')\n all_hosts = False\n elif not all_hosts and len(hosts) > 0 and hosts_file:\n logger.warning('You specified host with the --host argument and provided a hosts file, the hosts file takes precendence over the --host paramerters and 
these will be ignored.')\n hosts = []\n\n # CSV Handling\n hosts_list = None\n if hosts_file:\n hosts_list = {}\n # CSV fields:\n # VM Name, Resource Pool, Folder, MAC Address, Post Script\n logger.debug('Parsing csv %s' % hosts_file)\n\n if not os.path.isfile(hosts_file):\n logger.critical('CSV file %s does not exist, exiting' % hosts_file)\n return 1\n\n with open(hosts_file, 'rb') as hostlist:\n hosts_list_raw = csv.reader(hostlist, delimiter=',', quotechar='\"')\n for row in hosts_list_raw:\n logger.debug('Found CSV row: %s' % ','.join(row))\n # Adding IP to the hosts variable so it can also be used in further handling if it's a valid IP\n if allow_fqdn or ip_address_is_valid(row[0]):\n hosts_list[row[0]] = row\n hosts.append(row[0])\n else:\n logger.warning('Found an invalid IP %s in the hosts file and FQDNs are not allowed, skipping line' % row[0])\n\n # Getting user password for Nuage connection\n if nuage_password is None:\n logger.debug('No command line Nuage password received, requesting Nuage password from user')\n nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username))\n\n # Getting user password for vCenter connection\n if vcenter_password is None:\n logger.debug('No command line vCenter password received, requesting vCenter password from user')\n vcenter_password = getpass.getpass(prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username))\n\n # Getting user password for hosts\n if hv_password is None:\n logger.debug('No command line Host password received, requesting Host password from user')\n hv_password = getpass.getpass(prompt='Enter password for the hosts inside vCenter %s for user %s: ' % (vcenter_host, hv_username))\n\n try:\n vc = None\n nc = None\n\n # Connecting to Nuage\n try:\n logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))\n nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url=\"https://%s:%s\" % (nuage_host, nuage_port))\n nc.start()\n except IOError:\n pass\n\n if not nc or not nc.is_current_session():\n logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username))\n return 1\n\n # Connecting to vCenter\n try:\n logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_https_port, vcenter_username))\n if nosslcheck:\n vc = SmartConnectNoSSL(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n else:\n vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n\n except IOError:\n pass\n\n if not vc:\n logger.error('Could not connect to vCenter host %s with user %s and specified password' % (vcenter_host, vcenter_username))\n return 1\n\n logger.debug('Registering vCenter disconnect at exit')\n atexit.register(Disconnect, vc)\n\n logger.info('Connected to both Nuage & vCenter servers')\n\n # Check if the vCenter exists in Nuage vCenter Deployment Tool\n nuage_vcenter = None\n logger.debug('Checking if vCenter %s is already present in Nuage vCenter Deployment Tool' % vcenter_name)\n for nvc in nc.user.vcenters.get():\n if nvc.ip_address == vcenter_host:\n logger.debug('Found vCenter %s, not recreating' % vcenter_name)\n nuage_vcenter = nvc\n break\n\n # If th vCenter does not exist in Nuage vCenter Deployment Tool, create it\n if not nuage_vcenter:\n 
logger.debug('vCenter %s with IP %s not found in the Nuage vCenter Deployment Tool, creating' % (vcenter_name, vcenter_host))\n nuage_vcenter = vsdk.NUVCenter(name=vcenter_name, ip_address=vcenter_host, user_name=vcenter_username, password=vcenter_password, http_port=vcenter_http_port, https_port=vcenter_https_port, ovf_url=nuage_vrs_ovf)\n nc.user.create_child(nuage_vcenter)\n logger.info('Created vCenter %s in the Nuage vCenter Deployment Tool' % vcenter_name)\n\n # Datacenter Handling\n # Gathering all Datacenters inside the vCenter\n logger.debug('Gathering all Datacenters from vCenter')\n content = vc.content\n obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.Datacenter], True)\n vc_dc_list = obj_view.view\n obj_view.Destroy()\n\n # Gathering all Datacenters inside the Nuage vCenter\n logger.debug('Gathering all Datacenter from the Nuage vCenter entry')\n nc_dc_list = nuage_vcenter.vcenter_data_centers.get()\n\n # Parsing all datacenters\n for vc_dc in vc_dc_list:\n if all_datacenters or vc_dc.name in datacenters:\n logger.debug('vCenter Datacenter %s is in list that has to be present in the Nuage vCenter Deployment Tool, checking if it already exists.' % vc_dc.name)\n handle_vdt_datacenter(logger=logger, nc=nc, vc=vc, nuage_vcenter=nuage_vcenter, vc_dc=vc_dc, nc_dc_list=nc_dc_list, vcenter_name=vcenter_name, all_clusters=all_clusters, all_hosts=all_hosts, clusters=clusters, hosts=hosts, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)\n\n logger.info('Completed all tasks.')\n return 0\n\n except vmodl.MethodFault as e:\n logger.critical('Caught vmodl fault: %s' % e.msg)\n return 1\n except Exception as e:\n logger.critical('Caught exception: %s' % str(e))\n return 1", "def main():\n\n ip_filename = arguments.ip_file.strip()\n\n # Set project directory to 'logs' unless an optional directory was given\n if arguments.project_dir:\n project = arguments.project_dir\n else:\n project = 'logs'\n\n if arguments.device_class:\n device_cls = arguments.device_class.strip()\n else:\n # Default device class for Netmiko\n device_cls = 'cisco_ios'\n\n ips = []\n ips = load_txt_file(ip_filename)\n\n total_devices = len(ips)\n # Track devices which fail login or pings\n missing_devices = []\n # Track devices which were successfully accessed\n devices_verified = 0\n\n # Create Directory for show output based on the Project Name\n path = os.path.join(\"./\", project.strip())\n # print path\n if not os.path.exists(path):\n os.makedirs(path)\n print(f\"Created directory: {path}\")\n\n # Create logfile for the discovery run in same directory as the resulting show commands\n # logfilename = project + \"-logfile.log\"\n # logfilename = os.path.join(path, logfilename)\n\n if total_devices > 1:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} devices! #####\"\n else:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} device! 
#####\"\n\n print(\"#\" * len(heading))\n print(heading)\n print(\"#\" * len(heading))\n\n print(f\"Device IP(s) in project {project}:\")\n for i in ips:\n print(f\"\\t{i}\")\n print(\"--------------------------\")\n print(f\"Total devices: {str(len(ips))}\")\n print(\"#\" * len(heading))\n print(\"\\n\")\n\n ## Default Credentials\n # Default list of credentials in format username, user password, enable password\n credentials = ['cisco, cisco, cisco']\n\n ## Load Credentials if -c or --creds option was used\n if arguments.creds:\n # Override default credentials as a new credential file with one or more sets of credentials was provided\n cred_filename = arguments.creds\n credentials = load_txt_file(cred_filename)\n\n ##### SHOW COMMANDS\n commands = []\n\n ## Load custom show commands if -c or --show option was used\n if arguments.show:\n # Override default list of show commands as a new file with one or more show commands was provided\n show_filename = arguments.show\n custom_showcmds = load_txt_file(show_filename)\n\n # first command to send is an end to get back to the main prompt\n commands = custom_showcmds\n\n else:\n # DEFAULT SHOW COMMANDS\n commands = [\"show version\",\n ]\n\n # if not arguments.pingonly:\n # print(\"Sending \" + str(len(commands)) + \" show commands:\")\n # for x in range(0, len(commands)):\n # print(\"\\t\" + commands[x])\n\n # For each IP in the ip address file, attempt to ping, attempt to log in, attempt to enter enable mode and\n # execute and save show command output\n for mgmt_ip in ips:\n\n login_success = False\n enable_success = False\n output = ''\n hostname = \"dev_\" + mgmt_ip\n\n # If Ping is successful attempt to log in and if that is successful attempt to enter enable mode and\n # execute list of show commands\n device_pings = ping_device(mgmt_ip)\n\n if device_pings:\n print(f\"Device {mgmt_ip} Responds to Pings!\\n\")\n\n # If the -i or --icmppingonly option was provided when the script was called, then only execute the ping code.\n if arguments.icmppingonly:\n # Keep a count of the devices that are pingable\n devices_verified += 1\n # Skip everything else as the icmp ping only option was given\n continue\n\n if len(credentials) > 1:\n print(\"**** Attempting multiple credentials to access device....\")\n\n try_telnet = False\n # Credential Loop\n for line in credentials:\n\n lineitem = line.split(',')\n uname = lineitem[0].strip()\n upwd = lineitem[1].strip()\n epwd = lineitem[2].strip()\n\n if not try_telnet:\n\n print(f\"\\t**** Attempting user credentials for {uname} with SSH.\")\n\n try:\n dev_conn = ConnectHandler(device_type=device_cls, ip=mgmt_ip, username=uname, password=upwd,\n secret=epwd)\n login_success = True\n\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n # continue\n\n except (EOFError, SSHException, NetMikoTimeoutException):\n print('\\tSSH is not enabled for this device.')\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed SSH')\n login_success = False\n try_telnet = True\n # continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n # continue\n\n if login_success:\n print(\"\\t**** SSH 
Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** SSH Login Failed!\")\n # continue\n\n # Try Telnet\n if try_telnet:\n print(\"\\t**** Attempting user credentials for \" + uname + \" with Telnet.\")\n\n try:\n dev_conn = ConnectHandler(device_type='cisco_ios_telnet', ip=mgmt_ip, username=uname,\n password=upwd,\n secret=epwd)\n login_success = True\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n continue\n\n if login_success:\n print(\"\\t**** Telnet Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** Telnet Login Failed!\")\n continue\n\n if login_success:\n # Check to see if login has resulted in enable mode (i.e. priv level 15)\n is_enabled = dev_conn.check_enable_mode()\n\n if not is_enabled:\n try:\n dev_conn.enable()\n enable_success = True\n except Exception as e:\n print(str(e))\n print(\"\\tCannot enter enter enable mode on device!\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'failed enable')\n enable_success = False\n continue\n else:\n print(\"\\tDevice already in enabled mode!\")\n enable_success = True\n\n if enable_success:\n\n for cmd in commands:\n output += dev_conn.send_command(cmd, strip_prompt=False, strip_command=False)\n dev_conn.exit_config_mode()\n dev_conn.disconnect()\n\n # output contains a stream of text vs individual lines\n # split into individual lies for further parsing\n # output_lines = re.split(r'[\\n\\r]+', output)\n\n # show_info = get_show_info(output_lines)\n #\n # if show_info['hostname']:\n # hostname = show_info.pop('hostname')\n\n # print(\"Information for device: \" + hostname)\n # for k, v in show_info.items():\n # print(\"\\t\" + k +\"\\t\\t-\\t\" + v)\n\n # Save output to file\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n log_filename = hostname + \"-\" + timestr + \".txt\"\n log_filename = os.path.join(path, log_filename)\n\n log_file = open(log_filename, 'w')\n log_file.write(\"!#Output file for device \" + hostname + \"\\n\")\n log_file.write(\"!#Commands executed on \" + timestr + \"\\n\\r\")\n log_file.write(\"!\\n\")\n log_file.write(output)\n log_file.close()\n devices_verified += 1\n print(\"\\nOutput results saved in: \" + log_filename + \"\\n\\n\")\n\n\n else:\n # Device does not PING\n print(\"Device is unreachable\")\n missing_devices.append(mgmt_ip)\n\n # Totals Verification\n if arguments.icmppingonly:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of devices which responded to pings:\\t\" + str(devices_verified) + \"\\n\")\n else:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of show command output files:\\t\" + str(devices_verified) + \"\\n\")\n\n\n # Print Note on totals\n for note in info:\n print(note)", "def main():\n get_obofoundry(force_download=True)", "def start():\n _with_deploy_env(['./bin/paster serve 
src/remix/oerpub/rhaptoslabs/production.ini --daemon'])", "def main():\n # files\n summary_file = sys.argv[1]\n pwms_to_tfs_file = sys.argv[2]\n expressed_tfs_file = sys.argv[3] # TODO\n\n # TODO pull in num regions to resize things? but complicated with overlaps etc\n # TODO edit edges with type of interaction\n # TODO may want to color by trajectory, to demonstrate waves of trajectory\n \n # read in data\n summary = pd.read_csv(summary_file, sep=\"\\t\")\n pwms_to_tfs = pd.read_csv(pwms_to_tfs_file, sep=\"\\t\")\n pwms_to_tfs = pwms_to_tfs[pwms_to_tfs[\"expressed\"].notna()]\n pwms_to_filt_tfs = {} # dict: key - pwm names, vals - dict of ensembl ids to hgnc ids\n for line_idx in range(pwms_to_tfs.shape[0]):\n pwm_info = pwms_to_tfs.iloc[line_idx,:]\n pwm_name = pwm_info[\"hclust_model_name\"]\n pwm_to_tf = dict(zip(pwm_info[\"expressed\"].split(\";\"), pwm_info[\"expressed_hgnc\"].split(\";\")))\n pwms_to_filt_tfs[pwm_name] = pwm_to_tf\n\n \n # filter expressed hgncs for dynamic ones only\n tfs_filt = pd.read_csv(expressed_tfs_file, sep=\"\\t\", index_col=0)\n for pwm_name in pwms_to_filt_tfs.keys():\n tfs_tmp = pwms_to_filt_tfs[pwm_name]\n for ensembl_tf in tfs_tmp.keys():\n if ensembl_tf not in tfs_filt.index:\n del tfs_tmp[ensembl_tf]\n if len(tfs_tmp.keys()) == 0:\n del pwms_to_filt_tfs[pwm_name]\n pwms_to_filt_tfs[pwm_name] = tfs_tmp\n\n # add in tfs column\n tf1 = []\n for pwm in summary[\"pwm1\"]:\n tf_str = []\n for ensembl_id in pwms_to_filt_tfs[pwm]:\n tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])\n # TODO try add in max point\n expression = tfs_filt.loc[ensembl_id,:]\n max_idx = np.argmax(expression.values)\n tf_str.append(str(max_idx))\n tf_str = (\";\").join(tf_str)\n tf1.append(tf_str)\n summary[\"tf1\"] = tf1\n\n tf2 = []\n for pwm in summary[\"pwm2\"]:\n tf_str = []\n for ensembl_id in pwms_to_filt_tfs[pwm]:\n tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])\n expression = tfs_filt.loc[ensembl_id,:]\n max_idx = np.argmax(expression.values)\n tf_str.append(str(max_idx))\n tf_str = (\";\").join(tf_str)\n tf2.append(tf_str)\n summary[\"tf2\"] = tf2\n \n # remove failed rules\n summary = summary[~summary[\"interaction\"].str.contains(\"FAILED\")]\n \n # make graph\n graph = nx.from_pandas_edgelist(summary, \"tf1\", \"tf2\")\n\n # set up positions\n #pos = graphviz_layout(graph, prog=\"dot\")\n pos = graphviz_layout(graph, prog=\"neato\")\n scale_factor = 3\n for key in pos.keys():\n coords = pos[key]\n pos[key] = {\"x\": scale_factor*coords[0], \"y\": -scale_factor*coords[1]}\n nx.set_node_attributes(graph, pos, \"graphics\") # note this is diff from v1 to v2 in networkx\n \n # add graphics\n add_graphics_theme_to_nx_graph(graph)\n\n # write gml\n out_file = \"summary.gml\"\n nx.write_gml(stringize_nx_graph(graph), out_file, stringizer=str)\n\n # tfs: for each tf, get gene column\n \n \n return", "def install_logstash_ui (vpc_conn,ec2_conn, elb_conn, cloudwatch_conn , r53_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'APACHE'\n app_name = 'LOGGING-UI'\n external_type = 'LOGGING-UI-EXT'\n logging_ui_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n #logging_ui_ami_name = base_name + \"-\" + app_name\n logging_ui_ami_name = 'Master-Logging-UI'\n logging_ui_sec_grp_name = get_secgrp_name( base_name, app_name )\n logging_ui_lb_sec_grp_name = get_lb_secgrp_name( base_name, app_name )\n nat_sec_grp_name = get_secgrp_name(base_name, \"NAT\")\n elb_listeners = [ ( '80', '8080', 'HTTP' ) ]\n ext_elb_name = get_elb_name( 
base_name, external_type )\n ext_elb_name = ext_elb_name.replace(\"_\",\"-\")\n\n logging_ui_ami_id = params.get( 'source-ami' )\n\n if not logging_ui_ami_id or len( logging_ui_ami_id ) < 1 :\n logging_ui_ami = get_ami_by_name( ec2_conn, logging_ui_ami_name )\n if not logging_ui_ami :\n print \"Could not find AMI to install Logstash UI! \" + logging_ui_ami_name\n sys.exit( 2 )\n else :\n logging_ui_ami = get_ami_by_id( ec2_conn, logging_ui_ami_id )\n if not logging_ui_ami :\n print \"Could not find AMI to install Logstash UI! \" + logging_ui_ami_id\n sys.exit( 2 )\n\n print \"Creating Logging UI Instances\"\n\n logging_ui_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n\n logging_ui_sec_grp = find_secgrp(ec2_conn, logging_ui_sec_grp_name)\n logging_ui_lb_sec_grp = find_secgrp(ec2_conn, logging_ui_lb_sec_grp_name)\n nat_sec_grp = find_secgrp(ec2_conn, nat_sec_grp_name)\n\n if not logging_ui_sec_grp :\n logging_ui_sec_grp = create_secgrp ( ec2_conn, vpc, logging_ui_sec_grp_name, \"Allows Log UI LB access to the logging UI\" )\n\n if not logging_ui_lb_sec_grp :\n logging_ui_lb_sec_grp = create_secgrp ( ec2_conn, vpc, logging_ui_lb_sec_grp_name, \"Allows HBO access to Logging UI LB\" )\n\n\n ##\n ## Grant all requires access\n ##\n print nat_sec_grp\n try :\n grant_ssh_access ( ec2_conn, [logging_ui_sec_grp], nat_sec_grp )\n except :\n print \"Rule exists\"\n\n try :\n grant_grp_self_access ( ec2_conn, logging_ui_sec_grp, 0, 40000, protocol = 'tcp' )\n except :\n print \"Rule exists\"\n\n try :\n grant_grp_access ( ec2_conn, [logging_ui_lb_sec_grp], logging_ui_sec_grp, 8080, protocol = 'tcp' )\n except :\n print \"Rule exists\"\n\n try :\n grant_ssh_access ( ec2_conn, [logging_ui_lb_sec_grp], nat_sec_grp )\n except :\n print \"Rule exists\"\n\n ## Open the elastic search API to HBO internal\n try :\n logging_ui_lb_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = hbo_cidr_list )\n except :\n print \"HBO access already granted\"\n\n logging_ui_instances_ids = []\n\n #\n loggin_ui_config = get_loggingui_config(base_name)\n\n for subnet in logging_ui_subnets :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", logui_ip_block)\n logstash = launch_instance_vpc( ec2_conn,\n logging_ui_ami,\n base_name = base_name,\n instance_type = app_name,\n keypair = logging_ui_keypair,\n machine_type = 'm3.medium',\n security_group_id = logging_ui_sec_grp ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = loggin_ui_config,\n static_ip_address = instance_private_ip )\n logging_ui_instances_ids.append( logstash.id )\n\n print \"Setting alarms on the kibana instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, logstash.id, app_type, base_topicarn, log_monitor_rules )\n\n ext_elb = find_elb(elb_conn, ext_elb_name)\n\n if not ext_elb :\n subnets = get_vpc_subnets( vpc_conn, vpc, 'PUBLIC' )\n ext_elb = create_elb ( elb_conn,\n ext_elb_name,\n subnets,\n elb_listeners,\n logging_ui_lb_sec_grp,\n \"8080\",\n \"/logging-ui/index.html\",\n True )\n\n print \"Adding Logging UI instances into the load balancer.\"\n swap_elb_instances( elb_conn,\n ext_elb,\n logging_ui_instances_ids,\n swap_smoothly = False,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn )\n\n print \"Creating Route53 DNS Entries\"\n ext_dns_name = create_dns_name(base_name, \"logging-ui\")\n print \"Public DNS: \" + ext_dns_name\n set_dns_cname( r53_conn, ext_dns_name, ext_elb.dns_name )", "def localhost():\n env.run = local\n 
env.cd = lcd\n env.deployment = 'local'", "def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)", "def main():\n # OptionParser helps reading options from command line\n parser = OptionParser(\n usage='%prog [options]', \n version=\"%%prog, Hermod release %s\" % (config.get('DEFAULT', \n 'version')), \n description=\"copy l2 files to your current directory\"\n )\n parser.set_defaults(datestart=datetime(1999, 1, 1), \n dateend = datetime.today(), \n orbitstart = 0x0000, \n orbitend = 0xFFFF, \n verbose = False, \n launch = False, \n cal=[-1, -2], \n fqid =[127, 128], \n threshold = .1, \n queue = 'rerun', \n qsmr= '2-1')\n parser.add_option('-s', '--start-time', \n action='callback', callback=parsetime, dest='datestart', nargs=2, \n type='string', metavar='YYYYMMDD HH:MM', \n help='filter on start date default is 2 days from now'\n )\n parser.add_option('-k', '--end-time', \n action='callback', callback=parsetime, dest='dateend', nargs=2, \n type='string', metavar='YYYYMMDD HH:MM', \n help='filter on stop date default is now'\n )\n parser.add_option('-o', '--start-orbit', \n action='store', dest='orbitstart', type='int', \n metavar='ORB_START', help='add filter on start decimal orbit'\n )\n parser.add_option('-e', '--end-orbit', \n action='store', dest='orbitend', type='int', \n metavar='ORB_END', help='filter on end decimal orbit'\n )\n parser.add_option('-O', '--start-hexorbit', \n action='callback', callback=hex2dec, dest='orbitstart', \n type='string', metavar='HEX_ORB_START', \n help='filter on start hex orbit'\n )\n parser.add_option('-E', '--end-hexorbit', \n action='callback', callback=hex2dec, dest='orbitend', type='string', \n metavar='HEX_ORB_END', help='filter on stop hex orbit'\n )\n parser.add_option('-f', '--fqid', action='append', dest='fqid', type='int', \n metavar='FQID', help='filter on fqids'\n )\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose', \n help='display info when launching to queue'\n )\n parser.add_option('-l', '--launch', action='store_true', dest='launch', \n help='launch jobs into processing system'\n )\n parser.add_option('-Q', '--qsmr', \n action='store', type='string', dest='qsmr', \n metavar='QSMR', help='Qsmr version, format \"2-1\"'\n )\n (options, args) = parser.parse_args()\n\n # manipulate som values\n if len(options.cal)==2:\n n = range(20)\n setattr(parser.values, 'cal', map(float, range(20)))\n\n if len(options.fqid)==2:\n n = range(50)\n setattr(parser.values, 'fqid', n)\n\n # Initiate a database connection\n try:\n db = connect(host=config.get('READ_SQL','host'),\n user=config.get('READ_SQL','user'),\n db=config.get('READ_SQL','db'))\n except Warning, inst:\n print >> stderr, \"Warning: %s\" % inst\n except StandardError, inst:\n print >> stderr, \"Error: %s\" % inst\n exit(1)\n\n #find orbitfiles to run\n cursor = db.cursor(DictCursor)\n try:\n status = cursor.execute('''select * from level1 \n left join level2files using (id) \n where 1 \n and orbit>=%s \n and start_utc>=%s \n and orbit<=%s \n and stop_utc<=%s \n and version=%s\n and fqid in %s\n order by orbit,fqid''', \n (options.orbitstart, options.datestart, \n options.orbitend, options.dateend, \n 
options.qsmr, options.fqid))\n except Warning, e:\n print >> stderr, \"Hermod:\", str(e)\n except Exception, e:\n print >> stderr, \"Hermod:\", str(e)\n exit(3)\n except KeyboardInterupt:\n print >> stderr, \"Hermod: KeyboardInterrupt, closing database...\"\n cursor.close()\n db.close()\n exit(2)\n\n # do desired action on every object\n for i in cursor:\n try:\n if not (i['hdfname'] is None):\n src = join(config.get('GEM', 'SMRL2_DIR'), i['hdfname'])\n dst = join(getcwd(), basename(i['hdfname']))\n if exists(src):\n copyfile(src, dst)\n except HermodError, inst:\n print >> stderr, 'HermodError: %s'%inst \n continue\n cursor.close()\n db.close()" ]
[ "0.5672977", "0.560191", "0.55638695", "0.55382484", "0.55016917", "0.5492851", "0.5487739", "0.547502", "0.5456432", "0.5440167", "0.54215467", "0.5404361", "0.53749293", "0.53648347", "0.53563046", "0.5336703", "0.53051615", "0.5297738", "0.52720964", "0.52703017", "0.52670056", "0.5190995", "0.5177667", "0.5166713", "0.5165895", "0.51486814", "0.5133977", "0.51265466", "0.5103701", "0.50858736" ]
0.5646119
1
Return whether the selected digits from start in the number are a palindrome
def is_number_palindrome(number, digits, start):
    number = str((number // 10**start) % 10**digits).zfill(digits)
    return is_palindrome(number)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_palindrome(n):\n d = digits(n)\n r = int(\"\".join([str(i) for i in d]))\n return n == r", "def isPalindrome(Number):\r\n ListOfDigit=[int(d) for d in str(Number)]\r\n n=len(ListOfDigit)\r\n for i in range(n//2):\r\n if ListOfDigit[i]!=ListOfDigit[-(i+1)]:\r\n return(False)\r\n return(True)", "def is_palindrome(num):\n digitList = int2list(num)\n \n i = 0\n while i <= round(len(digitList)/2):\n if digitList[i] != digitList[-(i+1)]:\n return False\n i += 1\n return True", "def is_palindrome(number_):\n temp = number_\n reverse = 0\n while number_ > 0:\n digit = number_ % 10\n reverse = reverse * 10 + digit\n number_ = number_ // 10\n if temp == reverse:\n return True\n else:\n return False", "def palindrome_check(num):\n num= str(num)\n len_num= len(num)\n for i in range(len_num/2):\n if num[i] == num[len_num-i-1]:\n ans= True\n else:\n ans= False\n break\n return ans", "def is_palindrome(a):\n\tmax = a\n\tmin = 0\n\twhile max > 0:\n\t\tmin = (min * 10 + max % 10)\n\t\tmax /= 10\n\treturn min == a", "def check_palindrome():", "def is_palindrome(number):\r\n str_input = str(number)\r\n return str_input == reversed(str_input)", "def is_palindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] != v[len(v)-i-1]:\n return False\n return True", "def is_palindrome(n):\n # store locally\n temp = n\n rev = 0\n while n > 0:\n # get digit one by one\n digit = n % 10\n # find reverse number\n rev = rev * 10 + digit\n # divide the number\n n = n // 10\n return temp == rev", "def is_palindrome(n):\n x, y = n, 0\n f = lambda: y * 10 + x % 10\n while x > 0:\n x, y = x // 10, f()\n return y == n", "def isPalendrome(number):\n\t\n\tnum = str(number)\n\ti \t= 0\n\tj \t= len(num) - 1\n\tmid = len(num) // 2\n\n\t#print(mid)\n\t\n\t# While i and j are not in the middle\n\twhile( i != mid):\n\t\t#print(i,j,sep=\"\\t\")\n\t\t#print(num[i],num[j], sep=\"\\t\")\n\t\tif(num[i] != num[j]):\n\t\t\treturn(False)\n\t\telse:\n\t\t\ti = i + 1\n\t\t\tj = j - 1\n\n\treturn(True)", "def is_palindrome(n):\n x, y = n, 0\n f = lambda: 10 * y + x % 10\n while x > 0:\n x, y = x // 10, f()\n return y == n", "def is_palindrome(x):\n \n # Assume negative number is not a palindromic number.\n if x < 0:\n return False\n\n ten_base = 1\n \n # Determine num size with a base of 10\n while x / ten_base >= 10:\n ten_base *= 10\n\n while x > 0:\n left_num, right_num = x / ten_base, x % 10\n if left_num != right_num:\n return False\n \n # Update and prep for next iteration.\n x = (x % ten_base) / 10\n ten_base /= 100\n\n return True", "def isPalindromic(n: int):\n return str(n) == str(n)[::-1]", "def is_number_palindrome(n):\n digits = list() # list of all digits in n (reversed order)\n while n > 0:\n last_digit = n % 10\n digits.append(last_digit)\n n = n // 10\n if is_list_palindrome(digits):\n return True\n else:\n return False", "def is_palindrome(n):\n return(n == reverse(n))", "def check_pal(num):\r\n \r\n num = str(num) #Convert number to string.\r\n \r\n #If a number is a palindrome, rreturn True \r\n if num[0] == num[len(num)-1] and len(num) <= 3:\r\n return True\r\n \r\n #If the first and last digits of a number are equal when its length is > 3,\r\n #strip the end digits away analyse the resulting number.\r\n elif num[0] == num[len(num)-1]:\r\n return check_pal(num[1:len(num)-1])\r\n \r\n #If a number is not a palindrome, return False\r\n else:\r\n return False", "def is_palindrome(num):\n str_num = str(num)\n\n if len(str_num) == 1:\n return True\n elif len(str_num) == 
2:\n return str_num[0] == str_num[1]\n\n if str_num[0] == str_num[len(str_num)-1]:\n return is_palindrome(str_num[1:len(str_num)-1])\n else:\n return False", "def is_palindrome(n):\n ns = str(n)\n for i in range(0, len(ns) // 2):\n if ns[i] != ns[len(ns) - 1 - i]: return False\n return True", "def is_palindrome(x):\n strx = str(x)\n return strx == strx[::-1]\n # chars = [c for c in x] if not is_number(x) else digits(x)\n # for i in range(len(chars) // 2):\n # if chars[i] != chars[len(chars) - i - 1]:\n # return False\n # return True", "def is_palindrome(num_in):\n if str(num_in) == str(num_in)[::-1]:\n return True\n\n return False", "def has_palindrome(i, start, length):\r\n s = str(i)[start:start+length]\r\n return s[::-1] == s", "def is_palindrome(num):\n\treversed_num = str(num)[::-1]\n\tif reversed_num == str(num): return True\n\telse: return False", "def is_antipalindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] == v[len(v)-i-1]:\n return False\n return True", "def check_palindrome(number):\r\n \r\n number = str(number) #Converting a number to a string.\r\n \r\n #If the number is a palindrome then it will return True \r\n if number[0] ==number[len(number)-1] and len(number) <= 3:\r\n return True\r\n #If the first and last digits of a number are equal when its length is > 3,\r\n #strip the end digits away analyse the resulting number.\r\n elif number[0] == number[len(number)-1]:\r\n return check_palindrome(number[1:len(number)-1])\r\n \r\n #If a number is not a palindrome, return False\r\n else:\r\n return False", "def is_palindromic(n: int) -> bool:\n str_n = str(n)\n if str_n == str_n[::-1]:\n return True\n return False", "def is_palindrome(element):\n if int(element) < 0:\n return False\n if element == element[::-1]:\n return True\n else:\n return False", "def isPalindrome(x):\n # Write the functionality:\n\n if x == str(x)[::-1]:\n return True\n elif x==121:\n return True\n else:\n return False", "def palindrome_itertive(a):\n # TODO make this less crappy\n start = 0 \n end = len(a) - 1\n while start != end:\n # print(end)\n # print('start: ', start, ' a: ', a[start])\n # print('end: ', end, ' a: ', a[end])\n if not a[start] == a[end]:\n return False\n else:\n start += 1\n end -= 1\n return True" ]
[ "0.7902744", "0.78919506", "0.7875554", "0.78522855", "0.780591", "0.77704966", "0.7650538", "0.7627759", "0.7580284", "0.75686455", "0.7507331", "0.75047344", "0.7498053", "0.7495534", "0.74926513", "0.74589795", "0.74185145", "0.73735946", "0.7350664", "0.73400944", "0.7291305", "0.7287503", "0.72614264", "0.72503674", "0.7219647", "0.7216595", "0.7204814", "0.71877784", "0.71868503", "0.7126142" ]
0.8182947
0
Get all the tag combinations possible for a tree of length n
def get_all_tag_seq(self, n): tags = list(product(self.tags, repeat=n)) return tags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fn(n):\n if n == 1: return [TreeNode()]\n ans = []\n for nn in range(1, n, 2): \n for left in fn(nn):\n for right in fn(n-1-nn): \n ans.append(TreeNode(left=left, right=right))\n return ans", "def get_subs(n):\n \n from itertools import product\n return [''.join(sub) for sub in product('CATGN', repeat=n)]", "def expand_all(start, end, tags):\n if len(tags) == 1:\n for branch in expand(start, end, tags[0]):\n yield [branch]\n else:\n first, rest = tags[0], tags[1:]\n for middle in range(start+1, end+1-len(rest)): \n for first_branch in expand(start, middle, first):\n for rest_branches in expand_all(middle, end, rest):\n yield [first_branch] + rest_branches", "def build(n):\n if n not in memo:\n res = []\n if n % 2 != 0:\n for i in range(1, n - 1):\n left = i\n right = n - 1 - i\n if left % 2 == 1 and right % 2 == 1:\n left_ = build(left)\n right_ = build(right)\n for l in left_:\n for r in right_:\n new_tree = TreeNode(0)\n new_tree.left, new_tree.right = l, r\n res.append(new_tree)\n memo[n] = res\n return memo[n]", "def make_kmer_tree(self, k, nums):\n nodes = [(np.array([]), [])]\n for it in range(k):\n new_nodes = []\n count = 0\n for i, node in enumerate(nodes):\n n, e = node\n if len(n) < it:\n continue\n for a in nums:\n count += 1\n new_node = (np.append(n, a), [])\n new_nodes.append(new_node)\n nodes[i][1].append(len(nodes) + count - 1)\n nodes += new_nodes\n return nodes", "def _get_possible_sense_combinations(self, taggable, tagged):\n\t\tprint(\"\\tget possible combinations...\")\n\t\t# first create a list of the already tagged senses and store for each of those one list inside that contains the one single correct sense\n\t\ttagged_sense_keys = [[(token, token.wn_sense_key)] for token in tagged]\n\t\ttaggable_possible_sense_keys = []\n\n\t\t# for each token that has to be tagged now find all possible senses and collect them\n\t\tfor token in taggable:\n\t\t\ttoken_sense_pairs = []\n\t\t\t# for each possible sense of the token add one to the list of that sense\n\t\t\tpossible_senses = self._get_possible_wn_senses_for_token(token)\n\t\t\tfor single_possible_sense in possible_senses:\n\t\t\t\ttoken_sense_pairs.append((token, single_possible_sense))\n\t\t\ttaggable_possible_sense_keys.append(token_sense_pairs)\n\n\t\tcomplete_list_of_tokens = taggable_possible_sense_keys + tagged_sense_keys\n\n\t\tprint(\"\\t\\t...building combinations\")\n\t\t# return a dot product of the lists of possible senses of all tokens\n\t\treturn list_product(*complete_list_of_tokens)", "def chunked_tags(train):\n cfdist = nltk.ConditionalFreqDist()\n for t in train:\n for word, tag, chtag in tree2conlltags(t):\n if chtag == \"O\":\n cfdist[tag].inc(False)\n else:\n cfdist[tag].inc(True)\n return [tag for tag in cfdist.conditions() if cfdist[tag].max() == True]", "def make_nodes(n):\n return [\n protein(namespace='NS', name=str(i))\n for i in range(1, n)\n ]", "def getTags(number=None):", "def generate_ngrams(iterable, n):\n return zip(*[itertools.islice(it, i, None) for i, it in enumerate(itertools.tee(iterable, n))])", "def graphs_conn_iso(n):\n def graphs_conn_helper(n):\n for oldg in graphs_conn_iso(n-1):\n for s in powerset(range(n-1)):\n if s == ():\n continue\n g = oldg + [list(s)]\n for v in s:\n g[v] = g[v] + [n-1]\n # NOT g[v] += ... 
or g[v].append(...)\n # to avoid changing items in oldg\n yield g\n\n assert n >= 0\n if n >= 3:\n for g in unique_iso(graphs_conn_helper(n)):\n yield g\n elif n == 2:\n yield [ [1], [0] ]\n elif n == 1:\n yield [ [] ]\n else: # n == 0\n yield []", "def combinations(s, n):\n return (\"\".join(x) for x in tuples(s,n))", "def get_paren_combos():\n results = [None] * 4\n options = [('%s', '(%s)')]\n for i in range(1, 4):\n results[i] = list(itertools.product(*(i * options)))\n return results", "def ngrams(text, n):\n return chain(*[ngrams_(text, i) for i in range(n + 1)])", "def build_ngrams(tokens, n=2):\n ngrams = zip(*(islice(group, idx, None) for idx, group in enumerate(tee(tokens, n))))\n return ngrams", "def get_perms(n):\n \n from itertools import permutations\n bases = 'CATGN'\n return [''.join(perm) for perm in permutations(bases, n)]", "def tc_gen(n):\r\n comb = (list(tuple) for tuple in itertools.product([True,False], repeat=n))\r\n return list(comb)", "def get_children(search_tag, tag_list):\n list_return = []\n\n for tag in tag_list:\n if str(tag.parent) == str(search_tag):\n list_return.append(tag)\n list_return.extend(get_children(tag, tag_list))\n return list(set(list_return)) # This will return a list of unique elements", "def find_all_ngrams(dataset, n):\n return zip(*[dataset[i:] for i in xrange(n)])", "def partial_tree(s, n):\n if n == 1:\n return (Tree(s.first), s.rest)\n elif n == 2:\n return (Tree(s.first, [Tree(s.rest.first)]), s.rest.rest)\n else:\n left_size = (n-1)//2\n right_size = n - left_size - 1\n \"*** YOUR CODE HERE ***\"", "def get_node_tags(self, elem_tag):\n all_node_tag = np.array([], dtype=int)\n if np.size(elem_tag) > 1:\n for ie in range(len(elem_tag)):\n all_node_tag = np.concatenate(\n (all_node_tag, self.get_connectivity(elem_tag[ie]))\n )\n all_node_tag = np.unique(all_node_tag)\n else:\n all_node_tag = self.get_connectivity(elem_tag)\n\n return all_node_tag", "def all_structures(self):\n for seq in itertools.product(*map(range, self.tag_sizes)):\n yield np.array(seq)", "def get_nodes_combinations(graph):\n nodes = graph.nodes()\n nodes_powerset = []\n for n in chain.from_iterable(combinations(nodes, r) for r in range(len(nodes)+1)):\n if len(n) > 1:\n nodes_powerset.append(list(n))\n return nodes_powerset", "def build_tiers(self):\n self.tiers = []\n for size in range(1,len(self.sigma)+1):\n for combo in itertools.combinations(self.sigma, size):\n self.tiers.append(tuple(sorted(list(combo))))", "def combos(array,n=2): \n # base case\n if n==0:\n yield frozenset()\n return\n\n # core recursion\n for c in set(combos(array,n-1)):\n for i in array:\n #added this to avoid duplicate combos\n if i not in c:\n # add element i to combo c\n yield frozenset({i})| c", "def extensions_at_depth(puzzle, n):\n lst = []\n x = puzzle.extensions()\n if n == 1:\n return x\n for i in x:\n lst.append(i)\n if i.extensions() != None:\n y = extensions_at_depth(i,n-1)\n for j in y:\n lst.append(j)\n return lst", "def build_tree(n, d, name=defaultname):\n return build_tree_helper(1, n, 1, d, name)", "def reduce_n_times(tree, n, weights, last_round=False):\n k = []\n if n==1:\n aggs_ = list(np.unique(np.array(aggs(tree, weights))))\n agg_weights = [apply_aggregation(t, weights) for t in aggs_]\n H = [calculate_H(x) for x in agg_weights]\n sorted_H = [i[0] for i in sorted(enumerate(H), key=lambda x:x[1], reverse=True)][0:10]\n for i in sorted_H:\n k.append(aggs_[i])\n return k\n if n>1:\n trees = reduce_n_times(tree, n-1, weights)\n results = []\n for tree in trees:\n results 
= results + aggs(tree, weights)\n l = min([len(x) for x in results])\n results = filter(lambda x: len(x) == l, results)\n results = list(np.unique(np.array(results)))\n agg_weights = [apply_aggregation(t, weights) for t in results]\n S = [calculate_S(x) for x in agg_weights]\n if last_round == True:\n sorted_S = [i[0] for i in sorted(enumerate(S), key=lambda x:x[1], reverse=True)]\n for i in sorted_S:\n k.append(results[i])\n else:\n sorted_S = [i[0] for i in sorted(enumerate(S), key=lambda x:x[1], reverse=True)][0:10]\n for i in sorted_S:\n k.append(results[i])\n return k", "def n_grams(tokens, n=1):\n shiftToken = lambda i: (el for j,el in enumerate(tokens) if j>=i)\n shiftedTokens = (shiftToken(i) for i in range(n))\n tupleNGrams = zip(*shiftedTokens)\n return tupleNGrams", "def weak_compositions(n, k):\n if n < 0 or k < 0:\n return\n elif k == 0:\n # the empty sum, by convention, is zero, so only return something if\n # n is zero\n if n == 0:\n yield []\n return\n elif k == 1:\n yield [n]\n return\n else:\n # For each first integer i in range(n+1), list all compositions\n # on n-i nodes, of length at most k-1.\n for i in range(n+1):\n for comp in weak_compositions(n-i, k-1):\n yield [i] + comp" ]
[ "0.6263927", "0.61682093", "0.60873145", "0.599515", "0.596374", "0.5940209", "0.5909812", "0.5883347", "0.58303434", "0.58066684", "0.5776324", "0.57579505", "0.57390195", "0.5715532", "0.57020915", "0.5674218", "0.5645066", "0.5627819", "0.56251854", "0.56197864", "0.56149757", "0.5593644", "0.5583311", "0.55824214", "0.5582329", "0.5573191", "0.55583835", "0.5542587", "0.55370206", "0.5525006" ]
0.6972763
0
Get index of a tag sequence m in self.tags
def get_tag_index(self, m): return self.tags.index(m)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index_in_tag(self):\n if hasattr(self, '_m_index_in_tag'):\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None\n\n self._m_index_in_tag = (self.tag - 35)\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None", "def get_tag_index(tags, tag_to_search):\n counter = 0\n for t in tags:\n if tag_to_search == t:\n break\n else:\n counter+=1\n return counter", "def get_index(tag):\n global kpi_list\n try:\n return kpi_list.index(str(tag))\n except ValueError:\n return -1", "def index_tag_in_lines(lines, tag):\n for index, line in enumerate(lines):\n if tag in line:\n return index\n raise ValueError(f'{tag} not found.')", "def index_tag_seq(words, seq, strict=False):\n tags = get_tag_seq(words)\n nouns = 'NN' in seq or 'NNS' in seq\n alt_seq = None\n if strict is False:\n if nouns is True:\n alt_seq = [\n 'NNS' if x == 'NN' else \n 'NN' if x == 'NNS' else \n x for x in seq\n ] \n \n for i in range(len(tags)):\n check_seq = tags[i:i+len(seq)]\n if check_seq == seq:\n return i\n if nouns:\n if check_seq == alt_seq:\n return i\n\n return -1", "def indexof(self, value, tag=WORD):\n match = lambda a, b: a.endswith(\"*\") and b.startswith(a[:-1]) or a==b\n indices = []\n for i in range(len(self.words)):\n if match(value, unicode(self.get(i, tag))):\n indices.append(i)\n return indices", "def index(self, key):\n return self._sequence.index(key)", "def tagkeyindex(self,tag):\r\n\r\n returnset = set()\r\n if self.tag_dict_contains(tag):\r\n\r\n for x_temp in self.get_keys_for_tag(tag):\r\n if self.key_dict_contains(x_temp+'/'+tag):\r\n for y_temp in self.get_indexes_for_key(x_temp+'/'+tag):\r\n returnset.add(y_temp)\r\n return returnset", "def get_tag_indices(tags):\n tag_counts = count_tags(tags);\n tag_names = tag_counts.keys()\n return {name : index for (index, name) in enumerate(tag_names)}", "def get_index(uid, i):\n return _SHARED_SEQUENCES[uid][i]", "def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1", "def get_index(attribute, attributes):\n for i in range(14):\n if attribute == attributes[i]:\n return i", "def index(self, word):\n return self.tokens.index(word)", "def get_tag_for_word(self, word: str):\n doc = self.model(word)\n for token in doc:\n return token.pos_", "def token_to_idx(self) -> Dict[Hashable, int]:\n return self._token_to_idx", "def offset(self, needle):\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n\n idx = self._seq.index(needle)\n if self._seq[idx+len(needle):].find(needle) != -1:\n raise ValueError(\"Multiple occurances found!\")\n\n return idx", "def indexMatching(seq, condition):\n for i,x in enumerate(seq):\n if condition(x):\n return i\n return -1", "def index(self, atom):\n return self.atom_list.index(atom)", "def _code_indices(self) -> Tuple[int, ...]:\n return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)", "def getOmIndex(self, name):\n for i in range(len(self.oameni)):\n if self.oameni[i].name == name:\n return i\n return None", "def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1", "def all_lines_with_tag(mm, tag, nline_max=1024*1024):\n all_idx = []\n for iline in range(nline_max):\n idx = mm.find(tag.encode())\n if idx == -1:\n break\n mm.seek(idx)\n all_idx.append(idx)\n mm.readline()\n\n # guard\n if iline >= nline_max-1:\n raise RuntimeError('may need 
to increase nline_max')\n return all_idx", "def __getitem__(self, tag):\n return self.__tags.get(tag.lower(), 0)", "def _get_signal_index(self, signal):\n # Process signal :\n signal = signal.replace(', :', '').replace(':, ', '')[1:-1]\n # Find index :\n idx = tuple(int(k) for k in signal.split(', '))\n return self._navidx.index(idx)", "def idx(self):\n return self._idx", "def element_index(self):\n return self._index", "def index(self, item):\n\t\ti = 0\t\t\n\t\tif not self.len:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\treturn i\n\t\tactual = self.prim\n\t\twhile actual and actual.dato != item:\n\t\t\tactual = actual.prox\n\t\t\ti += 1\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\treturn i", "def set_indx(self, item):\n for i, s in enumerate(self._data):\n if item in s:\n return i\n return None", "def getAttrIndex(self, *args):\n return _libsbml.XMLToken_getAttrIndex(self, *args)", "def get_index(self, gi):\n for i in range(len(self.gradual_items)):\n gi_obj = self.gradual_items[i]\n if (gi.symbol == gi_obj.symbol) and (gi.attribute_col == gi_obj.attribute_col):\n return i\n return -1" ]
[ "0.7142307", "0.6958829", "0.6688589", "0.66438335", "0.65356153", "0.6207758", "0.62011987", "0.6198451", "0.61982673", "0.6162026", "0.6146128", "0.6113043", "0.60828024", "0.60632235", "0.60050696", "0.5982074", "0.59499717", "0.5918507", "0.5912195", "0.5911467", "0.59011835", "0.58865374", "0.58789486", "0.5863743", "0.5863466", "0.585393", "0.5851366", "0.5836543", "0.58301455", "0.5795933" ]
0.8766997
0
Given two tags and a label, return the psi factor of the two tag sequences
def get_psi_score(self, psi, pos1, pos2, lab, m1, m2): i, j = self.get_tag_index(m1), self.get_tag_index(m2) return psi[pos1, pos2, lab, i, j]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _psi_function(share1, share2):\n return (share1 - share2) * math.log(share1/share2)", "def get_emissions_probability(label_matches, given_tag, given_word, tag_counts):\r\n\tlookup_tuple = (given_word, given_tag)\r\n\tword_tag_frequency = label_matches.get(lookup_tuple, 0)\r\n\ttag_frequency = tag_counts[given_tag]\r\n\tif tag_frequency == 0:\r\n\t\temissions_probability = 0\r\n\telse:\r\n\t\temissions_probability = float(word_tag_frequency)/float(tag_frequency)\r\n\treturn emissions_probability", "def psi(x, y):\n return x", "def match_two_exp_psf(exp1, exp2):\n\tif has_smaller_psf(exp1, exp2):\n\t\texpm1 = match_psf(exp1, exp2)\n\t\treturn expm1, exp2\n\telif has_smaller_psf(exp2, exp1):\n\t\texpm2 = match_psf(exp2, exp1)\n\t\treturn exp1, expm2\n\telse:\n\t\tprint(\"[matchpsf] skip matching psf as the psfs are similar\")\n\t\treturn exp1, exp2", "def heuristic_2(a: str, b: str) -> float:\n # generate term-document matrices\n if get_intro(a) == \"\" or get_intro(b) == \"\":\n return 2\n else:\n corpus = [get_intro(a), get_intro(b)]\n vect = TfidfVectorizer()\n mat = vect.fit_transform(corpus)\n # return cosine similarity\n return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2", "def merge_overlapping_predictions(tags1: List[str], tags2: List[str]) -> List[str]:\n ret_sequence = []\n prev_label = \"O\"\n\n # Build a coherent sequence out of two\n # spans which predicates' overlap\n\n for tag1, tag2 in zip(tags1, tags2):\n label1 = tag1.split(\"-\", 1)[-1]\n label2 = tag2.split(\"-\", 1)[-1]\n if (label1 == \"V\") or (label2 == \"V\"):\n # Construct maximal predicate length -\n # add predicate tag if any of the sequence predict it\n cur_label = \"V\"\n\n # Else - prefer an argument over 'O' label\n elif label1 != \"O\":\n cur_label = label1\n else:\n cur_label = label2\n\n # Append cur tag to the returned sequence\n cur_tag = get_coherent_next_tag(prev_label, cur_label)\n prev_label = cur_label\n ret_sequence.append(cur_tag)\n return ret_sequence", "def fidelity(self, psi_1, psi_2):\n return np.abs(self.ip(psi_1, psi_2))**2", "def tag_prob(self, y):\n p = 1\n n = self._n\n y = (START_TAG,) * (n - 1) + tuple(y) + (END_TAG,)\n for i in range(len(y) - self._n + 1):\n tag = y[i + n - 1]\n prev_tags = y[i:i + n - 1]\n p *= self.trans_prob(tag, prev_tags)\n\n return p", "def masi_distance(label1, label2):\r\n\r\n len_intersection = len(label1.intersection(label2))\r\n len_union = len(label1.union(label2))\r\n len_label1 = len(label1)\r\n len_label2 = len(label2)\r\n if len_label1 == len_label2 and len_label1 == len_intersection:\r\n m = 1\r\n elif len_intersection == min(len_label1, len_label2):\r\n m = 0.67\r\n elif len_intersection > 0:\r\n m = 0.33\r\n else:\r\n m = 0\r\n\r\n return 1 - (len_intersection / len_union) * m", "def compute_class_freqs(labels):\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # total number of patients (rows)\n N = len(labels)\n \n positive_frequencies = np.sum(labels,axis=0)/N\n negative_frequencies = 1-positive_frequencies\n\n ### END CODE HERE ###\n return positive_frequencies, negative_frequencies", "def binary_distance(label1, label2):\r\n\r\n return 0.0 if label1 == label2 else 1.0", "def tag_log_prob(self, y, add_end_token=True):\n p = 0\n n = self._n\n y = (START_TAG,) * (n - 1) + tuple(y) + (END_TAG,) * add_end_token\n for i in range(len(y) - self._n + 1):\n tag = y[i + n - 1]\n prev_tags = y[i:i + n - 1]\n if self.trans_prob(tag, prev_tags) == 0:\n return -math.inf\n\n p += math.log2(self.trans_prob(tag, 
prev_tags))\n\n return p", "def t2_given_t1(t2:str, t1:str, train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Tuple[int, int]: \n tags = [pair[1] for pair in train_bag]\n count_t1 = len([t for t in tags if t == t1])\n count_t2_t1 = 0\n for index in range(len(tags)-1):\n if tags[index] == t1 and tags[index+1] == t2:\n count_t2_t1 += 1\n return (count_t2_t1, count_t1)", "def retagger(tags):\n if tags == 'Positive':\n return 'pos'\n else:\n return 'neg'", "def text_proximity(str_1: str, str_2: str) -> float:\n tokens_1 = Counter(str_1.split(' '))\n tokens_2 = Counter(str_2.split(' '))\n return _normalized_scalar_product(tokens_1, tokens_2)", "def f_value(a, b):\r\n if not any(a) or not any(b) or len(a) <= 1 or len(b) <= 1:\r\n raise ValueError(\"Vectors should contain more than 1 element\")\r\n F = var(a) / var(b)\r\n dfn = len(a) - 1\r\n dfd = len(b) - 1\r\n return dfn, dfd, F", "def trans_prob(self, tag, prev_tags=None):\n assert prev_tags is not None or self._n == 1\n prev_tags = prev_tags or ()\n\n return self._trans.get(prev_tags, {}).get(tag, 0)", "def tag2predictions(ote_tag_sequence, ts_tag_sequence):\n n_tag = len(ote_tag_sequence)\n # opinion target sequence and targeted sentiment sequence\n ot_sequence, ts_sequence = [], []\n beg, end = -1, -1\n for i in range(n_tag):\n tag = ote_tag_sequence[i]\n if tag == 'S':\n ot_sequence.append((i, i))\n elif tag == 'B':\n beg = i\n elif tag == 'E':\n end = i\n if end > beg and beg != -1:\n ot_sequence.append((beg, end))\n beg, end = -1, -1\n sentiments = []\n beg, end = -1, -1\n for i in range(n_tag):\n ts_tag = ts_tag_sequence[i]\n # current position and sentiment\n eles = ts_tag.split('-')\n if len(eles) == 2:\n pos, sentiment = eles\n else:\n pos, sentiment = 'O', 'O'\n if sentiment != 'O':\n # current word is a subjective word\n sentiments.append(sentiment)\n if pos == 'S':\n # singleton\n ts_sequence.append((i, i, sentiments[0]))\n sentiments = []\n elif pos == 'B':\n beg = i\n elif pos == 'E':\n end = i\n # schema1: only the consistent sentiment tags are accepted\n # that is, all of the sentiment tags are the same\n if end > beg > -1 and len(set(sentiments)) == 1:\n ts_sequence.append((beg, end, sentiment))\n sentiments = []\n beg, end = -1, -1\n\n # schema2: only consider the sentiment at the beginning of the aspect span\n # if end > beg > -1:\n # ts_sequence.append((beg, end, sentiments[0]))\n # sentiments = []\n # beg, end = -1, -1\n return ot_sequence, ts_sequence", "def correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result", "def t2_given_t1(t2, t1, train_bag):\n tags = [pair[1] for pair in train_bag]\n count_t1 = len([t for t in tags if t == t1])\n count_t2_t1 = 0\n for index in range(len(tags)-1):\n if tags[index] == t1 and tags[index+1] == t2:\n count_t2_t1 += 1\n return count_t2_t1, count_t1", "def GetSuggesteeSimilarity(suggestee1_tags, suggestee2_tags):\n total_size = len(suggestee1_tags) + len(suggestee2_tags)\n if total_size == 0:\n return 1.\n size_of_intersection = 0\n elems_in_first = {}\n for tag in suggestee1_tags:\n elems_in_first[tag] = True\n for tag in suggestee2_tags:\n if tag in elems_in_first:\n size_of_intersection += 1\n return size_of_intersection / (total_size - size_of_intersection)", 
"def compute_class_freqs(gen):\r\n labels = gen.labels\r\n N = labels.shape[0]\r\n positive_frequencies = np.sum(labels, axis=0) / N\r\n negative_frequencies = np.sum(1 - labels, axis=0) / N\r\n return positive_frequencies, negative_frequencies", "def bspe(a, b):\n if b-a == 1:\n return MPZ_ONE, MPZ(b)\n m = (a+b)//2\n p1, q1 = bspe(a, m)\n p2, q2 = bspe(m, b)\n return p1*q2+p2, q1*q2", "def evaluate(self, feats, tag_set):\n corect=0\n incorect=0 \n for (tokens, tags) in zip(feats, tag_set):\n yyhat= self.tag(tokens)\n for pre, tag in zip(yyhat, tags):\n if pre==tag:\n corect+=1\n else:\n incorect+=1\n\n return corect/(corect+incorect)", "def get_e(word, tag, e_word_tag_counts, q_uni_counts):\n word_tag_tupple = (word, tag)\n\n word_tag_count = 0\n if word_tag_tupple in e_word_tag_counts:\n word_tag_count = e_word_tag_counts[word_tag_tupple]\n\n nof_tag = q_uni_counts[tag]\n return float(word_tag_count) / nof_tag", "def sentence_encoding_rnn_phi(t1, t2):\n return (t1.leaves(), t2.leaves())", "def best_match(psi1, psi2, threshold=None):\n if threshold is None:\n threshold = (2 * psi1.shape[0])**-0.25\n Q = np.abs(psi1.T.conj() @ psi2) # Overlap matrix\n orig, perm = so.linear_sum_assignment(-Q)\n return perm, Q[orig, perm] < threshold", "def best_sequence(self, T, pos, psi, phi, fix_tags=[]):\n for idx, m in fix_tags:\n phi[idx - 1, m] = 100\n # if fix_idx:\n # phi[fix_idx - 1, fix_m] = 100\n msgs, pointers = max_product(T, pos, psi, phi, True)\n tags_dict = get_best_tags(T, msgs, pointers)\n tags = []\n for i in range(1, len(T) + 1):\n tags.append(self.get_tag(tags_dict[str(i)]))\n return tags", "def psi_wf(self, vw, d1, d2, ns, tl):\n\t osmotic = (R*299./VW)*np.log((((vw/self.ZW)*self.ZW)/(VW))/((((vw/self.ZW)*self.ZW)/(VW))+ns))/10**6 #MPa\n\t turgor = ((vw/self.ZW) - d1)**d2#MPa\n\t return turgor+osmotic #MPa ", "def interval_distance(label1,label2):\r\n\r\n try:\r\n return pow(label1 - label2, 2)\r\n# return pow(list(label1)[0]-list(label2)[0],2)\r\n except:\r\n print(\"non-numeric labels not supported with interval distance\")" ]
[ "0.59438974", "0.5719111", "0.5711388", "0.5612045", "0.5503579", "0.54424065", "0.54400754", "0.5393413", "0.53729504", "0.53618443", "0.5353206", "0.5324507", "0.5300333", "0.52868974", "0.5257343", "0.52473134", "0.52161574", "0.52138144", "0.5211579", "0.5206199", "0.5191032", "0.5182754", "0.51764643", "0.5171435", "0.5162805", "0.5107067", "0.510491", "0.50815046", "0.508114", "0.5079237" ]
0.58502215
1
Calculate the gradient of the log probability for a given tree with respect to the psi and phi parameters
def dlog_prob(self, T, pos, m, psi, phi=tr.Tensor()): if phi.size() == tr.Size([0]): phi = self.create_phi(T, pos, m) dpsi_score = self.dlog_score(T, pos, m, psi) dpsi_Z = self.dlogZ(T, pos, psi, phi) return dpsi_score - dpsi_Z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dlogZ(self, T, pos, psi, phi):\n msgs = belief_propagation(T, pos, psi, phi, True)\n dpsi = calculate_gradient(msgs, T, pos, psi, True, True)\n return dpsi", "def fd_grad(self, T, pos, psi, phi, eps=1e-5):\n dpsi = tr.zeros_like(psi)\n dphi = tr.zeros_like(phi)\n for pos1 in range(psi.shape[0]):\n for pos2 in range(psi.shape[1]):\n for lab in range(psi.shape[2]):\n for i in range(psi.shape[3]):\n for j in range(psi.shape[4]):\n psi[pos1, pos2, lab, i, j] += eps\n val1 = self.logZ(T, pos, psi, phi)\n psi[pos1, pos2, lab, i, j] -= 2 * eps\n val2 = self.logZ(T, pos, psi, phi)\n psi[pos1, pos2, lab, i, j] += eps\n dpsi[pos1, pos2, lab, i, j] = (val1 - val2) / (2 * eps)\n for p in range(phi.shape[0]):\n for i in range(phi.shape[1]):\n phi[p, i] += eps\n val1 = self.logZ(T, pos, psi, phi)\n phi[p, i] -= 2 * eps\n val2 = self.logZ(T, pos, psi, phi)\n phi[p, i] += eps\n dphi[p, i] = (val1 - val2) / (2 * eps)\n return dpsi, dphi", "def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S", "def Gradient(Walker,particle):\n\n h=0.001\n dPsi = zeros(shape=shape(Walker.Re[particle]))\n for i in range(Walker.sys_dim):\n Y=Walker.Re[particle][i]\n Walker.Re[particle][i]-=h\n wfs1=wfs(Walker)\n Walker.Re[particle][i]+=2.0*h\n wfs2=wfs(Walker)\n dPsi[i] = (wfs2-wfs1)/2/h\n Walker.Re[particle][i]=Y\n\n return dPsi", "def logp_grad(self, xs, ys, fs, **kwargs):", "def _log_likelihood_gradients(self):\r\n return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))", "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, inline=True)", "def log_prob(self, T, pos, m, psi, phi=tr.Tensor()):\n if phi.size() == tr.Size([0]):\n phi = self.create_phi(T, pos, m)\n return self.log_score(T, pos, m, psi, phi) - self.logZ(T, pos, psi, phi)", "def _log_likelihood_gradients(self):\n return 
np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))", "def grad_log(self, X):\n g = autograd.elementwise_grad(self.log_den)\n G = g(X)\n return G", "def grad_log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf * np.ones_like(self.occr, dtype=float)\n\n # Calculate components first\n N_exp = self.calc_integral() * self._N_stars # perhaps not needed\n nexp_terms = self._N_stars * self.calc_bin_volumes() * self.H_bar_array\n s_terms = self.H_array * self.F_array * self.occr\n numerator_terms = self.H_array * self.F_array\n\n if not tf.is_tensor(self.occr):\n # Checking shapes of intermediate terms,\n # numerator_terms vs s_terms.sum(-1, -2) and vs v factors.\n intermediate_terms = numerator_terms / s_terms.sum(axis=(-1, -2))\n # TODO: v_factor changed to negative, I think a minus\n # sign had been missed\n grad_log_array = - nexp_terms + intermediate_terms.sum(axis=0)\n\n # BUG TODO\n if np.isnan(grad_log_array).any():\n warnings.warn(\".grad_log_likelihood value is nan.\")\n import pdb; pdb.set_trace()\n grad_log_array = -np.inf * grad_log_array\n else:\n raise NotImplementedError(\"Manual gradient calculate with \"\n \"tensorflow objects isn't \"\n \"implemented, and seems a bit \"\n \"redundant.\")\n\n return grad_log_array", "def gradient(self, theta):\n a = -(6 * self.scale ** 2)\n b = 3 * self.scale ** 2 + np.exp(2 * theta)\n b *= np.log(3 * self.scale ** 2 * np.exp(-2 * theta) + 1)\n return a / b", "def log_probability(theta):\n global priors\n global logp\n lp = np.sum([priors[p].logpdf(x) for p, x in zip(logp.parnames, theta)])\n if not np.isfinite(lp) or np.isnan(lp):\n return -np.inf\n ll = logp(theta)\n if not np.isfinite(ll):\n return -np.inf\n return lp + ll", "def log_probability(theta):\n global priors\n global logp\n lp = np.sum([priors[p].logpdf(x) for p, x in zip(logp.parnames, theta)])\n if not np.isfinite(lp) or np.isnan(lp):\n return -np.inf\n ll = logp(theta)\n if not np.isfinite(ll):\n return -np.inf\n return lp + ll", "def logZ(self, T, pos, psi, phi):\n msgs = belief_propagation(T, pos, psi, phi, True)\n log_z = calculate_belief_sum(msgs, True)\n return log_z", "def log_prior_grad(self, inputs):", "def grad_llh(self, params):\n grad = np.clip(self.grad_log_likelihood(params[0], params[1], params[2:]), SMALLEST_NUMBER,\n LARGEST_NUMBER)\n\n return grad", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def _log_prior_gradients(self):\n if self.priors.size == 0:\n return 0.\n x = self.param_array\n ret = np.zeros(x.size)\n #compute derivate of prior density\n [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]\n #add in jacobian derivatives if transformed\n priored_indexes = np.hstack([i for p, i in self.priors.items()])\n for c,j in self.constraints.items():\n if not isinstance(c, Transformation):continue\n for jj in j:\n if jj in priored_indexes:\n ret[jj] += c.log_jacobian_grad(x[jj])\n return ret", "def _log_prior_gradients(self):\n x = self._get_params()\n ret = np.zeros(x.size)\n [np.put(ret,i,p.lnpdf_grad(xx)) for i,(p,xx) 
in enumerate(zip(self.priors,x)) if not p is None]\n return ret", "def grad_log_likelihood(g_t):\n # state vector [g1, g2, g3, ... , gn-1, gn]\n state_vector = np.zeros(N)\n\n # factor that will be added to one node and subtracted from another\n def alphaij(gj, gi, deltaij, varij):\n return ((gj - gi) - deltaij) / varij\n\n # indices of state vector\n # Iterate over all connections\n for r in self.graph.index:\n state1, state2, value, variance, ligand, standard_state = self.graph.iloc[r]\n i = self.states[self.states.name == state1].index[0]\n j = self.states[self.states.name == state2].index[0]\n\n gj = g_t[j]\n gi = g_t[i]\n\n edge_attr = self.cycle.edges()[(state1, state2)]\n deltaij = edge_attr['energy'] # measured difference\n varij = edge_attr['weight'] # measured variance\n\n shared_alpha = alphaij(gj, gi, deltaij, varij)\n\n state_vector[i] += shared_alpha\n state_vector[j] -= shared_alpha\n\n return state_vector", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood", "def log_likelihood_gradients_(self, y, f):\n log_lik = self.evaluate_log_likelihood(y, f)\n f = np.squeeze(f)\n J = jacrev(self.evaluate_log_likelihood, argnums=1)\n H = jacrev(J, argnums=1)\n return log_lik, J(y, f), H(y, f)", "def log_LxP(theta, D):\r\n p1 = np.exp( -((theta-D[0])**2)/2 )\r\n p2 = np.exp( -((theta-D[1])**2)/2 )\r\n if np.abs(theta) <= 1:\r\n LxP = p1*p2\r\n else:\r\n LxP = 0.0\r\n return np.log(LxP)", "def _log_prior_gradients(self):\r\n if self.priors is None:\r\n return 0.\r\n x = self._get_params()\r\n ret = np.zeros(x.size)\r\n [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]\r\n return ret", "def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. 
/ (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad", "def log_likelihood(self, theta=None, phi=None):\n theta = theta if theta is not None else self.theta\n phi = phi if phi is not None else self.phi\n ret = 0.\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n tp = 0.\n for k in range(self.n_components):\n tp += theta[m, k] * phi[k, w_mn]\n ret += np.log(tp)\n return ret", "def f(p, phi, phib, df):\n\treturn -log(p) - df + (p-1)*phi + \\\n\t ( phi*(1-p) + phib + \\\n\t 5./4*alpha*(phi*p)**(9./4)-(9./4)*alpha*(p*phi)**(5./4) - \\\n\t (1./2)(1-p*phi)**2 - (phib/Nb)-5./4*alpha*(phi+phib)**(9./4) + \\\n\t (9./4)*alpha*(phi+phib)**(5.4) + \\\n\t 1./2*(1-phi-phib)**2 ) * Ns", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def log_prob(self):\n res = -self.L_h/2*np.log(2*np.pi*self.la)\n res = res + self.L_h*(self.L_h-1)/2*self.a\n\n\n res = res - 1/(2*self.la)*np.square(np.linalg.norm(self.e*self.pie))\n\n res = res - 1/(2*self.la)*np.sum(self.e2*self.pie_var)\n\n res = res - self.L_h/2*np.log(2*np.pi*self.sigma2)\n res = res - 1/(2*self.sigma2)*(np.square(np.linalg.norm(self.w))+np.trace(self.R))\n\n print(\"Log-probability difference = {}\".format(res - self.LP), file=self.logfile)\n self.LP = res\n return res" ]
[ "0.7159313", "0.6766048", "0.6629792", "0.6519036", "0.6494689", "0.6489138", "0.6413346", "0.64006495", "0.6371672", "0.63623327", "0.63360333", "0.63232714", "0.6310686", "0.6310686", "0.6260827", "0.62424856", "0.62325174", "0.62135476", "0.61898154", "0.61844623", "0.61440015", "0.6114388", "0.6110577", "0.6110501", "0.60843813", "0.6071816", "0.6047368", "0.60419714", "0.6017729", "0.60096514" ]
0.6783176
1
returns beat info as string
def Beat_disp(self): return ' '.join(str(x+self.offset) for x in self.beats)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info(self):\n out = f\"sec: {self.em_sec()}\\nmin: {self.em_min()}\"\n out += f\"\\nhora: {self.em_hora()}\\ndia: {self.em_dia()}\"\n return out", "def get_at_as_string(self):\n\n return self.at.strftime(\"%Y-%m-%dT%H:%M:%S.000Z\")", "def __str__(self):\n return_text = \"Time-Triggered Frame information =>\\n\"\n return_text += \" Sender id : \" + str(self.__sender_id) + \"\\n\"\n return_text += \" Receivers ids : \" + str(self.__receivers_id) + \"\\n\"\n return_text += \" Path : \" + str(self.__paths) + \"\\n\"\n return_text += \" End_to_End : \" + str(self.__end_to_end_delay) + \" nanoseconds\\n\"\n return_text += \" Period : \" + str(self.__period) + \" nanoseconds\\n\"\n return_text += \" Starting : \" + str(self.__starting_time) + \" nanoseconds\\n\"\n return_text += \" Deadline : \" + str(self.__deadline) + \" nanoseconds\\n\"\n return_text += \" Size : \" + str(self.__size) + \" bytes\"\n return return_text", "def coverage_time_str(info_df: DataFrame) -> str:\n start = attribute_value(info_df, \"time_coverage_start\")\n start_dt = parse_time(start)\n\n now = datetime.now()\n now = now.replace(hour=0, minute=0, second=0, microsecond=0)\n\n if start_dt < now:\n start = now.isoformat() + \"Z\"\n end = attribute_value(info_df, \"time_coverage_end\")\n\n return f\"[({start}):1:({end})]\"", "def get_now_time():\r\n return '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + ']: '", "def human_readable_info(self) -> str:\n next_session = unix_str(self._stat.next_session)\n last_session = unix_str(self._stat.last_session)\n return \"\"\"\n Next Session: {}\n Last Session: {}\n Repetitions: {}\n Health: {}\n ------------------------\n Past Quality (last 20):\n ------------------------\n {}\n \"\"\".format(\n next_session,\n last_session,\n self._stat.actual_repetitions,\n self._health(),\n self._past_quality_graph(),\n )", "def get_entry_string(self):\n return f\"{self.get_time_string()} {self.mode} {self.radar}\"", "def getDebugText(self):\n timeDifference = time.time() - self.time_created\n hours = math.floor(timeDifference / 3600)\n minutes = math.floor((timeDifference % 3600) / 60)\n seconds = math.floor(timeDifference % 3600 % 60)\n\n output = \"\\n\" * 50\n output += \"Time started: %s\\n\" % time.ctime(self.time_created)\n output += \"Time now: %s\\n\" % time.ctime()\n output += \"Time elapsed: %02d:%02d:%02d\\n\" % (hours, minutes, seconds)\n output += (\"=\" * 80) + \"\\n\"\n output += \"Health potions used: %d\\n\" % self.hp_pots_used\n output += \"Health potions per hour: %d\\n\" % (self.hp_pots_used / (\n timeDifference / 3600))\n output += \"Mana potions used: %d\\n\" % self.mana_pots_used\n output += \"Mana potions per hour: %d\\n\" % (self.mana_pots_used / (\n timeDifference / 3600))\n return output", "def get_info(self) -> str:\n return self.info", "def output(self):\n if self.after_sunrise:\n return \"%02d:%02d:%02dR\" % self.time\n if self.after_sunset:\n return \"%02d:%02d:%02dT\" % self.time\n return \"%02d:%02d:%02d\" % self.time", "def beatTracker(inputFile):\n beats, downbeats, ose, sig = analyse(inputFile)\n return beats, downbeats", "def to_str(self):\n return self.PATTERN % (self.hours, self.minutes, self.seconds, self.milliseconds)", "def msg(self):\n ended = time.time()\n started_wait = datetime.datetime.fromtimestamp(self.started).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n raised_date = datetime.datetime.fromtimestamp(ended).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n duration = ended - self.started\n return \"Info[started at {}, raised at {} after 
{}s]\".format(\n started_wait, raised_date, round(duration, 2)\n )", "def __str__(self):\n\n string = ''\n string += \"Battery Voltage: \" + \"{0:.2f}\".format(self.getBatteryVoltage()) + '\\n'\n string += \"Data Logging: \" + str(self.getDataLoggingStatus()) + '\\n'\n string += \"Data Filename: \" + self.getFilename() +'\\n'\n\n string += \"Time Since Instrument Reset (s): \" + \"{0:.2f}\".format(self.getResetTime()) + '\\n'\n string += \"Data Age (s): \" + \"{0:.2f}\".format(self.getDataAge()) + '\\n'\n string += '\\n'\n string += \"Interior Temperature 1 (F): \" + \"{0:.2f}\".format(self.getInteriorTemperature1()) + '\\n'\n string += \"Interior Temperature 2 (F): \" + \"{0:.2f}\".format(self.getInteriorTemperature2()) + '\\n'\n string += \"Interior Temperature 3 (F): \" + \"{0:.2f}\".format(self.getInteriorTemperature3()) + '\\n'\n string += \"Exterior Temperature (F): \" + \"{0:.2f}\".format(self.getExteriorTemperature()) + '\\n'\n string += \"Pressure (PSI): \" + \"{0:.2f}\".format(self.getPressure()) + '\\n'\n string += \"Humidity (%): \" + \"{0:.2f}\".format(self.getHumidity()) + '\\n'\n string += '\\n'\n\n string += \"GPS Time: \" + str(self.getGpsTime1()) + '\\n'\n string += \"Latitude: \" + \"{0:.9f}\".format(self.getLatitude1()) + '\\n'\n string += \"Longitude: \" + \"{0:.9f}\".format(self.getLongitude1()) + '\\n'\n string += \"Altitude (ft): \" + \"{0:.2f}\".format(self.getAltitude1()) + '\\n'\n string += \"Speed (MPH): \" + \"{0:.2f}\".format(self.getSpeed()) + '\\n'\n string += '\\n'\n\n string += \"GPS Time: \" + str(self.getGpsTime2()) + '\\n'\n string += \"Latitude: \" + \"{0:.9f}\".format(self.getLatitude2()) + '\\n'\n string += \"Longitude: \" + \"{0:.9f}\".format(self.getLongitude2()) + '\\n'\n string += \"Altitude (ft): \" + \"{0:.2f}\".format(self.getAltitude2()) + '\\n'\n string += '\\n'\n\n ax, ay, az = self.getAcceleration()\n string += \"Acceleration (x, y, z): \"\n string += \"{0:.2f}\".format(ax) + \", \"\n string += \"{0:.2f}\".format(ay) + \", \"\n string += \"{0:.2f}\".format(az) + '\\n'\n\n gx, gy, gz = self.getRates()\n string += \"Rates (x, y, z): \"\n string += \"{0:.2f}\".format(gx) + \", \"\n string += \"{0:.2f}\".format(gy) + \", \"\n string += \"{0:.2f}\".format(gz) + '\\n'\n\n mx, my, mz = self.getMagneticReading()\n string += \"Magnetic Field (x, y, z): \"\n string += \"{0:.2f}\".format(mx) + \", \"\n string += \"{0:.2f}\".format(my) + \", \"\n string += \"{0:.2f}\".format(mz) + '\\n'\n\n roll, pitch, yaw = self.getAttitude()\n string += \"Roll (deg): \" + \"{0:.2f}\".format(roll) + '\\n'\n string += \"Pitch (deg): \" + \"{0:.2f}\".format(pitch) + '\\n'\n string += \"Yaw (deg): \" + \"{0:.2f}\".format(yaw) + '\\n'\n string += '\\n'\n relayStates = self.getRelayStates()\n \n\n string += \"Relay States: \" \n string += (( \"ON \") if relayStates[0] else ( \"OFF \")) \n string += (( \"ON \") if relayStates[1] else ( \"OFF \"))\n string += (( \"ON \") if relayStates[2] else ( \"OFF \"))\n string += (( \"ON \") if relayStates[3] else ( \"OFF \"))\n string += '\\n'\n\n\n return string", "def _player_info(self):\n return \"%r %s seat:%s m:%r c:%s b:%s \" % (self.name, self.serial, self.seat, self.money, self._chips, self._bet)", "def info(cls):\n return 'Snapshot (i.e. 
hydro variables at given time) plotting module.'", "def getFormattedText(self):\r\n h = \"00\"\r\n m = \"00\"\r\n s = \"00\"\r\n if(self.seconds < 10):\r\n s = \"0\" + str(self.seconds)\r\n else:\r\n s = str(self.seconds)\r\n\r\n if(self.minutes < 10):\r\n m = \"0\" + str(self.minutes)\r\n else:\r\n m = str(self.minutes)\r\n\r\n if(self.hours < 10):\r\n h = \"0\" + str(self.hours)\r\n else:\r\n h = str(self.hours)\r\n\r\n return h + \":\" + m + \":\" + s", "def __str__(self):\n\n return self.date.strftime('%y/%m/%d') + \" \" + self.time.strftime(\"%I:%M %p\") + \" \" + self.activity + \" \" + self.description[:40]", "def __str__(self):\n return '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second)", "def getinfo(timestamp):\n datetime, message = timestamp.split(']')\n\n date, time = datetime.split()\n date = date.strip('[')\n hour, minute = time.split(':')\n\n message = message.split()\n extra = message[1] # either 'asleep', 'up', or '#XXX'\n\n return date, int(hour), int(minute), extra", "def ctime(self):\n return \"\"", "def ctime(self):\n return \"\"", "def _timestamp(self):\n\n retval = []\n\n if self.log_level >= _Log.DEBUG:\n retval.append('%f: ' % (time.time() - self.start_time,))\n\n return ''.join(retval)", "def __repr__(self: object) -> str:\n measstring: str = \"Tatort - {:04d} - {} - {} - {} - {}\".format(self.episode_id, self.episode_name, self.episode_inspectors, self.episode_sequence, self.episode_broadcast)\n return measstring", "def info(self):\n return (f\"Match id: {self._id}\\n\"\n f\"dire_score: {self.dire_score}\\n\"\n f\"dire_team: {self.dire_team}\\n\"\n f\"duration: {self.duration}\\n\"\n f\"game_mode: {self.game_mode}\\n\"\n f\"patch: {self.patch}\\n\"\n f\"radiant_score: {self.radiant_score}\\n\"\n f\"radiant_team: {self.radiant_team}\\n\"\n f\"radiant_win: {self.radiant_win}\\n\"\n f\"skill: {self.skill}\\n\"\n f\"start_time: {self.start_time}\\n\")", "def info(self) -> str:\n return self._info", "def info(self) -> str:\n return self._info", "def timestr():\n return dt.strftime(dt.now(),'%H:%M:%S')", "def summary_string(self) -> str:", "def __str__(self) -> str:\n\n # Get current time\n t = timer()\n # Length of label field, calculated from max label length\n fldlen = [len(lbl) for lbl in self.t0] + [\n len(self.default_label),\n ]\n lfldln = max(fldlen) + 2\n # Header string for table of timers\n s = f\"{'Label':{lfldln}s} Accum. Current\\n\"\n s += \"-\" * (lfldln + 25) + \"\\n\"\n # Construct table of timer details\n for lbl in sorted(self.t0):\n td = self.td[lbl]\n if self.t0[lbl] is None:\n ts = \" Stopped\"\n else:\n ts = f\" {(t - self.t0[lbl]):.2e} s\" % (t - self.t0[lbl]) # type: ignore\n s += f\"{lbl:{lfldln}s} {td:.2e} s {ts}\\n\"\n\n return s" ]
[ "0.66582495", "0.6518425", "0.6161863", "0.6112195", "0.6085194", "0.6056986", "0.6040513", "0.59848976", "0.5980764", "0.59712094", "0.5916152", "0.59127504", "0.5898801", "0.5861905", "0.58609194", "0.5855922", "0.58121693", "0.5801718", "0.5774646", "0.5771938", "0.574199", "0.574199", "0.5730006", "0.57110345", "0.5678113", "0.56704986", "0.56704986", "0.5667893", "0.5653733", "0.5648271" ]
0.7030934
0
Team members' stats page for app
def team_members_stats(request): username = request.session.get('username', False) profile = request.session.get('profile', False) if (username): context = {'username': username, 'profile': profile} return render(request, 'MedTAG_sket_dock_App/index.html', context) else: return redirect('MedTAG_sket_dock_App:login')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_stats(self):\n print(self.team_one.name + \" stats: \")\n self.team_one.stats()\n print(self.team_two.name + \" stats: \")\n self.team_two.stats()", "def info():\n print 'Loading info page'\n\n team_list = datastore.get_all_teams(engine)\n\n return render_template('info.html', rows=team_list)", "def baron_stats(request, name):\n assert isinstance(request, HttpRequest)\n query = Baron_Players.objects.filter(summoner_name=name)\n \n return render(\n request,\n 'AscensionESports_Baseline/player_stats.html',\n {\n 'background': getBaronBackground(),\n 'color': getBaronColor(),\n 'title':'Baron League Stats',\n 'query_results': query,\n 'year': datetime.now().year,\n }\n )", "def stats(request):\n \n return render(request, 'stats.html')", "def showStats(population, masterList, index):\n count = 0\n if index == \"all\":\n for team in population:\n print (\"Team at index\", count)\n print(\"Tot Avg\", team.totAvg)\n print(\"Tot Runs\", team.totRuns)\n print(\"Tot HRs\", team.totHr)\n print(\"Tot RBIs\", team.totRbi)\n print(\"Tot SB\", team.totSb)\n print(\"Tot points\", team.points, '\\n')\n count += 1\n else:\n print(\"Team at index\", index)\n print(\"Tot Avg\", population[index].totAvg)\n print(\"Tot Runs\", population[index].totRuns)\n print(\"Tot HRs\", population[index].totHr)\n print(\"Tot RBIs\", population[0].totRbi)\n print(\"Tot SB\", population[0].totSb)\n print(\"Tot points\", population[0].points, '\\n')", "def teams(request):\n if request.method == 'GET':\n # Validates if the member exists. If members logon successfully they are created on the teams-app.\n email = request.session.get('email', None)\n full_name = request.session.get('full_name', None)\n try: \n member = Member.objects.get(email=email)\n except ObjectDoesNotExist:\n member = Member(email=email, full_name=full_name)\n member.save()\n\n member_teams = member.teams.all() \n context = {\n 'email': email,\n 'full_name': full_name,\n 'member_teams': member_teams\n }\n return render(request, 'teamsapp/teams.html', context)\n else:\n raise Http404('Not allowed')", "async def stats(ctx):\n pythonVersion = platform.python_version()\n dpyVersion = discord.__version__\n serverCount = len(bot.guilds)\n memberCount = len(set(bot.get_all_members()))\n\n embed = discord.Embed(\n title=f\"{bot.user.name} Stats\",\n description=\"\\uFEFF\",\n colour=ctx.author.colour,\n timestamp=ctx.message.created_at,\n )\n\n embed.add_field(name=\"Bot Version:\", value=\"0.0.1\")\n embed.add_field(name=\"Python Version:\", value=pythonVersion)\n embed.add_field(name=\"Discord.Py Version\", value=dpyVersion)\n embed.add_field(name=\"Total Guilds:\", value=serverCount)\n embed.add_field(name=\"Total Users:\", value=memberCount)\n embed.add_field(name=\"Bot Developers:\", value=\"<@271612318947868673>\")\n\n embed.set_footer(text=f\"Carpe Noctem | {bot.user.name}\")\n embed.set_author(name=bot.user.name, icon_url=bot.user.avatar_url)\n\n await ctx.send(embed=embed)", "async def view_stats(self, ctx):\n app_info = await self.bot.application_info()\n total_ram = (psutil.virtual_memory().total >> 30) + 1\n embed = discord.Embed(\n title=\"Bot Stats\",\n description=f\"Running on a dedicated server with {total_ram}GB RAM \\n provided by RandomGhost#0666.\",\n )\n\n embed.add_field(name=\"**__General Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency*1000:.03f}ms\")\n embed.add_field(name=\"Guild Count\", value=f\"{len(self.bot.guilds):,}\")\n embed.add_field(name=\"User Count\", 
value=f\"{len(self.bot.users):,}\")\n\n embed.add_field(name=\"**__Technical Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent():.02f}%\")\n embed.add_field(name=\"System RAM Usage\", value=f\"{psutil.virtual_memory().used/1048576:.02f} MB\")\n embed.add_field(name=\"System Uptime\", value=f\"{timedelta(seconds=int(time.time() - psutil.boot_time()))}\")\n embed.add_field(name=\"Bot CPU Usage\", value=f\"{process.cpu_percent():.02f}%\")\n embed.add_field(name=\"Bot RAM Usage\", value=f\"{process.memory_info().rss / 1048576:.02f} MB\")\n embed.add_field(name=\"Bot Uptime\", value=f\"{timedelta(seconds=int(time.time() - process.create_time()))}\")\n\n embed.add_field(name=\"**__Links__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Support Server\", value=\"[https://discord.swaglyrics.dev](https://discord.swaglyrics.dev)\")\n embed.add_field(name=\"Invite\", value=\"[https://invite.swaglyrics.dev](https://invite.swaglyrics.dev)\")\n embed.add_field(\n name=\"Source\",\n value=\"[https://swaglyrics.dev/SwagLyrics-Discord-Bot]\" \"(https://swaglyrics.dev/SwagLyrics-discord-bot)\",\n )\n\n embed.set_footer(\n text=f\"Made by {app_info.owner} • {self.bot.get_user(512708394994368548)}\",\n icon_url=[\n app_info.owner.avatar_url_as(size=128),\n self.bot.get_user(512708394994368548).avatar_url_as(size=128),\n ][getrandbits(1)],\n ) # randomize clash or flabbet avatar\n\n await ctx.send(embed=embed)", "def index():\r\n\r\n title = \"Global Statistics\"\r\n\r\n n_auth = n_auth_users()\r\n\r\n n_anon = n_anon_users()\r\n\r\n n_total_users = n_anon + n_auth\r\n\r\n n_published_apps = cached_apps.n_published()\r\n n_draft_apps = cached_apps.n_draft()\r\n n_total_apps = n_published_apps + n_draft_apps\r\n\r\n n_tasks = n_tasks_site()\r\n\r\n n_task_runs = n_task_runs_site()\r\n\r\n top5_apps_24_hours = get_top5_apps_24_hours()\r\n\r\n top5_users_24_hours = get_top5_users_24_hours()\r\n\r\n locs = get_locs()\r\n\r\n show_locs = False\r\n if len(locs) > 0:\r\n show_locs = True\r\n\r\n stats = dict(n_total_users=n_total_users, n_auth=n_auth, n_anon=n_anon,\r\n n_published_apps=n_published_apps,\r\n n_draft_apps=n_draft_apps,\r\n n_total_apps=n_total_apps,\r\n n_tasks=n_tasks,\r\n n_task_runs=n_task_runs)\r\n\r\n users = dict(label=\"User Statistics\",\r\n values=[\r\n dict(label='Anonymous', value=[0, n_anon]),\r\n dict(label='Authenticated', value=[0, n_auth])])\r\n\r\n apps = dict(label=\"Apps Statistics\",\r\n values=[\r\n dict(label='Published', value=[0, n_published_apps]),\r\n dict(label='Draft', value=[0, n_draft_apps])])\r\n\r\n tasks = dict(label=\"Task and Task Run Statistics\",\r\n values=[\r\n dict(label='Tasks', value=[0, n_tasks]),\r\n dict(label='Answers', value=[1, n_task_runs])])\r\n\r\n return render_template('/stats/global.html', title=title,\r\n users=json.dumps(users),\r\n apps=json.dumps(apps),\r\n tasks=json.dumps(tasks),\r\n locs=json.dumps(locs),\r\n show_locs=show_locs,\r\n top5_users_24_hours=top5_users_24_hours,\r\n top5_apps_24_hours=top5_apps_24_hours,\r\n stats=stats)", "def index():\r\n if current_user.is_authenticated():\r\n user_id = current_user.id\r\n else:\r\n user_id = 'anonymous'\r\n top_users = cached_users.get_leaderboard(current_app.config['LEADERBOARD'],\r\n user_id=user_id)\r\n\r\n return render_template('/stats/index.html', title=\"Community Leaderboard\",\r\n top_users=top_users)", "def fetch_teams_stats():\n teams_scraper = TeamStatsScraper(API_URL, API_HEADERS)\n result = 
teams_scraper.save_objects()\n return result", "def showUserStats(self) :\n self.getAllStats()\n self.getNbTotalLevelsPlayed()\n Scenario.messageAllStats(self.level_history[0].created_at)\n self.showBestStats()\n self.showWorstStats()\n self.showAverageStats()", "async def stats(self, ctx: commands.Context):\n users = len(self.bot.users)\n guilds = len(self.bot.guilds)\n\n embed = Embed(color=Color.dark_green())\n embed.add_field(name=\"Guilds\", value=guilds)\n embed.add_field(name=\"Users\", value=users)\n embed.set_thumbnail(url=ctx.guild.me.avatar_url)\n\n await ctx.send(embed=embed)", "def statistics():\n return render_template('statistics.html'), 200", "def stats_page():\n import alltheitems.stats\n return alltheitems.stats.index()", "def overview(request):\n LOGGER.info('Rendering WMT16 HIT overview for user \"{0}\".'.format(\n request.user.username or \"Anonymous\"))\n \n # Re-initialise random number generator.\n seed(None)\n \n # Collect available language pairs for the current user.\n language_codes = set([x[0] for x in LANGUAGE_PAIR_CHOICES])\n language_pairs = request.user.groups.filter(name__in=language_codes)\n \n # Collect available annotation projects for the current user.\n annotation_projects = request.user.project_set.all()\n \n hit_data = []\n total = [0, 0, 0]\n\n for language_pair in language_pairs:\n for annotation_project in annotation_projects:\n hit = _compute_next_task_for_user(request.user, annotation_project, language_pair)\n user_status = HIT.compute_status_for_user(request.user, annotation_project, language_pair)\n for i in range(3):\n total[i] = total[i] + user_status[i]\n \n if hit:\n # Convert status seconds back into datetime.time instances.\n for i in range(2):\n user_status[i+1] = seconds_to_timedelta(int(user_status[i+1]))\n \n hit_data.append(\n (hit.get_language_pair_display(), hit.get_absolute_url(),\n hit.hit_id, user_status, annotation_project)\n )\n \n # Convert total seconds back into datetime.timedelta instances.\n total[1] = seconds_to_timedelta(int(total[2]) / float(int(total[0]) or 1))\n \n # Remove microseconds to get a nicer timedelta rendering in templates.\n total[1] = total[1] - timedelta(microseconds=total[1].microseconds)\n \n total[2] = seconds_to_timedelta(int(total[2]))\n \n groups = _identify_groups_for_user(request.user)\n group = None\n if len(groups) > 1:\n LOGGER.debug(u'User \"{0}\" assigned to multiple annotation groups: {1}'.format(\n request.user.username or u'Anonymous',\n u', '.join([x.name for x in groups]))\n )\n group = groups[0]\n \n if group is not None:\n group_name = group.name\n group_status = HIT.compute_status_for_group(group)\n for i in range(2):\n group_status[i+1] = seconds_to_timedelta(int(group_status[i+1]))\n \n else:\n group_status = None\n group_name = None\n \n LOGGER.debug(u'\\n\\nHIT data for user \"{0}\":\\n\\n{1}\\n'.format(\n request.user.username or \"Anonymous\",\n u'\\n'.join([u'{0}\\t{1}\\t{2}\\t{3}'.format(*x) for x in hit_data])))\n\n # Compute admin URL for super users.\n admin_url = None\n if request.user.is_superuser:\n admin_url = reverse('admin:index')\n \n dictionary = {\n 'active_page': \"OVERVIEW\",\n 'hit_data': hit_data,\n 'total': total,\n 'group_name': group_name,\n 'group_status': group_status,\n 'admin_url': admin_url,\n 'title': 'WMT16 Dashboard',\n 'annotation_groups': [x.name for x in groups],\n }\n dictionary.update(BASE_CONTEXT)\n \n LOGGER.info(dictionary.values())\n \n return render(request, 'wmt16/overview.html', dictionary)", "def test_05_app_stats_index(self):\r\n # 
As Anonymou user\r\n url = \"/app/%s/stats\" % self.app_short_name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"App Stats page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"App Stats page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"App Stats page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_05_app_stats_index(self):\r\n # As Anonymou user\r\n url = \"/app/%s/stats\" % self.app_short_name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"App Stats page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"App Stats page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"App Stats page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "async def cmd_stats(self, ctx):\n\n guild = ctx.guild\n members = await guild.fetch_members().flatten()\n answer = f''\n embed = discord.Embed(title=\"Statistiken\",\n description=f'Wir haben aktuell {len(members)} Mitglieder auf diesem Server, verteilt auf folgende Rollen:')\n\n for role in guild.roles:\n if not self.get_key(role):\n continue\n role_members = role.members\n if len(role_members) > 0 and not role.name.startswith(\"Farbe\"):\n embed.add_field(name=role.name, value=f'{len(role_members)} Mitglieder', inline=False)\n\n no_role = 0\n for member in members:\n # ToDo Search for study roles only!\n if len(member.roles) == 1:\n no_role += 1\n\n embed.add_field(name=\"\\u200B\", value=\"\\u200b\", inline=False)\n embed.add_field(name=\"Mitglieder ohne Rolle\", value=str(no_role), inline=False)\n\n await ctx.channel.send(answer, embed=embed)", "def test_04_global_stats_index(self):\r\n # As Anonymou user\r\n url = \"/stats\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Stats page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Stats page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Stats page should be shown to admin 
users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def analyze_team_page(auth, db, id_team):\n session = login(auth)\n\n url = \"http://es.ibasketmanager.com/equipo.php?id=\" + id_team\n r = session.get(url)\n load_status = 0\n while load_status != 200:\n load_status = r.status_code\n \n print(show(\"profile\") + \" > Analizando perfil del equipo\")\n\n soup = BeautifulSoup(r.content, \"html.parser\")\n \n trs2 = soup.find_all(\"tr\", {\"class\": \"tipo2\"})\n\n id_user = trs2[0].find(\"a\")[\"href\"].split(\"=\")[1]\n streak = trs2[2].find_all(\"td\")[1].text\n club_seats = trs2[3].find_all(\"td\")[1].text.replace(\".\",\"\").strip()\n ranking = trs2[4].find_all(\"td\")[1].text.replace(\"Ranking\",\"\").strip()\n \n trs1 = soup.find_all(\"tr\", {\"class\": \"tipo1\"})\n fans = trs1[3].find_all(\"td\")[1].text.replace(\".\",\"\").strip()\n\n return [id_user, club_seats, fans, ranking, streak]", "def status(request):\n LOGGER.info('Rendering WMT16 HIT status for user \"{0}\".'.format(\n request.user.username or \"Anonymous\"))\n \n if not STATUS_CACHE.has_key('global_stats'):\n update_status(key='global_stats')\n \n if not STATUS_CACHE.has_key('language_pair_stats'):\n update_status(key='language_pair_stats')\n \n if not STATUS_CACHE.has_key('group_stats'):\n update_status(key='group_stats')\n \n if not STATUS_CACHE.has_key('user_stats'):\n update_status(key='user_stats')\n \n # Compute admin URL for super users.\n admin_url = None\n if request.user.is_superuser:\n admin_url = reverse('admin:index')\n \n dictionary = {\n 'active_page': \"STATUS\",\n 'global_stats': STATUS_CACHE['global_stats'],\n 'language_pair_stats': STATUS_CACHE['language_pair_stats'],\n 'group_stats': STATUS_CACHE['group_stats'],\n 'user_stats': STATUS_CACHE['user_stats'],\n 'clusters': RANKINGS_CACHE.get('clusters', []),\n 'admin_url': admin_url,\n 'title': 'WMT16 Status',\n }\n dictionary.update(BASE_CONTEXT)\n \n return render(request, 'wmt16/status.html', dictionary)", "def test_04_global_stats_index(self):\r\n # As Anonymou user\r\n url = \"/stats\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Stats page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Stats page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Stats page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def teams(teamid):\n team_summary = team.TeamSummary(teamid)\n team_summary_info = team_summary.info()\n team_season_ranks = team_summary.season_ranks()\n\n team_common_roster = team.TeamCommonRoster(teamid)\n roster = team_common_roster.roster()\n coaches = team_common_roster.coaches()\n\n season = team_summary_info[0][\"SEASON_YEAR\"]\n\n team_game_log = team.TeamGameLogs(teamid,\n season=season)\n team_games = team_game_log.info()\n\n playoffs_teamgamelogs = team.TeamGameLogs(teamid,\n season=season,\n season_type=\"Playoffs\")\n playoffs_team_games = playoffs_teamgamelogs.info()\n\n team_season = 
team.TeamSeasons(teamid)\n team_season_info = team_season.info()\n\n for i in team_season_info:\n if (i[\"YEAR\"] == season):\n current_season_info = i\n\n return render_template(\"teams.html\",\n title=team_summary_info[0][\"TEAM_CITY\"] + \" \" + team_summary_info[0][\"TEAM_NAME\"],\n teamid=teamid,\n team_summary_info=team_summary_info,\n team_season_ranks=team_season_ranks,\n season=season,\n team_games=team_games,\n playoffs_team_games=playoffs_team_games,\n team_season=team_season_info,\n roster=roster,\n coaches=coaches,\n current_season_info=current_season_info,\n team_img=TEAM_ID_DATA)", "def friend_stats(request):\n \n r = {}\n fb_ids = FacebookProfile.objects.all().values(\"facebook_id\") \n for u in FacebookProfile.objects.all():\n friends = Friends.objects.filter(facebook_id__in=fb_ids)\n num_friends = Friends.objects.filter(facebook_id=u.facebook_id, friends__in=friends).count()\n participant = OTNUser.objects.get(facebook_profile__facebook_id=u.facebook_id)\n r[u.facebook_id]=\"%s (%d): %d\"%(participant.name, participant.id, num_friends)\n\n return JSONHttpResponse(r)", "def stats(request):\n stats = []\n activeProject = None\n activity = get_activity()\n if activity:\n activeProject = activity.project\n projects = Project.objects.filter(company__id=1).order_by('name')\n for project in projects:\n isCurrent = (activeProject != None) and (project.id == activeProject.id)\n # If this is NOT the currently selected project...\n if (not isCurrent) or (not activeProject):\n # If this project is password protected, skip it.\n if (project.password != None) and (len(project.password) > 0):\n continue\n sessions = Session.objects.filter(project=project,\n endtime__gt=F('starttime') +\n timedelta(minutes=3))\n files = File.objects.filter(project=project)\n fileactions = Fileaction.objects.filter(file__in=files)\n events = Event.objects.filter(session__in=sessions)\n sQuery = {\n 'avg': 'SUM(TIMESTAMPDIFF(SECOND, starttime, endtime)) / COUNT(*)',\n 'min': 'MIN(TIMESTAMPDIFF(SECOND, starttime, endtime))',\n 'max': 'MAX(TIMESTAMPDIFF(SECOND, starttime, endtime))',\n 'count': 'COUNT(*)'\n }\n sessions = sessions.extra(select=sQuery)\n sessions = sessions.values_list('avg', 'min', 'max', 'count').get()\n session_average_duration = 0\n session_min_duration = 0\n session_max_duration = 0\n if sessions[0] is not None:\n session_average_duration = int(sessions[0])\n if sessions[1] is not None:\n session_min_duration = int(sessions[1])\n if sessions[2] is not None:\n session_max_duration = int(sessions[2])\n session_count = sessions[3]\n statsdata = {\n 'selected': isCurrent,\n 'name': project.name,\n 'session_average_duration': session_average_duration,\n 'session_min_duration': session_min_duration,\n 'session_max_duration': session_max_duration,\n 'session_count': session_count,\n 'file_count': files.count(),\n 'fileaction_count': fileactions.count(),\n 'event_count': events.count()\n }\n stats.append(statsdata)\n return render_to_response(\n 'stats.html',\n {\n 'stats': stats,\n 'tab': 'stats'\n },\n context_instance=RequestContext(request)\n )", "def view_team_page(request, team_pk):\n\t\n\tselected_team = ChallengeTeam.objects.get(pk = team_pk)\n\t\n\tusers = selected_team.team_members.all()\n\t\n\tteam_name = selected_team.team_name\n\t\n\tall_results = get_team_results(users, selected_team.challenge.schedule)\n\tteam_consistency = all_results[\"consistency\"]\n\tteam_completion = all_results[\"completion\"]\n\t\n\tmember_names = []\n\tfor usr in users:\n\t\tprint usr.first_name + \" \" + 
usr.last_name\n\t\tmember_names.append(usr.first_name + \" \" + usr.last_name)\n\t\t\n\tjoin_control = \"join\"\n\tif(request.user in selected_team.team_members.all()):\n\t\tjoin_control = \"leave\"\n\telif(selected_team.challenge.invite_only and not request.user in selected_team.invited.all()):\n\t\tjoin_control = \"invite\"\n\t\n\tcontext = RequestContext(request, { \"team_pk\" : team_pk, \"name\" : team_name, \"members\" : member_names, \"consistency\" : team_consistency, \"completion\" : team_completion, \"join_control\" : join_control, \"messages\" : messages })\n\treturn render_to_response(\"encourage/view_team.html\", context)", "def teamstats(team_id, conn, curr):\n \n title = \"Team Points Breakdown for Team {}\".format(team_id)\n cmd = \"\"\"\n SELECT user_id, event_id, team_id, score FROM PlayerStats\n WHERE team_id={}\n \"\"\".format(team_id)\n curr.execute(cmd)\n teamscores = curr.fetchall()\n player_dict = {}\n event_set = set()\n data = []\n for player in teamscores:\n user_id = player[0]\n event_id = player[1]\n score = player[3]\n if user_id in player_dict:\n player_dict[user_id][event_id] = score\n else:\n player_dict[user_id] = {event_id: score}\n event_set.add(event_id)\n\n color_index = 0\n for player, score in player_dict.items():\n scores = []\n for event_id in event_set:\n scores.append(player_dict[player][event_id])\n width = 0\n if len(event_set) == 1:\n data.append(\n Bar(\n x=['event {}'.format(str(event_id)) for event_id in list(event_set)],\n y=scores,\n name='Player ID {}'.format(player),\n width=.4,\n marker=dict(\n color=COLORS[color_index]\n )\n )\n )\n else:\n data.append(\n Bar(\n x=['event {}'.format(str(event_id)) for event_id in list(event_set)],\n y=scores,\n name='Player ID {}'.format(player),\n marker=dict(\n color=COLORS[color_index]\n )\n )\n )\n color_index += 1\n\n layout = Layout(\n barmode='stack',\n title=title,\n yaxis=dict(\n title='Points'\n ),\n xaxis=dict(\n title='Events'\n )\n )\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n return fig", "def team_tester(request):\n\n\t# Look for the team size entered by the user\n\tteam_size = int(request.GET.get('team_size', False))\n\n\t# If user has entered information...\n\tif team_size:\n\n\t\t# Get the rest of the information from the form\n\t\tscores_up = int(request.GET.get('scores_up', False))\n\t\tscores_count = int(request.GET.get('scores_count', False))\n\t\tsumstat = request.GET.get('sumstat', False)\n\t\ttime = request.GET.get('time', False)\n\t\tgymnast_list = []\n\t\tfor i in range(1, team_size+1):\n\t\t\tgymnast_search_id = \"gymnast_search\" + str(i)\n\t\t\tgymnast_list.append(request.GET.get(gymnast_search_id, False))\n\n\t\t# Set the date range \n\t\tnow = datetime.datetime.now()\n\t\tif time==\"year\":\n\t\t\tdate_range = [now-relativedelta(years=1), now]\n\t\telif time == \"season\":\n\t\t\tdate_range = [datetime.date(2019, 10, 13), now] # Since last world championships\n\t\telse:\n\t\t\tdate_range = [datetime.date(2016, 8, 21), now] # Since last olympics\n\n\t\t# Loop through the list of gymnasts and get scores\n\t\ttable_data = []\n\t\tfor gymnast in gymnast_list:\n\t\t\tgymnast = Gymnast.objects.get(name=gymnast)\n\t\t\tthis_gymnast_scores = []\n\t\t\tthis_gymnast_scores.append(gymnast)\n\t\t\tfor sub_event in [\"VT\", \"UB\", \"BB\", \"FX\"]:\n\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), event__in=Event.objects.filter(name=sub_event), score_num=1)\n\t\t\t\tif scores.count() > 
0:\n\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\telse:\n\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\ttable_data.append(this_gymnast_scores)\n\n\t\t# Select the scores that go up and the scores that count\n\t\tfor i in range(1, 5):\n\t\t\t# Get the list of all scores on this event\n\t\t\tevent_scores = [col[i] for col in table_data]\n\t\t\t# Get the sort order of these scores\n\t\t\tsort_order = np.argsort(np.argsort(event_scores)) # See https://github.com/numpy/numpy/issues/8757\n\t\t\tsort_order = team_size - 1 - sort_order\n\t\t\t# Replace each score with a tuple of the score and the class that we'll use for the td of each score\n\t\t\tfor j, row in enumerate(table_data):\n\t\t\t\t# For scores that count\n\t\t\t\tif sort_order[j] < scores_count:\n\t\t\t\t\ttable_data[j][i] = [table_data[j][i], \"counts\"]\n\t\t\t\telif sort_order[j] < scores_up:\n\t\t\t\t\ttable_data[j][i] = [table_data[j][i], \"up\"]\n\t\t\t\telse:\n\t\t\t\t\ttable_data[j][i] = [table_data[j][i], \"not_used\"]\n\n\t\t# Calculate total row\n\t\ttotal_row = [\"Team Total\", 0, 0, 0, 0]\n\t\tfor row in table_data:\n\t\t\tfor i in range(1, 5):\n\t\t\t\tif row[i][1] == \"counts\" and (not isinstance(row[i][0], str)):\n\t\t\t\t\ttotal_row[i] = total_row[i] + row[i][0]\n\t\ttable_data.append(total_row)\n\t\tteam_total = sum(total_row[1:5])\n\t\tprint(table_data)\n\telse:\n\t\tteam_size=5\n\t\tscores_up=4\n\t\tscores_count=3\n\t\tsumstat = \"avg\"\n\t\ttime = \"year\"\n\t\tgymnast_list = []\n\t\ttable_data = []\n\t\tteam_total = \"\"\n\n\n\n\tcontext = {\n\t\t'team_size': team_size,\n\t\t'scores_up': scores_up,\n\t\t'scores_count': scores_count,\n\t\t'sumstat': sumstat,\n\t\t'time': time,\n\t\t'gymnast_list': gymnast_list,\n\t\t'table_data': table_data,\n\t\t'team_total': team_total,\n\t}\n\n\treturn render(request, 'team_tester.html', context=context)", "def team_list(request):\n template = loader.get_template('team/team_list.html')\n teams_list = Team.objects.all().order_by('name')\n\n if not request.user.is_authenticated:\n team = None\n else:\n team = request.user.profile.team\n\n paginator = Paginator(teams_list, 6)\n\n page = request.GET.get('page')\n try:\n teams = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n teams = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n teams = paginator.page(paginator.num_pages)\n\n context = {\n 'teams': teams,\n 'team': team\n }\n\n return CustomHttpResponse.send(template, context, request)" ]
[ "0.72802764", "0.7093188", "0.6892792", "0.66899914", "0.65582514", "0.6528671", "0.64821774", "0.6452844", "0.64374906", "0.64060926", "0.63946265", "0.63593155", "0.63570726", "0.6330489", "0.6300333", "0.62758255", "0.6246533", "0.6213044", "0.6185886", "0.6148427", "0.6138208", "0.6134316", "0.613257", "0.6104232", "0.61014026", "0.6097455", "0.6097395", "0.6093304", "0.6079273", "0.6071337" ]
0.7581212
0
This view returns the current session parameters
def get_session_params(request): json_resp = {} usecase = request.session.get('usecase',None) language = request.session.get('language',None) institute = request.session.get('institute',None) annotation = request.session.get('mode',None) team_member = request.session.get('team_member',None) report_type = request.session.get('report_type',None) batch = request.session.get('batch',None) if batch is not None and report_type is not None and usecase is not None and language is not None and institute is not None and annotation is not None: json_resp['usecase'] = usecase json_resp['language'] = language json_resp['institute'] = institute json_resp['team_member'] = team_member json_resp['report_type'] = report_type json_resp['batch'] = batch if annotation == 'Human': json_resp['annotation'] = 'Manual' elif annotation == 'Robot': json_resp['annotation'] = 'Automatic' else: json_resp['usecase'] = '' json_resp['language'] = '' json_resp['institute'] = '' json_resp['batch'] = '' if User.objects.filter(profile='Admin').exists(): admin = User.objects.filter(profile='Admin') admin = admin.first() admin_name = admin.username json_resp['team_member'] = admin_name else: json_resp['team_member'] = 'Test' json_resp['annotation'] = '' json_resp['report_type'] = '' return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_params(self):\n \n return self.params[self.profile]", "def get_parameters(self):\n return self.context.params", "def SessionInfo(self):\r\n\t\treturn self._get_attribute('sessionInfo')", "def get(self):\n return self.params", "def get(self):\n return self._params", "def params(self):\n\t\treturn self.params_", "def get_params (self):\n return self.params", "def get_params (self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def myCurrentSetting(self):\n paramDict = self.getCurrentSetting()\n return paramDict", "def parameters(self):\n return self.vars", "def meta_params(request):\n return request.param", "def query_params(self):\n return self.request._request.GET", "def _get_current_session(self) -> Dict[str, Any]:\n return self._data[-1]", "def get_params(self):\n\n return self.params_", "def SessionInfo(self):\n return self._get_attribute('sessionInfo')", "def session(self):", "def _get_params(self):\r\n return self.k._get_params()", "def parameters(self):\n return self._params", "def request_vars(self):", "def getSession(self):\n return self.request.getSession()", "def get(request):\r\n return request.session.get('partial_pipeline')", "def getRequest(request):\n return request.session.get('openid_request')", "def session_info(session, params_table=None):\n cfg = session_conf(session, params_table=params_table)\n return cfg.session", "def get_params(self):", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters" ]
[ "0.6704671", "0.64286435", "0.624347", "0.6236312", "0.6195294", "0.6193571", "0.61749065", "0.61749065", "0.6170325", "0.6170325", "0.6170325", "0.6147238", "0.6137965", "0.61196494", "0.6095869", "0.6027674", "0.60054123", "0.5997175", "0.5986101", "0.5984785", "0.59830755", "0.59622806", "0.59527695", "0.59430975", "0.5918114", "0.59163487", "0.5904453", "0.5872135", "0.584239", "0.584239" ]
0.6681705
1
This view handles the GET and POST requests for LABELS ANNOTATION ACTION
def annotationlabel(request,action=None): username = request.session['username'] mode1 = request.session['mode'] auto_required = request.GET.get('ns_id', None) mode = NameSpace.objects.get(ns_id=mode1) # print('mode',mode1) usecase = request.session['usecase'] # language = request.GET.get('language',request.session['language']) type = 'labels' if request.method == 'GET' and action.lower() == 'user_labels': """GET request: given the report, the labels annotated by the user are returned""" language = request.GET.get('language', request.session['language']) user_get = request.GET.get('username',username) report_id = request.GET.get('report_id') report1 = Report.objects.get(id_report = report_id,language = language) # if auto_required == 'Robot': # mode = NameSpace.objects.get(ns_id=auto_required) if auto_required is not None: mode_1 = NameSpace.objects.get(ns_id=auto_required) else: mode_1 = mode json_dict = get_user_gt(user_get,mode_1,report1,language,'labels') return JsonResponse(json_dict,safe=False) elif request.method == 'GET' and action.lower() == 'all_labels': """ GET request: given the use case, all the labels associated to that usecase are returned. """ labels = AnnotationLabel.objects.filter(name=usecase).values('seq_number','label','annotation_mode') print(labels) json_dict = {} if len(labels) > 0: if mode1 == 'Human' or auto_required == 'Human': json_dict['labels'] = [] for el in labels: json_val = {} if 'Manual' in el['annotation_mode']: # if int(el['seq_number']) > count: # i primi 20 sono inseriti automaticamente json_val['label'] = (el['label']) json_val['seq_number'] = (el['seq_number']) json_dict['labels'].append(json_val) if mode1 == 'Robot' or auto_required == 'Robot': json_dict['labels'] = [] for el in labels: json_val = {} if 'Automatic' in el['annotation_mode']: json_val['label'] = (el['label']) json_val['seq_number'] = (el['seq_number']) json_dict['labels'].append(json_val) else: json_dict['labels'] = [] json_dict['labels'] = sorted(json_dict['labels'], key=lambda json: json['seq_number']) print(json_dict) return JsonResponse(json_dict) elif request.method == 'POST' and action.lower() == 'delete': """PSOT request: given the report, the labels the user annotated are removed together with the associated groundtruth.""" request_body_json = json.loads(request.body) report_id = request_body_json['report_id'] user = User.objects.get(username=username,ns_id=mode) language = request.GET.get('language', request.session['language']) report1 = Report.objects.get(id_report=report_id,language = language) if user is None or report1 is None: json_response = {'error': 'An error occurred getting parameters.'} return json_response to_del = Associate.objects.filter(username=user, ns_id=mode, id_report=report1, language=language) if mode1 == 'Human': try: with transaction.atomic(): if to_del.exists(): json_response = delete_all_annotation(to_del, user, report1,language, type,mode) else: json_response = {'msg':'nothing to do'} except Exception as error: print(error) json_response = {'error': 'An error occurred saving the ground_truth and the labels'} return JsonResponse(json_response) else: return JsonResponse(json_response) else: json_response = restore_robot_annotation(report1, 'labels', user) return JsonResponse(json_response) if request.method == 'POST' and action.lower() == 'insert': """PSOT request: given the report, the labels the user annotated are added in the database and a new JSON groundtruth is created. 
""" request_body_json = json.loads(request.body) report_id = request_body_json['report_id'] user = User.objects.get(username=username,ns_id=mode) language = request.GET.get('language', request.session['language']) report1 = Report.objects.get(id_report=report_id,language = language) if user is None or report1 is None: json_response = {'error': 'An error occurred getting the parameters.'} return JsonResponse(json_response) labels_to_save = request_body_json['labels'] # In this case the user manually deletes all the labels (NOT WITH CLEAR BUTTON) and saves. if len(labels_to_save) == 0 and mode1 == 'Human': """If there are not labels to save, if there is a ground truth saved in the database, this is removed, otherwise no action is performed. """ rows = Associate.objects.filter(username = user,ns_id=mode, id_report = report1, language = language) if rows.exists(): try: with transaction.atomic(): json_response = delete_all_annotation(rows,user,report1,language,type,mode) except Exception as error: print(error) json_response = {'error': 'An error occurred.'} return JsonResponse(json_response, status=500) else: return JsonResponse(json_response) else: json_response = {'message': 'Nothing to save.'} return JsonResponse(json_response) if len(labels_to_save) == 0 and mode1 == 'Robot': """ If there are not labels to save and the name space is Robot no action is performed and the already existing ground-truth is kept """ to_del = Associate.objects.filter(id_report=report1, language=language, username=user, ns_id=mode) # print('RESTORE') json_response = restore_robot_annotation(report1, 'labels',user) return JsonResponse(json_response) update = True """ Check if the user's labels she inserted are as many as the rows already present in the db: if they are not: update the annotation: the old annotation is replaced with the new one if they are: check if the labels existing are those inserted, in this case nothing is done, otherwise the current groundtruth is updated. """ existing_rows = Associate.objects.filter(username = user,ns_id=mode, id_report =report1,language =language) if existing_rows.exists(): if existing_rows.count() == len(labels_to_save): for label in labels_to_save: label1 = AnnotationLabel.objects.get(name=usecase, label=label['label'], seq_number=label['seq_number']) if not Associate.objects.filter(username=user,ns_id=mode, seq_number=label1.seq_number, label=label1, id_report=report1, language=language).exists(): update = True break else: update = False if update == True: try: with transaction.atomic(): # Remove all the existing labels inserted by the user for that report. 
The existing ground truth is kept untile the deletion is successful to_del = Associate.objects.filter(username=user,ns_id=mode, id_report=report1,language = language) delete_all_annotation(to_del,user,report1,language,type,mode) json_resp_labels = update_annotation_labels(labels_to_save,usecase,user,report1,language,mode) jsonDict = serialize_gt(type, usecase, username, report_id,language,mode) GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language = language, gt_json=jsonDict, gt_type=type,insertion_time=Now()) except (Exception) as error: print(error) print('rolled back') json_response = {'error': 'An error occurred saving the ground_truth ' 'and the labels, the transaction rolledback'} return JsonResponse(json_response) else: return JsonResponse(json_resp_labels) else: if mode1 == 'Human': if not GroundTruthLogFile.objects.filter(gt_type='labels', username=user, ns_id=mode, id_report=report1, language=language).exists(): js = serialize_gt('labels', usecase, username, report1.id_report, language, mode) GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user, ns_id=mode, id_report=report1, language=language, gt_type='labels') ass = Associate.objects.filter(username=user, id_report=report1, language=language, ns_id=mode).values('label', 'seq_number') for el in ass: lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number']) Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number, id_report=report1, language=language).delete() Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number, insertion_time=Now(), id_report=report1, language=language) json_response = {'message': 'ok'} else: json_response = {'message': 'no changes detected'} return JsonResponse(json_response) elif mode1 == 'Robot': """ In this section the name space Robot is handled: If the user is in the AUTOMATIC MODE and the labels she inserts are those annotated by the algorithm, this means that she agrees with the annotation of the Robot user. 
The annotation does not change, only the insertion time is changed.""" try: with transaction.atomic(): # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina user_robot = User.objects.get(username='Robot_user', ns_id=mode) gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode, id_report=report1, language=language, gt_type='labels') gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1, language=language, gt_type='labels') if gt_robot.count() == 1 and not gt.exists(): # if gt_robot[0].insertion_time == gt[0].insertion_time: js = serialize_gt('labels', usecase, username, report1.id_report, language, mode) GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1, language=language, gt_type='labels').delete() GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user, ns_id=mode, id_report=report1, language=language, gt_type='labels') ass = Associate.objects.filter(username=user, id_report=report1, language=language, ns_id=mode).values('label', 'seq_number') for el in ass: lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number']) Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number, id_report=report1, language=language).delete() Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number, insertion_time=Now(), id_report=report1, language=language) except Exception as error: print(error) print('rolled back') json_response = {'error': 'An error occurred updating labels dates'} return JsonResponse(json_response) else: json_response = {'message': 'dates updated'} return JsonResponse(json_response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label():\n\n if request.method == \"GET\":\n return render_template(\"/label.html\")\n\n else:\n # initialise the variables from the hidden html form input\n type = request.form.get(\"type\")\n url = request.form.get(\"url\")\n thumb = request.form.get(\"thumb\")\n\n # Authorization header to be embedded into the url \n headers = {\n 'Authorization': 'Discogs token=mqjXUBBzjnqrjUkKFIrOPAmlEZsGoDXjkRZgnRIR'\n }\n\n # search the database for label information\n labels = requests.get(\"%s\" % url, headers=headers)\n label = labels.json()\n\n # set variable if user is selecting pagination\n goto = request.form.get(\"goto\")\n\n if goto == None:\n\n # search the database for labels releases\n releases = requests.get(\"%s/releases?per_page=50\" % url, headers=headers)\n release = releases.json()\n\n # retreiving useful data\n data = release[\"releases\"]\n pagination = release[\"pagination\"]\n pages = pagination[\"pages\"]\n page = pagination[\"page\"]\n \n\n return render_template(\"/label.html\", label=label, data=data, labelThumb=thumb, page=page, pages=pages, pagination=pagination, type=type, url=url, thumb=thumb)\n\n else:\n\n # search the database for artists releases goto page\n releases = requests.get(\"%s\" % goto, headers=headers)\n release = releases.json()\n\n # retreiving useful data\n data = release[\"releases\"]\n pagination = release[\"pagination\"]\n pages = pagination[\"pages\"]\n page = pagination[\"page\"]\n \n\n return render_template(\"/label.html\", label=label, data=data, labelThumb=thumb, page=page, pages=pages, pagination=pagination, type=type, url=url, thumb=thumb)", "def labels(self, request, *args, **kwargs):\n http_status = status.HTTP_400_BAD_REQUEST\n # pylint: disable=attribute-defined-outside-init\n self.object = instance = self.get_object()\n\n if request.method == \"POST\":\n add_tags_to_instance(request, instance)\n http_status = status.HTTP_201_CREATED\n\n tags = instance.tags\n label = kwargs.get(\"label\")\n\n if request.method == \"GET\" and label:\n data = [tag[\"name\"] for tag in tags.filter(name=label).values(\"name\")]\n\n elif request.method == \"DELETE\" and label:\n count = tags.count()\n tags.remove(label)\n\n # Accepted, label does not exist hence nothing removed\n http_status = (\n status.HTTP_200_OK\n if count > tags.count()\n else status.HTTP_404_NOT_FOUND\n )\n\n data = list(tags.names())\n else:\n data = list(tags.names())\n\n if request.method == \"GET\":\n http_status = status.HTTP_200_OK\n\n setattr(self, \"etag_data\", data)\n\n return Response(data, status=http_status)", "def post_label():\n label_id = dao.set_label(id=str(uuid.uuid4()),\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def dispatch_request(request):\n if request.method == 'GET' and 'id' in request.GET:\n return render(request, 'engine/annotation.html', {\n 'js_3rdparty': JS_3RDPARTY.get('engine', []),\n 'status_list': [str(i) for i in StatusChoice]\n })\n else:\n return redirect('/dashboard/')", "def get_label(domain, pathtype, method):\n verb = LABELS[method]\n if method == 'POST' or pathtype != 'resource':\n noun = capp.config['DOMAIN'][domain]['item_title']\n article = 'a'\n else:\n noun = domain\n article = 'all'\n return '{0} {1} {2}'.format(verb, article, noun)", "def requestSubmitted(request):", "def render_POST(self, request):", "def post(self, request):\n pass", "def new_label(self, context, payload):\n\n labels = GmailActions.labels(context)['labels']\n label_id = \"\"\n\n for label in 
labels:\n if label['name'] == payload['name']:\n label_id = label['id']\n break\n\n access_token = util.get_access_token(context['headers'])\n url = util.get_url(context) + f\"labels/{label_id}\"\n response = util.rest(\"GET\", url, access_token)\n\n if response.status_code > 400:\n raise Exception(\"Error \", response.text)\n\n return json.loads(response.text)", "def index():\n if(app.my_config == None):\n app.my_config = set_config()\n cfg = app.my_config\n repos = []\n repos = get_config_repos(cfg)\n app.my_token = get_tkn(cfg)\n app.my_secret = get_secret(cfg)\n\n r = request.method\n\n #GET METHOD\n if r == \"GET\":\n #return get_info(repos)\n return flask.render_template('index.html', repos=repos)\n\n #POST METHOD\n elif r == \"POST\":\n if check_signature(get_secret(cfg), request):\n if get_event(request) == \"ping\":\n return \"ok\"\n elif get_event(request) == \"label\":\n if(app.my_session == None):\n app.my_session = requests.Session()\n app.my_session.headers = {'User-Agent': 'Python'}\n def token_auth(req):\n req.headers['Authorization'] = 'token ' + app.my_token\n return req\n app.my_session.auth = token_auth\n session = app.my_session\n\n repo = get_repo_name(request)\n if(repo in repos):\n action = get_action(request)\n name = get_lname(request)\n color = get_lcolor(request)\n\n if(name != app.last_label or action != app.last_action):\n app.last_label = name\n app.last_action = action\n # CREATED\n if (action == 'created'):\n return create_label(name, color, repos, session, repo)\n # EDITED\n if (action == 'edited'):\n old_name = get_old_name(request)\n return edit_label(old_name, name, color, repos, session, repo)\n # DELETED\n if (action == 'deleted'):\n return delete_label(name, repos, session, repo)\n else:\n return \"OK\"\n else:\n code = 400\n msg = 'BAD REQUEST'\n return msg, code\n else:\n code = 401\n msg = 'UNAUTHORIZED'\n return msg, code", "def run(request):\n try:\n # ensure the user has used a POST request\n if request.method == \"POST\":\n # prepare data for prediction\n # note: we expect an image of size 28x28 here.\n # TODO: add handling of images that are not 28x28, either resize or deny\n try:\n data = np.asarray(Image.open(io.BytesIO(request.get_data(False)))).reshape(-1, 28, 28)\n except UnidentifiedImageError:\n raise ValueError(\n \"The provided image data could not be read. Ensure that you provide a valid image, eg. 
in jpeg or \"\n \"png format.\"\n )\n\n # do prediction\n prediction_confidences = neural_network.predict(data)\n predicted_label_index = np.argmax(prediction_confidences)\n predicted_label = labels[predicted_label_index]\n confidence = prediction_confidences[0][predicted_label_index]\n\n # return result\n return AMLResponse(\n {\"predicted_label\": predicted_label, \"confidence\": str(confidence)}, status_code=200, json_str=True,\n )\n else:\n raise Exception(\"This service supports POST requests only.\")\n\n except Exception as exception:\n return AMLResponse(\n {\"error\": repr(exception), \"traceback\": traceback.format_exc()}, status_code=500, json_str=True,\n )", "def label_new(request):\n if request.method == 'POST':\n form = NewLabelForm(request.POST)\n\n if form.is_valid():\n label = form.save()\n messages.success(request, 'Label successfully created.')\n return HttpResponseRedirect(reverse('label_main', args=[label.id]))\n else:\n messages.error(request, 'Please correct the errors below.')\n else:\n form = NewLabelForm()\n\n return render_to_response('annotations/label_new.html', {\n 'form': form,\n },\n context_instance=RequestContext(request)\n )", "def on_label(self, payload):\n pass", "def label_list(request):\n\n labels = Label.objects.all().order_by('group__id', 'name')\n\n return render_to_response('annotations/label_list.html', {\n 'labels': labels,\n },\n context_instance=RequestContext(request)\n )", "def create_text_classification_record_controller(self, request):\n try:\n logging.info(\n \"executing create_text_classification_record_controller function\"\n )\n create_text_classification_record_request = request.dict(exclude_none=True)\n\n project_flow_record = self.CRUDProjectFlow.read_by_model_id(\n model_id=create_text_classification_record_request.get(\"model_uri\")\n )\n create_label_studio_task_request = {\n \"data\": {\"text\": request.data},\n \"is_labeled\": False,\n \"project\": project_flow_record.get(\"annotation_project_id\"),\n }\n task_id = self.get_task_id(request=create_label_studio_task_request)\n creat_annotation_url = (\n f\"{self.create_label_studio_task_url}/{task_id}/annotations/\"\n )\n if create_text_classification_record_request.get(\"ground_truth\"):\n create_annotation_request = {\n \"result\": [\n {\n \"value\": {\n \"choices\": [\n create_text_classification_record_request.get(\n \"ground_truth\"\n )\n ]\n },\n \"from_name\": \"sentiment\",\n \"to_name\": \"text\",\n \"type\": \"choices\",\n }\n ],\n \"ground_truth\": True,\n }\n else:\n create_annotation_request = {\n \"result\": [\n {\n \"value\": {\n \"choices\": [\n create_text_classification_record_request.get(\n \"inferred_value\"\n )\n ]\n },\n \"from_name\": \"sentiment\",\n \"to_name\": \"text\",\n \"type\": \"choices\",\n }\n ],\n \"ground_truth\": False,\n }\n annotation_response, status_code = APIInterface.post(\n route=creat_annotation_url,\n data=create_annotation_request,\n headers=self.header,\n )\n if status_code == 201:\n crud_request = self.create_data_monitoring_crud_request(\n request=create_text_classification_record_request,\n task_id=task_id,\n inferred_results=create_annotation_request,\n )\n self.CRUDDataMonitoring.create(**crud_request)\n return {\n \"annotation_id\": annotation_response.get(\"id\"),\n \"task_id\": task_id,\n }\n else:\n raise Exception({\"status\": \"Data creation failed\"})\n except Exception as error:\n logging.error(\n f\"Error in create_inferred_data_record_controller function: {error}\"\n )\n raise error", "def 
select_ind_sentence(request):\n global results\n if request.method == \"POST\":\n\n return _start_analysis(request)\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )", "def label_main(request, label_id):\n\n label = get_object_or_404(Label, id=label_id)\n\n sources_with_label = Source.objects.filter(labelset__labels=label).order_by('name')\n visible_sources_with_label = [s for s in sources_with_label if s.visible_to_user(request.user)]\n\n # Differentiate between the sources that the user is part of\n # and the other public sources. Sort the source list accordingly, too.\n sources_of_user = Source.get_sources_of_user(request.user)\n\n source_types = []\n for s in visible_sources_with_label:\n if s in sources_of_user:\n source_types.append('mine')\n else:\n source_types.append('public')\n\n visible_sources_with_label = zip(source_types, visible_sources_with_label)\n visible_sources_with_label.sort(key=lambda x: x[0]) # Mine first, then public\n\n # Example patches.\n # TODO: don't hardcode the patch path\n example_annotations = Annotation.objects.filter(label=label, image__source__visibility=Source.VisibilityTypes.PUBLIC).exclude(user=get_robot_user()).order_by('?')[:5]\n patches = [dict(\n annotation=a,\n fullImage=a.image,\n source=a.image.source,\n patchPath=\"data/annotations/\" + str(a.id) + \".jpg\",\n row=a.point.row,\n col=a.point.column,\n pointNum=a.point.point_number,\n )\n for a in example_annotations]\n\n for p in patches:\n generate_patch_if_doesnt_exist(p['patchPath'], p['annotation'])\n\n\n return render_to_response('annotations/label_main.html', {\n 'label': label,\n 'visible_sources_with_label': visible_sources_with_label,\n 'patches': patches,\n },\n context_instance=RequestContext(request)\n )", "def get_labels():\n json_request = request.json # get the json from the server\n keys = sort_keys(json_request.keys()) # sort the keys (i.e. 
the token ids)\n labels = []\n for k in keys:\n # get the labels that the user input to the UI\n val = (json_request[k]['text'], json_request[k]['value'])\n labels.append(val)\n return labels", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "async def new_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])\n if issue_number_found:\n status = create_success_status(issue_number_found)\n else:\n status = TRIVIAL_STATUS\n await _post_status(event, gh, status)", "def index(request):\n if request.method == \"POST\":\n form = ImageForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n\n # get the current instance object to display in template\n img_obj = form.instance\n\n pred = model_predict(img_obj.image.path, model)\n pred_class = decode_predictions(pred, top=1)\n result = str(pred_class[0][0][1])\n\n\n return render(request, 'ImgClassifier/homePg.html',context={\n 'form':form,\n 'img_obj':img_obj,\n 'prediction_label': result\n })\n\n else: # on GET Request\n form = ImageForm()\n return render(request, 'ImgClassifier/homePg.html',context={\n 'form':form,\n })", "def get(self, request):\n pass", "def get(self, request, *args, **kwargs):\n if kwargs.get(\"label\", \"\") not in self.request.session.get(\"objects\", {}):\n return HttpResponseRedirect(\"/create/\")\n\n return super().get(request, *args, **kwargs)", "def post(self):", "def post(self, request):\n return super().post(request)", "def post(self, request):\n return super().post(request)", "def _on_articles_labels(self, evt=None):\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # get available labels\n labels = self._library.search(core.Query(\"\", core.Label.NAME))\n \n # set labels\n dlg = LabelsView(self, articles, labels)\n response = dlg.ShowModal()\n dlg.Destroy()\n \n # check response\n if response != wx.ID_OK:\n return\n \n # update library\n for article in articles:\n self._library.update(article)\n \n # refresh collections view\n self._collections_view.UpdateLabelsCollections()\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()\n \n # re-select articles\n self._articles_view.SetSelectedArticles(articles)", "def create_ner_record_controller(self, request):\n try:\n logging.info(\"executing create_object_detection_record_controller function\")\n create_ner_record_request = request.dict(exclude_none=True)\n\n project_flow_record = self.CRUDProjectFlow.read_by_model_id(\n model_id=create_ner_record_request.get(\"model_uri\")\n )\n create_label_studio_task_request = {\n \"data\": {\"text\": request.data},\n \"is_labeled\": False,\n \"project\": project_flow_record.get(\"annotation_project_id\"),\n }\n task_id = self.get_task_id(request=create_label_studio_task_request)\n creat_annotation_url = (\n f\"{self.create_label_studio_task_url}/{task_id}/annotations/\"\n )\n if request.ground_truth:\n final_result = [\n result_data.update(\n {\n \"type\": 
\"labels\",\n \"to_name\": \"text\",\n \"from_name\": \"label\",\n }\n )\n for result_data in request.ground_truth\n ]\n create_annotation_request = {\n \"result\": final_result,\n \"ground_truth\": True,\n }\n else:\n final_result = [\n result_data.update(\n {\n \"type\": \"labels\",\n \"to_name\": \"text\",\n \"from_name\": \"label\",\n }\n )\n for result_data in request.inferred_value\n ]\n create_annotation_request = {\n \"result\": final_result,\n \"ground_truth\": False,\n }\n annotation_response, status_code = APIInterface.post(\n route=creat_annotation_url,\n data=create_annotation_request,\n headers=self.header,\n )\n if status_code == 201:\n crud_request = self.create_data_monitoring_crud_request(\n request=create_ner_record_request,\n task_id=task_id,\n inferred_results=create_annotation_request,\n )\n self.CRUDDataMonitoring.create(**crud_request)\n return {\n \"annotation_id\": annotation_response.get(\"id\"),\n \"task_id\": task_id,\n }\n else:\n raise Exception({\"status\": \"Data creation failed\"})\n except Exception as error:\n logging.error(\n f\"Error in create_inferred_data_record_controller function: {error}\"\n )\n raise error", "def labelset_main(request, source_id):\n\n source = get_object_or_404(Source, id=source_id)\n\n labelset = source.labelset\n if labelset.isEmptyLabelset():\n return HttpResponseRedirect(reverse('labelset_new', args=[source.id]))\n\n labels = labelset.labels.all().order_by('group__id', 'name')\n\n\n return render_to_response('annotations/labelset_main.html', {\n 'source': source,\n 'labelset': labelset,\n 'labels': labels,\n },\n context_instance=RequestContext(request)\n )" ]
[ "0.66144353", "0.63546795", "0.63470924", "0.60510325", "0.5695163", "0.5651894", "0.5647814", "0.5625091", "0.5582534", "0.5574167", "0.5572838", "0.55621165", "0.5510184", "0.5436197", "0.54305923", "0.5367623", "0.5344053", "0.532556", "0.5323372", "0.526705", "0.5223831", "0.5216614", "0.5196634", "0.51948404", "0.51817006", "0.5181182", "0.5181182", "0.5157888", "0.51312757", "0.5124402" ]
0.7058827
0
This view handles the GET and POST requests to insert, delete, and get concepts.
def contains(request, action=None): username = request.session.get('username', False) mode1 = request.session.get('mode', False) mode = NameSpace.objects.get(ns_id=mode1) error_json = {"Error": "No user authenticated"} if (username): response_json = {} if request.method == 'GET': """GET request: it returns a list of concepts the user inserted about that report """ report = request.GET.get('report_id') language = request.GET.get('language', request.session['language']) user_get = request.GET.get('username',username) report1 = Report.objects.get(id_report=report, language = language) auto_required = request.GET.get('ns_id',None) # if auto_required == 'Robot': # mode = NameSpace.objects.get(ns_id=auto_required) if auto_required is not None: mode_1 = NameSpace.objects.get(ns_id=auto_required) else: mode_1 = mode response_json = get_user_gt(user_get,mode_1,report1,language,'concepts') # print('concetti',response_json) return JsonResponse(response_json) elif request.method == 'POST' and action.lower() == 'insert': """ POST request: insert new concepts in the database""" request_body_json = json.loads(request.body) concepts_list = request_body_json['concepts_list'] language = request_body_json['language'] report = request_body_json['report_id'] report1 = Report.objects.get(id_report=report) username = request.session.get('username', False) user1 = User.objects.get(username=username,ns_id=mode) usecase = request.session.get('usecase',False) type = 'concepts' if report is not None and concepts_list is not None: user = username count = 0 already_inserted_list = [] try: with transaction.atomic(): for concept in concepts_list: concept = json.loads(concept) concept_url = concept['concept_url'] semantic_area = concept['semantic_area'] if not check_concept_report_existance(report, concept_url, user,mode, semantic_area,language): # Insert a new record if populate_contains_table(report, concept_url, user,mode, semantic_area,language): count += 1 else: error_json = {"error message": "insert in table 'contains' failed"} return JsonResponse(error_json) else: already_inserted_list.append(concept) jsonDict = serialize_gt(type, usecase, username, report,language,mode) GroundTruthLogFile.objects.create(username=user1, id_report=report1,ns_id=mode, language = language, gt_json=jsonDict, gt_type=type, insertion_time=Now()) except Exception as error: print(error) print('rolled back') if count == len(concepts_list): response_json = {"message": "All concepts inserted successfully"} else: response_json = {"message": "Some concepts have been already inserted: ["+ ", ".join(already_inserted_list)+"]"} else: response_json = {"error": "Missing data"} elif request.method == 'POST' and action.lower() == 'update': """ POST request: update the concepts that already exist in the database, a new ground truth is created if needed.""" request_body_json = json.loads(request.body) concepts_list = request_body_json['concepts_list'] report = request_body_json['report_id'] language = request_body_json['language'] report1 = Report.objects.get(id_report = report,language = language) username = request.session.get('username',False) user1 = User.objects.get(username = username,ns_id=mode) usecase = request.session.get('usecase',False) type = 'concepts' if report is not None and concepts_list is not None: user = username count = 0 rows = Contains.objects.filter(username = user1,ns_id=mode, id_report = report1, language = language) if rows.exists() and len(concepts_list) == 0: if mode1 == 'Human': with transaction.atomic(): 
json_response=delete_contains_record(report1, language, None,mode, user, None) return JsonResponse(json_response,safe=False) else: # json_response = {'message': 'Robot mode, rows can not be deleted'} print('RESTORE') json_response = restore_robot_annotation(report1,'concepts',user1) return JsonResponse(json_response) elif not rows.exists() and len(concepts_list) == 0: json_response = {'message':'nothing to do'} return JsonResponse(json_response) if len(concepts_list) == 0: json_response = {'message': 'Nothing to do'} return JsonResponse(json_response) update = True if rows.exists(): if rows.count() == len(concepts_list): for concept in concepts_list: concept_url = concept['concept_url'] semantic_area = concept['semantic_area'] concept_model = Concept.objects.get(concept_url = concept_url) concepts = Contains.objects.filter(name=semantic_area, username = user1,ns_id=mode, id_report = report1, language = language, concept_url = concept_model) if concepts.exists(): update = False else: update = True break # Delete previous data for the specified user and report if update == True: try: with transaction.atomic(): js = delete_contains_record(report1,language, None, mode,user, None) # Insert new data for concept in concepts_list: # Insert a new record concept_url = concept['concept_url'] semantic_area = concept['semantic_area'] if populate_contains_table(report, concept_url, user, mode,semantic_area,language): count += 1 else: error_json = {"error message": "insert in table 'contains' failed"} return JsonResponse(error_json) jsonDict = serialize_gt(type, usecase, username, report,language,mode) if GroundTruthLogFile.objects.filter(username=user1, ns_id=mode,id_report=report1,language = language, gt_type=type).exists(): GroundTruthLogFile.objects.filter(username=user1,ns_id=mode, id_report=report1, language=language,gt_type=type).delete() GroundTruthLogFile.objects.create(username=user1,ns_id=mode, id_report=report1, gt_json=jsonDict,language = language, gt_type=type, insertion_time=Now()) except Exception as error: print(error) print('rolled back') if count == len(concepts_list): response_json = {"message": "Update successfull"} else: response_json = {"error": "Update unsuccessfull"} else: try: with transaction.atomic(): if mode1 == 'Human': if not GroundTruthLogFile.objects.filter(gt_type='concepts', username=user, ns_id=mode, id_report=report1, language=language).exists(): js = serialize_gt('concepts', usecase, username, report1.id_report, language, mode) GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user1, ns_id=mode, id_report=report1, language=language, gt_type='concepts') ass = Contains.objects.filter(username=user1, id_report=report1, language=language, ns_id=mode).values('name', 'concept_url') for el in ass: sem = SemanticArea.objects.get(name=el['name']) concept_u = Concept.objects.get(concept_url=el['concept_url']) Contains.objects.filter(username=user1, id_report=report1, language=language, ns_id=mode, name=sem, concept_url=concept_u).delete() Contains.objects.create(username=user1, ns_id=mode, id_report=report1, language=language, name=sem, concept_url=concept_u, insertion_time=Now()) json_response = {'message': 'no changes detected'} return JsonResponse(json_response) elif mode1 == 'Robot': user_robot = User.objects.get(username='Robot_user', ns_id=mode) gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode, id_report=report1, language=language, gt_type='concepts') # in questa sezione solo se la gt è uguale a prima, l'utente 
acconsente alla gt della macchina gt = GroundTruthLogFile.objects.filter(username=user1, ns_id=mode, id_report=report1, language=language, gt_type='concepts') if gt_robot.count() == 1 and not gt.exists(): # if gt_robot[0].insertion_time == gt[0].insertion_time: # js = gt[0].gt_json js = serialize_gt('concepts', usecase, username, report1.id_report, language, mode) GroundTruthLogFile.objects.filter(username=user1, ns_id=mode, id_report=report1, language=language, gt_type='concepts').delete() GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user1, ns_id=mode, id_report=report1, language=language, gt_type='concepts') ass = Contains.objects.filter(username=user1, id_report=report1, language=language, ns_id=mode).values('name', 'concept_url') for el in ass: sem = SemanticArea.objects.get(name=el['name']) concept_u = Concept.objects.get(concept_url=el['concept_url']) Contains.objects.filter(username=user1, id_report=report1, language=language, ns_id=mode, name=sem, concept_url=concept_u).delete() Contains.objects.create(username=user1, ns_id=mode, id_report=report1, language=language, name=sem, concept_url=concept_u, insertion_time=Now()) except Exception as error: print(error) json_response = {'error': 'An error occurred trying to save your ground truth.'} return JsonResponse(json_response, status=500) else: json_response = {'message': 'dates updated'} return JsonResponse(json_response) else: response_json = {"error": "Missing data"} elif request.method == 'POST' and action.lower() == 'delete': """ POST request: delete the concepts the user associated to a specific report """ request_body_json = json.loads(request.body) report = request_body_json['report_id'] language = request_body_json['language'] username = request.session.get('username', False) user1 = User.objects.get(username=username,ns_id=mode) report1 = Report.objects.get(id_report = report,language = language) with transaction.atomic(): if report is not None and language is not None: if mode1 == 'Human': response_json = delete_contains_record(report, language, None,mode, user1, None) else: print('RESTORE') response_json = restore_robot_annotation(report1, 'concepts', user1) else: response_json = {"Error": "Missing data"} return JsonResponse(response_json) else: return JsonResponse(error_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conc_view(request):\n\n usecase = request.session['usecase']\n mode = request.session['mode']\n auto_required = request.GET.get('ns_id',None)\n jsonDict = {}\n concepts = {}\n notEmpty = False\n jsonDict['concepts'] = []\n if mode == 'Human' or auto_required == 'Human':\n cursor = connection.cursor()\n cursor.execute(\"SELECT DISTINCT b.name FROM belong_to as b inner join concept_has_uc as ch on ch.concept_url = b.concept_url inner join concept as c on c.concept_url = ch.concept_url where ch.name = %s AND annotation_mode in %s\",[str(usecase),('Manual','Manual and Automatic')])\n ar = cursor.fetchall()\n areas = []\n for el in ar:\n areas.append(el[0])\n for area in areas:\n name = area\n concepts[name] = []\n concepts_list_final = get_concepts_by_usecase_area(usecase, name,'Human')\n for c in concepts_list_final:\n if c not in concepts[name]:\n concepts[name].append(c)\n notEmpty = True\n if notEmpty == True:\n jsonDict['concepts'] = concepts\n\n elif mode == 'Robot' or auto_required == 'Robot':\n with transaction.atomic():\n with connection.cursor() as cursor:\n\n areas = ['Diagnosis', 'Test', 'Procedure', 'Anatomical Location']\n for area in areas:\n concepts[area] = get_concepts_by_usecase_area(usecase, area, 'Robot')\n if len(concepts[area]) > 0:\n notEmpty = True\n if notEmpty == True:\n jsonDict['concepts'] = concepts\n print(concepts)\n\n return JsonResponse(jsonDict)", "def PostConcepts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get(self, request, *args, **kwargs):\n organization_form = organization.forms.OrganizationForm()\n user_form = organization.forms.UserForm()\n # print(pet_form, pet_video_form)\n context = {'organization_form': organization_form,'user_form': user_form}\n context.update(django.core.context_processors.csrf(request))\n return django.shortcuts.render_to_response('organization/organization_insert.html', context)", "def hello_post(request):\n return hello_put(request)", "def retrieve(self, request, vocab, collection, pk=None, format=None):\n pk = \"{}/{}/{}\".format(vocab, collection, pk)\n queryset = Concept.objects.all()\n concept = get_object_or_404(queryset, pk=pk)\n serializer = ConceptSerializer(concept, context={\"request\": request})\n return Response(serializer.data)", "def test_list_concepts_get(self):\n response = self.client.open(\n '/list_concepts',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_iodu_get_concept(self):\n\n # Create a new `Concept` record.\n obj_id, refr = create_concept(dal=self.dal)\n\n self.assertEqual(obj_id, 1)\n\n # Retrieve the new record.\n obj = self.dal.get(Concept, obj_id) # type: Concept\n\n # Assert that the different fields of the record match.\n self.assertEqual(obj.concept_id, obj_id)\n self.assertEqual(obj.ui, refr[\"ui\"])\n self.assertEqual(obj.name, refr[\"name\"])\n self.assertEqual(obj.casn1_name, refr[\"casn1_name\"])\n self.assertEqual(obj.registry_number, refr[\"registry_number\"])\n self.assertEqual(obj.scope_note, refr[\"scope_note\"])\n self.assertEqual(\n obj.translators_english_scope_note,\n refr[\"translators_english_scope_note\"],\n )\n self.assertEqual(\n obj.translators_scope_note,\n refr[\"translators_scope_note\"],\n )", "def PostVocabConcepts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise 
NotImplementedError('Method not implemented!')", "def get(self):\n self.post()", "def get(self):\n self.post()", "def snippet_detail(request, pk):\n try:\n snippet = Quickstart.objects.get(pk=pk)\n except Quickstart.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = QuickstartSerializer(snippet)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = QuickstartSerializer(snippet, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n quickstart.delete()\n return HttpResponse(status=204)", "def list(self, request, vocab, collection, format=None):\n # What we really want is the collection, which contains a list of\n # concepts\n return redirect(\"/collections/{}/{}\".format(vocab, collection))", "def reg_detail(request, pk):\n try:\n snippet = Registration.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = RegistrationSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = RegistrationSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def PostConceptRelations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_presence_examode_concepts(request):\n\n json_resp = {}\n json_resp['concepts'] = get_presence_exa_concepts()\n json_resp['labels'] = get_presence_exa_labels()\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_detail(self, request, **kwargs):\n\t\tself.method_check(request, allowed=['get'])\n\t\tdata = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\t\tdoc_node = Document.objects.get(CTS=data.get(\"CTS\"))\t\n\t\treturn self.create_response(request, data)", "def post(self, request, coll_id):\n # Note: in many cases, this function redirects to a URI that displays a form\n # to gather further details of values to update. Values returned by\n # POST to this view are then passed as URI segments in the GET request\n # that renders the form. 
Maybe there's an easier way than all this \n # URI-wrangling?\n redirect_uri = None\n http_response = None\n continuation_next, continuation_here = self.continuation_urls(\n request.POST,\n None # self.view_uri(\"AnnalistSiteView\")\n )\n if \"close\" in request.POST:\n redirect_uri = continuation_next.get('continuation_url', self.view_uri(\"AnnalistSiteView\"))\n # Check \"config\" authorization\n viewinfo = self.collection_view_setup(coll_id, \"config\")\n if viewinfo.http_response:\n return viewinfo.http_response\n # Record types\n type_id = request.POST.get('typelist', None)\n if \"type_new\" in request.POST:\n redirect_uri = self.item_new_uri(\n coll_id, \"_type\", \"Type_view\", \n continuation_here\n )\n if \"type_copy\" in request.POST:\n redirect_uri = self.item_copy_uri(\n coll_id, \"_type\", \"Type_view\", type_id, \n message.NO_TYPE_FOR_COPY, \n continuation_here, continuation_next\n )\n if \"type_edit\" in request.POST:\n redirect_uri = self.item_edit_uri(\n coll_id, \"_type\", \"Type_view\", type_id, \n message.NO_TYPE_FOR_COPY, \n continuation_here, continuation_next\n )\n if \"type_delete\" in request.POST:\n redirect_uri, http_response = self.item_delete_response(\n coll_id, type_id, \n message.NO_TYPE_FOR_DELETE, \n message.REMOVE_RECORD_TYPE, \n \"AnnalistRecordTypeDeleteView\",\n continuation_next)\n # List views\n list_id = request.POST.get('listlist', None)\n if \"list_new\" in request.POST:\n redirect_uri = self.item_new_uri(\n coll_id, \"_list\", \"List_view\", \n continuation_here\n )\n if \"list_copy\" in request.POST:\n redirect_uri = self.item_copy_uri(\n coll_id, \"_list\", \"List_view\", list_id, \n message.NO_LIST_FOR_COPY, \n continuation_here, continuation_next\n )\n if \"list_edit\" in request.POST:\n redirect_uri = self.item_edit_uri(\n coll_id, \"_list\", \"List_view\", list_id, \n message.NO_LIST_FOR_COPY, \n continuation_here, continuation_next\n )\n if \"list_delete\" in request.POST:\n redirect_uri, http_response = self.item_delete_response(\n coll_id, list_id, \n message.NO_LIST_FOR_DELETE, \n message.REMOVE_RECORD_LIST, \n \"AnnalistRecordListDeleteView\",\n continuation_next)\n # Record views\n view_id = request.POST.get('viewlist', None)\n if \"view_new\" in request.POST:\n redirect_uri = self.item_new_uri(\n coll_id, \"_view\", \"View_view\", \n continuation_here\n )\n if \"view_copy\" in request.POST:\n redirect_uri = self.item_copy_uri(\n coll_id, \"_view\", \"View_view\", view_id, \n message.NO_VIEW_FOR_COPY, \n continuation_here, continuation_next\n )\n if \"view_edit\" in request.POST:\n redirect_uri = self.item_edit_uri(\n coll_id, \"_view\", \"View_view\", view_id, \n message.NO_VIEW_FOR_COPY, \n continuation_here, continuation_next\n )\n if \"view_delete\" in request.POST:\n redirect_uri, http_response = self.item_delete_response(\n coll_id, view_id, \n message.NO_VIEW_FOR_DELETE, \n message.REMOVE_RECORD_VIEW, \n \"AnnalistRecordViewDeleteView\",\n continuation_next)\n # Invoke selected view and/or render status response\n if redirect_uri:\n http_response = http_response or HttpResponseRedirect(redirect_uri)\n if http_response:\n return http_response\n raise Annalist_Error(request.POST, \"Unexpected values in POST to \"+self.get_request_path())", "def snippet_detail_apiview(request, pk, format=None):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet, context={'request': request})\n 
return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = SnippetSerializer(snippet, data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def insert_link(request,action=None):\n\n username = request.session['username']\n mode1 = request.session['mode']\n mode = NameSpace.objects.get(ns_id=mode1)\n # language = request.GET.get('language',request.session['language'])\n usecase = request.session['usecase']\n auto_required = request.GET.get('ns_id', None)\n type = 'concept-mention'\n\n if request.method == 'GET' and action.lower() == 'linked':\n\n \"\"\"GET request: it returns the mention-concept associations found by the user for that report.\"\"\"\n\n try:\n report_id = request.GET.get('report_id')\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report1 = Report.objects.get(id_report=report_id,language = language )\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n json_dict = get_user_gt(user_get,mode_1,report1,language,'concept-mention')\n #print(json_dict)\n return JsonResponse(json_dict)\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred during the GET request.'}\n return JsonResponse(json_response, status=500)\n\n if request.method == 'GET' and action.lower() == 'mentions':\n\n \"\"\"This GET request returns the list fo mentions associated to the report \"\"\"\n\n report_id = request.GET.get('report_id')\n user_get = request.GET.get('username', username)\n language = request.GET.get('language', request.session['language'])\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n report1 = Report.objects.get(id_report=report_id,language = language)\n try:\n a = Annotate.objects.filter(username=user_get,ns_id=mode_1, id_report=report1, language=language).values('start', 'stop')\n json_dict = {}\n json_dict['mentions1'] = []\n for el in a:\n mention_text = Mention.objects.get(start=int(el['start']), stop=int(el['stop']), id_report=report1,\n language=language)\n\n json_val = {}\n json_val['start'] = (el['start'])\n json_val['stop'] = (el['stop'])\n json_val['mention_text'] = mention_text.mention_text\n json_dict['mentions1'].append(json_val)\n return JsonResponse(json_dict)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'Sorry, an erorr occurred during the GET request.'}\n return JsonResponse(json_response, status=500)\n\n elif request.method == 'POST' and action.lower() == 'insert_mention':\n\n \"\"\" POST request: insertion of a new mention \"\"\"\n\n json_response = {'message': 'Your mentions were correctly inserted'}\n user = User.objects.get(username=username,ns_id=mode)\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n language = request_body_json['language']\n mentions = request_body_json['mentions']\n report1 = Report.objects.get(id_report=report_id, language=language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n 
if len(mentions) > 0:\n try:\n with transaction.atomic():\n mention = mentions[0]\n m = Mention.objects.filter(start = mention['start'], stop = mention['stop'], id_report = report1, language = language)\n if not Mention.objects.filter(start = mention['start'], stop = mention['stop'], id_report = report1, language = language).exists():\n Mention.objects.create(start = mention['start'], stop = mention['stop'],mention_text = mention['mention_text'], id_report = report1, language = language)\n menti = Mention.objects.get(start = mention['start'], stop = mention['stop'], id_report = report1, language = language)\n Annotate.objects.create(username = user,ns_id=mode, insertion_time = Now(),start = menti, stop = menti.stop, id_report = report1, language = language)\n type = 'mentions'\n if GroundTruthLogFile.objects.filter(username=user, ns_id=mode,id_report=report1, language=language,\n gt_type=type).exists():\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode,id_report=report1, language=language,\n gt_type=type).delete()\n\n jsonDict = serialize_gt(type, usecase, username, report_id, language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language=language,\n gt_json=jsonDict,\n gt_type=type, insertion_time=Now())\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred trying to save your ground truth.'}\n return JsonResponse(json_response, status=500)\n else:\n return JsonResponse(json_response)\n\n else:\n json_response = {'message': 'nothing to save'}\n return JsonResponse(json_response)\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\" POST request: delete the mention-concept associations the user found for that report\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n language = request_body_json['language']\n user = User.objects.get(username=username,ns_id=mode)\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting parameters.'}\n return json_response\n\n to_del = Linked.objects.filter(username=user,ns_id=mode, id_report=report1.id_report,language = language)\n if len(to_del) == 0:\n json_response = {'message': 'Nothing to delete.'}\n return JsonResponse(json_response)\n if mode1 == 'Human':\n try:\n with transaction.atomic():\n json_response = delete_all_associations(user, report1, language, type,usecase,mode)\n except Exception as error:\n print(error)\n json_response = {'error': 'Sorry, an erorr occurred, rolled back.'}\n return JsonResponse(json_response,status=500)\n else:\n return JsonResponse(json_response)\n else:\n json_response = restore_robot_annotation(report1, 'concept-mention', user)\n # json_response = {'message': 'Nothing to do'}\n return JsonResponse(json_response)\n\n\n elif request.method == 'POST' and action.lower() == 'insert':\n\n \"\"\" POST request: insert the associations in the db \"\"\"\n\n json_response = {'message': 'Associations and Ground truth saved.'}\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n language = request_body_json['language']\n user = User.objects.get(username=username,ns_id=mode)\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n concepts = 
request_body_json['linked']\n # In this case the user manually deletes all the associations (NOT WITH CLEAR BUTTON) and saves.\n if len(concepts) == 0 and mode1 == 'Human':\n if Linked.objects.filter(username=user,ns_id=mode, id_report=report1, language=language).exists():\n try:\n with transaction.atomic():\n json_response = delete_all_associations(user, report1,language, type,usecase,mode)\n return JsonResponse(json_response)\n except Exception as error:\n print(error)\n json_response = {'error': 'Sorry, an erorr occurred, rolled back.'}\n return JsonResponse(json_response, status=500)\n\n else:\n json_response ={'message':'Nothing to do'}\n return JsonResponse(json_response)\n if len(concepts) == 0 and mode1 == 'Robot':\n \"\"\"If the user sends 0 associations the robot's ground truth is restored\"\"\"\n to_del = Linked.objects.filter(username = user, ns_id = mode, id_report = report1,language = language)\n json_response = restore_robot_annotation(report1, 'concept-mention',user)\n # json_response = {'message': 'Nothing to do'}\n return JsonResponse(json_response)\n\n update = True\n existing_rows = Linked.objects.filter(username=user,ns_id=mode, id_report=report1, language=language)\n if existing_rows.exists():\n if existing_rows.count() == len(concepts):\n\n for concept in concepts:\n conc = concept\n ment = Mention.objects.filter(start=conc['start'], stop=conc['stop'],\n mention_text=conc['mention_text'],\n id_report=report1, language=language)\n\n if ment.exists():\n ment = ment.first()\n concept_model = Concept.objects.get(concept_url=conc['concept_url'])\n area = SemanticArea.objects.get(name=conc['semantic_area'])\n anno = Linked.objects.filter(username=user,ns_id = mode, id_report=report1, language=language,\n start=ment, stop=ment.stop,concept_url = concept_model,name=area)\n if anno.exists():\n update = False\n else:\n update = True\n break\n else: # update if at least one association is different\n update = True\n break\n if update == True:\n try:\n with transaction.atomic():\n\n json_response = update_associations(concepts, user, report1,language,usecase,mode)\n if GroundTruthLogFile.objects.filter(username=user,ns_id=mode, language = language,id_report=report1, gt_type='concept-mention').exists():\n obj = GroundTruthLogFile.objects.filter(username=user, ns_id = mode,language = language,id_report=report1, gt_type='concept-mention')\n obj.delete()\n jsonDict = serialize_gt(type, usecase, username, report_id,language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, language = language,id_report=report1, gt_json=jsonDict, gt_type=type,\n insertion_time=Now())\n return JsonResponse(json_response)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred trying to save your ground truth.'}\n return JsonResponse(json_response,status=500)\n else:\n try:\n with transaction.atomic():\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='concept-mention', username=user, ns_id=mode,\n id_report=report1, language=language).exists():\n js = serialize_gt('concept-mention', usecase, username, report1.id_report, language, mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode,\n id_report=report1, language=language,\n gt_type='concept-mention')\n ass = Linked.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('start', 'stop', 'name', 'concept_url')\n for el in ass:\n men_cur = Mention.objects.get(id_report=report1, 
language=language, start=el['start'],\n stop=el['stop'])\n sem = SemanticArea.objects.get(name=el['name'])\n concept_u = Concept.objects.get(concept_url=el['concept_url'])\n Linked.objects.filter(username=user, id_report=report1, language=language, ns_id=mode,\n name=sem, start=men_cur, stop=el['stop'],\n concept_url=concept_u).delete()\n Linked.objects.create(username=user, ns_id=mode, id_report=report1, language=language,\n name=sem, stop=el['stop'], start=men_cur, concept_url=concept_u,\n insertion_time=Now())\n\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n elif mode1 == 'Robot':\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode, id_report=report1,\n language=language,\n gt_type='concept-mention')\n # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina\n gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='concept-mention')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n js = serialize_gt('concept-mention', usecase, username, report1.id_report, language, mode)\n\n # js = gt[0].gt_json\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='concept-mention').delete()\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='concept-mention')\n ass = Linked.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('start', 'stop', 'name', 'concept_url')\n for el in ass:\n men_cur = Mention.objects.get(id_report=report1, language=language, start=el['start'],\n stop=el['stop'])\n sem = SemanticArea.objects.get(name=el['name'])\n concept_u = Concept.objects.get(concept_url=el['concept_url'])\n Linked.objects.filter(username=user, id_report=report1, language=language, ns_id=mode,\n name=sem, start=men_cur, stop=el['stop'],\n concept_url=concept_u).delete()\n Linked.objects.create(username=user, ns_id=mode, id_report=report1, language=language,\n name=sem, stop=el['stop'], start=men_cur, concept_url=concept_u,\n insertion_time=Now())\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred trying to save your ground truth.'}\n return JsonResponse(json_response, status=500)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)", "def POST(self):\n data=web.input(booklists={})\n posts=model.operate(data)\n return render.view(posts,data.table,titles[data.table],session.id)", "def snippet_detail_csrf(request, pk, format=None):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet, context={'request': request})\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = SnippetSerializer(snippet, data=data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def post(self, request):\n if not 
request.user.is_authenticated():\n return redirect(\"todo\")\n\n # _method contains the true verb the form want to use, if it's not POST.\n method = request.POST.get(\"_method\", \"\")\n if not method:\n return self._create_task(request)\n elif method == \"PUT\":\n completed = request.POST.get(\"completed\", \"\")\n title = request.POST.get(\"title\", \"\")\n description = request.POST.get(\"description\", \"\")\n if completed and not (title or description):\n return self._complete_task(request)\n else:\n return self._edit_task(request)\n elif method == \"DELETE\":\n return self._delete_task(request)\n\n return self._no_valid_action()", "def post(self, request):\n pass", "def snippet_list_apiview(request, format=None):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer(snippets, many=True, context={'request': request})\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = SnippetSerializer(data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save(owner=request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def view_put():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def snippet_detail(request, pk):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = SnippetSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def snippet_detail(request, pk):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = SnippetSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def post(self, request, *args, **kwargs):\n return self.get(request, *args, **kwargs)", "def todo_detail(request, pk):\n try:\n todo = Todo.objects.get(pk=pk) # 특정 snippet 가져오기\n except Todo.DoesNotExist: # 일치하는 snippet이 없을 경우\n return Response(status=status.HTTP_404_NOT_FOUND)\n \n if request.method == 'GET':\n serializer = TodoSerializer(todo)\n return Response(serializer.data)\n \n elif request.method == 'PUT':\n serializer = TodoSerializer(todo, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n todo.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def api_question(request, question_id: int):\n try:\n question = Question.objects.get(qid=question_id)\n except Question.DoesNotExist:\n return 
HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = QuestionSerializer(question)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = QuestionSerializer(question, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n question.delete()\n return HttpResponse(status=204)" ]
[ "0.61219996", "0.59057087", "0.56041396", "0.5594849", "0.55910647", "0.5548109", "0.5538304", "0.55363566", "0.551329", "0.551329", "0.5454264", "0.5444275", "0.5436636", "0.5426927", "0.54256684", "0.5419429", "0.5403076", "0.53987503", "0.53939396", "0.5388849", "0.5387255", "0.5386335", "0.5373872", "0.5360398", "0.5352483", "0.53450227", "0.53450227", "0.53373826", "0.5331466", "0.5331055" ]
0.60170645
1
This view returns the list of reports associated to a single use_case, institute and language
def get_reports(request):
    inst = request.GET.get('institute',None)
    use = request.GET.get('usec',None)
    print(use)
    lang = request.GET.get('lang',None)
    batch = request.GET.get('batch',None)
    all = request.GET.get('all',None)
    actual_report = request.GET.get('actual_report',None)

    if all == 'all':
        # All the reports are returned independently of the usecase, the language or institute.
        use_obj = UseCase.objects.get(name = use)
        reps = Report.objects.filter(institute = inst,name = use_obj,language = lang).values('id_report','report_json','language')
        json_resp = {}
        json_resp['report'] = []
        for report in reps:
            json_rep = {}
            json_rep['id_report'] = report['id_report']
            json_rep['language'] = report['language']
            json_rep['report_json'] = report['report_json']
            json_resp['report'].append(json_rep)
        return JsonResponse(json_resp)

    if(inst != None and use != None and lang != None and batch != None):
        """ It is used in the options modal: if the required combination of institute, language and usecase
        has 0 reports associated, a message is returned. In this case this view returns the number of reports
        associated to a specific configuration required """

        rep = Report.objects.filter(institute = inst, name = use, language = lang, batch = batch)
        json_count = {'count':len(rep)}
        return JsonResponse(json_count)

    usecase = request.session.get('usecase',None)
    mode1 = request.session.get('mode',None)
    mode = NameSpace.objects.get(ns_id=mode1)
    language = request.session.get('language',None)
    institute = request.session.get('institute',None)
    username = request.session['username']
    batch = request.session['batch']
    token = request.GET.get('configure',None)  # This parameter is set when
    jsonError = {'error':'something wrong with params!'}

    if usecase is not None and language is not None and institute is not None and batch is not None:
        # Get the reports associated to the usecase, language and institute of the SESSION
        reports1 = Report.objects.filter(name = usecase, language = language, institute = institute,batch=batch)
        if mode1 == 'Robot':
            # gts_r = GroundTruthLogFile.objects.filter(language = language,ns_id=mode).values('id_report')
            # gts_r1 = GroundTruthLogFile.objects.filter(language=language, ns_id=mode).order_by(
            # 'id_report').distinct('id_report').values('id_report')
            # ids1 = [el['id_report'] for el in gts_r1]
            # print(len(ids1))
            gts_r1 = GroundTruthLogFile.objects.filter(id_report__in = reports1,language = language,ns_id=mode).order_by('id_report').distinct('id_report').values('id_report')
            ids = [el['id_report'] for el in gts_r1]
            # print(len(ids))
            # print(ids == ids1)
            # for el in gts_r1:
            # # if el['id_report'] not in ids and Report.objects.filter(language = language, id_report = el['id_report'], batch = batch).exists():
            # ids.append(el['id_report'])
            reports1 = Report.objects.filter(id_report__in=ids,name = usecase, language = language, institute = institute,batch = batch)

        json_resp = {}
        json_resp['report'] = []
        if reports1.exists():
            reports = reports1.values('id_report','report_json','language')
            for report in reports:
                json_rep = {}
                json_rep['id_report'] = report['id_report']
                json_rep['language'] = report['language']
                json_rep['report_json'] = report['report_json']
                json_resp['report'].append(json_rep)
            json_resp['report'].sort(key=lambda json: json['id_report'], reverse=False)  # Reports are sorted by ID
            # json_resp['report'].sort(key=lambda json: json['report_json']['report_id'], reverse=False)  # Reports are sorted by ID
            json_resp['index'] = 0
            if token is not None:
                # Get the last ground truth given the session parameters.
                gt = get_last_groundtruth(username, usecase, language, institute,mode,batch)
            else:
                # Get the last ground truth of the user.
                gt = get_last_groundtruth(username,None, None, None,mode,batch)
            if gt is not None:
                # The index is updated and it characterizes the first report of the list shown to the user.
                id_report = gt['id_report']
                use = gt['use_case']
                lang = gt['language']
                institute = gt['institute']
                report_json = Report.objects.get(id_report = id_report, name = use, language = lang, institute = institute)
                rep_json = report_json.report_json
                index = json_resp['report'].index({'id_report':id_report,'language':lang,'report_json':rep_json})
                json_resp['index'] = int(index)
            if actual_report is not None:
                index = json_resp['report'].index(actual_report)
                json_resp['index'] = int(index)
        return JsonResponse(json_resp)
    else:
        return JsonResponse(jsonError,status=500)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(request):\n\n json_resp = {}\n # reports = Report.objects.filter(name = UseCase.objects.get(name=request.session['usecase']),institute = request.session['institute'],language = request.session['language'])\n\n json_resp['reports'] = []\n institute = request.GET.get('institute',request.session['institute'])\n usecase = request.GET.get('usecase',request.session['usecase'])\n print(usecase)\n language = request.GET.get('language',request.session['language'])\n ns_human = NameSpace.objects.get(ns_id='Human')\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n user_robot = User.objects.get(username='Robot_user', ns_id=ns_robot)\n # usec = UseCase.objects.get(name = usecase)\n # reports = Report.objects.filter(name = usec,institute = institute, language = language).values('id_report')\n # gt_report = GroundTruthLogFile.objects.filter(language = language).exclude(username = user_robot,id_report__in=reports).order_by('id_report').distinct('id_report')\n cursor = connection.cursor()\n cursor.execute(\"SELECT r.id_report,r.language,r.report_json,r.name,r.institute,r.batch,COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND r.institute = %s AND r.language = %s AND g.username != %s GROUP BY (r.id_report,r.language,r.report_json,r.name,r.institute,r.batch)\",[usecase,institute,language,'Robot_user'])\n gt_report_ids = []\n indice = 0\n st = time.time()\n for el in cursor.fetchall():\n\n # report = Report.objects.get(language = language, id_report = el.id_report_id)\n gt_report_ids.append(el[0])\n # print(str(indice))\n indice +=1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 1\n gt_robot = 0\n\n rep = json.loads(el[2])\n new_rep = {}\n for key in rep.keys():\n nkey = key+ '_0'\n new_rep[nkey] = rep[key]\n\n total = el[6]\n\n new_rep['usecase'] = usecase\n new_rep['id_report_not_hashed'] = rep.get('report_id',el[0])\n new_rep['id_report'] = el[0]\n new_rep['institute'] = institute\n new_rep['language'] = language\n new_rep['batch'] = el[5]\n\n json_resp['reports'].append({'total':total, 'report':new_rep,'id_report':el[0], 'language':language})\n\n usec = UseCase.objects.get(name = usecase)\n reports = Report.objects.filter(institute = institute,language = language,name = usec).exclude(id_report__in=gt_report_ids)\n # print(reports.count())\n indice = 0\n st = time.time()\n for el in reports:\n report = el\n # print(str(indice))\n indice += 1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 0\n gt_robot = 0\n\n rep = report.report_json\n new_rep = {}\n for key in rep.keys():\n nkey = key + '_0'\n new_rep[nkey] = rep[key]\n\n total = gt_human + gt_robot\n\n new_rep['usecase'] = report.name_id\n new_rep['id_report_not_hashed'] = rep.get('report_id', report.id_report)\n new_rep['id_report'] = report.id_report\n new_rep['institute'] = report.institute\n new_rep['language'] = report.language\n new_rep['batch'] = report.batch\n\n json_resp['reports'].append(\n {'total': total, 'report': new_rep, 'id_report': report.id_report, 'language': report.language})\n # print('elaboro1',str(end1-st1))\n tot = time.time()\n print('totale',str(tot-st))\n\n return JsonResponse(json_resp,safe=False)", "def 
usecases(request):\n\n context = {\n\n }\n\n return render(request, 'hydraviewer/usecases.html', context)", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def get(self, request, format=None):\n param_report = self.request.query_params.get('report', None)\n start_date = self.request.query_params.get('start_date', '')\n end_date = self.request.query_params.get('end_date', '')\n detailed_report = self.request.query_params.get('detailed_report', 'false')\n\n if start_date == '':\n start_date = datetime.date.today().strftime(\"%Y-%m-%d 16:00:00\")\n else:\n start_date = start_date.replace(\"T\", \" \", 1)\n if end_date == '':\n end_date = datetime.date.today().strftime(\"%Y-%m-%d 16:00:00\")\n else:\n end_date = end_date.replace(\"T\", \" \", 1)\n\n if param_report is None or param_report == \"\":\n return Response(\"No report specified\", status=status.HTTP_400_BAD_REQUEST)\n\n table_html = None\n table_title = None\n table_subtitle = \"\"\"%s - %s\"\"\" % (start_date, end_date)\n\n # if param_report == \"police_division_summary_report\":\n # table_html = get_police_division_summary()\n # table_title = \"Police Division Summary Report\"\n\n if param_report == \"category_wise_summary_report\":\n table_html = get_category_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Category\"\n\n elif param_report == \"mode_wise_summary_report\":\n table_html = get_mode_summary(start_date, end_date, detailed_report)\n table_title = \"No. 
of Incidents by Mode\"\n\n elif param_report == \"district_wise_summary_report\":\n table_html = get_district_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by District\"\n\n elif param_report == \"severity_wise_summary_report\":\n table_html = get_severity_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Severity\"\n\n elif param_report == \"subcategory_wise_summary_report\":\n table_html = get_subcategory_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Subcategory\"\n\n elif param_report == \"status_wise_summary_report\":\n table_html = get_status_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Status\"\n\n if table_html is None:\n return Response(\"Report not found\", status=status.HTTP_400_BAD_REQUEST)\n\n table_html = apply_style(\n table_html.replace(\".0\", \"\", -1).replace(\"(Total No. of Incidents)\",\n \"<strong>(Total No. of Incidents)</strong>\", 1).replace(\n \"(Unassigned)\", \"<strong>(Unassigned)</strong>\", 1), table_title, table_subtitle)\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"Report.pdf\"'\n pisa.CreatePDF(table_html, dest=response)\n\n return response", "def training_report_view(request, application_slug):\n return training_report(request, application_slug, attach=False)", "def show_campaigns(request, utm_campaign, **kwargs):\n \n err_msg = ''\n try:\n err_msg = str(kwargs['kwargs']['err_msg'])\n except:\n pass\n \n test_type_override = ''\n try:\n test_type_override = MySQLdb._mysql.escape_string(request.POST['test_type_override'])\n \n if test_type_override == 'Banner':\n test_type_var = FDH._TESTTYPE_BANNER_\n elif test_type_override == 'Landing Page':\n test_type_var = FDH._TESTTYPE_LP_\n elif test_type_override == 'Banner and LP':\n test_type_var = FDH._TESTTYPE_BANNER_LP_\n \n except:\n test_type_var = ''\n pass\n \n try:\n \"\"\" Find the earliest and latest page views for a given campaign \"\"\"\n lptl = DL.LandingPageTableLoader()\n ccrml = DL.CiviCRMLoader()\n \n start_time = ccrml.get_earliest_donation(utm_campaign)\n end_time = ccrml.get_latest_donation(utm_campaign)\n \n one_step = lptl.is_one_step(start_time, end_time, utm_campaign) \n \n if not(one_step): \n start_time = lptl.get_earliest_campaign_view(utm_campaign)\n end_time = lptl.get_latest_campaign_view(utm_campaign) \n\n interval = 1\n \n \"\"\" Create reporting object to retrieve campaign data and write plots to image repo on disk \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n \n \"\"\" Produce analysis on the campaign view data \"\"\" \n ir.run(start_time, end_time, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" \n ESTIMATE THE START AND END TIME OF THE CAMPAIGN\n ===============================================\n \n Search for the first instance when more than 10 views are observed over a sampling period\n \"\"\"\n \n col_names = ir._data_loader_.get_column_names()\n \n views_index = col_names.index('views')\n ts_index = col_names.index('ts')\n \n row_list = list(ir._data_loader_._results_) # copy the query results\n for row in row_list:\n if row[views_index] > 100:\n start_time_est = row[ts_index]\n break\n row_list.reverse()\n for row in row_list:\n if row[views_index] > 100:\n end_time_est = row[ts_index]\n break\n 
\n \n \"\"\"\n BUILD THE VISUALIZATION FOR THE TEST VIEWS OF THIS CAMAPAIGN\n ============================================================ \n \"\"\"\n \n \"\"\" Read the test name \"\"\"\n ttl = DL.TestTableLoader()\n row = ttl.get_test_row(utm_campaign)\n test_name = ttl.get_test_field(row ,'test_name')\n \n \"\"\" Regenerate the data using the estimated start and end times \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n ir.run(start_time_est, end_time_est, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" Determine the type of test (if not overridden) and retrieve the artifacts \"\"\"\n test_type, artifact_name_list = FDH.get_test_type(utm_campaign, start_time, end_time, DL.CampaignReportingLoader(query_type=''), test_type_var)\n \n return render_to_response('campaigns/show_campaigns.html', {'utm_campaign' : utm_campaign, 'test_name' : test_name, 'start_time' : start_time_est, 'end_time' : end_time_est, 'one_step' : one_step, \\\n 'artifacts' : artifact_name_list, 'test_type' : test_type, 'err_msg' : err_msg}, context_instance=RequestContext(request)) \n\n except Exception as inst:\n \n logging.error('Failed to correctly produce campaign diagnostics.')\n logging.error(type(inst))\n logging.error(inst.args)\n logging.error(inst)\n \n \"\"\" Return to the index page with an error \"\"\"\n err_msg = 'There is insufficient data to analyze this campaign: %s. Check to see if the <a href=\"/LML/\">impressions have been loaded</a>. <br><br>ERROR:<br><br>%s' % (utm_campaign, inst.__str__())\n \n return index(request, kwargs={'err_msg' : err_msg})", "def pubmed_reports(request):\n\n json_resp = {}\n json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n\n if r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def pubmed_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n json_resp['annotated'] = 0\n json_resp['tot'] = 0\n json_resp['usecase'] = []\n languages = ['English','english']\n for el in usecases:\n use = el.name\n json_resp[use] = {}\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages,institute = 'PUBMED')\n for el in report:\n if el.batch not in batches:\n batches.append(el.batch)\n count_rep = report.count()\n\n if count_rep > 0:\n json_resp['usecase'].append(str(use))\n json_resp['tot'] = json_resp['tot'] + count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n json_resp['annotated'] = json_resp['annotated'] + groundTruths\n\n for batch in batches:\n\n json_resp[use][batch] = {}\n if batch == 'all' or batch is None:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED')\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS 
g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED',batch = batch)\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch,tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print('risposta',json_resp)\n return JsonResponse(json_resp)", "def get_test_report(request, **kwargs): \n\t\n #Fetching the details of the selected event\n test_list = sidecar.events.test_report(project_id=kwargs['test_id'])\n report_list = []\n\t\n #Creating the list for the report\n for tests in test_list._logs:\n\tjson_test = json.loads(tests['data'])\n\ttests['success'] = json_test['success'] \n\ttests['time'] = json_test['time']\n\ttests['test_cases'] = json_test['test_cases']\n\treport_list.append(tests)\n\n #Making the context and sending to template\n context = {\n \"page_title\": _(\"Test Results\"),\n \"tests\": report_list\n }\n return render(request, 'rally_dashboard/events/test_detail.html', context)", "def report_start_end(request):\n\n report = request.GET.get('report_id')\n lang = request.GET.get('language',None)\n usecase = request.session['usecase']\n data = get_fields_from_json()\n json_keys_to_display = data['fields']\n json_keys_to_display.extend(['journal','authors','year','volume'])\n json_keys_to_ann = data['fields_to_ann']\n json_keys = (data['all_fields'])\n\n language = request.GET.get('language',request.session['language'])\n request_auto = request.GET.get('ns_id',None)\n if request.session['mode'] == 'Robot' or (request_auto is not None and request_auto == 'Robot' and request.session['institute'] != 'PUBMED'):\n # In this case we require automatic annotation: the keys to annotate change\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n with open(os.path.join(workpath,'./automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_keys = data['total_fields'][usecase]\n json_keys_to_ann = data['extract_fields'][usecase]\n for el in json_keys_to_ann:\n if el in json_keys_to_display:\n json_keys_to_display.remove(el)\n\n json_keys.extend(['journal', 'authors', 'year', 'volume', 'abstract', 'title'])\n json_keys_to_ann.extend(['abstract', 'title'])\n if lang is not None:\n language = lang\n json_dict = report_get_start_end(json_keys,json_keys_to_ann,report,language)\n # print(json_dict)\n return JsonResponse(json_dict)", "def get_uses_missing_exa(request):\n\n use_to_ret = {}\n use_to_ret['labels_present'] = []\n use_to_ret['concepts_present'] = []\n use_to_ret['labels_missing'] = []\n use_to_ret['concepts_missing'] = []\n 
uses = ['colon','uterine cervix','lung']\n for el in uses:\n usecase = UseCase.objects.get(name=el)\n presence = True\n if Report.objects.filter(name = usecase).count() > 0:\n if not AnnotationLabel.objects.filter(name = usecase, annotation_mode = 'Manual and Automatic').exists():\n use_to_ret['labels_missing'].append(el)\n else:\n use_to_ret['labels_present'].append(el)\n\n cursor = connection.cursor()\n cursor.execute(\"SELECT c.annotation_mode FROM concept AS c INNER JOIN concept_has_uc AS hc ON c.concept_url = hc.concept_url WHERE hc.name = %s\",[str(el)])\n ans = cursor.fetchall()\n for concept in ans:\n if concept[0] != 'Manual and Automatic':\n presence = False\n break\n if len(ans) > 0:\n if presence == False:\n use_to_ret['concepts_missing'].append(el)\n else:\n use_to_ret['concepts_present'].append(el)\n else:\n use_to_ret['concepts_missing'].append(el)\n\n return JsonResponse(use_to_ret)", "def list(self, request):\n\n records = filter_against_records(request)\n \n if 'faculty_id' in request.query_params:\n faculty = Faculties.objects.filter(id=request.query_params.get('faculty_id'))[0]\n departments = Departments.objects.filter(faculty_id=model_to_dict(faculty)['id'])\n for department in departments:\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'department_id' in request.query_params:\n department = Departments.objects.filter(id=request.query_params.get('department_id'))[0]\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'start_year_id' in request.query_params:\n start_year = StartYears.objects.filter(id=request.query_params.get('start_year_id'))[0]\n education_programs = EducationPrograms.objects.filter(start_year_id=model_to_dict(start_year)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'specialization_id' in request.query_params:\n specialization = Specializations.objects.filter(id=request.query_params.get('specialization_id'))[0]\n education_programs = EducationPrograms.objects.filter(specialization_id=model_to_dict(specialization)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if 
model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_level_id' in request.query_params:\n education_level = EducationLevels.objects.filter(id=request.query_params.get('education_level_id'))[0]\n education_programs = EducationPrograms.objects.filter(education_level_id=model_to_dict(education_level)['id'])\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_program_id' in request.query_params:\n education_program = EducationPrograms.objects.filter(id=request.query_params.get('education_program_id'))[0]\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'student_id' in request.query_params:\n records = records.filter(student_id=request.query_params.get('student_id'))\n\n \n\n \n \n \n students = Students.objects.all()\n res = []\n for student in students:\n student_records = records.filter(student_id=model_to_dict(student)['id'])\n if len(student_records) > 0:\n res.append(student)\n\n return Response(normalize_students(res))", "def check_presence_exa_conc_lab(request):\n\n # reports = request.GET.get('reports',None)\n rep = request.GET.get('id_report',None)\n language = request.GET.get('language',None)\n usecase = request.GET.get('usecase',None)\n reports = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports = request_body_json['reports']\n if rep is not None and language is not None:\n report = Report.objects.get(id_report = rep,language = language)\n usecase = report.name_id\n # print(usecase)\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n print('bool',bool)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n elif usecase is not None:\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n\n # labels = []\n # concepts = []\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n # labels.append(bool[0])\n # concepts.append(bool[1])\n # if False in labels:\n # json_resp['labels'] = False\n # else:\n # json_resp['labels'] = True\n #\n # if False in concepts:\n # json_resp['concepts'] = False\n # else:\n # json_resp['concepts'] = True\n elif reports is not None:\n report_list = json.loads(reports)\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n usecases = []\n for rep in report_list:\n # rep = json.loads(rep)\n if rep['usecase'] not in usecases:\n usecases.append(rep['usecase'])\n labels = []\n concepts = []\n for u in usecases:\n # print(u)\n json_resp = {}\n if u in ['colon', 'uterine cervix', 'lung']:\n bool = check_exa_lab_conc_only(u)\n else:\n bool = [False, False]\n\n 
labels.append(bool[0])\n concepts.append(bool[1])\n if False in labels:\n json_resp['labels'] = False\n else:\n json_resp['labels'] = True\n\n if False in concepts:\n json_resp['concepts'] = False\n else:\n json_resp['concepts'] = True\n\n else:\n json_resp={'error':'a usecase is needed'}\n\n print(json_resp)\n return JsonResponse(json_resp)", "def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)", "def getReport(request):\n\n\t#parameters needed for different REST API's\n\tparams = {\n\t\t'rid':-1,\n\t\t'year':-1,\n\t\t'con_num':-1,\n\t\t'assign_num':-1,\n\t\t'item_num':-1,\n\t\t'wtype': -1,\n\t\t'payno': -1,\n\t\t'snap': 0, #default is 0 for snapshots (for now)\n\t\t'issue_date': -1,\n\t}\n\n\t#loop over the parameters and set them if they appear in the api url\n\tfor p in params:\n\t\tif p in request.GET:\n\t\t\tparams[p] = request.GET[p]\n\n\n\t#get the request session and load data\n\ts = requests.Session()\n\tif not isinstance(rgen.ReportGenerator.get_url(params), dict):\n\t\tresponse = s.get(rgen.ReportGenerator.get_url(params))\n\n\t\t#set the iterator and the content\n\t\tit = json.loads(response.content)\n\t\tcontent = json.loads(response.content)\n\t\t\n\t\t#while a next page exists, parse the api\n\t\tpageNum = 1\n\t\twhile \"next\" in it:\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params) + '?page=' + str(pageNum))\n\t\t\tit = json.loads(response.content)\n\t\t\tcontent[\"items\"].extend(it[\"items\"])\n\t\t\tpageNum += 1\n\n\telse:\n\t\t#if the url is a list\n\t\tcontent = {}\n\t\tfor part in rgen.ReportGenerator.get_url(params):\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part])\n\t\t\tit = json.loads(response.content)\n\t\t\t#content = {\"part1\":{\"items\":[]}, \"part2\":{\"items\":[]}, \"part3\":{\"items\":[]}}\n\t\t\t\n\t\t\tcontent[part] = {}\n\t\t\tcontent[part][\"items\"] = 
[]\n\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\n\t\t\tpageNum = 1\n\t\t\twhile \"next\" in it:\n\t\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part] + '?page=' + str(pageNum))\n\t\t\t\tit = json.loads(response.content)\n\t\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\t\t\t\tpageNum += 1\n\t\n\t#set the file object to be returned as a download\n\tfile = HttpResponse(rgen.ReportGenerator.formExcel(content, params), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n\tif params[\"rid\"] == '70':\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + ' No.' + params['issue_date'] + '.xlsx'\n\telse:\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + '.xlsx'\n\ts.close()\n\treturn file", "def get_presence_robot_user(request):\n\n id_report = request.GET.get('id_report', None)\n language = request.GET.get('language', None)\n use = request.GET.get('usecase', None)\n rep = request.GET.get('report_type', None)\n json_resp = {'auto_annotation_count': 0}\n cursor = connection.cursor()\n\n reports_list = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports_list = request_body_json['reports']\n\n if id_report is not None and language is not None:\n\n usecase = Report.objects.get(id_report=id_report, language=language)\n use = usecase.name_id\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s\",\n ['Robot_user', str(use)])\n ans = cursor.fetchone()[0]\n json_resp = {'auto_annotation_count': (ans)}\n\n elif use is not None and rep is not None:\n # print(rep)\n if rep == 'reports':\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s and r.name = %s and r.institute != %s\",\n ['Robot', 'Robot_user', str(use), 'PUBMED'])\n\n elif rep == 'pubmed':\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s and r.institute = %s\",\n ['Robot_user', str(use), 'PUBMED'])\n\n ans = cursor.fetchone()[0]\n if ans > 0:\n json_resp = {'auto_annotation_count': ans}\n # print(json_resp)\n elif reports_list is not None:\n report_list = json.loads(reports_list)\n # print(report_list)\n usecase_list = []\n for rep in report_list:\n\n if rep['usecase'] not in usecase_list:\n usecase_list.append(rep['usecase'])\n for u in usecase_list:\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s\",\n ['Robot_user', str(u)])\n ans = cursor.fetchone()[0]\n if ans > 0:\n json_resp = {'auto_annotation_count': ans}\n else:\n json_resp = {'auto_annotation_count': 0}\n\n elif use is None and reports_list is None and id_report is None and language is None:\n robot = NameSpace.objects.get(ns_id='Robot')\n gt = GroundTruthLogFile.objects.filter(ns_id=robot)\n json_resp = {'auto_annotation_count': gt.count()}\n\n print(json_resp)\n return JsonResponse(json_resp)", "def display_report(request, **kwargs):\n\n #Getting the report of the tests \n try:\n outputStr = 
sidecar.events.test_logs(project_id=kwargs['project_id'])\n outputStr = outputStr.results\n except Exception, e:\n outputStr = \"Updating the logs...\"\n \n #Making the output\n context = {\n \"page_title\": _(\"Test Report\"),\n \"test_report\": outputStr\n }\n return render(request, 'rally_dashboard/events/view_report.html', context)", "def check_auto_presence_for_configuration(request):\n\n report_type = request.GET.get('report_type',None)\n usecase = request.GET.get('usecase',None)\n language = request.GET.get('language',None)\n institute = request.GET.get('institute',None)\n batch = request.GET.get('batch',None)\n languages = ['English','english']\n # print('BATCH',str(batch))\n use = UseCase.objects.get(name=usecase)\n json_resp = {}\n mode = NameSpace.objects.get(ns_id = 'Robot')\n user = User.objects.get(ns_id = mode, username='Robot_user')\n\n if report_type == 'pubmed':\n cursor = connection.cursor()\n cursor.execute(\"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute=%s AND r.language in %s AND r.name = %s AND r.batch = %s\",['Robot','Robot_user','PUBMED',tuple(languages),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n\n elif report_type == 'reports':\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute!=%s AND r.institute = %s AND r.language = %s AND r.name = %s AND r.batch = %s\",\n ['Robot', 'Robot_user', 'PUBMED',str(institute),str(language),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n print(json_resp)\n return JsonResponse(json_resp)", "def achieve_viewall(request):\n is_loggedin, username = get_session_variables(request)\n contrib_list = []\n article_list = []\n gsoc_list = []\n speaker_list = []\n intern_list = []\n contest_participant_list = []\n icpc_participants_regional_list = []\n icpc_participants_final_list = []\n\n contrib_list_all = Contribution.objects.all()\n contrib_list = Contribution.objects.all()[:5]\n article_list = Article.objects.all()[:5]\n gsoc_list = Gsoc.objects.all()[:5]\n speaker_list = Speaker.objects.all()[:5]\n intern_list = Intern.objects.all()[:5]\n contest_list = Contest_won.objects.all()[:5]\n\n \n contrib_org = {}\n if contrib_list_all:\n for contrib in contrib_list_all:\n if contrib.org_name not in contrib_org.keys():\n contrib_org[contrib.org_name] = 0\n\n for contrib in contrib_list:\n contrib_org[contrib.org_name] += 1\n\n if contest_list:\t\n contest_participant_list = []\n\tfor contest_won_obj in contest_list:\t\n\t c_id = contest_won_obj.contest_id\n\t c_p_objs = Contest_won_participant.objects.filter(contest_id = c_id)\n\t contest_participant_list.extend(c_p_objs)\n \n icpc_list_regionals = ACM_ICPC_detail.objects.filter(level='regional').order_by('ranking')[:2]\n if icpc_list_regionals:\n for icpc_obj in icpc_list_regionals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n 
icpc_participants_regional_list.append(icpc_participant_list)\n\n icpc_list_finals = ACM_ICPC_detail.objects.filter(level='finals').order_by('ranking')[:2]\n if icpc_list_finals:\n for icpc_obj in icpc_list_finals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n icpc_participants_final_list.append(icpc_participant_list)\n\n return render_to_response('achievement/achievement_viewall.html',\\\n\t\t{'username':username, \\\n 'is_loggedin':is_loggedin, \\\n 'contrib_list':contrib_list, \\\n 'contrib_org':contrib_org,\\\n 'article_list':article_list, \\\n 'gsoc_list':gsoc_list, \\\n 'speaker_list':speaker_list, \\\n 'intern_list':intern_list, \\\n 'contest_list':contest_list, \\\n 'contest_participant_list':contest_participant_list, \\\n 'icpc_participants_final_list':icpc_participants_final_list, \\\n 'icpc_participants_regional_list':icpc_participants_regional_list}, \\\n RequestContext(request))", "def summaryView(request):\n\n alert_errors = []\n alert_infos = []\n alert_filters = []\n\n runs = get_runs_from_request_filters(\n request, alert_errors, alert_infos, alert_filters\n )\n\n summary = SummaryReport(runs)\n\n context = {\n \"refs\": summary.reference_runs(),\n \"runs\": summary.runs_checked_per_type(),\n \"tk_maps\": summary.tracker_maps_per_type(),\n \"certified_runs\": summary.certified_runs_per_type(),\n \"sums\": summary.sum_of_quantities_per_type(),\n \"alert_errors\": alert_errors,\n \"alert_infos\": alert_infos,\n \"alert_filters\": alert_filters,\n }\n\n return render(request, \"certhelper/summary.html\", context)", "def getSiteWideReports(context):\n\n return getReports(context, category=\"Site Wide\")", "def medtag_reports(request):\n\n json_resp = {}\n json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n if not r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def viewexperiments(request):\r\n # Obtain the context from the HTTP request.\r\n\r\n context_instance = RequestContext(request)\r\n\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n except LoggedInButFailedGetGeniUserError:\r\n return _show_failed_get_geniuser_page(request)\r\n\r\n\r\n page_top_errors = []\r\n username = user.username\r\n ret = [] #returning list\r\n user_experiments = Experiment.objects.filter(geni_user=user)\r\n for experiment in reversed(user_experiments):\r\n #reversed so the oldest experiment is the last we show.\r\n experiment_sensors = []\r\n name_list = []\r\n experiment_sensors.extend(list(Battery.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Bluetooth.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Cellular.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Settings.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(ConcretSensor.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Location.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Signal_strengths.objects.filter(experiment_id=experiment)))\r\n 
experiment_sensors.extend(list(Wifi.objects.filter(experiment_id=experiment)))\r\n\r\n for sensor in experiment_sensors:\r\n name_list.append(sensor.show_name())\r\n\r\n if name_list == []:\r\n name_list = \"None\"\r\n\r\n ret.append([experiment.expe_name,name_list,experiment.id])\r\n \r\n \r\n \r\n return render(request, 'control/viewexperiments.html', {'username' : username, \r\n 'page_top_errors' : page_top_errors, 'ret':ret})", "def get_stats_array_per_usecase(request):\n\n mode = request.GET.get('mode',None)\n usern = request.GET.get('member',request.session['username'])\n username = User.objects.get(username=usern, ns_id=mode)\n language = request.GET.get('language',request.session['language'])\n institute = request.GET.get('institute',request.session['institute'])\n batch = request.GET.get('batch',request.session['batch'])\n json_dict = {}\n js = {}\n js['original'] = {}\n js['percent'] = {}\n json_dict['medtag'] = get_array_per_usecase(username,mode,language,institute,batch)\n json_dict['pubmed'] = get_array_per_usecase_PUBMED(username,mode,language,institute,batch)\n\n\n # print(json_dict)\n return JsonResponse(json_dict)", "def view_cases(context,case_id):\n\n adapter = context.obj['adapter']\n\n if case_id is not None:\n results = adapter.find_case({'case_id': case_id})\n\n else:\n results = adapter.find_cases({})\n\n click.echo(pprint(results))", "def experiment_run_list(request, instrument, ipts):\n \n logger.debug(\"Catalog: %s : instrument = %s, IPTS = %s\"%(inspect.stack()[0][3],instrument,ipts))\n \n \n \n breadcrumbs = Breadcrumbs(\"home\", reverse('catalog'))\n breadcrumbs.append_experiment_list(instrument)\n breadcrumbs.append(ipts.lower())\n \n template_values = {'instrument': instrument,\n 'experiment': ipts,\n 'title': '%s %s' % (instrument.upper(), ipts.upper()),\n 'breadcrumbs': breadcrumbs}\n\n if users_view_util.is_experiment_member(request, instrument, ipts) is False:\n template_values['user_alert'] = [\"You do not have access to this experiment data.\"]\n else:\n runs = get_ipts_runs(instrument.upper(), ipts)\n template_values['run_data'] = runs\n if len(runs) == 0:\n template_values['user_alert'] = ['No runs were found for instrument %s experiment %s' % (instrument, ipts)]\n # Fill in Fermi auth values, login info,\n template_values = remote_view_util.fill_template_values(request, **template_values)\n template_values = catalog_view_util.fill_template_values(request, **template_values)\n template_values = users_view_util.fill_template_values(request, **template_values)\n return render_to_response('catalog/experiment_run_list.html', template_values)", "def admin_evaluate_reports(request):\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n \n admin = auth.get_user(request)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelmember in PanelMember.objects.filter(Q(status = 'F') | Q(status = 'Z')).filter(feedback_at = 'A'):\n thesis = panelmember.thesis\n dict = {}\n dict['title'] = thesis.title\n dict['student_full_name'] = thesis.student.first_name + \" \" + thesis.student.last_name\n dict['report'] = panelmember.feedback_with_referee_details\n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n dict['referee_name'] = panelmember.referee.user.first_name + ' ' + panelmember.referee.user.last_name\n dict['referee_id'] = panelmember.referee.id\n all_thesis.append(dict)\n \n return render(request, 'app/admin/view_finalReports.html', {\n 'title':'Final Reports',\n 'layout_data' : 
get_layout_data(request),\n 'all_thesis' : all_thesis\n })\n elif request.method == \"POST\":\n form = PanelMember2Form(request.POST, request.FILES)\n \n\n thesis = int(request.POST['thesis'])\n referee = int(request.POST['referee'])\n \n if form.is_valid() and validate_pdf(request.FILES['feedback_without_referee_details']):\n referee = Referee.objects.get(id = referee)\n thesis = Thesis.objects.get(id = thesis)\n panelmember = PanelMember.objects.get(thesis = thesis,referee = referee)\n panelmember.feedback_at = 'G'\n \n time = str(datetime.datetime.now())\n timestamp = ''\n for i in time:\n if not (i == ':' or i == '-'):\n timestamp += i\n request.FILES['feedback_without_referee_details'].name = \"Evaluation_Report_\"+thesis.student.user.username+\"_\"+timestamp+\".pdf\"\n \n panelmember.feedback_without_referee_details = request.FILES['feedback_without_referee_details']\n panelmember.save()\n\n total_feedbacks = PanelMember.objects.filter(thesis = thesis, feedback_at = 'G').count()\n if total_feedbacks == thesis.indian_referees_required + thesis.foreign_referees_required:\n _update_student_status(thesis, STATUS_ID_THESIS_FEEDBACKS_RECEIVED) \n\n # send notification to all guide\n send_notification_to_all_guides(admin, thesis, \"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name)\n # email\n subject = \"[Feed Back reports] of the Thesis titled\" + thesis.title\n content = \"<br>Dear Sir/Madam,</br><br></br><br></br>\"+\"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name +'. Please Check the PhD Portal for more details.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n \n email = []\n\n for thesisGuide in ThesisGuide.objects.filter(thesis = thesis):\n receiver = Faculty.objects.get(user = thesisGuide.guide.user)\n email.append(receiver.email)\n\n send_email_task.delay(email, subject, content)\n return redirect(reverse(admin_evaluate_reports))\n else:\n return redirect(reverse(URL_BAD_REQUEST))\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def display_reports(self, layout): # pylint: disable=arguments-differ", "def report(request):\n return render_to_response('application/report.html',\n {'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def retrieve_analysis_report(accession, fields=None, file=None):\n return retrieve_filereport(\n accession=accession,\n result=\"analysis\",\n fields=fields,\n file=file)" ]
[ "0.61894846", "0.60761696", "0.5919785", "0.58525854", "0.5772856", "0.575343", "0.5743181", "0.57404655", "0.5713519", "0.56065595", "0.5573967", "0.5552891", "0.54978424", "0.5465102", "0.5448308", "0.5387445", "0.53673327", "0.5361622", "0.53593826", "0.52845144", "0.5263182", "0.5256736", "0.52389497", "0.5225673", "0.5225627", "0.52242917", "0.52093947", "0.5206881", "0.5195759", "0.51944673" ]
0.6606941
0
This view checks whether the configuration files the user inserted are well formed and returns the response
def check_input_files(request):

    reports = []
    labels = []
    pubmedfiles = []
    concepts = []
    type1 = request.POST.get('type',None)
    username = request.POST.get('username',None)
    # email = request.POST.get('email',None)
    password = request.POST.get('password',None)
    for filename, file in request.FILES.items():
        if filename.startswith('reports'):
            reports.append(file)
        if filename.startswith('pubmed'):
            pubmedfiles.append(file)
        elif filename.startswith('concepts'):
            concepts.append(file)
        elif filename.startswith('labels'):
            labels.append(file)

    jsonDisp = request.POST.get('json_disp','')
    jsonAnn = request.POST.get('json_ann','')
    load_concepts = request.POST.get('exa_concepts',None)
    load_labels = request.POST.get('exa_labels',None)
    jsonResp = check_file(reports,pubmedfiles,labels,concepts,jsonDisp,jsonAnn,username,password,load_concepts,load_labels)
    # print(jsonResp)
    return JsonResponse(jsonResp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_verify(self,\n raw_response: Any,\n *args,\n **kwargs) -> bool:\n pass", "def edit_config_verify(self,\n raw_response: Any,\n *args,\n **kwargs) -> bool:\n pass", "def validate_config():\n\n # diff/sync settings, not including templates (see below)\n nori.setting_check_list('action', ['diff', 'sync'])\n nori.setting_check_type('reverse', bool)\n nori.setting_check_type('bidir', bool)\n nori.setting_check_callbacks('pre_action_callbacks')\n nori.setting_check_callbacks('post_action_callbacks', 1, 1)\n for i, cb_t in enumerate(nori.core.cfg['post_action_callbacks']):\n nori.setting_check_type(('post_action_callbacks', i, 3), bool)\n nori.setting_check_list('source_type', ['generic', 'drupal'])\n nori.setting_check_callable('source_query_func', may_be_none=False)\n nori.setting_check_callable('source_query_defaulter', may_be_none=True)\n nori.setting_check_callable('source_query_validator', may_be_none=False)\n nori.setting_check_callbacks('source_template_change_callbacks')\n nori.setting_check_callbacks('source_global_change_callbacks')\n nori.setting_check_list('dest_type', ['generic', 'drupal'])\n nori.setting_check_callable('dest_query_func', may_be_none=False)\n nori.setting_check_callable('dest_query_defaulter', may_be_none=True)\n nori.setting_check_callable('dest_query_validator', may_be_none=False)\n nori.setting_check_callbacks('dest_template_change_callbacks')\n nori.setting_check_callbacks('dest_global_change_callbacks')\n nori.setting_check_list('template_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['template_mode'] != 'all':\n nori.setting_check_not_empty('template_list')\n for i, t_name in enumerate(nori.core.cfg['template_list']):\n nori.setting_check_type(('template_list', i),\n nori.core.STRING_TYPES)\n nori.setting_check_list('key_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['key_mode'] != 'all':\n nori.setting_check_not_empty('key_list')\n\n # templates: general\n nori.setting_check_not_empty(\n 'templates', types=nori.core.MAIN_SEQUENCE_TYPES\n )\n for i, template in enumerate(nori.core.cfg['templates']):\n nori.setting_check_type(('templates', i), nori.core.MAPPING_TYPES)\n # bogus elements\n for k in template:\n if k not in T_KEYS:\n nori.err_exit(\n \"Warning: cfg['templates'][{0}][{1}] is set\\n\"\n \"(to {2}), but there is no such setting.\" .\n format(i, *map(nori.pps, [k, template[k]])),\n nori.core.exitvals['startup']['num']\n )\n # template name\n nori.setting_check_type(('templates', i, T_NAME_KEY),\n nori.core.STRING_TYPES)\n # multiple-valued value columns?\n nori.setting_check_type(('templates', i, T_MULTIPLE_KEY), bool)\n # source-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_S_QUERY_ARGS_KEY))\n # to-dest transform function\n nori.setting_check_callable(('templates', i, T_TO_D_FUNC_KEY),\n may_be_none=True)\n # source-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_S_NO_REPL_KEY), bool)\n # source-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_S_CHANGE_CB_KEY))\n # dest-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_D_QUERY_ARGS_KEY))\n # to-source transform function\n nori.setting_check_callable(('templates', i, T_TO_S_FUNC_KEY),\n may_be_none=True)\n # dest-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_D_NO_REPL_KEY), bool)\n # dest-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_D_CHANGE_CB_KEY))\n # key mode\n nori.setting_check_list(('templates', i, 
T_KEY_MODE_KEY),\n ['all', 'include', 'exclude'])\n if template[T_KEY_MODE_KEY] != 'all':\n # key list\n nori.setting_check_not_empty(('templates', i, T_KEY_LIST_KEY))\n\n # templates: query-function arguments\n for (sd, t_key, validator_key) in [\n ('s', T_S_QUERY_ARGS_KEY, 'source_query_validator'),\n ('d', T_D_QUERY_ARGS_KEY, 'dest_query_validator')\n ]:\n # args tuple\n args_idx = ('templates', i, t_key)\n args_t = template[t_key]\n # key_cv, value_cv (somewhat)\n for cv_str in ['key_cv', 'value_cv']:\n cv_idx = args_idx + (1, cv_str)\n nori.setting_check_not_empty(\n cv_idx, types=nori.core.MAIN_SEQUENCE_TYPES\n )\n cv_seq = args_t[1][cv_str]\n for j, cv in enumerate(cv_seq):\n nori.setting_check_length(cv_idx + (j, ), 2, 3,\n types=tuple)\n # the rest of the arguments\n nori.core.cfg[validator_key](sd, args_idx, args_t, i)\n\n # reporting settings\n nori.setting_check_list('report_order', ['template', 'keys'])\n # the rest are handled by nori.validate_email_config()", "def _check_config(self):", "def validateConfig(self):\n ## (boolean with the result of the validation, eventual error message)\n return (True, '')", "def getYamlInstructions():\n with open('role_file_template.yaml', 'r') as yamlfile:\n output = yamlfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def _validate_config(self):\n # Simulation ID\n empty_string_check(self._config_dict['@id'])\n \n # Output\n empty_string_check(self._config_dict['output']['@baseDirectory'])\n self._config_dict['output']['@saveInteractionLog'] = parse_boolean(self._config_dict['output']['@saveInteractionLog'])\n self._config_dict['output']['@saveRelevanceJudgments'] = parse_boolean(self._config_dict['output']['@saveRelevanceJudgments'])\n self._config_dict['output']['@trec_eval'] = parse_boolean(self._config_dict['output']['@trec_eval'])\n \n # Topics\n def check_topic(t):\n \"\"\"\n Checks a given topic, t. 
Looks for a topic ID and a valid topic description file.\n \"\"\"\n empty_string_check(t['@id'])\n filesystem_exists_check(t['@filename'])\n filesystem_exists_check(t['@qrelsFilename'])\n \n if '@backgroundFilename' in t: # A background file was specified.\n filesystem_exists_check(t['@backgroundFilename'])\n else:\n t['@backgroundFilename'] = None # No background file was specified.\n \n topics = self._config_dict['topics']['topic']\n \n if type(topics) == list:\n for topic in topics:\n check_topic(topic)\n else:\n check_topic(topics)\n \n # Users\n users = self._config_dict['users']['user']\n \n if type(users) == list:\n for user in users:\n filesystem_exists_check(user['@configurationFile'])\n else:\n filesystem_exists_check(users['@configurationFile'])\n \n # Search Interface\n empty_string_check(self._config_dict['searchInterface']['@class'])\n check_attributes(self._config_dict['searchInterface'])", "def validate_config(self):\n pass", "def validate_config(self):\n pass", "def validate(self, config_json):\n pass", "def validate_configuration_file(self):\n\n with open(self.config_path, \"r+\") as f_config:\n return bool(re.search(get_configuration_file_re(),\n f_config.read()))", "def check():\n data = list(request.files.values())[0].file.read() if len(request.files) else request.body.read()\n return checkView(data, request.params)", "def check_config_file(config_file_name, log_full_filename, log_file_path):\n try:\n if not os.path.isfile(config_file_name):\n # file not exist\n append_log_info(\"Config file not exists\", log_full_filename, log_file_path)\n return []\n else:\n try:\n config_content = []\n with open(config_file_name) as f:\n for line in f:\n comment = False\n try:\n if line.strip()[0] == \"#\":\n comment = True\n except:\n pass\n if not comment:\n line_split = line.split()\n if len(line_split) >= 5:\n config_content.append(line.split())\n return config_content\n except:\n append_log_info(\"Error file opening: \" + config_file_name, log_full_filename, log_file_path)\n return []\n except:\n append_log_info(\"Generic error on config file: \" + config_file_name, log_full_filename, log_file_path)\n return []", "def configure_db(request):\n\n reports = []\n pubmedfiles = []\n areas = []\n labels = []\n concepts = []\n type = request.POST.get('type',None)\n load_concepts = request.POST.get('exa_concepts',None)\n load_labels = request.POST.get('exa_labels',None)\n\n for filename, file in request.FILES.items():\n if filename.startswith('reports'):\n reports.append(file)\n elif filename.startswith('pubmed'):\n pubmedfiles.append(file)\n elif filename.startswith('areas'):\n areas.append(file)\n elif filename.startswith('concepts'):\n concepts.append(file)\n elif filename.startswith('labels'):\n labels.append(file)\n\n jsonDisp = request.POST.get('json_disp',None)\n jsonAnn = request.POST.get('json_ann',None)\n jsonAll = request.POST.get('json_all',None)\n username = request.POST.get('username',None)\n password = request.POST.get('password',None)\n\n msg = configure_data(pubmedfiles,reports,labels,concepts,jsonDisp,jsonAnn,jsonAll,username,password,load_concepts,load_labels)\n if 'message' in list(msg.keys()):\n get_fields_extractable('configure')\n return JsonResponse(msg)", "def output_config() -> Response:\n c = dict(config)\n c['password'] = \"*********\"\n return jsonify(c)", "def configuration():", "def validate(request):\n \n # If no file was passed, send a failed status code.\n if ('file' not in request.FILES):\n response = HttpResponse()\n response.status_code = 400; # Bad 
Request\n response.reason_phrase = (\"No file was passed. File is expected as a \"\n \"parameter named 'file' in a set of form \"\n \"data.\")\n return response\n \n # Read in the file.\n contents = request.FILES['file'].read()\n \n # Check .exe format\n with ExeRawSample(data=contents) as exeSample:\n if (exeSample.validate()):\n return save_sample(request, exeSample, 'exe')\n \n # No valid format was found.\n response = HttpResponse(json.dumps({'Valid': False}))\n return response", "def valid_configuration(self):\n valid = True\n\n if (not self.__config.suffix()) and (self.__config.output_dir() == self.__config.input_dir()):\n print(\"ERROR: output_dir directory cannot be the same as input_dir with an empty suffix!\")\n valid = False\n if not self.__config.public_key():\n print(\"ERROR: public_key not set! Set it through 'pdfworkshop config public_key <your_key>'. \"\n \"A free API key can be obtained from https://developer.ilovepdf.com/\")\n valid = False\n return valid", "def configuration_view(project):\n project_query = Project.select().where(Project.slug == project).first()\n if project_query is None:\n flash(\"invalid project\")\n return redirect(url_for(\"projects\"))\n session[\"project\"] = project_query\n\n g.selected_tab = \"configuration\"\n\n settings = None\n if request.method == \"GET\":\n settings = Anemone.abcfile.parse(path(project_query.path, \"build.abc\"))\n elif request.method == \"POST\":\n configuration_post(project_query, request)\n\n return render_template(\"configure.html\", ssh=open(app.config[\"SSH_PUBLIC\"]).readline(),\n build=settings, unity=app.config[\"UNITY_PATH\"])", "def perform_diff_config_result_page():\n #  Get all fields from form\n module = request.forms.getall('module')\n client = request.forms.getall('client')\n version1 = request.forms.getall('version1')\n version2 = request.forms.getall('version2')\n\n # Build html\n modif = do_ck5050_ini_diff_request(module, client, version1, version2)\n\n # Build template page\n with open(\"./header.html\") as header, open('./config.tpl') as config, open('./footer.html') as footer:\n template_html = header.read() + config.read() + footer.read()\n\n if not modif:\n modif = {}\n\n output = template(template_html, module=module, client=client, version1=version1,\n version2=version2, modif=modif)\n\n return output", "def check_configs(self):\n\n pass", "def _validate_config(self):\n pass", "def check_config(outconfig):\n self.log.info(\"Checking if all the necessary files exist.\")\n\n # Perform necessary checks\n\n log.info(\"All necessary files exist for {} configuration.\".format(outconfig[\"Flavor\"]))\n\n return", "def validate_config(self):\n reference = data_file(\"../config/template/minimum_aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._step_name:\n reference = data_file(\"../config/template/minimum_step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n 
reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._dag_name:\n reference = data_file(\"../config/template/minimum_dag.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)", "def check_auto_presence_for_configuration(request):\n\n report_type = request.GET.get('report_type',None)\n usecase = request.GET.get('usecase',None)\n language = request.GET.get('language',None)\n institute = request.GET.get('institute',None)\n batch = request.GET.get('batch',None)\n languages = ['English','english']\n # print('BATCH',str(batch))\n use = UseCase.objects.get(name=usecase)\n json_resp = {}\n mode = NameSpace.objects.get(ns_id = 'Robot')\n user = User.objects.get(ns_id = mode, username='Robot_user')\n\n if report_type == 'pubmed':\n cursor = connection.cursor()\n cursor.execute(\"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute=%s AND r.language in %s AND r.name = %s AND r.batch = %s\",['Robot','Robot_user','PUBMED',tuple(languages),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n\n elif report_type == 'reports':\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute!=%s AND r.institute = %s AND r.language = %s AND r.name = %s AND r.batch = %s\",\n ['Robot', 'Robot_user', 'PUBMED',str(institute),str(language),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n print(json_resp)\n return JsonResponse(json_resp)", "def check_file(self):\n\n # File manipulation status\n status = {}\n\n # check if the post request has the file part\n if 'datasource' not in self.request.files:\n status['error'] = 'No file part'\n return False, status\n\n file = request.files['datasource']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n #flash('No selected file')\n #return redirect(request.url)\n status['error'] = 'No selected file'\n return False, status\n\n # Get filename\n # Save to local hardrive\n filename = secure_filename(file.filename)\n # file.save(os.path.join(self.kwargs['UPLOAD_FOLDER'], filename))\n is_saved, error = self.save_file(self.kwargs['UPLOAD_FOLDER'], filename, file)\n\n if is_saved:\n # Return filename\n status['filename'] = filename\n return True, status\n else:\n\n # Return error if something wrong\n status['error'] = error\n return False, status", "def _verify_ini(self, config_file=None):\n\n imgur_values = ['ClientID', 'ClientSecret', 'AccessToken', 'RefreshToken']\n mysql_values = ['Host', 'User', 'Password', 'Database']\n missing_values = []\n\n if not 
config_file:\n print(\"No Config Filed Supplied. Aborting\")\n sys.exit(1)\n\n for val in imgur_values:\n if val not in config_file['IMGURAPI']:\n missing_values.append('IMGURAPI: ' + val)\n\n for val in mysql_values:\n if val not in config_file['MYSQL']:\n missing_values.append('MYSQL: ' + val)\n\n if missing_values:\n print('ERROR: ini file is missing required values. \\n Missing Values:')\n for val in missing_values:\n print(val)\n sys.exit(1)", "def validate_config(self):\n config = self.config\n\n # which doc types are enabled\n need_at_least_one = ['GOOGLE_DRIVE_ENABLED','GITHUB_ENABLED','DISQUS_ENABLED']\n found_one = False\n for n in need_at_least_one:\n if n in config.keys():\n found_one = True\n break\n if not found_one:\n raise Exception(\"Error: need at least one of: %s\"%(\", \".join(need_at_least_one)))\n\n if 'GOOGLE_DRIVE_ENABLED' in config.keys():\n if config['GOOGLE_DRIVE_ENABLED']:\n if 'GOOGLE_DRIVE_CREDENTIALS_FILE' in config.keys():\n if os.path.basename(config['GOOGLE_DRIVE_CREDENTIALS_FILE']) != 'credentials.json':\n raise Exception(\"Error: the file specified with GOOGLE_DRIVE_CREDENTIALS_FILE in the config file must have a filename of 'credentials.json'\")", "def check_config(cfg):", "def check_file():\n #print('request=', request)\n #print('request.data=', request.data)\n #print('request.form=', request.form)\n #print('request.files=', request.files)\n #print('request.json=', request.json)\n qdata = None\n adata = None\n Q = None\n A = None\n if request.json:\n qdata = request.json.get('Q')\n adata = request.json.get('A')\n if 'Qfile' in request.files:\n qdata = request.files['Qfile'].read().decode('utf-8')\n if 'Afile' in request.files:\n adata = request.files['Afile'].read().decode('utf-8')\n\n #print('qdata\\n', qdata)\n #print('adata\\n', adata)\n try:\n if qdata:\n Q = adc2019.read_Q(qdata)\n if adata:\n A = adc2019.read_A(adata)\n if Q is None and A is None:\n return jsonify({'check_file': 'No data'})\n if Q is None:\n return jsonify({'check_file': 'A-ok'})\n if A is None:\n return jsonify({'check_file': 'Q-ok'})\n\n info = adc2019.check_data(Q, A)\n #print(info)\n info2 = info.copy()\n for k in ['count', 'corner', 'line_length', 'line_corner', 'ban_data_F']:\n info2[k] = str(info2[k])\n info2['check_file'] = 'ok'\n return jsonify(info2)\n except Exception as e:\n #traceback.print_exc()\n errinfo = ['ADC2019 rule violation'] + [str(i) for i in e.args]\n info = {'error': errinfo, 'stack_trace': traceback.format_exc()}\n return jsonify(info)\n\n return jsonify({'check_file': 'ok',\n 'value': 1234567,\n 'msg': '生麦生米生卵'})" ]
[ "0.59801537", "0.5930928", "0.59076905", "0.58123827", "0.5767755", "0.5741729", "0.5710974", "0.5606", "0.5606", "0.5601504", "0.5598409", "0.5596937", "0.5566562", "0.55586517", "0.5556762", "0.5550772", "0.55330944", "0.55282456", "0.5525261", "0.5514157", "0.5479723", "0.54766756", "0.5447429", "0.54236", "0.54123354", "0.54015875", "0.5397892", "0.53914773", "0.53707117", "0.5367241" ]
0.61117023
0
This view checks whether the files inserted by the user to update the configuration are well formed
def check_files_for_update(request):

    reports = []
    pubmedfiles = []
    labels = []
    concepts = []
    type1 = request.POST.get('type',None)
    for filename, file in request.FILES.items():
        if filename.startswith('reports'):
            reports.append(file)
        if filename.startswith('pubmed'):
            pubmedfiles.append(file)
        elif filename.startswith('concepts'):
            concepts.append(file)
        elif filename.startswith('labels'):
            labels.append(file)

    jsonDisp = request.POST.get('json_disp',None)
    jsonAnn = request.POST.get('json_ann',None)
    jsonDispUp = request.POST.get('json_disp_update',None)
    jsonAnnUp = request.POST.get('json_ann_update',None)
    load_concepts = request.POST.get('exa_concepts',None)
    load_labels = request.POST.get('exa_labels',None)
    msg = check_for_update(type1,pubmedfiles,reports,labels,concepts,jsonDisp,jsonAnn,jsonDispUp,jsonAnnUp,load_concepts,load_labels)
    jsonResp = {'message':msg}
    return JsonResponse(jsonResp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_config(self):\n try:\n config_metadata = self.dbc.get_metadata(\"config.txt\")\n except rest.ErrorResponse:\n print str(datetime.datetime.now()) \\\n + \": No config.txt in Dropbox directory. Exiting.\"\n sys.exit()\n if config_metadata[\"modified\"] != self.config_date:\n print str(datetime.datetime.now()) + \": Config changed\"\n self.config_date = config_metadata[\"modified\"]\n try:\n self.dbc.get_file(\"config.txt\")\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) + e.reason\n return False\n self.config.reload(self.local_directory + \"/\" + \"config.txt\")\n return True\n return False", "def edited_file_locations(self):", "def check_line_edits_and_refresh_filestate(self):\r\n\t\t# line edit changes (other places where filestate is updated: browse button clicks, ok click)\r\n\t\tif self.source_img_entry.isModified():\r\n\t\t\tself.filestate.set_source_img_filename(self.source_img_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.existing_case and self.source_db_entry.isModified():\r\n\t\t\tself.filestate.set_source_db_filename(self.source_db_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.sink_dir_entry.isModified():\r\n\t\t\tself.filestate.set_sink_dir_name(self.sink_dir_entry.text().replace(\"\\\\\", \"/\"))", "def _check_config(self):", "def check_paths(self):\r\n\t\tself.check_line_edits_and_refresh_filestate()\r\n\t\t# paths\r\n\t\tsource_img_filename = self.source_img_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_dir_name = self.sink_dir_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_db_name_entry_text = self.sink_db_name_entry.text()\r\n\t\tdb_ext = \".db\" if not sink_db_name_entry_text.lower().endswith(\".db\") else \"\"\r\n\t\tsink_db_filename = os.path.join(sink_dir_name, sink_db_name_entry_text + db_ext).replace(\"\\\\\", \"/\")\r\n\t\tsource_db_filename = \"\"\r\n\r\n\t\t# check validity\r\n\t\tsource_img_filename_valid = self.filestate.is_valid(source_img_filename, SOURCE_IMG)\r\n\t\tsink_dir_name_valid = self.filestate.is_valid(sink_dir_name, SINK_DIR)\r\n\t\tsink_db_filename_valid = self.filestate.is_valid(sink_db_filename, SINK_DB)\r\n\t\tsource_db_filename_valid = True\r\n\r\n\t\tall_paths_valid = source_img_filename_valid and sink_dir_name_valid and sink_db_filename_valid\r\n\r\n\t\tif self.existing_case:\r\n\t\t\tsource_db_filename = self.source_db_entry.text()\r\n\t\t\tsource_db_filename_valid = self.filestate.is_valid(source_db_filename, SOURCE_DB)\r\n\t\t\tall_paths_valid = all_paths_valid and source_db_filename_valid\r\n\r\n\t\tif all_paths_valid:\r\n\t\t\tself.filestate.set_source_img_filename(source_img_filename)\r\n\t\t\tself.filestate.set_sink_dir_name(sink_dir_name)\r\n\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\tif self.existing_case:\r\n\t\t\t\tself.filestate.set_source_db_filename(source_db_filename)\r\n\t\t\tself.refresh_UI()\r\n\t\t\treturn True\r\n\r\n\t\t# in the case of invalidity\r\n\t\tif not source_img_filename_valid:\r\n\t\t\tif not self.filestate.source_img_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file at does not exist.\")\r\n\t\t\telif not self.filestate.source_img_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file type is invalid (must be .npy).\")\r\n\t\t\tself.filestate.set_source_img_filename(\"\")\r\n\t\tif not source_db_filename_valid: # only if existing case\r\n\t\t\tif not self.source_db_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file does not 
exist.\")\r\n\t\t\telif not self.filestate.source_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file type is invalid (must be .db)\")\r\n\t\t\tself.filestate.set_source_db_filename(\"\")\r\n\t\tif not sink_dir_name_valid:\r\n\t\t\tif not self.filestate.sink_dir_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory does not exist.\")\r\n\t\t\telif not self.sink_dir_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory format is invalid.\")\r\n\t\t\tself.filestate.set_sink_dir_name(\"\")\r\n\t\tif not sink_db_filename_valid:\r\n\t\t\tif sink_dir_name_valid and not self.filestate.sink_db_file_preexists and \\\r\n\t\t\t\t\tself.filestate.sink_db_file_format_valid and \\\r\n\t\t\t\t\tdisplay_yes_no_message(self, \"Create file at \" + sink_db_filename + \"?\"):\r\n\t\t\t\t# create file with read write permissions\r\n\t\t\t\t###########################################\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsink_db_file = open(sink_db_filename, \"w+\")\r\n\t\t\t\t\tsink_db_file.close()\r\n\t\t\t\texcept IOError as error:\r\n\t\t\t\t\tdisplay_warning_message(self, \"Failed to create provided sink database file: \" + error)\r\n\t\t\t\t###########################################\r\n\t\t\t\t# set sink db filename\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\t\t\tself.refresh_UI()\r\n\t\t\t\t\treturn True\r\n\t\t\telif not self.filestate.sink_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Be sure to specify a name for the sink database.\")\r\n\t\t\tself.filestate.set_sink_db_filename(\"\")\r\n\r\n\t\t# print(\"paths invalid\")\r\n\t\tself.refresh_UI()\r\n\t\treturn False", "def validate_config():\n\n # diff/sync settings, not including templates (see below)\n nori.setting_check_list('action', ['diff', 'sync'])\n nori.setting_check_type('reverse', bool)\n nori.setting_check_type('bidir', bool)\n nori.setting_check_callbacks('pre_action_callbacks')\n nori.setting_check_callbacks('post_action_callbacks', 1, 1)\n for i, cb_t in enumerate(nori.core.cfg['post_action_callbacks']):\n nori.setting_check_type(('post_action_callbacks', i, 3), bool)\n nori.setting_check_list('source_type', ['generic', 'drupal'])\n nori.setting_check_callable('source_query_func', may_be_none=False)\n nori.setting_check_callable('source_query_defaulter', may_be_none=True)\n nori.setting_check_callable('source_query_validator', may_be_none=False)\n nori.setting_check_callbacks('source_template_change_callbacks')\n nori.setting_check_callbacks('source_global_change_callbacks')\n nori.setting_check_list('dest_type', ['generic', 'drupal'])\n nori.setting_check_callable('dest_query_func', may_be_none=False)\n nori.setting_check_callable('dest_query_defaulter', may_be_none=True)\n nori.setting_check_callable('dest_query_validator', may_be_none=False)\n nori.setting_check_callbacks('dest_template_change_callbacks')\n nori.setting_check_callbacks('dest_global_change_callbacks')\n nori.setting_check_list('template_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['template_mode'] != 'all':\n nori.setting_check_not_empty('template_list')\n for i, t_name in enumerate(nori.core.cfg['template_list']):\n nori.setting_check_type(('template_list', i),\n nori.core.STRING_TYPES)\n nori.setting_check_list('key_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['key_mode'] != 'all':\n nori.setting_check_not_empty('key_list')\n\n # templates: general\n nori.setting_check_not_empty(\n 'templates', 
types=nori.core.MAIN_SEQUENCE_TYPES\n )\n for i, template in enumerate(nori.core.cfg['templates']):\n nori.setting_check_type(('templates', i), nori.core.MAPPING_TYPES)\n # bogus elements\n for k in template:\n if k not in T_KEYS:\n nori.err_exit(\n \"Warning: cfg['templates'][{0}][{1}] is set\\n\"\n \"(to {2}), but there is no such setting.\" .\n format(i, *map(nori.pps, [k, template[k]])),\n nori.core.exitvals['startup']['num']\n )\n # template name\n nori.setting_check_type(('templates', i, T_NAME_KEY),\n nori.core.STRING_TYPES)\n # multiple-valued value columns?\n nori.setting_check_type(('templates', i, T_MULTIPLE_KEY), bool)\n # source-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_S_QUERY_ARGS_KEY))\n # to-dest transform function\n nori.setting_check_callable(('templates', i, T_TO_D_FUNC_KEY),\n may_be_none=True)\n # source-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_S_NO_REPL_KEY), bool)\n # source-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_S_CHANGE_CB_KEY))\n # dest-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_D_QUERY_ARGS_KEY))\n # to-source transform function\n nori.setting_check_callable(('templates', i, T_TO_S_FUNC_KEY),\n may_be_none=True)\n # dest-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_D_NO_REPL_KEY), bool)\n # dest-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_D_CHANGE_CB_KEY))\n # key mode\n nori.setting_check_list(('templates', i, T_KEY_MODE_KEY),\n ['all', 'include', 'exclude'])\n if template[T_KEY_MODE_KEY] != 'all':\n # key list\n nori.setting_check_not_empty(('templates', i, T_KEY_LIST_KEY))\n\n # templates: query-function arguments\n for (sd, t_key, validator_key) in [\n ('s', T_S_QUERY_ARGS_KEY, 'source_query_validator'),\n ('d', T_D_QUERY_ARGS_KEY, 'dest_query_validator')\n ]:\n # args tuple\n args_idx = ('templates', i, t_key)\n args_t = template[t_key]\n # key_cv, value_cv (somewhat)\n for cv_str in ['key_cv', 'value_cv']:\n cv_idx = args_idx + (1, cv_str)\n nori.setting_check_not_empty(\n cv_idx, types=nori.core.MAIN_SEQUENCE_TYPES\n )\n cv_seq = args_t[1][cv_str]\n for j, cv in enumerate(cv_seq):\n nori.setting_check_length(cv_idx + (j, ), 2, 3,\n types=tuple)\n # the rest of the arguments\n nori.core.cfg[validator_key](sd, args_idx, args_t, i)\n\n # reporting settings\n nori.setting_check_list('report_order', ['template', 'keys'])\n # the rest are handled by nori.validate_email_config()", "def refresh(self):\n hasChanged = self.hasChanged()\n if hasChanged: self.loadIni()\n if len(self.loadFiles) > 255:\n del self.loadFiles[255:]\n self.safeSave()\n return hasChanged", "def checkFiles(self):\n if self.user[\"Save\"] != \"\":\n self.of_exist = os.path.exists(os.path.join(self.user[\"Save\"], \"Of\"))\n self.back_of_exist = os.path.exists(\n os.path.join(self.user[\"Save\"], \"Back_Of\")\n )\n self.img_exist = os.path.exists(os.path.join(self.user[\"Save\"], \"Images\"))\n self.depth_exist = os.path.exists(os.path.join(self.user[\"Save\"], \"Depth\"))\n\n self.object_detection_dir_exist = os.path.exists(\n os.path.join(self.user[\"Save\"], \"ObjectDetection\")\n )\n\n self.gt_exist = self.user[\"GT\"] != \"\"\n\n self.create_super_pixel_label = (\n self.super_pixel_method != \"\"\n and not os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n )\n\n self.ui.c_crash_plot_video.setEnabled(self.ui.c_crash_plot.isChecked())\n self.ui.t_low.setEnabled(not 
self.ui.c_optimize.isChecked())\n self.ui.t_high.setEnabled(not self.ui.c_optimize.isChecked())\n self.ui.c_optimize.setEnabled(self.gt_exist)\n self.ui.c_error_plot.setEnabled(self.gt_exist)\n self.ui.c_error_plot_video.setEnabled(self.ui.c_error_plot.isChecked())\n self.ui.c_speed_plot_video.setEnabled(self.ui.c_speed_plot.isChecked())\n self.ui.c_super_pixel_video.setEnabled(\n self.ui.combo_superpixel.currentIndex() != 0\n )\n self.ui.c_csv.setEnabled(self.ui.c_error_plot.isChecked())\n\n if self.runRequirements():\n self.ui.b_run.setEnabled(True)\n else:\n self.ui.b_run.setEnabled(False)", "def is_update_success():\n file_success = os.getcwd() + \"\\\\last_success.txt\"\n if os.path.exists(file_success):\n return True\n else:\n return False", "def _check_pofiles_content(self):\n\n # The list of invalid chars is specific to Catalan language\n invalid_chars = {u'á', u'ñ', u'ë', u'ù', u'â', u'ê', u'î', u'ô', u'û',\n u'ë', u'ÿ', u'ä', u'ö'}\n\n try:\n\n THRESHOLD_PERCENTAGE = 1\n findFiles = FindFiles()\n for filename in findFiles.find(self.temp_dir, \"*.po\"):\n poFile = pofile(filename)\n\n invalid = 0\n for entry in poFile:\n # Only localized segments. Skips developers names,\n # untranslated country names, etc\n if entry.msgid == entry.msgstr:\n continue\n\n for char in entry.msgstr.lower():\n if char in invalid_chars:\n invalid = invalid + 1\n\n if len(poFile) > 100 and invalid > 0:\n percentage = 100.0 * invalid / len(poFile)\n if percentage > THRESHOLD_PERCENTAGE:\n self.errors = self.errors + 1\n print \"Unsual number of invalid chars at {0} ({1}%)\".\\\n format(filename, str(percentage))\n\n except Exception as detail:\n print detail", "def _check_if_cff_file_needs_rewriting(self, content):\n logger.info(\"Checking if we can re-use injection config file...\")\n if os.path.isfile(self.config_file_name) is False:\n logger.info(\"...no config file {} found.\".format(self.config_file_name))\n return True\n else:\n logger.info(\n \"...OK: config file {} already exists.\".format(self.config_file_name)\n )\n\n with open(self.config_file_name, \"r\") as f:\n file_content = f.read()\n if file_content == content:\n logger.info(\n \"...OK: file contents match, no update of {} required.\".format(\n self.config_file_name\n )\n )\n return False\n else:\n logger.info(\n \"...file contents unmatched, updating {}.\".format(\n self.config_file_name\n )\n )\n return True", "def lint(self, code, view_has_changed):\n\n vls = self.settings\n ext_setting = vls.get('extension', [])\n if len(ext_setting) > 0:\n ext = os.path.splitext(self.filename)[1].lower()\n if ext not in ext_setting:\n return []\n\n return super(Verilator, self).lint(code, view_has_changed)", "def updateWidgets(self):\n super(AdminRulesForm, self).updateWidgets()\n available_templates = getUtility(IVocabularyFactory, name='collective.imagetags.templates')(self.context)\n skins = getToolByName(self.context, 'portal_skins')\n path = skins.getSkinPath(skins.getDefaultSkin())\n paths = [i.strip() for i in path.split(',')]\n include = False\n improved_templates = []\n for template in available_templates.by_value:\n # If template directory is available and (is before 'plone_content' or 'plone_content' isn't available)...\n include = (template in paths and 'plone_content' in paths and paths.index(template)<paths.index('plone_content')) or \\\n (template in paths and not 'plone_conent' in paths)\n \n # ... 
then check it\n if include:\n term = available_templates.getTerm(template)\n improved_templates.append(term.token)\n\n for template in self.widgets['improved_templates'].items:\n template['checked'] = template['value'] in improved_templates", "def check_entry(self, controller, entries, list_of_project_info, error_label):\r\n\r\n for x in range(0, len(entries)):\r\n if entries[x].get() == \"\":\r\n messagebox.showerror(\"Error\", \"Expected no empty fields\")\r\n return\r\n if not entries[2].get().isalpha():\r\n messagebox.showerror(\"Error\", \"Expected column in letter not number, e.g. 'B' \")\r\n return\r\n name_col = self.col_to_num(entries[2].get())\r\n self.write_to_indata(entries)\r\n\r\n list_error,error_present = [], []\r\n list_error = controller.start_config(entries, name_col, list_error, list_of_project_info)\r\n if len(list_error) == 0:\r\n message = \"Successfully generated all state files\"\r\n error_present.append(message)\r\n error_label.config(text=\"Successfully generated all state files\")\r\n else:\r\n for element in list_error:\r\n if element.error_type == \"1\": # error in loop_trough_row\r\n message = \"expected error in excel spreadsheet at row\" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"2\": #filname missing\r\n message = \"expected error in file \" + str(element.file_name)+ \"\\n\"\r\n elif element.error_type == \"3\": # Filename error\r\n message = \"expected error in file name at row \" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"4\": # \"Seems like error in 1:st or 3:rd line in excel sheet\"\r\n message = \"expected error in excel spreadsheet on 1:st or 3:rd row \" + \"\\n\"\r\n error_present.append(message)\r\n error_report = open(\"error_report.txt\", \"w+\")\r\n error_report.write(''.join(error_present))\r\n error_report.close()\r\n error_label.config(text=\"Error occured, check error report in \"+ entries[1].get())\r\n # error_label.config(text=(''.join(error_present)))\r", "def edit_config_verify(self,\n raw_response: Any,\n *args,\n **kwargs) -> bool:\n pass", "def on_save(self):\r\n #new_config = ConfigParser.RawConfigParser()\r\n cur_config = self.config.dict_config\r\n #\r\n # update the dict_config\r\n cur_config[\"access_restriction\"][\"ips\"] = self.text_ips.get(1.0, tk.END).strip()\r\n cur_config[\"access_restriction\"][\"ar_url\"] = self.entry_url.get().strip()\r\n #\r\n cur_config[\"email\"][\"relay_server_host\"] = self.entry_server_host.get().strip()\r\n cur_config[\"email\"][\"relay_server_port\"] = self.entry_server_port.get().strip()\r\n cur_config[\"email\"][\"email_from\"] = self.entry_from.get().strip()\r\n cur_config[\"email\"][\"recipients\"] = self.text_recipients.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_enabled_subject\"] = self.entry_enabled_subject.get().strip()\r\n cur_config[\"email\"][\"ar_enabled_body\"] = self.text_enabled_body.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_disabled_subject\"] = self.entry_disabled_subject.get()\r\n cur_config[\"email\"][\"ar_disabled_body\"] = self.text_disabled_body.get(1.0, tk.END).strip()\r\n\r\n #self.action.save_config()\r\n # # sync dict_config to the gui\r\n # for section in self.config.dict_config:\r\n # new_config.add_section(section)\r\n # for item in self.config.dict_config[section]:\r\n # new_config.set(section, item, self.config.dict_config[section][item])\r\n # #\r\n # # saving to a file\r\n # with open(self.config.file_path, 'w') as newconfigfile:\r\n # new_config.write(newconfigfile)\r\n #\r\n # # 
mbox.showinfo(\"Information\",\r\n # # \"Current configuration has been successfully saved to '%s'\" % os.path.basename(self.configfile))\r\n # self.console.debug(\"Configuration has been saved to '%s'\" % self.config.file_path)\r", "def checkFilename(self):\r\n \r\n #all this should be in the view\r\n\r\n print(\"working directory \", self.path) \r\n print(\"If you'd like to use another directory/folder, please include the full path with the filename.\")\r\n #should i let users change working directory or just put it in the file path\r\n print(\"checking filename \", self.filename)\r\n\r\n if not os.path.isfile(self.filename):\r\n print(\"this is not an existing file\")\r\n createYN = (input(\"create it? y/n \")).upper()\r\n if createYN=='Y':\r\n self.createFile()\r\n self.getHeaderDict()\r\n\r\n else: # create file = NO\r\n headerDict = {} #create an empty dictionary\r\n self.loadDictRow(keystring = '') #this will create keys but not values\r\n\r\n else:\r\n \"\"\"\r\n Check to see if the first row is headers, and second row is Test Router\r\n \"\"\"\r\n print(\"this is an existing file\")\r\n self.getHeaderDict()", "def getAdminData(self):\n if request.args.get('action') == 'upload':\n if request.files:\n ufile = request.files['uploadfile']\n fname = os.path.join(current_app.config.get('PATH_TMP'), ufile.filename)\n ufile.save(fname)\n\n scheduler.add_job(processFile, args=[current_app.config.get('PATH_TMP'), ufile.filename]) # schedule operation\n return \"\"\n\n elif request.args.get('action') == 'uploadchecker':\n if request.files:\n ufile = request.files['uploadfile']\n if not os.path.exists('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename)):\n ufile.save('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename))\n try:\n cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % ufile.filename)\n if isinstance(getattr(cls, cls.__all__[0])(), AlarmFaxChecker):\n return \"ok\"\n except:\n pass\n os.remove('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename))\n return babel.gettext(u'admin.alarms.checkernotvalid')\n return \"\"\n\n elif request.args.get('action') == 'getkeywords':\n for f in [f for f in os.listdir('%s/emonitor/modules/alarms/inc/' % current_app.config.get('PROJECT_ROOT')) if f.endswith('.py')]:\n if f == request.args.get('checker'):\n cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % f)\n variables = getattr(cls, cls.__all__[0])().getDefaultConfig()[u'translations']\n return {u'keywords': \"\\n\".join(getattr(cls, cls.__all__[0])().getDefaultConfig()[u'keywords']), u'variables': variables}\n return \"\"\n\n elif request.args.get('action') == 'alarmsforstate':\n alarms = classes.get('alarm').getAlarms(state=int(request.args.get('state')))\n return render_template('admin.alarms_alarm.html', alarms=alarms)\n\n elif request.args.get('action') == 'alarmsarchive':\n for id in request.args.get('alarmids').split(','):\n classes.get('alarm').changeState(int(id), 3)\n return \"\"", "def check_file(self):\n\n # File manipulation status\n status = {}\n\n # check if the post request has the file part\n if 'datasource' not in self.request.files:\n status['error'] = 'No file part'\n return False, status\n\n file = request.files['datasource']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n #flash('No selected file')\n #return 
redirect(request.url)\n status['error'] = 'No selected file'\n return False, status\n\n # Get filename\n # Save to local hardrive\n filename = secure_filename(file.filename)\n # file.save(os.path.join(self.kwargs['UPLOAD_FOLDER'], filename))\n is_saved, error = self.save_file(self.kwargs['UPLOAD_FOLDER'], filename, file)\n\n if is_saved:\n # Return filename\n status['filename'] = filename\n return True, status\n else:\n\n # Return error if something wrong\n status['error'] = error\n return False, status", "def check_update(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\r\n try:\r\n sock.settimeout(2)\r\n sock.bind(('', 8080))\r\n sock.listen(1)\r\n conn, addr = sock.accept()\r\n except socket.timeout:\r\n return False\r\n sock.settimeout(None)\r\n with conn:\r\n conn.send(configuration)\r\n data = conn.recv(1024)\r\n with open('new_config.json', 'wt') as jsonfile:\r\n json.dump(data, jsonfile)\r\n self.set_new_configuration()", "def update_db(request):\n\n reports = []\n pubmedfiles = []\n labels = []\n concepts = []\n usecase = request.session['usecase']\n type1 = request.POST.get('type', None)\n\n\n for filename, file in request.FILES.items():\n if filename.startswith('reports'):\n reports.append(file)\n if filename.startswith('pubmed'):\n pubmedfiles.append(file)\n elif filename.startswith('concepts'):\n concepts.append(file)\n elif filename.startswith('labels'):\n labels.append(file)\n\n jsonDisp = request.POST.get('json_disp', None)\n jsonAnn = request.POST.get('json_ann', None)\n jsonDispUp = request.POST.get('json_disp_update', '')\n jsonAnnUp = request.POST.get('json_ann_update', '')\n jsonAll = request.POST.get('json_all_update', '')\n load_concepts = request.POST.get('exa_concepts',None)\n load_labels = request.POST.get('exa_labels',None)\n batch = request.POST.get('batch',None)\n #print(batch)\n msg = update_db_util(reports,pubmedfiles,labels,concepts,jsonDisp,jsonAnn,jsonDispUp,jsonAnnUp,jsonAll,load_concepts,load_labels,batch)\n if 'message' in list(msg.keys()):\n keys = get_fields_from_json()\n request.session['fields'] = keys['fields']\n request.session['fields_to_ann'] = keys['fields_to_ann']\n if type1 == 'reports':\n get_fields_extractable('update')\n\n if request.session['mode'] == 'Robot':\n\n # fields = UseCase.objects.get(name=usecase)\n # fields_to_extr = fields.extract_fields\n # request.session['fields_to_ann'] = fields_to_extr\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n request.session['fields_to_ann'] = data['extract_fields'][usecase]\n\n return JsonResponse(msg)", "def check(self):\n mtime = self.get_mtime()\n if mtime == self.mtime:\n return False\n if mtime is None:\n log.info(\"Option file disappeared\")\n elif self.mtime is None:\n log.info(\"Option file loaded for first time\")\n else:\n log.info(\"Option file was modified {age} ago, passed time={passed}\".format(\n age=timedelta(seconds=time() - mtime),\n passed=timedelta(seconds=mtime - self.mtime)\n ))\n self.mtime = mtime\n self.values = self.read_content()\n return True", "def dirty(self) -> bool:\n return len(self.detect_changed_files()) != 0", "def update(self):\n \n dbpath, config = self._start()\n \n self.config.obo = check_file(config.obo, dbpath, \"obo\") \n 
desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=True) \n phen_file = check_file(config.model_phenotypes, dbpath,\n \"model_phenotypes\", allow_none=True)\n \n summary = self._update(desc_file, phen_file) \n if len(summary[\"incorrect_ids\"]) == 0 and not config.skip_compute:\n self._compute(models=summary[\"new_phenotypes\"])\n \n self._end()", "def db_update_files():\n _populate_table_files(File)\n _populate_table_files(Software)\n return redirect(url_for('view_index'))", "def isApplied(self):\n return self.file in settings['mosh.resourceReplacer.applied']", "def valid(self):\n return (self.get(\"~#mtime\", 0) and\n self[\"~#mtime\"] == util.mtime(self[\"~filename\"]))", "def perform_diff_config_result_page():\n #  Get all fields from form\n module = request.forms.getall('module')\n client = request.forms.getall('client')\n version1 = request.forms.getall('version1')\n version2 = request.forms.getall('version2')\n\n # Build html\n modif = do_ck5050_ini_diff_request(module, client, version1, version2)\n\n # Build template page\n with open(\"./header.html\") as header, open('./config.tpl') as config, open('./footer.html') as footer:\n template_html = header.read() + config.read() + footer.read()\n\n if not modif:\n modif = {}\n\n output = template(template_html, module=module, client=client, version1=version1,\n version2=version2, modif=modif)\n\n return output", "def parse_upload(self) -> bool:\n if 'SUCCESS' not in self.upload_output:\n # Fatal Error\n self.warnings = [self.upload_output]\n return False\n self.warnings = self.upload_output.strip().split(\"\\n\")[0:-1]\n self.tree_id = self.upload_output.strip().split(\"\\n\")[-1].split()[1]\n return True", "def parse_files_and_set_flags(self):\n change_requires_product_plus1 = False\n sensitive_file_touched = {}\n try:\n files_contents = self.github.get_files()\n LOG.info(\"**** Reading files ****\")\n for item in files_contents:\n file_path = item[\"filename\"]\n if any(x in str(file_path) for x in self.pr.config.sensitiveFiles):\n sensitive_file_touched[\"is_found\"] = True\n sensitive_file_touched[\"file_name\"] = str(file_path)\n if item[\"filename\"].find(self.pr.config.productPlusRequiredDirPattern) != -1:\n LOG.info(\"product change found marking ui_change to True\")\n change_requires_product_plus1 = True\n # break\n except PRFilesNotFoundException, e:\n LOG.exception(e)\n return sensitive_file_touched, change_requires_product_plus1" ]
[ "0.5990421", "0.5856733", "0.58000755", "0.5700677", "0.5700643", "0.5694012", "0.56602377", "0.5652492", "0.5576875", "0.5533964", "0.55261713", "0.5525095", "0.54940057", "0.5441861", "0.54376626", "0.54344094", "0.5424616", "0.5412756", "0.54043454", "0.53829175", "0.5370786", "0.53690016", "0.53526", "0.5352057", "0.5346276", "0.53272337", "0.53267664", "0.5325472", "0.5305637", "0.5298142" ]
0.6431848
0
This view returns the list of all the distinct keys present in the JSON reports. It is called during configuration.
def get_keys(request): keys=[] reports = Report.objects.all().exclude(institute = 'PUBMED') for report in reports: json_rep = report.report_json for el in json_rep.keys(): if el not in keys: keys.append(el) json_resp = {'keys':keys} return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AllKeys(self) -> _n_0_t_1[str]:", "def distinct(self, key):\n return self.database.command({'distinct': self.name,\n 'key': key})['values']", "def keys(self) -> KeysView:\n return self._dict.keys()", "def GET(self, key):\n header('Content-Type', 'application/json')\n return dumps(list_values(key=key))", "def pubmed_reports(request):\n\n json_resp = {}\n json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n\n if r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def keys(self):\n return", "def keys(self) -> List:\n pass", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def list_key_values_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection for matching incident_id\n return_json = [] # type: ignore\n context = []\n found = False\n cursor = COLLECTION.find({}, {'_id': False})\n if cursor is None:\n # Collection doesn't exist - thus no records\n return_json = None # type: ignore\n else:\n # Iterate, collecting any name/value pairs associated with the incident\n for i in cursor:\n if incident in i:\n found = True\n return_json.append({\n 'Key': i[incident]['key'],\n 'Value': i[incident]['value']\n })\n context.append({\n 'Incident': incident,\n 'Key': i[incident]['key'],\n 'Value': i[incident]['value']\n })\n\n if not found:\n # Means no records were found with that incident_id\n # Discard empty return_json\n return_json = None # type: ignore\n\n human_readable = tableToMarkdown(f'The key/value paires stored in incident {incident}', return_json)\n ec = {'MongoDB.Incident(val.Key === obj.Key)': context}\n # Return a useful status\n return human_readable, ec, {}", "def list_all_keys(self):\n \n return self.keys", "def keys(self) -> KeysView[str]:\n return self.raw.keys()", "async def keys(self) -> Iterable[str]:", "def get_keys_from_list():\n json_data = request.get_json()\n\n d = dict()\n d['elements'] = list()\n settings.setOptionsFile(get_info('uid'))\n fn = settings.getHistoROOTFileName()\n rfn = settings.getReferenceROOTFileName()\n# open root file stored in the root database\n f = ROOT.TFile(fn)\n# open reference root file stored in the root database\n rf = ROOT.TFile(rfn)\n\n for values in json_data.itervalues():\n for k in values:\n subd = dict()\n subd[\"index\"] = k[\"index\"]\n if fn != k[\"file\"]: \n fn = k[\"file\"]\n settings.setHistoROOTFileName(fn)\n f = ROOT.TFile(fn)\n print \"histogram :>>>>>: \",k[\"histogram\"]\n subd[\"data\"] = eval(cppyy.gbl.getDictionary(f,k[\"histogram\"]))\n if rfn != k[\"referenceFile\"]: \n rfn = k[\"referenceFile\"]\n settings.setReferenceROOTFileName(rfn)\n rf = ROOT.TFile(rfn)\n subd[\"refdata\"] = eval(cppyy.gbl.getDictionary(rf,k[\"reference\"]))\n d['elements'].append(subd)\n\n f.Close()\n rf.Close()\n\n return jsonify(d)", "async def get_keys(self):\n return self.dict.keys()", "def list_user_keys(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/keys\" % self.url_index_name, self.client.timeout)", "def keys(self) -> List[str]:\n raise NotImplementedError", "def medtag_reports(request):\n\n json_resp = {}\n json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n if not r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def 
get_all_keys(self):\n return self.psettings.allKeys()", "def _get_keys(self, listOfKeys):\n return self._keys", "def get_keys(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.keys)", "def get_keys(self):\r\n return self._keys", "def keys(self):\n return self.properties.keys()", "def list(self):\n return list(sorted(self.manager.data[\"dataset\"].keys()))", "def keys(self):\n return self.keys", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def keys(self):\n return [key for key, value in self.items()]", "def dashboard_view(self):\n return AttrDict({\n 'file_histogram': [h for h in self.file_histogram.values()],\n 'files': [f for f in self.files.values()],\n 'nodes': [\n {\n \"type\": \"Project\",\n \"count\": 1\n },\n {\n \"type\": \"Subject\",\n \"count\": len(self.subjects)\n },\n {\n \"type\": \"Samples\",\n # samples is a dict keyed by subject id, sum the len of each subject's sample list\n \"count\": sum([len(sl) for sl in list(self.samples.values())])\n },\n ],\n 'size': sum([f['size']for f in self.files.values()]),\n 'project_id': self.name,\n 'public': self.attributes['public'],\n 'createdDate': self.attributes.workspace.createdDate,\n 'lastModified': self.attributes.workspace.lastModified,\n 'data_type': self.data_type,\n 'data_category': self.data_category,\n 'problems': self.problems\n })", "def keys(self):\n list_all_dict = self.list_all()\n return list_all_dict[\"nodes\"] + list_all_dict[\"groups\"]", "def fetch_all_keys():\n response = TIME_TABLE.scan()\n items = response['Items']\n items.sort(key=lambda x: x['timeStamp'])\n response = ''\n for item in items:\n response = '{0}\\n{1}'.format(response, item)\n return response" ]
[ "0.6015251", "0.5963718", "0.5916041", "0.58546275", "0.5769133", "0.57459795", "0.5695538", "0.5680063", "0.5674234", "0.56573683", "0.56514174", "0.56393325", "0.5621896", "0.55753326", "0.55740577", "0.5571665", "0.55628973", "0.55588096", "0.55510443", "0.55476856", "0.55406773", "0.5535074", "0.5532528", "0.5518681", "0.5506313", "0.5506313", "0.5477848", "0.54746956", "0.5466701", "0.54588324" ]
0.79304016
0
This view returns ALL the ground truths to be downloaded. It can be called only by the admin, and the ground truths returned are those of ALL the users in the platform.
def download_all_ground_truths(request): json_resp = {} json_resp['ground_truth'] = [] cursor = connection.cursor() mode = request.GET.get('gt_mode',None) if mode is None: human = NameSpace.objects.get(ns_id = 'Human') robot = NameSpace.objects.get(ns_id = 'Robot') gt_human = GroundTruthLogFile.objects.filter(ns_id = human) agent = User.objects.get(ns_id = robot,username = 'Robot_user') gt_robot = GroundTruthLogFile.objects.filter(ns_id = robot,username = agent) for el in gt_human: gt_json = el.gt_json if gt_json['gt_type'] == 'concept-mention': gt_json['gt_type'] = 'linking' json_resp['ground_truth'].append(gt_json) for el in gt_robot: gt_json = el.gt_json if gt_json['gt_type'] == 'concept-mention': gt_json['gt_type'] = 'linking' json_resp['ground_truth'].append(gt_json) cursor.execute("SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s",['Robot','Robot_user']) ans = cursor.fetchall() for el in ans: gt_json = json.loads(el[0]) if gt_json['gt_type'] == 'concept-mention': gt_json['gt_type'] = 'linking' json_resp['ground_truth'].append(gt_json) elif mode.lower() == 'automatic': cursor.execute( "SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s", ['Robot', 'Robot_user']) #CAMBIO # cursor.execute( # "SELECT g.gt_json FROM ground_truth_log_file AS g INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.gt_type = gg.gt_type AND g.id_report = gg.id_report AND g.ns_id = gg.ns_id WHERE g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time", # ['Robot', 'Robot_user', 'Robot_user']) ans = cursor.fetchall() for el in ans: gt_json = json.loads(el[0]) if gt_json['gt_type'] == 'concept-mention': gt_json['gt_type'] = 'linking' json_resp['ground_truth'].append(gt_json) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_ground_truths(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path1 = os.path.join(workpath, './static/temp/temp.csv')\n path2 = os.path.join(workpath, './static/BioC/temp_files/to_download.csv')\n if os.path.exists(path1):\n os.remove(path1)\n if os.path.exists(path2):\n os.remove(path2)\n username = request.session['username']\n inst = request.GET.get('institute',None)\n if inst == '':\n inst = None\n else:\n inst = str(inst)\n use = request.GET.get('usec',None)\n if use == '':\n use = None\n else:\n use = str(use)\n report_type = request.GET.get('report_type',None)\n if report_type == '':\n report_type = None\n annotation_mode = request.GET.get('mode',None)\n if annotation_mode == '':\n annotation_mode = None\n lang = request.GET.get('lang',None)\n if lang == '':\n lang = None\n else:\n lang = str(lang)\n batch = request.GET.get('batch','') # added 22/10/2021\n if batch == '' or batch == 'all':\n batch = None\n else:\n batch = int(batch)\n\n all = request.GET.get('all_gt',None)\n action = request.GET.get('action',None)\n format = request.GET.get('format',None)\n json_resp = {}\n json_resp['ground_truth'] = []\n if format == 'json' or all =='all' :\n json_resp = create_json_to_download(report_type,action,username,use,annotation_mode,inst,lang,all,batch)\n return JsonResponse(json_resp)\n\n elif format == 'csv':\n response = HttpResponse(content_type='text/csv')\n resp = create_csv_to_download(report_type,annotation_mode,username,use,inst,lang,action,response,batch)\n return resp\n\n elif format == 'biocxml':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n if report_type == 'pubmed':\n json_keys_to_display = ['year','authors','volume','journal']\n json_keys_to_ann = ['title','abstract']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'xml',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')\n\n elif format == 'biocjson':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'json',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')", "def get_user_ground_truth(request):\n\n user = request.GET.get('user',None)\n action = request.GET.get('action',None)\n mode = request.GET.get('mode',None)\n report = request.GET.get('report',None)\n language = request.GET.get('language',request.session['language'])\n mode_obj = NameSpace.objects.get(ns_id=mode)\n report = Report.objects.get(id_report = report, language = language)\n gt = get_user_gt(user,mode_obj,report,language,action)\n return JsonResponse(gt)", "def show_all_training():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n \n training = Training.query.all()\n\n \n return render_template(\"training_display.html\", training = training)", "def get_gt_list(request):\n\n groundTruths = 0\n json_resp = {}\n username =request.GET.get('username',None)\n ins = request.GET.get('inst',None)\n lang = request.GET.get('lang',None)\n use = request.GET.get('use',None)\n action = request.GET.get('action',None)\n token = 
request.GET.get('token',None)\n reptype = request.GET.get('reptype',None)\n languages = ['English','english']\n annotation_mode = request.GET.get('annotation_mode',None)\n if ins == '':\n ins = None\n if use == '':\n use = None\n if lang == '':\n lang = None\n if reptype == '':\n reptype = 'reports'\n if token == 'all':\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n ns_human = NameSpace.objects.get(ns_id='Human')\n rob_user = User.objects.get(username='Robot_user',ns_id=ns_robot)\n list_gt = GroundTruthLogFile.objects.filter(username = rob_user).count() + GroundTruthLogFile.objects.filter(ns_id=ns_human).count()\n groundTruths = list_gt\n gt_rob = GroundTruthLogFile.objects.filter(ns_id=ns_robot,username = rob_user)\n\n i = 0\n # print(groundTruths)\n for el in gt_rob:\n gts = GroundTruthLogFile.objects.filter(ns_id=ns_robot,gt_type = el.gt_type,id_report = el.id_report_id,language = el.language).exclude(insertion_time = el.insertion_time)\n gts_count = gts.count()\n # print('count: '+str(i)+' '+str(gts.count()))\n i = i+1\n groundTruths = groundTruths + gts_count\n\n\n else:\n with connection.cursor() as cursor:\n if reptype == 'reports':\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s and r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n # CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type = gg.gt_type WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute != %s\",\n # [ins, use, lang, action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n else:\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s and r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n #CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type 
= gg.gt_type WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute = %s\",\n # [use, tuple(languages), action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n # groundTruths = cursor.fetchone()[0]\n\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n\n\n\n\n json_resp['ground_truths'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def all(self) -> list[dict[str, Any]]:\n return self.client.get(self._url())", "def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")", "def get_all(self):\n return self.__fetcher.get_fetched()", "def get(self):\r\n return get_all()", "def get_all(self):\n result_get = GetRest(function = self.function).performRequest()\n return result_get", "def all():\n Refresh.committees()\n Refresh.legislators()\n Refresh.bills()", "def toall_get(self, request):\n _view = _object_view(self, request)\n queried = ToAllChannelPostings(request.params.mixed()).query()\n objs = [request.view(obj) for obj in queried[0]]\n _view.update({\n \"postings\": objs,\n \"result_complete\": queried[1]\n })\n return _view", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def get_covers_ng(request):\n response_data = {}\n\n validation = init_validation(request)\n\n if 'error' in validation:\n return JsonResponse(validation['data'], status=validation['error'])\n\n headers = {'Content-Type': 'application/json'}\n response = requests.get(validation['mongo_url'] + \"/getLatestNg?limit=%s\" % validation['limit'], auth=HTTPBasicAuth(MONGO_API_USER, MONGO_API_PWD), verify=MONGO_SERVER_CERTIFICATE, headers=headers)\n\n status_code = response.status_code\n response_body = response.text\n\n if str(status_code) == \"200\":\n return json.loads(response_body)\n\n response_body = {\"result\": \"failure\", \"message\": response.text, \"status_code\": status_code}\n return json.loads(response_body)", "def download_all_reports(request):\n\n request_body_json = json.loads(request.body)\n report_list = request_body_json['report_list']\n mode = request_body_json['format']\n action = request_body_json['action']\n annot = request_body_json['annotation_mode']\n\n if annot == 'Manual':\n annot = 'Human'\n elif annot == 'Automatic':\n annot = 'Robot'\n\n try:\n response = HttpResponse(content_type='text/csv')\n resp = download_report_gt(report_list, action, annot, mode, response)\n if mode == 'biocxml' or mode == 'biocjson':\n return HttpResponse(resp, content_type='application/xml')\n elif mode == 'csv':\n return resp\n elif mode == 'json':\n return JsonResponse(resp)\n\n except Exception as e:\n print(e)\n json_error = {'error': e}\n return JsonResponse(json_error)", "def download_all(generate=False, reset=False, max_workers=5):\n\n logger.info('starting download for all properties')\n\n for row in 
db.con['gsc_properties'].all():\n download(row['account_name'], row['gsc_property'], reset=reset)\n\n logger.info('finished download for all properties')", "def download(all):\n print(\"Downloading\")", "def list(self):\n response = self.client.get_json(URL_MAPPING)\n response.success = response.status_code == 200\n return response", "def all(self):\n return self.client.request_with_method(Methods.LIST % self.name)['items']", "def get_datasets(request):\n from seed.models import obj_to_dict\n org = Organization.objects.get(pk=request.GET.get('organization_id'))\n datasets = []\n for d in ImportRecord.objects.filter(super_organization=org):\n importfiles = [obj_to_dict(f) for f in d.files]\n dataset = obj_to_dict(d)\n dataset['importfiles'] = importfiles\n if d.last_modified_by:\n dataset['last_modified_by'] = d.last_modified_by.email\n dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(\n import_file__in=d.files,\n canonicalbuilding__active=True,\n ).count()\n dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)\n datasets.append(dataset)\n\n return {\n 'status': 'success',\n 'datasets': datasets,\n }", "def download_all_maps(self):\n return self._download_all_maps_recur()", "def get(self, request):\n odlcs = []\n for user in User.objects.all():\n # Get odlcs which have thumbnail.\n odlcs.extend([\n t for t in Odlc.objects.filter(user=user).all() if t.thumbnail\n ])\n # Sort odlcs by last edit time, convert to json.\n odlcs = [\n t.json(is_superuser=request.user.is_superuser)\n for t in sorted(odlcs, key=lambda t: t.last_modified_time)\n ]\n return JsonResponse(odlcs, safe=False)", "def openGroundTruth(self):\n gt_dir = self.openFile(\n self.user[\"GT\"],\n title=\"Load Ground Truth Data\",\n file_filter=\"Numpy Files (*.npy)\",\n )\n if gt_dir != \"\":\n self.user[\"GT\"] = gt_dir\n self.ui.l_ground_truth.setText(\"Load: \" + self.splitPath(gt_dir)[-1])\n self.checkFiles()", "def get_all(self, criteria, target_id=None):\n query = self.gen_query(criteria, target_id)\n results = query.all()\n return self.derive_url_dicts(results)", "def ui_backgrounds(request):\n out = createBaseResponseObject() \n path = os.path.join(settings.BASE_PATH , \"sketch_ui/static/ui/backgrounds\")\n files = os.listdir(path)\n for f in files:\n out['results'].append(f)\n \n return HttpResponse(json.dumps(out))", "def downloads(request):\n proteins = Protein.objects.all()\n\n # render page\n return render(request, 'downloads.html', {'proteins': proteins})", "def getAll(self):\n result_get = GetRest(function = self.function).performRequest()\n return result_get", "async def getAll():\n return [cluster.export() for cluster in clusters.get_all()]", "def download_all(self):\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n for website in self.website_list:\r\n self.download(website['id'])", "def explore_all_nf_data():\n request = app.current_request\n resource_type = request.query_params[\"resource_type\"]\n offset = int(request.query_params[\"offset\"])\n limit = int(request.query_params[\"limit\"])\n explorer = UnogsExplorer(resource_type)\n success = explorer.explore(limit, offset)\n return {\"success\": success}", "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)" ]
[ "0.6390326", "0.6114309", "0.5874675", "0.5586476", "0.54724544", "0.54573137", "0.538209", "0.5373068", "0.5310844", "0.5264944", "0.5234574", "0.5225719", "0.52199984", "0.52116776", "0.52091205", "0.5126838", "0.5124764", "0.5122737", "0.5103302", "0.51019853", "0.50845915", "0.5083029", "0.50818187", "0.50699353", "0.5065931", "0.5064804", "0.50489", "0.50370157", "0.503081", "0.50294816" ]
0.71700454
0
This view returns the key files of BioC mentions and linking.
def download_key_files(request): workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in path = os.path.join(workpath, './static/BioC/linking.key') path1 = os.path.join(workpath, './static/BioC/mention.key') ment = request.GET.get('type_key',None) if ment == 'mentions': path = open(path1, 'r') return HttpResponse(path, content_type='text/plain') elif ment == 'linking': path1 = open(path, 'r') return HttpResponse(path1, content_type='text/plain')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_auth_data():\n home = str(Path.home())\n path_to_keys = '/Documents/twitter/keys/'\n\n files = [f for f in listdir(home+path_to_keys) if '.DS' not in f]\n\n tokens = []\n for f in files:\n with open(home+path_to_keys+f, 'r') as lines:\n ln = lines.readline().replace(\" \", \"\")\n tokens.append(ln)\n\n auth_data = dict(zip(files, tokens))\n return auth_data", "def get_metadata_keys (args):\n keyfile = args.get(\"keyfile\")\n if (keyfile):\n with open(keyfile, \"r\") as mdkeys_file:\n return mdkeys_file.read().splitlines()\n else:\n return None", "def keys(self):\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']", "def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()", "def ListFiles(bucketname, client, key):\n response = client.list_objects(Bucket=bucketname, Prefix=key)\n for content in response.get('Contents', []):\n yield content.get('Key')", "def _keys(self):\n for name in listdir(abspath(self._path)):\n key, ext = splitext(name)\n if ext == \".pkl\":\n yield key", "async def keys(self) -> Iterable[str]:", "def get_dna_bank_files(request):\n\n user = User.objects.get(username=DEFAULT_USERNAME)\n directory = Directory.objects.get(user=user)\n\n dna_files = []\n\n dna_files_query_set = DNAFile.objects.filter(\n directory=directory, is_available=True)\n\n for dna_file in dna_files_query_set:\n dna_files.append(dna_file.get_file_details())\n\n response = {\"dna_files\": dna_files}\n\n return Response(response, status=HTTP_200_OK)", "def get_users_dna_file_details(request):\n\n user = User.objects.get(username=request.user)\n directory = Directory.objects.get(user=user)\n\n dna_files = []\n\n dna_files_query_set = DNAFile.objects.filter(\n directory=directory, is_available=True)\n\n for dna_file in dna_files_query_set:\n dna_files.append(dna_file.get_file_details())\n\n response = {\"dna_files\": dna_files}\n\n return Response(response, status=HTTP_200_OK)", "def keys(brain):\n obj = brain.getObject()\n return obj.get_full_title()", "def get_files(self):\n\n cur = self.app.conn.cursor()\n sql = \"select distinct case_text.fid, source.name from case_text join source on case_text.fid=source.id where \"\n sql += \"caseid=? 
order by lower(source.name) asc\"\n cur.execute(sql, [self.case['caseid'], ])\n self.casefiles = cur.fetchall()\n sql = \"select id, name, fulltext, mediapath, memo, owner, date, av_text_id from source order by source.name asc\"\n cur.execute(sql)\n self.allfiles = cur.fetchall()\n msg = _(\"Files linked: \") + str(len(self.casefiles)) + \" / \" + str(len(self.allfiles))\n self.ui.label_files_linked.setText(msg)", "def content_list(self):\n return self.face.FACES.files.find({})", "def show_k():\n\n page = request.args.get('page', 1, type=int)\n knowledges_ids = Knowledge.query.order_by(Knowledge.id.asc()).paginate(\n page, current_app.config['PAGE_ITEMS'], False)\n\n k = \"myK000\"\n\n knowledges_list = [(f'{k}{i.id}' if (i.id < 10) else f'{\"myK00\"}{i.id}'\n if(i.id < 100) else f'{\"myK0\"}{i.id}', i.description) for i in knowledges_ids.items]\n\n verK = True\n fileDir = os.path.dirname(os.path.realpath('__file__'))\n\n # me tengo que meter a la ruta base/cyber_role y ejecutar este endpoint\n file_json = 'cyber_role/KSAT_JSON/Knowledges.json'\n\n if not isfile(join(fileDir, file_json)):\n file_json = 'KSAT_JSON/Knowledges.json'\n\n with open(file_json) as file:\n # Obtenemos el json del fichero\n data = json.load(file)\n\n equivalencia_nist = {}\n # ya tenemos el diccionario del nist, original\n values = list(data.values())\n keys = list(data.keys())\n\n for i in knowledges_ids.items:\n if i.description in values:\n equivalencia_nist[i.id] = keys[values.index(i.description)]\n\n\n return render_template('general/ksat.html', title='Knowledges',\n lista_K=knowledges_ids, l_K=knowledges_list,\n l_eq=list(equivalencia_nist.values()), verK=verK)", "def keynames(self):\r\n \r\n infile=open(self._datafile, 'r')\r\n if self._resultfile: self._resultfile.write(\"Keys in datafile: \"+self._datafile+'\\n')\r\n else: print (\"Keys in datafile: \"+self._datafile+'\\n')\r\n for tmpc in infile:\r\n for i in range(0, len(tmpc)):\r\n if tmpc[i:i+1]=='#': break\r\n elif tmpc[i:i+1]==' ':\r\n if self._resultfile: self._resultfile.write(tmpc[0:i]+'\\n')\r\n else: print tmpc[0:i]\r\n break\r\n if self._resultfile: self._resultfile.write(tmpc[0:i]+'\\n')\r\n else: print tmpc[0:i]+'\\n'", "async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)", "def get_ancillary_files(docmeta: DocMetadata) -> List[Dict]:\n return current_session().get_ancillary_files(docmeta)", "def baca_kunci_rsa():\n filename = ambil_file(['key'])\n if filename.endswith('.key'):\n with open(filename,\"rb\") as f:\n kunci = f.readlines()\n return kunci\n else:\n return False", "def keys():", "def getKeys(name = None):\n if name == None:\n name = session.get('name')\n\n keys = hl.getUser(\"Name\",name)[\"Keys\"]\n hl.keyDistributeFlag(name)\n #If on a production server, use actual path\n if os.path.isdir(keys_dir):\n filename = keys_dir + keys + '.ovpn' \n\n #if not os.path.exists(filename):\n # hl.zipUserKeys(keys) \n\n return send_file(filename, as_attachment=True)\n #Else use relative dev path\n else:\n return send_file('static\\\\Test_client1.zip', as_attachment=True)", "def mentee_list_view(request):\n # TODO: this view\n pass", "def fetch_file(index_file, filename):\n with open(index_file, 'r') as index, open(filename, 'w+') as download:\n print 'Fetching keys from ', KEYSERVER, ' to create ', filename\n fetched_file = ''\n 
index_length = len(index.readlines())\n index.seek(0) # because python is stupid\n counter = 0\n for key in index.readlines():\n print 'Fetching key ', counter, ' of ', index_length\n counter = counter + 1\n fetched_file = fetched_file + parse_key(key.rstrip('\\n'))\n print 'All keys have been downloaded'\n download.write(base64.b64decode(fetched_file))\n print 'File has been decoded and saved as ', filename", "def bmark_list(request):\r\n # Removed because view was deprecated\r\n return bmarks.recent(request)", "def mentor_list_view(request):\n # TODO: this view\n pass", "def readable(request):\r\n rdict = request.matchdict\r\n bid = rdict.get('hash_id', None)\r\n username = rdict.get('username', None)\r\n if username:\r\n username = username.lower()\r\n\r\n if bid:\r\n found = BmarkMgr.get_by_hash(bid, username=username)\r\n if found:\r\n return {\r\n 'bmark': found,\r\n 'username': username,\r\n }\r\n else:\r\n return HTTPNotFound()", "def show_kml_list():\n out = []\n\n for filename in os.listdir(settings.KML_OUTPUT_DIR):\n path = os.path.join(settings.KML_OUTPUT_DIR, filename)\n if os.path.isdir(path):\n continue\n f = open(path)\n content = f.read(300)\n f.close()\n name = KML_NAME_RE.search(content)\n if not name:\n continue\n out.append((name.group(1), filename))\n\n return {'items': sorted(out, cmp=lambda a, b: dumb_czech_cmp(a, b)), 'MEDIA_URL': settings.MEDIA_URL}", "def get_keys_from_csv_update(request):\n\n reports = []\n json_resp = {}\n for filename, file in request.FILES.items():\n if filename.startswith('reports'):\n reports.append(file)\n elif filename.startswith('pubmed'):\n reports.append(file)\n\n keys,uses = get_keys_csv_update(reports)\n json_resp['keys'] = keys\n json_resp['uses'] = list(uses)\n # print('CHIAVI',keys)\n return JsonResponse(json_resp)", "def get(self):\n client = ManagePsb(credentials, databaseName)\n projection = {\n 'imageId': 0,\n \"_id\": 0\n }\n cursor = client.Filter(collection, Projection=projection)\n info = list(cursor)\n newInfo = ManageKeys(info)\n return newInfo.LikeJson()", "def getmentioningobjs(idfindex, idfobject):\n idf, edges = eppystuff.an_idfedges(idfindex)\n mentioningobjs = idf_helpers.getanymentions(idf, idfobject)\n keys = [mentioningobj.key for mentioningobj in mentioningobjs] \n objnames = [mentioningobj.obj[1] for mentioningobj in mentioningobjs] \n idfkeys = idf_helpers.idfobjectkeys(idf)\n keysobjsindexes = [(idfkeys.index(mentioningobj.key.upper()), \n idf.idfobjects[mentioningobj.key.upper()].index(mentioningobj))\n for mentioningobj in mentioningobjs] \n urls = [\"../../%s/%s\" % (idfkey, objkey) \n for idfkey, objkey in keysobjsindexes]\n urllinks = ['<a href=%s>%s</a>' % (url, name) \n for url, name in zip(urls, objnames)]\n lines = [\"%s->%s\" % (mentioningobj.key, urllink) \n for mentioningobj, urllink in zip(mentioningobjs, urllinks)]\n return ', '.join(lines)", "def keys(self) -> KeysView[str]:\n return self.raw.keys()", "def get_list_of_files_contoller(message):\n chat_id = message.chat.id\n user_id = message.from_user.id\n if db.files.count_documents({\"user_id\": user_id}) > 0:\n list_ = db.files.find({\"user_id\": user_id})\n markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)\n for file in list_:\n markup.add(\n telebot.types.KeyboardButton(\n text=f'/get_file {file[\"id\"]} {file[\"type\"]}'\n )\n )\n text = \"Please choose file:\"\n bot.reply_to(message, text, reply_markup=markup)\n else:\n text = \"Files not found\"\n bot.reply_to(message, text)" ]
[ "0.5546954", "0.5437735", "0.5268663", "0.5253988", "0.52061164", "0.52018476", "0.5182864", "0.51804817", "0.5095209", "0.5094157", "0.50905", "0.5058644", "0.5009201", "0.50003994", "0.49914286", "0.4987616", "0.49603057", "0.49587998", "0.4955711", "0.49460372", "0.49432445", "0.48929042", "0.4891522", "0.4888082", "0.4880371", "0.48646966", "0.48572385", "0.48317215", "0.4823677", "0.4809947" ]
0.7325465
0
This view returns the last ground truth created by the user for the session's parameters
def get_last_gt(request): username = request.session['username'] mode1 = request.session['mode'] mode = NameSpace.objects.get(ns_id=mode1) language = request.session['language'] usecase = request.session['usecase'] institute = request.session['institute'] batch = request.session['batch'] jsonDict = {} token = request.GET.get('configure',None) if token is None: gt_json = get_last_groundtruth(username,None,None,None,mode,batch) else: gt_json = get_last_groundtruth(username,usecase,language,institute,mode,batch) if gt_json is None: jsonDict['groundtruth'] = '' jsonDict['report'] = '' jsonDict['report_id'] = '' else: jsonDict['groundtruth'] = gt_json id_report = gt_json['id_report'] language = gt_json['language'] report = Report.objects.get(id_report=id_report, language=language) jsonDict['report'] = report.report_json jsonDict['report_id'] = id_report return JsonResponse(jsonDict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_ground_truth(request):\n\n user = request.GET.get('user',None)\n action = request.GET.get('action',None)\n mode = request.GET.get('mode',None)\n report = request.GET.get('report',None)\n language = request.GET.get('language',request.session['language'])\n mode_obj = NameSpace.objects.get(ns_id=mode)\n report = Report.objects.get(id_report = report, language = language)\n gt = get_user_gt(user,mode_obj,report,language,action)\n return JsonResponse(gt)", "def home_view(request):\n # Scoreboard with cookies :p\n session = request.session\n if not 'win' in session:\n session['win'] = 0\n if not 'lose' in session:\n session['lose'] = 0\n if not 'tie' in session:\n session['tie'] = 0\n\n return {'project': 'RPSLS'}", "def homeView(request):\n \n # RE-Initialization of variable of session (use during all the session):\n request.session['idSmeller'] = None\n request.session['currentIdGuess'] = None\n request.session['guessStep'] = None\n request.session['SampleIdToAnalyze'] = None\n request.session['guessStep'] = None #To adapt the demo mode for game part\n \n paramToGenerateTemplate = dict()\n if 'demoMode' not in request.session.keys():\n request.session['demoMode'] = True #init in demo mode\n \n paramToGenerateTemplate['demoMode'] = request.session['demoMode']\n \n # Render: \n return render(request, 'SmellGuessTemplate/home.html', paramToGenerateTemplate)#{'nb': nb, 'intensity': intensity, 'color': color, 'note': note, 'image': image, 'opacity': opacity, 'name': name}) ", "def download_all_ground_truths(request):\n\n json_resp = {}\n json_resp['ground_truth'] = []\n cursor = connection.cursor()\n mode = request.GET.get('gt_mode',None)\n if mode is None:\n human = NameSpace.objects.get(ns_id = 'Human')\n robot = NameSpace.objects.get(ns_id = 'Robot')\n gt_human = GroundTruthLogFile.objects.filter(ns_id = human)\n agent = User.objects.get(ns_id = robot,username = 'Robot_user')\n gt_robot = GroundTruthLogFile.objects.filter(ns_id = robot,username = agent)\n for el in gt_human:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n for el in gt_robot:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n cursor.execute(\"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",['Robot','Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n elif mode.lower() == 'automatic':\n cursor.execute(\n \"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",\n ['Robot', 'Robot_user'])\n\n #CAMBIO\n # cursor.execute(\n # \"SELECT g.gt_json FROM ground_truth_log_file AS g INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.gt_type = gg.gt_type AND g.id_report = gg.id_report AND g.ns_id = gg.ns_id WHERE g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time\",\n # ['Robot', 'Robot_user', 'Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n return JsonResponse(json_resp)", "def gameView(request):\n \n paramToGenerateTemplate = dict()\n \n if 
request.method == 'POST':\n \n ### FIRST CONNECTION: ###\n # If Smeller is not registrated = first visit of user on game page\n if request.session['idSmeller'] == None : \n \n formSmeller = SmellerModelForm(request.POST) # then data is collected.\n if formSmeller.is_valid(): # If data are valid (correct type, size, etc.)\n \n #Save smeller data:\n smeller = formSmeller.save() # Save in DB\n request.session['idSmeller'] = smeller.id\n \n #Generate game samples to analyze:\n l_allSampleId = getAllId( Sample.objects.all() )\n nbSamplesToAnalyze = 6\n request.session['SampleIdToAnalyze'] = random.sample(l_allSampleId, nbSamplesToAnalyze) #init\n \n \n #Preparation of the first analyze:\n firstToAnalyze = request.session['SampleIdToAnalyze'][0] \n firstSample = Sample.objects.get(id=firstToAnalyze)\n firstGuess = Guess(smeller=smeller,sample=firstSample)#Initialize a guess entry for this first sample\n firstGuess.save()\n request.session['guessStep'] = 1\n request.session['currentIdGuess'] = firstGuess.id\n \n #Parameters to generate the first page of game:\n paramToGenerateTemplate['guessStep'] = request.session['guessStep']\n paramToGenerateTemplate['nameSample'] = firstSample.name\n paramToGenerateTemplate['currentSamples'] = getAllName(request.session['SampleIdToAnalyze'], Sample.objects)\n \n else:\n error = 'Invalid format of registration...'\n \n else :\n #### OTHER CONNECTIONS: ###\n #Take the current data:\n currentGuess = Guess.objects.get(id=request.session['currentIdGuess'])\n \n #Save and generation of the different page of the game:\n if 'firstStep' in request.POST.keys() :\n request.session['guessStep'] = 2\n\n if 'intensity' in request.POST.keys() :\n currentGuess.intensity = request.POST['intensity']\n request.session['guessStep'] = 3\n \n elif 'humor' in request.POST.keys() :\n currentGuess.humor = Humor.objects.get(id=request.POST['humor'])\n request.session['guessStep'] = 4\n \n elif 'note' in request.POST.keys() : \n currentGuess.note = Note.objects.get(id=request.POST['note'])\n request.session['guessStep'] = 5\n \n elif 'image' in request.POST.keys() : \n currentGuess.image = Image.objects.language().get(id=request.POST['image'])\n request.session['guessStep'] = 6\n \n elif 'feeling' in request.POST.keys() :\n currentGuess.feeling = request.POST['feeling']\n request.session['guessStep'] = 7\n \n elif 'name' in request.POST.keys() : \n currentGuess.name = request.POST['name']\n request.session['guessStep'] = 8\n \n currentGuess.save()\n \n else: # if it's not post, it's not safe\n error = 'You try to connect to this game with the wrong way, please, go back to home...'\n \n \n currentGuess = Guess.objects.get(id=request.session['currentIdGuess'])\n \n # Parameters to generate template\n paramToGenerateTemplate['currentSamples'] = getAllName(request.session['SampleIdToAnalyze'], Sample.objects)\n paramToGenerateTemplate['nameSample'] = Sample.objects.get(id=currentGuess.sample_id).name\n \n paramToGenerateTemplate['intensity'] = currentGuess.intensity\n paramToGenerateTemplate['humor'] = currentGuess.humor\n paramToGenerateTemplate['note'] = currentGuess.note\n paramToGenerateTemplate['image'] = currentGuess.image\n paramToGenerateTemplate['feeling'] = currentGuess.feeling\n \n paramToGenerateTemplate['listHumors'] = Humor.objects.all()\n paramToGenerateTemplate['listNotes'] = Note.objects.all()\n paramToGenerateTemplate['listImages'] = Image.objects.all()\n\n #Preparing next step:\n paramToGenerateTemplate['guessStep'] = request.session['guessStep']\n \n return 
render(request, 'SmellGuessTemplate/game.html', paramToGenerateTemplate)", "def gameView(request, game_id): # checked for rwc23\n game = get_object_or_404(Game, id = game_id)\n\n if game.finished:\n picks = Prediction.objects.all().filter(game = game).order_by('-points')\n else:\n picks = Prediction.objects.all().filter(game = game).order_by('playerRound__player__username')\n logging.debug(f\"Number of picks {len(picks)}\")\n context = {'picks': picks, 'game': game}\n return render(request, 'rwc23/gameView.html', context)", "def resultView(request):\n \n if request.method == 'POST': # If it's a POST request\n guess = Guess.objects.get(id=request.session['currentIdGuess'])\n guess.name = request.POST['name']\n guess.save()\n \n request.session['guessStep'] = 1 \n \n #In waiting to display images of other results:\n idOfAnalyzedSample = guess.sample_id\n idemGuess = Guess.objects.filter(sample_id=idOfAnalyzedSample).exclude(smeller_id=guess.smeller_id) #Give a list of all guess with same current sample\n intensities = list()\n humors = list()\n notes = list()\n images = list()\n feelings = list()\n names = list()\n for g in idemGuess:\n intensities.append(g.intensity)\n humors.append(g.humor_id)\n notes.append(g.note_id)\n images.append(g.image_id)\n feelings.append(g.feeling)\n names.append(g.name)\n \n else: # if it's not post, it's not safe\n error = 'You try to connect to this game with the wrong way, please, go back to home...'\n \n \n guess = Guess.objects.get(id=request.session['currentIdGuess'])\n smeller = Smeller.objects.get(id=request.session['idSmeller'])\n \n \n paramToGenerateTemplate = dict()\n \n \n #######################################################################\"\n #all param to generate:\n paramToGenerateTemplate['intensity'] = guess.intensity\n paramToGenerateTemplate['intensityDisplay'] = 30*guess.intensity/100+40\n paramToGenerateTemplate['humorColor'] = guess.humor.color\n paramToGenerateTemplate['humourColorName'] = (Humor.objects.get(id=guess.humor_id)).name\n paramToGenerateTemplate['noteColor'] = guess.note.color\n paramToGenerateTemplate['noteColorName'] = (Note.objects.get(id=guess.note_id)).name\n paramToGenerateTemplate['pathImage'] = guess.image.pathImage\n paramToGenerateTemplate['imageName'] = (Image.objects.get(id=guess.image_id)).name\n paramToGenerateTemplate['opacityLevelPercent'] = str(guess.feeling*50/100) \n paramToGenerateTemplate['opacityLevel'] = str(guess.feeling*0.5/100)\n paramToGenerateTemplate['feelingLevel'] = guess.feeling\n paramToGenerateTemplate['name'] = guess.name\n \n \n if len(intensities) == 0 :\n\t paramToGenerateTemplate['intensityMean'] = None\n\t paramToGenerateTemplate['intensityMeanDisplay'] = None\n else :\n\t intensityMean = mean(intensities)\n\t paramToGenerateTemplate['intensityMean'] = intensityMean\n\t paramToGenerateTemplate['intensityMeanDisplay'] = 30*intensityMean/100+40\n\t \n if len(feelings) == 0 :\n\t paramToGenerateTemplate['feelingLevelMean'] = None\n\t paramToGenerateTemplate['opacityMeanLevelPercent'] = None\n\t paramToGenerateTemplate['opacityMeanLevel'] = None\n else :\n\t feelingLevelMean = mean(feelings)\n\t paramToGenerateTemplate['feelingLevelMean'] = feelingLevelMean\n\t paramToGenerateTemplate['opacityLevelMeanPercent'] = str(feelingLevelMean*50/100) \n\t paramToGenerateTemplate['opacityLevelMean'] = str(feelingLevelMean*0.5/100)\n \n maxHumorsId = maxi(humors)\n if isinstance(maxHumorsId, int):#existe et donc non None\n maxHumors = Humor.objects.get(id=maxHumorsId)\n 
paramToGenerateTemplate['humorColorMean'] = maxHumors.color\n paramToGenerateTemplate['humourColorMeanName'] = (Humor.objects.get(id=maxHumorsId)).name\n else:\n paramToGenerateTemplate['humorColorMean'] = None\n paramToGenerateTemplate['humourColorMeanName'] = _(u\"Non disponible\")\n \n \n maxNotesId = maxi(notes)\n if isinstance(maxNotesId, int):#existe et donc non None\n maxNotes = Note.objects.get(id=maxNotesId)\n paramToGenerateTemplate['noteColorMean'] = maxNotes.color\n paramToGenerateTemplate['noteColorMeanName'] = (Note.objects.get(id=maxNotesId)).name\n else:\n paramToGenerateTemplate['noteColorMean'] = None\n paramToGenerateTemplate['noteColorMeanName'] = _(u\"Non disponible\")\n \n maxImagesId = maxi(images)\n if isinstance(maxImagesId, int):#existe et donc non None \n maxImages = Image.objects.get(id=maxImagesId)\n paramToGenerateTemplate['pathImageMean'] = maxImages.pathImage\n paramToGenerateTemplate['imageMeanName'] = (Image.objects.get(id=maxImagesId)).name\n else:\n paramToGenerateTemplate['pathImageMean'] = None\n paramToGenerateTemplate['imageMeanName'] = _(u\"Non disponible\")\n \n paramToGenerateTemplate['nameSample'] = Sample.objects.get(id=guess.sample_id).name\n \n \n #######################################################################\"\n \n \n paramToGenerateTemplate['guess'] = guess\n paramToGenerateTemplate['listGuess'] = Guess.objects.filter(smeller=smeller);\n \n \n request.session['SampleIdToAnalyze'].remove(guess.sample_id) #Warning: remove method don't return the list minus the element...\n \n paramToGenerateTemplate['remainSamplesToAnalyze'] = []\n for idSample in request.session['SampleIdToAnalyze'] :\n\t paramToGenerateTemplate['remainSamplesToAnalyze'].append(Sample.objects.get(id=idSample).name)\n \n \n paramToGenerateTemplate['nbRemainSamplesToAnalyze'] = len(request.session['SampleIdToAnalyze'])\n if paramToGenerateTemplate['nbRemainSamplesToAnalyze'] > 0: #if it remains sample(s) to analyze: \n\n sample = Sample.objects.get(id=request.session['SampleIdToAnalyze'][0])\n paramToGenerateTemplate['nameNextSample'] = sample.name\n guess = Guess(smeller=smeller,sample=sample)\n guess.save()\n request.session['currentIdGuess'] = guess.id\n \n #else : DB_to_csv()\n \n return render(request, 'SmellGuessTemplate/result.html', paramToGenerateTemplate)", "def currentWall(request):\n if request.method == 'GET':\n current = WallConfiguration.objects.latest('timeChanged')\n serializer = WallConfigurationSerializer(current)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = WallConfigurationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def get_session_params(request):\n\n json_resp = {}\n usecase = request.session.get('usecase',None)\n language = request.session.get('language',None)\n institute = request.session.get('institute',None)\n annotation = request.session.get('mode',None)\n team_member = request.session.get('team_member',None)\n report_type = request.session.get('report_type',None)\n batch = request.session.get('batch',None)\n\n if batch is not None and report_type is not None and usecase is not None and language is not None and institute is not None and annotation is not None:\n json_resp['usecase'] = usecase\n json_resp['language'] = language\n json_resp['institute'] = institute\n json_resp['team_member'] = team_member\n 
json_resp['report_type'] = report_type\n json_resp['batch'] = batch\n if annotation == 'Human':\n json_resp['annotation'] = 'Manual'\n elif annotation == 'Robot':\n json_resp['annotation'] = 'Automatic'\n else:\n json_resp['usecase'] = ''\n json_resp['language'] = ''\n json_resp['institute'] = ''\n json_resp['batch'] = ''\n if User.objects.filter(profile='Admin').exists():\n admin = User.objects.filter(profile='Admin')\n admin = admin.first()\n admin_name = admin.username\n json_resp['team_member'] = admin_name\n else:\n json_resp['team_member'] = 'Test'\n json_resp['annotation'] = ''\n json_resp['report_type'] = ''\n return JsonResponse(json_resp)", "def current_session_view(request):\n if request.method == 'POST':\n form = CurrentSessionForm(request.POST)\n if form.is_valid():\n session = form.cleaned_data['current_session']\n term = form.cleaned_data['current_term']\n AcademicSession.objects.filter(name=session).update(current=True)\n AcademicSession.objects.exclude(name=session).update(current=False)\n AcademicTerm.objects.filter(name=term).update(current=True)\n AcademicTerm.objects.exclude(name=term).update(current=False)\n\n else:\n form = CurrentSessionForm(initial={\n \"current_session\": AcademicSession.objects.get(current=True),\n \"current_term\": AcademicTerm.objects.get(current=True)\n })\n\n\n return render(request, 'corecode/current_session.html', {\"form\":form})", "def home(request: Request):\n latest_question_list = request.dbsession.query(Question).order_by(Question.published_at.desc()).all()\n return locals()", "def results():\n \n to_predict_list = request.form.to_dict() \n to_predict_list = list(to_predict_list.values()) \n to_predict_list = list(map(float, to_predict_list)) \n result = ValuePredictor(to_predict_list) \n if int(result)== 1: \n prediction ='Run Martha, or you\\'re gonna get the sugar.'\n else: \n prediction ='Go ahead and have another donut Martha, you\\'re all good.' 
\n return render_template(\"results.html\",\n year=datetime.now().year,\n prediction = prediction\n )", "def results():\n if session.has_key('id') and session.has_key('score'):\n id = session['id']\n score = session['score']\n else:\n return redirect(url_for('start'))\n\n #destroy session\n session.pop('id')\n session.pop('score')\n session.pop('test')\n\n return render_template('results.html', id=id, score=score)", "def home(request):\n committees = Committee.objects.all()\n delegations = Delegation.objects.all()\n latest_delegate = Delegate.objects.last()\n\n context = {\"committees\": committees, \"delegations\": delegations, \"latest_delegate\": latest_delegate}\n template = \"jurycore/home.html\"\n return render(request, template, context)", "def algebra742live(lti=lti):\n user = db.session.query(User).filter_by(lti_user_id=lti.name).first()\n if user:\n return render_template(ROOMS[0].template)\n else:\n form = UserInfoForm()\n return render_template('GetUserInfo.html', lti=lti, form=form)", "def MOT_task(request):\n # When it's called for the first, pass this default dict:\n # When seq manager would be init, make id_session automatic to +1\n # Search user, find highest id_session --> +1\n parameters = {'n_targets': 3, 'n_distractors': 3, 'angle_max': 9, 'angle_min': 3,\n 'radius': 90, 'speed_min': 4, 'speed_max': 4, 'episode_number': 0,\n 'nb_target_retrieved': 0, 'nb_distract_retrieved': 0, 'id_session': 0,\n 'presentation_time': 1000, 'fixation_time': 1000, 'tracking_time': 12000,\n 'debug': 0, 'secondary_task': 'detection', 'SRI_max': 2000, 'RSI': 1000,\n 'diagcm': 40, 'delta_orientation': 45}\n # As we don't have any seq manager, let's initialize to same parameters:\n with open('interface_app/static/JSON/parameters.json', 'w') as json_file:\n json.dump(parameters, json_file)\n\n with open('interface_app/static/JSON/parameters.json') as json_file:\n parameters = mark_safe(json.load(json_file))\n return render(request, 'app_MOT.html', locals())", "def prepare_game(restart):\n if 'user_id' in session.keys():\n\n user = ViewModel.get_user_data(session['user_id'])\n game = ViewModel.get_game(user.id)\n\n if not game or restart:\n movie = session['starting_movie']\n current = ViewModel.add_choice(movie['title'], movie['id'], \"movie\")\n chain = []\n else:\n chain = ViewModel.get_chain(game)\n current = ViewModel.get_current(game)\n\n return (user, game, current, chain)\n\n else:\n return redirect(url_for('start'))", "def search_testing():\n\n if 'user_id' in session:\n user_id = session['user_id']\n else:\n \n user_id = None\n \n return render_template('testing.html')", "def get_gt_list(request):\n\n groundTruths = 0\n json_resp = {}\n username =request.GET.get('username',None)\n ins = request.GET.get('inst',None)\n lang = request.GET.get('lang',None)\n use = request.GET.get('use',None)\n action = request.GET.get('action',None)\n token = request.GET.get('token',None)\n reptype = request.GET.get('reptype',None)\n languages = ['English','english']\n annotation_mode = request.GET.get('annotation_mode',None)\n if ins == '':\n ins = None\n if use == '':\n use = None\n if lang == '':\n lang = None\n if reptype == '':\n reptype = 'reports'\n if token == 'all':\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n ns_human = NameSpace.objects.get(ns_id='Human')\n rob_user = User.objects.get(username='Robot_user',ns_id=ns_robot)\n list_gt = GroundTruthLogFile.objects.filter(username = rob_user).count() + GroundTruthLogFile.objects.filter(ns_id=ns_human).count()\n groundTruths = list_gt\n gt_rob 
= GroundTruthLogFile.objects.filter(ns_id=ns_robot,username = rob_user)\n\n i = 0\n # print(groundTruths)\n for el in gt_rob:\n gts = GroundTruthLogFile.objects.filter(ns_id=ns_robot,gt_type = el.gt_type,id_report = el.id_report_id,language = el.language).exclude(insertion_time = el.insertion_time)\n gts_count = gts.count()\n # print('count: '+str(i)+' '+str(gts.count()))\n i = i+1\n groundTruths = groundTruths + gts_count\n\n\n else:\n with connection.cursor() as cursor:\n if reptype == 'reports':\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s and r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n # CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type = gg.gt_type WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute != %s\",\n # [ins, use, lang, action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n else:\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s and r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n #CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type = gg.gt_type WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute = %s\",\n # [use, tuple(languages), action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n # groundTruths = cursor.fetchone()[0]\n\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), 
action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n\n\n\n\n json_resp['ground_truths'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def createWorkout():\n session['num_poses'] = int(request.args.get('num_poses'))\n session['difficulty'] = [request.args.get('difficulty')] # for now just putting in 1 difficulty\n # want difficulty to include beginner & intermediate if intermediate is specified, but how do i skew towards intermediate?\n\n # what if emphasis is not filled in or empty?\n emphasis = request.args.get('emphasis')\n if emphasis == \"\":\n session['emphasis'] = []\n else:\n session['emphasis'] = [int(emphasis)] # session['emphasis'] is a list of integers\n\n session['timingOption'] = request.args.get('timingOption')\n\n workout_list = generateWorkout(session['num_poses'], difficulty=session['difficulty'], categories=session['emphasis'])\n # generateWorkout returns None if it can't find any poses that match the criteria\n\n workout_jsonlist = []\n\n # unpack the workout list to display on the page\n if workout_list:\n for i, pose in enumerate(workout_list):\n workout_jsonlist.append({'pose_id' : pose.pose_id, 'imgurl': pose.img_url, 'name': pose.name, 'is_leftright': pose.is_leftright})\n session['error'] = \"\"\n else:\n session['error'] = \"No Poses Matched. Try creating another workout\"\n \n session['workout'] = workout_jsonlist\n\n # do I want to create a workout automatically? and then save workout will just be saving\n # it to be associated with a certain user?\n\n return redirect('/workout') # go to the workout route to display the workout", "def __str__(self):\n return self._last_opponent", "def history():\n\n if request.method == 'POST':\n user_input_uuid = request.form['uuid']\n\n dm = DatabaseManager()\n genes, diseases, uuid, query, genpanel, date =\\\n dm.retreieve_zoekopdracht(user_input_uuid)\n\n make_session(\"uuid\", uuid, 2)\n\n return redirect(url_for('vis_results'))\n\n hislis = []\n\n if session.get('history'):\n hislis = reversed(session['history'])\n\n return render_template(\"history.html\", hislis=hislis)", "def get_goal(self):\n return self.get_observation(self.env._get_goal())", "def index():\r\n logger.debug(\"\")\r\n\r\n session_info.init_session( session )\r\n form = CompareInputForm()\r\n\r\n if request.method == \"POST\" :\r\n if form.errors :\r\n logger.debug(form.errors)\r\n \r\n # Populate model with form data\r\n model = session_info.get_user_model(session)\r\n model.load_model( form )\r\n\r\n # Execute parameter comparison\r\n model.compare_parameters()\r\n\r\n return redirect( url_for('summary_page' ) )\r\n \r\n return render_template( 'index.html' , form=form )", "def get_GroundTruth(self):\n\n # set first pose to identity\n # first_pose = self.dataset.oxts[0].T_w_imu\n # first_pose_inv = src.se3.inversePose(first_pose)\n # do not correct the orientation\n # first_pose_inv[:3, :3] = np.eye(3)\n\n # do not set first pose to identity\n first_pose_inv = np.eye(4)\n\n for o in self.dataset.oxts:\n\n normalized_pose_original = first_pose_inv @ o.T_w_imu\n self.poses_gt.append(normalized_pose_original)\n\n # gt pose is from I to G\n for i, pose in enumerate(self.poses_gt):\n\n # get gt position\n gt_position = np.reshape(pose[0:3, 3], (-1, 1))\n\n self.gt_position.append(gt_position)\n\n # get gt orientation\n R_wIMU = pose[0:3, 0:3]\n self.gt_orientation.append(R_wIMU)", "def joldStartSess_LL(request):\n participant = JOLD_participant.objects.get(user=request.user.id)\n 
participant.nb_sess_started += 1;\n participant.save()\n xparams = { # make sure to keep difficulty constant for the same participant!\n 'wind' : participant.wind,\n 'plat' : participant.plat,\n 'dist' : participant.dist,\n 'time' : 10,\n }\n # Initialize game same parameters:\n with open('interface_app/static/JSON/LL_params.json', 'w') as json_file:\n json.dump(xparams, json_file)\n\n with open('interface_app/static/JSON/LL_params.json') as json_file:\n xparams = mark_safe(json.load(json_file))\n return render(request, 'app_LL.html', locals())", "def load_last_vars(self):\n sys_config = self.parent().sys_config # FIXME !!! \n\n folder = Path(sys_config['paths']['animals_folder']) / sys_config['current']['animal']\n SessionsDf = utils.get_sessions(folder)\n\n try:\n previous_sessions = SessionsDf.groupby('task').get_group(sys_config['current']['task'])\n except KeyError:\n utils.printer(\"trying to use last vars, but animal has not been run on this task before.\",'error')\n return None\n\n # to allow for this functionalty while task is running\n if self.parent().parent().is_running:\n ix = -2\n else:\n ix = -1\n\n try:\n prev_session_path = Path(previous_sessions.iloc[ix]['path'])\n prev_vars_path = prev_session_path / sys_config['current']['task'] / \"Arduino\" / \"src\" / \"interface_variables.h\"\n if prev_vars_path.exists():\n prev_vars = utils.parse_arduino_vars(prev_vars_path)\n return prev_vars\n else:\n utils.printer(\"didn't find variables from last session\", \"error\")\n return None\n\n except IndexError:\n # thrown when there is no previous session\n return None", "def _get_current_session(self) -> Dict[str, Any]:\n return self._data[-1]", "def get_last_playthrough_information(last_playthrough_model):\n return user_domain.ExpUserLastPlaythrough(\n last_playthrough_model.user_id,\n last_playthrough_model.exploration_id,\n last_playthrough_model.last_played_exp_version,\n last_playthrough_model.last_updated,\n last_playthrough_model.last_played_state_name)", "def get_parameters():\n \n tf.set_random_seed(1) #so that your \"random\" numbers match ours\n sess = tf.Session() \n #First let's load meta graph and restore weights\n saver = tf.train.import_meta_graph(ckpt_dir + '/trained_model.meta')\n saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))\n\n #print all tensor name\n #print([n.name for n in graph.as_graph_def().node]) \n #extract parameters from saved session\n W1 = sess.run(\"W1:0\")\n b1 = sess.run(\"b1:0\")\n W2 = sess.run(\"W2:0\")\n b2 = sess.run(\"b2:0\")\n W3 = sess.run(\"W3:0\")\n b3 = sess.run(\"b3:0\") \n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n \n return parameters, sess" ]
[ "0.6362369", "0.5618397", "0.55958295", "0.5394522", "0.53271693", "0.532361", "0.524949", "0.52235365", "0.5198521", "0.51720786", "0.5165802", "0.51398844", "0.5132665", "0.5132122", "0.51075196", "0.5077153", "0.503414", "0.5029794", "0.5022567", "0.50175035", "0.49976113", "0.49940047", "0.4988563", "0.49768612", "0.49636108", "0.49597797", "0.4944666", "0.49280834", "0.4926588", "0.4919551" ]
0.65990674
0
This view returns, for each required key of the JSON report, its text, the indexes of the start and stop characters in the JSON report string, and the number of words that compose the fields to annotate.
def report_start_end(request): report = request.GET.get('report_id') lang = request.GET.get('language',None) usecase = request.session['usecase'] data = get_fields_from_json() json_keys_to_display = data['fields'] json_keys_to_display.extend(['journal','authors','year','volume']) json_keys_to_ann = data['fields_to_ann'] json_keys = (data['all_fields']) language = request.GET.get('language',request.session['language']) request_auto = request.GET.get('ns_id',None) if request.session['mode'] == 'Robot' or (request_auto is not None and request_auto == 'Robot' and request.session['institute'] != 'PUBMED'): # In this case we require automatic annotation: the keys to annotate change workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in with open(os.path.join(workpath,'./automatic_annotation/auto_fields/auto_fields.json')) as out: data = json.load(out) json_keys = data['total_fields'][usecase] json_keys_to_ann = data['extract_fields'][usecase] for el in json_keys_to_ann: if el in json_keys_to_display: json_keys_to_display.remove(el) json_keys.extend(['journal', 'authors', 'year', 'volume', 'abstract', 'title']) json_keys_to_ann.extend(['abstract', 'title']) if lang is not None: language = lang json_dict = report_get_start_end(json_keys,json_keys_to_ann,report,language) # print(json_dict) return JsonResponse(json_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annotation_all_stats(request):\n\n id_report = request.GET.get('report',None)\n language = request.GET.get('language',None)\n\n json_dict = get_annotations_count(id_report,language)\n\n # print('annotations',json_dict)\n return JsonResponse(json_dict)", "def keyword_analysis(request): \n temp = json.loads(request.body)\n try:\n reviews = MovieReviews.objects.filter(id=temp['review_id'])\n except MovieReviews.DoesNotExist:\n return HttpResponse('empty')\n serializer = MovieReviewsSerializer(reviews , many=True)\n desc = serializer.data[0]['description']\n ret = keyword_extract(desc)\n return Response({'text':serializer.data[0], \"analysis\":ret})", "def text():\n return {\n \"@context\": \"http://www.w3.org/ns/anno.jsonld\",\n \"type\": \"Annotation\",\n \"body\": {\n \"creator\": \"user\",\n \"type\": \"TextualBody\",\n \"value\": \"string\"\n },\n \"generator\": {\n \"homepage\": \"http://mnemosyne.ml\",\n \"id\": \"string\",\n \"name\": \"Mnemosyne\",\n \"type\": \"Mnemosyne\"\n },\n \"target\": {\n \"id\": \"string\",\n \"type\": \"TextQuoteSelector\",\n \"exact\": \"string\",\n \"format\": \"string\",\n \"source\": \"string\",\n \"prefix\": 0,\n \"suffix\": 0,\n \"refinedBy\": {\n \"type\": \"TextPositionSelector\",\n \"start\": \"/div[2]\",\n \"end\": \"/div[2]\"\n },\n },\n }", "def get_data(articles): # Here, the articles will be very long strings\r\n vdictionary = {} # dictionary for tokens that are found in dictionary\r\n _odata = [0] * 12 # list collecting everything except date; last number of words=index:0\r\n word_length = 0 # initializing the value of word length; will be updated via loop\r\n tokens = re.findall('\\w+', articles) # Note that \\w+ splits hyphenated words\r\n for token in tokens: # Goes through generated tokens from articles\r\n if (not token.isdigit()) and (len(token) > 1) and (token in lm_dictionary.keys()): # conditions for checking if token is in dictionary\r\n _odata[1] += 1 # updating word count \r\n word_length += len(token) # updating word length\r\n if token not in vdictionary: # initial statement regarding steps for handling tokens not in the dictionary\r\n vdictionary[token] = 1 # count of tokens in text that show up in dictionary\r\n \r\n####### Keeping Track of Categorical Token Counts (Nonzero entry=True) also checks if word is stop word\r\n if lm_dictionary[token].positive and not lm_dictionary[token].stopword: _odata[2] += 1\r\n if lm_dictionary[token].negative and not lm_dictionary[token].stopword: _odata[3] += 1\r\n if lm_dictionary[token].uncertainty and not lm_dictionary[token].stopword: _odata[4] += 1\r\n if lm_dictionary[token].litigious and not lm_dictionary[token].stopword: _odata[5] += 1\r\n if lm_dictionary[token].weak_modal and not lm_dictionary[token].stopword: _odata[6] += 1\r\n if lm_dictionary[token].moderate_modal and not lm_dictionary[token].stopword: _odata[7] += 1\r\n if lm_dictionary[token].strong_modal and not lm_dictionary[token].stopword: _odata[8] += 1\r\n if lm_dictionary[token].constraining and not lm_dictionary[token].stopword: _odata[9] += 1\r\n #total_syllables += lm_dictionary[token].syllables # interesting parameter to measure\r\n\r\n #_odata[12] = len(re.findall('[0-9]', doc))\r\n # drop punctuation within numbers for number count\r\n articles = re.sub('(?!=[0-9])(\\.|,)(?=[0-9])', '', articles)\r\n articles = articles.translate(str.maketrans(string.punctuation, \" \" * len(string.punctuation)))\r\n #_odata[13] = len(re.findall(r'\\b[-+\\(]?[$€£]?[-+(]?\\d+\\)?\\b', doc))\r\n # _odata[14] = total_syllables 
/ _odata[2]\r\n #print(_odata[1])\r\n _odata[10] = word_length / _odata[1] # computing average word length\r\n _odata[11] = len(vdictionary) # total vocab count\r\n \r\n # Convert counts to %\r\n for i in range(2, 9 + 1): # specifying range of percentages\r\n try:\r\n _odata[i] = (_odata[i] / _odata[1]) * 100 # updating count to percent\r\n except:\r\n print(\"zero denominator\")\r\n # Vocabulary\r\n \r\n return _odata # returning the data\r", "def wps(model, data):\n with msg.loading(f\"Loading model '{model}'...\"):\n nlp = spacy.load(model)\n texts = (eg[\"text\"] for eg in srsly.read_jsonl(data))\n n_docs = 0\n n_words = 0\n start_time = timer()\n for doc in nlp.pipe(texts):\n n_docs += 1\n n_words += len(doc)\n end_time = timer()\n wps = int(n_words / (end_time - start_time))\n result = [\n (\"Docs\", f\"{n_docs:,}\"),\n (\"Words\", f\"{n_words:,}\"),\n (\"Words/s\", f\"{wps:,}\"),\n ]\n msg.table(result, widths=(7, 12), aligns=(\"l\", \"r\"))", "def get_frequency():\n # Check if value is in json\n if request.is_json:\n content = request.get_json()\n if \"text\" in content:\n string = content[\"text\"]\n if \"sort\" in content:\n to_sort = content[\"sort\"]\n else:\n to_sort = 0\n else:\n return jsonify(message=\"Please supply a json with text as key and text to be analyzed as value\"),400\n client_ip = request.environ.get(\"HTTP_X_FORWARDED_FOR\")\n loadbalancer_ip = request.environ.get(\"REMOTE_ADDR\")\n worker_ip = request.environ.get(\"HTTP_HOST\")\n result = word_counter(string,to_sort)\n return jsonify(result,{\"Client_IP\":client_ip,\"Loadbalancer_IP\":loadbalancer_ip,\"Worker_IP\":worker_ip}),200\n\n else:\n return jsonify(message=\"Please Send Your request in json\"),400", "def reconcile_output(self):\n final_string = \"\"\n final_string += \"TEXT: {0}\\n\".format(self._text)\n final_string += \"ID: {0}\\n\".format(self._index)\n final_string += \"Count: {0}\\n\".format(self._count)\n\n final_string += \"=Doc Count Begin=\\n\"\n for doc in list(self._doc_count.keys()):\n final_string += \"{0} $!$ {1}\\n\".format(doc, self._doc_count[doc])\n final_string += \"=Doc Count End=\\n\"\n\n final_string += \"=CEs Begin=\\n\"\n for doc in list(self.word_pairs.keys()):\n for ce in set(self.word_pairs[doc]):\n #format: doc $!$ ce \n final_string += \"{0} $!$ {1} $!$ {2}\\n\".format(doc, ce, self.word_pairs[doc].count(ce))\n final_string += \"=CEs End=\\n\"\n\n final_string += \"=Self Tags Begin=\\n\"\n for source in list(self.this_semantic_tags.keys()):\n for tag in set(self.this_semantic_tags[source]):\n final_string += \"{0} $!$ {1} $!$ {2}\\n\".format(source, tag, self.this_semantic_tags[source].count(tag))\n final_string += \"=Self Tags End=\\n\"\n\n final_string += \"=Semantic Begin=\\n\"\n for source in list(self.lexico_semantic.keys()):\n #for some reason, I had each semantic class getting assigned the\n #same overall count?\n #sem_counts = self.getLexicoSemanticCounts(source)\n for ce in self.lexico_semantic[source]:\n for sem_cls in set(self.lexico_semantic[source][ce]):\n c = self.lexico_semantic[source][ce].count(sem_cls)\n final_string += \"{0} $!$ {1} $!$ {2} $!$ {3}\\n\".format(source, ce, sem_cls, c)\n final_string += \"=Semantic End=\\n\"\n final_string += \"$!$\\n\"\n return final_string", "def get_plain_texts(input_dict):\n\n adc = input_dict['adc']\n annotation = input_dict['token_annotation']\n feature_annotation = input_dict['feature_annotation']\n if feature_annotation:\n annotation+=\"/\"+feature_annotation\n delimiter = input_dict['delimiter']\n includeDocId = 
input_dict['include_doc_id'] == \"true\"\n\n output_dict = {\"strings\": []}\n for document in adc.documents:\n output_dict[\"strings\"].append((document.name+\": \" if includeDocId else \"\") + delimiter.join([t[1] for t in document.get_annotations_with_text(annotation)]))\n return output_dict", "def _generate_index_report(self, index, query_analysis):\n\n all_fields = []\n equiv_fields = []\n sort_fields = []\n range_fields = []\n\n for query_field in query_analysis['analyzedFields']:\n all_fields.append(query_field['fieldName'])\n if query_field['fieldType'] is EQUIV_TYPE:\n equiv_fields.append(query_field['fieldName'])\n elif query_field['fieldType'] is SORT_TYPE:\n sort_fields.append(query_field['fieldName'])\n elif query_field['fieldType'] is RANGE_TYPE:\n range_fields.append(query_field['fieldName'])\n\n max_equiv_seq = len(equiv_fields)\n max_sort_seq = max_equiv_seq + len(sort_fields)\n max_range_seq = max_sort_seq + len(range_fields)\n\n coverage = 'none'\n query_fields_covered = 0\n query_field_count = query_analysis['fieldCount']\n supported = True\n ideal_order = True\n for index_field in index['key']:\n field_name = index_field[0]\n\n if index_field[1] == '2d':\n supported = False\n break\n\n if field_name not in all_fields:\n break\n\n if query_fields_covered == 0:\n coverage = 'partial'\n\n if query_fields_covered < max_equiv_seq:\n if field_name not in equiv_fields:\n ideal_order = False\n elif query_fields_covered < max_sort_seq:\n if field_name not in sort_fields:\n ideal_order = False\n elif query_fields_covered < max_range_seq:\n if field_name not in range_fields:\n ideal_order = False\n query_fields_covered += 1\n if query_fields_covered == query_field_count:\n coverage = 'full'\n\n # INDEX REPORT\n return OrderedDict({\n 'coverage': coverage,\n 'idealOrder': ideal_order,\n 'queryFieldsCovered': query_fields_covered,\n 'index': index,\n 'supported': supported\n })", "def _generate_index_report(self, index, query_analysis):\r\n\r\n all_fields = []\r\n equiv_fields = []\r\n sort_fields = []\r\n range_fields = []\r\n\r\n for query_field in query_analysis['analyzedFields']:\r\n all_fields.append(query_field['fieldName'])\r\n if query_field['fieldType'] is EQUIV_TYPE:\r\n equiv_fields.append(query_field['fieldName'])\r\n elif query_field['fieldType'] is SORT_TYPE:\r\n sort_fields.append(query_field['fieldName'])\r\n elif query_field['fieldType'] is RANGE_TYPE:\r\n range_fields.append(query_field['fieldName'])\r\n\r\n max_equiv_seq = len(equiv_fields)\r\n max_sort_seq = max_equiv_seq + len(sort_fields)\r\n max_range_seq = max_sort_seq + len(range_fields)\r\n\r\n coverage = 'none'\r\n query_fields_covered = 0\r\n query_field_count = query_analysis['fieldCount']\r\n supported = True\r\n ideal_order = True\r\n for index_field in index['key']:\r\n field_name = index_field[0]\r\n\r\n if index_field[1] == '2d':\r\n supported = False\r\n break\r\n\r\n if field_name not in all_fields:\r\n break\r\n\r\n if query_fields_covered == 0:\r\n coverage = 'partial'\r\n\r\n if query_fields_covered < max_equiv_seq:\r\n if field_name not in equiv_fields:\r\n ideal_order = False\r\n elif query_fields_covered < max_sort_seq:\r\n if field_name not in sort_fields:\r\n ideal_order = False\r\n elif query_fields_covered < max_range_seq:\r\n if field_name not in range_fields:\r\n ideal_order = False\r\n query_fields_covered += 1\r\n if query_fields_covered == query_field_count:\r\n coverage = 'full'\r\n\r\n # INDEX REPORT\r\n return OrderedDict({\r\n 'coverage': coverage,\r\n 'idealOrder': 
ideal_order,\r\n 'queryFieldsCovered': query_fields_covered,\r\n 'index': index,\r\n 'supported': supported\r\n })", "def _extract_words(f_name):\n word_dict = {}\n idx_dict = {}\n word_cnt = []\n wc = 0\n if os.path.isfile(f_name):\n with open(f_name) as f:\n for line in f:\n obj = json.loads(line)\n # process words in the text\n for t in _extract(obj[\"text\"]):\n # update the word counts\n if t not in word_dict:\n word_dict[t] = wc\n idx_dict[wc] = t\n wc += 1\n word_cnt.append(1)\n else:\n word_cnt[word_dict[t]] += 1\n return word_dict, idx_dict, word_cnt\n else:\n error(\"parse dict - not a file: %s\" % f_name)", "def extractTextWithFullLayout(analyzed_data):\r\n\r\n data = []\r\n for page in analyzed_data:\r\n if not page:\r\n continue\r\n\r\n data.append([])\r\n for lt_obj in page:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n data[-1].append({\r\n 'type': 'text', # Might support more types (e.g. figures) in the future.\r\n 'text': lt_obj.get_text().split(\"\\n\"),\r\n 'layout': {\r\n 'x0': lt_obj.x0,\r\n 'x1': lt_obj.x1,\r\n 'y0': lt_obj.y0,\r\n 'y1': lt_obj.y1\r\n }\r\n })\r\n\r\n return data", "def report_index(self, index):\n i = 0\n for k, data in self.matches[index].items():\n if i != 0:\n print\n print fmt(\"['%c': charset - chars]\" % k, MAGENTA)\n print fmt(sorted([x for x in data[\"charset\"]]), WHITE)\n print fmt(data[\"chars\"], WHITE)\n i = 1", "def render_words_count(request):\n count = 0\n try:\n count = sum([len(d.body.split(None)) for d in Devotional.objects.all()])\n except:\n pass\n\n return render_to_response('devotional/view_word_count.html',\n {'count': count},\n context_instance=RequestContext(request))", "def results(self):\n to_print = \"\"\n\n # determine longest key for pretty print\n limit = 0\n for key in self.__results:\n if len(key) > limit:\n limit = len(key)\n\n # decide spacing and ending between results based on type of input\n spacing = \"\"\n if len(self.input) > 1:\n spacing = \"\\t\"\n\n for key in self.__results:\n key_name = key\n if len(self.patterns) < 2:\n key_name = \"\"\n\n if self.counter:\n matches = self.__results[key]\n else:\n matches = \", \".join([str(i) for i in self.__results[key]])\n\n if len(self.patterns) > 1:\n to_print += (f'{spacing}{key_name:<{limit+2}}{matches}\\n')\n else:\n to_print += (f'{spacing}{matches}\\n')\n\n # remove last newline\n to_print = to_print[:-1]\n\n return to_print", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def collect_hiertext_info(root_path, level, split, print_every=1000):\n\n annotation_path = osp.join(root_path, 'annotations/' + split + '.jsonl')\n if not osp.exists(annotation_path):\n raise Exception(\n f'{annotation_path} not exists, please check and try again.')\n\n annotation = json.load(open(annotation_path))['annotations']\n img_infos = []\n for i, img_annos in enumerate(annotation):\n if i > 0 and i % print_every == 0:\n print(f'{i}/{len(annotation)}')\n img_info = {}\n img_info['file_name'] = img_annos['image_id'] + '.jpg'\n img_info['height'] = img_annos['image_height']\n img_info['width'] = img_annos['image_width']\n img_info['segm_file'] = annotation_path\n anno_info = []\n for paragraph in img_annos['paragraphs']:\n if level == 'paragraph':\n anno = collect_level_info(paragraph)\n anno_info.append(anno)\n elif level == 'line':\n for line in paragraph['lines']:\n anno 
= collect_level_info(line)\n anno_info.append(anno)\n elif level == 'word':\n for line in paragraph['lines']:\n for word in line['words']:\n anno = collect_level_info(line)\n anno_info.append(anno)\n img_info.update(anno_info=anno_info)\n img_infos.append(img_info)\n return img_infos", "def counterdict(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.append(s_i)\n for ele in enumerate(vas):\n print(ele)\n logging.debug(\"Starting with to\")", "def pubmed_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n json_resp['annotated'] = 0\n json_resp['tot'] = 0\n json_resp['usecase'] = []\n languages = ['English','english']\n for el in usecases:\n use = el.name\n json_resp[use] = {}\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages,institute = 'PUBMED')\n for el in report:\n if el.batch not in batches:\n batches.append(el.batch)\n count_rep = report.count()\n\n if count_rep > 0:\n json_resp['usecase'].append(str(use))\n json_resp['tot'] = json_resp['tot'] + count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n json_resp['annotated'] = json_resp['annotated'] + groundTruths\n\n for batch in batches:\n\n json_resp[use][batch] = {}\n if batch == 'all' or batch is None:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED')\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED',batch = batch)\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch,tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print('risposta',json_resp)\n return JsonResponse(json_resp)", "def text_json(request):\n query = str()\n\n if request.method == 'GET':\n query = request.GET.get('q')\n\n results = list()\n\n for c in search.tokenSearch(query):\n tmp = 
{'category':'課程代號','title':c.token}\n results.append(tmp)\n \n for c in search.zhNameSearch(query):\n tmp = {'category':'課程名稱','title':c.name_zh}\n results.append(tmp)\n\n \n for c in search.engNameSearch(query):\n tmp = {'category':'Course Name','title':c.name_eng}\n results.append(tmp)\n \n for t in Teacher.objects.filter(name_zh__icontains=query):\n tmp = {'category':'老師','title':t.name_zh}\n results.append(tmp)\n \n for d in Department.objects.filter(name_zh__icontains=query):\n tmp = {'category':'開課單位','title':d.name_zh}\n results.append(tmp)\n\n tmp = {'results':results}\n\n return HttpResponse(json.dumps(tmp))", "def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)", "def _construct_report(self, text):\n result = []\n reports = self._clean_text(text)\n\n for report in reports:\n _dict = self._report_to_dict(report)\n if _dict: result.append(_dict)\n\n return result", "def post(self):\n data = request.json\n return analyze_text(data)", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s 
and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def post(self):\n input_text = self.get_argument('input_text', '')\n self.write(json_encode(extract_word_frequencies(input_text)))", "def _get_annotations(self, text, language=''):\n body = {\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': text,\n },\n 'features': {\n 'extract_syntax': True,\n },\n 'encodingType': 'UTF32',\n }\n if language:\n body['document']['language'] = language\n\n request = self.service.documents().annotateText(body=body)\n response = request.execute()\n tokens = response.get('tokens', [])\n language = response.get('language')\n\n return {'tokens': tokens, 'language': language}", "def get_text_bounds(d):\n text=[]\n item = 1\n while True:\n try:\n data = d['responses'][0]['textAnnotations'][item]['description']\n vert = d['responses'][0]['textAnnotations'][item]['boundingPoly']['vertices'] \n text.append(class_text(data,vert))\n item=item+1\n except:\n break \n sorted_xy = sorted(text,reverse= True)\n return sorted_xy", "def tokenization(self, content: str, doc_id: str):\n self.metadata[doc_id] = dict()\n tokens = list()\n lines = content.splitlines()\n for i in range(200 if self.name == 'Novels' else 0, len(lines)):\n if self.name == 'HillaryEmails' or (lines[i] == '' and lines[i-1] != ''):\n words = 0\n self.metadata[doc_id]['Content'] = str()\n for j in range(i, len(lines)):\n line = lines[j]\n if line:\n words += len(line.split())\n self.metadata[doc_id]['Content'] += line + ''\n if words >= 75:\n self.metadata[doc_id]['Content'] += '...'\n break\n break\n keys = ['Title', 'Author', 'Release Date', 'Language', 'Character set encoding']\n for i, line in enumerate(lines):\n if self.name == 'Novels':\n if i < 30:\n for j in range(len(keys)):\n if keys[j] in line:\n self.metadata[doc_id][keys[j]] = line.strip().replace(keys[j]+': ', '')\n token = line.split() # default split by whitespace\n tokens.extend(zip(token, len(token) * [doc_id]))\n return tokens", "def scrape(data):\n result = {}\n xml_str = scraperwiki.pdftoxml(data)\n root = xml.etree.ElementTree.fromstring(xml_str)\n page_id = 0\n for page in root:\n page_id += 1\n for text in page.iter(tag=\"text\"):\n if text.get(\"font\") == \"3\":\n text_id = (page_id, text.get(\"top\"))\n row = result.get(text_id, \"\")\n if row and len(row) < 60:\n row = row + \" \" * (60 - len(row))\n result[text_id] = row + text.text\n return result", "def __get_search_content(self):\n words = 
[\",\".join(self.__get_property_from_all_views(\"resource_id\")),\n \",\".join(self.__get_property_from_all_views(\"text\"))]\n return \"\\n\".join(words)" ]
[ "0.5901755", "0.557389", "0.5426363", "0.53363323", "0.53263783", "0.530319", "0.529372", "0.52915365", "0.52282554", "0.52024233", "0.51975214", "0.5193073", "0.5190849", "0.5155429", "0.51324636", "0.51046854", "0.51033866", "0.50946844", "0.5073294", "0.5071243", "0.5070501", "0.5068767", "0.5066293", "0.50603586", "0.50463057", "0.50338876", "0.5018359", "0.50110286", "0.50088656", "0.5001803" ]
0.5978528
0
This view returns the fields to display and annotate. If the annotation mode is automatic, the fields to annotate are those from which the concepts and mentions have been extracted. The fields are returned to give the user the chance to update the fields she wants to display/annotate in MANUAL CONFIGURATION.
def get_fields(request): json_resp = {} json_resp['fields'] = [] json_resp['fields_to_ann'] = [] all = request.GET.get('all',None) workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in auto_request = request.GET.get('ns_id', None) report = request.GET.get('report', None) # print(request.session['report_type']) if report is not None or all == 'all': if report is not None: if report.startswith('PUBMED_'): json_resp['fields'] = ['volume','authors','year','journal'] json_resp['fields_to_ann'] = ['title','abstract'] else: json_resp = get_fields_from_json() if all == 'all': # All the possible fields for every usecase (MANUAL CONFIGURATION) json_resp = get_fields_from_json() if Report.objects.filter(institute = 'PUBMED').exists(): json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda! else: if request.session['report_type'] == 'pubmed': json_resp['fields'] = ['volume','authors','year','journal'] json_resp['fields_to_ann'] = ['title','abstract'] else: # Fileds related exclusively to a usecase json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language']) if request.session['mode'] == 'Robot' or auto_request == 'Robot': with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out: data = json.load(out) json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']] for el in json_resp['fields_to_ann']: if el in json_resp['fields']: json_resp['fields'].remove(el) # print('FIELDS', json_resp) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_auto_annotations(request): # post\n\n request_body_json = json.loads(request.body)\n usecase_list = request_body_json['usecase']\n fields_list = request_body_json['selected']\n report_key = request_body_json['report_type']\n batch = request_body_json['batch']\n\n # check existence of examode labels and concepts\n\n if report_key == 'reports':\n for usecase in usecase_list:\n fields = []\n if fields_list != {}:\n if usecase in fields_list.keys():\n fields = list(set(fields_list[usecase]))\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'r') as use_outfile:\n json_to_ret = json.load(use_outfile)\n json_to_ret['extract_fields'][usecase] = fields\n # print(json_to_ret)\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'), 'w') as use_outfile:\n json.dump(json_to_ret,use_outfile)\n\n # print(fields)\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n # output_concepts_dir = os.path.join(workpath, './sket/outputs')\n # for root, dirs, files in os.walk(output_concepts_dir):\n # for f in files:\n # os.unlink(os.path.join(root, f))\n # for d in dirs:\n # shutil.rmtree(os.path.join(root, d))\n\n bool_val,error = create_auto_gt_1(usecase,fields,report_key,batch)\n if bool_val == False:\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'r') as use_outfile:\n json_to_ret = json.load(use_outfile)\n json_to_ret['extract_fields'][usecase] = []\n # print(json_to_ret)\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'w') as use_outfile:\n json.dump(json_to_ret, use_outfile)\n json_resp = {'error': error}\n return JsonResponse(json_resp)\n\n elif report_key == 'pubmed':\n for usecase in usecase_list:\n fields = ['title','abstract']\n # workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n # output_concepts_dir = os.path.join(workpath, './sket/outputs')\n # for root, dirs, files in os.walk(output_concepts_dir):\n # for f in files:\n # os.unlink(os.path.join(root, f))\n # for d in dirs:\n # shutil.rmtree(os.path.join(root, d))\n\n bool_val, error = create_auto_gt_1(usecase, fields, report_key, batch)\n if bool_val == False:\n json_resp = {'error': error}\n return JsonResponse(json_resp)\n\n json_resp = {'msg':'ok'}\n return JsonResponse(json_resp)", "def get_annotations(self):\n entity = self.get_object()\n serializer = AnnotationValueSerializer(entity.annotations.all(), many=True)\n return Response(serializer.data)", "def listMetaDataFields(self, exclude=True):\n #tool = getToolByName(self, ATCT_TOOLNAME)\n #original_list = tool.getMetadataDisplay(exclude)\n\n return DisplayList((\n ('getAnalysisCategory', _p('Analysis Category')),\n ('getAnalysisService', _p('Analysis Service')),\n ('getAnalysts', _('Analyst')),\n ('getClientOrderNumber', _('Client Order')),\n ('getClientReference', _('Client Reference')),\n ('getClientSampleID', _('Client Sample ID')),\n ('getClientTitle', _('Client')),\n ('getContactTitle', _('Contact')),\n ('Creator', _p('Creator')),\n ('created', _('Date Created')),\n ('getDatePublished', _('Date Published')),\n ('getDateReceived', _('Date Received')),\n ('getDateSampled', _('Date Sampled')),\n ('getProfilesTitle', _('Analysis Profiles')),\n ('getRequestID', _('Request ID')),\n ('getSampleID', _('Sample ID')),\n 
('getSamplePointTitle', _('Sample Point')),\n ('getSampleTypeTitle', _('Sample Type')),\n ('review_state', _p('Review state')),\n ))", "def fields(request):\n # Only recognizes a few fields for now.\n if request.method != 'POST':\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response\n\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n fields = json.loads(request.POST.get('fields'))\n issue = request.issue\n if 'description' in fields:\n issue.description = fields['description']\n if 'reviewers' in fields:\n issue.reviewers = _get_emails_from_raw(fields['reviewers'])\n issue.calculate_updates_for()\n if 'subject' in fields:\n issue.subject = fields['subject']\n issue.put()\n return HttpTextResponse('')", "def display_fields(self):\r\n\r\n field_text = self.show_fields()\r\n field_text_list = field_text.split(EOL)[0:-1]\r\n\r\n def fld_format (x_temp):\r\n\r\n x_temp = x_temp.split(COLON)[0], x_temp.split(COLON)[1]\r\n\r\n \"\"\"formats output of the list of search results\"\"\"\r\n\r\n if not isinstance(x_temp[1],str):\r\n shown_indexes = rangelist.range_find([int(Index(a_temp))\r\n for a_temp in x_temp[1]],reduce=True)\r\n else:\r\n shown_indexes = x_temp[1]\r\n\r\n if len(shown_indexes) < 20:\r\n return (abridge(x_temp[0]).replace(VERTLINE,SLASH)\r\n +VERTLINE\r\n +shown_indexes)\r\n\r\n\r\n returnlist = []\r\n sp_temp = rangelist.split_up_range(shown_indexes)\r\n\r\n\r\n returnlist.append(x_temp[0].replace(VERTLINE,SLASH)[0:min([60,len(x_temp[0])])]\r\n +VERTLINE+sp_temp[0])\r\n for s_temp in sp_temp[1:]:\r\n returnlist.append(VERTLINE+s_temp)\r\n\r\n return returnlist\r\n\r\n show_list(field_text_list,\r\n alerts.FIELDS[3:],0,40,\r\n func=fld_format,\r\n present=True,\r\n display=display)", "def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields", "def annotations(self):\n annotations = {\"date\": self.date_trunc(\"usage_start\")}\n # { query_param: database_field_name }\n fields = self._mapper.provider_map.get(\"annotations\")\n for q_param, db_field in fields.items():\n annotations[q_param] = F(db_field)\n if (\n \"project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"and:project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"or:project\" in self.parameters.parameters.get(\"group_by\", {})\n ):\n annotations[\"project\"] = F(\"namespace\")\n\n return annotations", "def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n 
return response", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def output_fields(request, project_id):\n project = Project.objects.get(id=project_id)\n dal = dal_mongo.DALMongo(project_id)\n ret = {}\n if project.segmentation_skipped:\n ret['col_or_outputfield'] = \"column\"\n ret['values'] = dal.get_matched_cols()\n else:\n ret['col_or_outputfield'] = \"output field\"\n ret['values'] = dal.get_output_fields_matched_cols()\n\n return JsonResponse(ret, safe=False)", "def fields(self):", "def overview(cls, queryset, *annotations):\n if select_related := cls.select_related:\n queryset = queryset.select_related(*select_related)\n if prefetch_related := cls.prefetch_related:\n queryset = queryset.prefetch_related(*prefetch_related)\n if all_annotations := cls.get_overview_annotations():\n if annotations:\n _annotations = {k: v for k, v in all_annotations.items() if k in annotations}\n else:\n _annotations = all_annotations\n queryset = queryset.annotate(**_annotations)\n return queryset", "def annotate(self, request, pk=None):\n\n # Get the document of the detail view and deserialize data\n document = self.get_object()\n serializer = AnnotationSerializer(\n data=request.data,\n context={'request': request, 'document': document}\n )\n\n # Validate the serializer and save the annotation\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n # Return the response\n return Response(serializer.data)", "def _fields(self, doclet):\n FIELD_TYPES = OrderedDict([('params', _params_formatter),\n ('properties', _params_formatter),\n ('exceptions', _exceptions_formatter),\n ('returns', _returns_formatter)])\n for field_name, callback in iteritems(FIELD_TYPES):\n for field in doclet.get(field_name, []):\n description = field.get('description', '')\n unwrapped = sub(r'[ \\t]*[\\r\\n]+[ \\t]*', ' ', description)\n yield callback(field, unwrapped)", "def showAnnotationsExamples(annotation=1.0, showTotalOnly=False):\n total = 0\n # get the right annotation\n if type(annotation) is float:\n annotation = str(annotation)\n # get the lists of annotations and sentences\n annotatedFolderPathList = [u'./002manuallyAnnotated/', u'./003negativeNaiveExtractors/000manualAnnotation/']\n enList, frList, refList, annotList = getAnnotationData(annotatedFolderPathList)\n # print one by one the SP corresponding to the searched annotation\n for indexAnnot, annot in enumerate(annotList):\n if annot == annotation:\n src = enList[indexAnnot] if u'en-fr' in refList[indexAnnot] else frList[indexAnnot]\n trgt = frList[indexAnnot] if u'en-fr' in refList[indexAnnot] else enList[indexAnnot]\n total += 1\n if showTotalOnly is False:\n print(u'{0} - {1}'.format(indexAnnot + 1, src))\n print(u'{0} - {1}'.format(indexAnnot + 1, trgt))\n print()\n print('TOTAL : ', total)\n return", "def test_model_with_no_backward_relations_render_only_relevant_fields(self):\n response = self.client.get(\n reverse(\"django-admindocs-models-detail\", args=[\"admin_docs\", \"family\"])\n )\n fields = response.context_data.get(\"fields\")\n self.assertEqual(len(fields), 2)", "def annotations(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"annotations\")", "def annotationlabel(request,action=None):\n\n username = request.session['username']\n mode1 = request.session['mode']\n auto_required = request.GET.get('ns_id', None)\n mode = NameSpace.objects.get(ns_id=mode1)\n\n # print('mode',mode1)\n usecase = request.session['usecase']\n # language = 
request.GET.get('language',request.session['language'])\n type = 'labels'\n\n if request.method == 'GET' and action.lower() == 'user_labels':\n\n \"\"\"GET request: given the report, the labels annotated by the user are returned\"\"\"\n\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report_id = request.GET.get('report_id')\n report1 = Report.objects.get(id_report = report_id,language = language)\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n json_dict = get_user_gt(user_get,mode_1,report1,language,'labels')\n return JsonResponse(json_dict,safe=False)\n\n elif request.method == 'GET' and action.lower() == 'all_labels':\n\n \"\"\" GET request: given the use case, all the labels associated to that usecase are returned. \"\"\"\n\n labels = AnnotationLabel.objects.filter(name=usecase).values('seq_number','label','annotation_mode')\n print(labels)\n json_dict = {}\n if len(labels) > 0:\n\n if mode1 == 'Human' or auto_required == 'Human':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Manual' in el['annotation_mode']:\n # if int(el['seq_number']) > count: # i primi 20 sono inseriti automaticamente\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n if mode1 == 'Robot' or auto_required == 'Robot':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Automatic' in el['annotation_mode']:\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n\n else:\n json_dict['labels'] = []\n\n json_dict['labels'] = sorted(json_dict['labels'], key=lambda json: json['seq_number'])\n print(json_dict)\n return JsonResponse(json_dict)\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are removed together with the\n associated groundtruth.\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting parameters.'}\n return json_response\n to_del = Associate.objects.filter(username=user, ns_id=mode, id_report=report1, language=language)\n if mode1 == 'Human':\n try:\n with transaction.atomic():\n\n if to_del.exists():\n json_response = delete_all_annotation(to_del, user, report1,language, type,mode)\n\n else:\n json_response = {'msg':'nothing to do'}\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred saving the ground_truth and the labels'}\n return JsonResponse(json_response)\n else:\n return JsonResponse(json_response)\n else:\n json_response = restore_robot_annotation(report1, 'labels', user)\n return JsonResponse(json_response)\n\n\n if request.method == 'POST' and action.lower() == 'insert':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are added in the database and a new \n JSON groundtruth is created. 
\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n labels_to_save = request_body_json['labels']\n # In this case the user manually deletes all the labels (NOT WITH CLEAR BUTTON) and saves.\n if len(labels_to_save) == 0 and mode1 == 'Human':\n\n \"\"\"If there are not labels to save, if there is a ground truth saved in the database, this is removed,\n otherwise no action is performed. \"\"\"\n\n rows = Associate.objects.filter(username = user,ns_id=mode, id_report = report1, language = language)\n if rows.exists():\n try:\n with transaction.atomic():\n json_response = delete_all_annotation(rows,user,report1,language,type,mode)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred.'}\n return JsonResponse(json_response, status=500)\n else:\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'Nothing to save.'}\n return JsonResponse(json_response)\n\n if len(labels_to_save) == 0 and mode1 == 'Robot':\n\n \"\"\" If there are not labels to save and the name space is Robot no action is performed and the already \n existing ground-truth is kept \"\"\"\n to_del = Associate.objects.filter(id_report=report1, language=language, username=user, ns_id=mode)\n # print('RESTORE')\n json_response = restore_robot_annotation(report1, 'labels',user)\n return JsonResponse(json_response)\n\n update = True\n\n \"\"\" Check if the user's labels she inserted are as many as the rows already present in the db: \n if they are not: update the annotation: the old annotation is replaced with the new one\n if they are: check if the labels existing are those inserted, in this case nothing is done, otherwise \n the current groundtruth is updated. \"\"\"\n\n existing_rows = Associate.objects.filter(username = user,ns_id=mode, id_report =report1,language =language)\n if existing_rows.exists():\n if existing_rows.count() == len(labels_to_save):\n for label in labels_to_save:\n label1 = AnnotationLabel.objects.get(name=usecase, label=label['label'], seq_number=label['seq_number'])\n if not Associate.objects.filter(username=user,ns_id=mode, seq_number=label1.seq_number, label=label1,\n id_report=report1, language=language).exists():\n update = True\n break\n else:\n update = False\n if update == True:\n try:\n with transaction.atomic():\n # Remove all the existing labels inserted by the user for that report. 
The existing ground truth is kept untile the deletion is successful\n to_del = Associate.objects.filter(username=user,ns_id=mode, id_report=report1,language = language)\n delete_all_annotation(to_del,user,report1,language,type,mode)\n\n json_resp_labels = update_annotation_labels(labels_to_save,usecase,user,report1,language,mode)\n\n jsonDict = serialize_gt(type, usecase, username, report_id,language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language = language,\n gt_json=jsonDict, gt_type=type,insertion_time=Now())\n\n except (Exception) as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred saving the ground_truth '\n 'and the labels, the transaction rolledback'}\n return JsonResponse(json_response)\n\n else:\n return JsonResponse(json_resp_labels)\n else:\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='labels', username=user, ns_id=mode, id_report=report1,\n language=language).exists():\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n json_response = {'message': 'ok'}\n else:\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n\n elif mode1 == 'Robot':\n\n \"\"\" In this section the name space Robot is handled: If the user is in the AUTOMATIC MODE and the labels\n she inserts are those annotated by the algorithm, this means that she agrees with the annotation of the \n Robot user. 
The annotation does not change, only the insertion time is changed.\"\"\"\n\n try:\n with transaction.atomic():\n # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode,\n id_report=report1, language=language,\n gt_type='labels')\n\n gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels').delete()\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n except Exception as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred updating labels dates'}\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)", "def get_list_display(self, request):\n list_display = []\n for field_name in self.list_display:\n try:\n db_field = self.model._meta.get_field(field_name)\n if isinstance(db_field, BooleanField):\n field_name = boolean_switch_field(db_field)\n except FieldDoesNotExist:\n pass\n list_display.append(field_name)\n return list_display", "def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )", "def annotate(api_key, text, ontologies=[], longest_only=False, expand_mappings=False, include=[]):\n annotations = []\n url = BIOPORTAL_API_BASE + '/annotator'\n\n headers = {\n 'content-type': \"application/json\",\n 'authorization': \"apikey token=\" + api_key\n }\n\n if len(text) > 0:\n payload = {'text': text,\n 'longest_only': longest_only,\n 'expand_mappings': expand_mappings}\n\n if len(ontologies) > 0:\n payload['ontologies'] = ','.join(ontologies)\n\n if len(include) > 0:\n payload['include'] = ','.join(include)\n\n response = requests.post(url, json=payload, headers=headers, verify=False)\n\n if response.status_code != 200:\n raise Exception('Problem when calling the Annotator: ' + response.text)\n\n\n\n # print(payload)\n # print(response.url)\n # print(response.status_code)\n # print(response.text)\n annotations = json.loads(response.text)\n\n return annotations", "def fields(self):\n ...", "def editable_metadata_fields(self):\r\n def jsonify_value(field, json_choice):\r\n if isinstance(json_choice, dict):\r\n json_choice = dict(json_choice) # make a copy so below doesn't change the original\r\n if 'display_name' in json_choice:\r\n 
json_choice['display_name'] = get_text(json_choice['display_name'])\r\n if 'value' in json_choice:\r\n json_choice['value'] = field.to_json(json_choice['value'])\r\n else:\r\n json_choice = field.to_json(json_choice)\r\n return json_choice\r\n\r\n def get_text(value):\r\n \"\"\"Localize a text value that might be None.\"\"\"\r\n if value is None:\r\n return None\r\n else:\r\n return self.runtime.service(self, \"i18n\").ugettext(value)\r\n\r\n metadata_fields = {}\r\n\r\n # Only use the fields from this class, not mixins\r\n fields = getattr(self, 'unmixed_class', self.__class__).fields\r\n\r\n for field in fields.values():\r\n\r\n if field.scope != Scope.settings or field in self.non_editable_metadata_fields:\r\n continue\r\n\r\n # gets the 'default_value' and 'explicitly_set' attrs\r\n metadata_fields[field.name] = self.runtime.get_field_provenance(self, field)\r\n metadata_fields[field.name]['field_name'] = field.name\r\n metadata_fields[field.name]['display_name'] = get_text(field.display_name)\r\n metadata_fields[field.name]['help'] = get_text(field.help)\r\n metadata_fields[field.name]['value'] = field.read_json(self)\r\n\r\n # We support the following editors:\r\n # 1. A select editor for fields with a list of possible values (includes Booleans).\r\n # 2. Number editors for integers and floats.\r\n # 3. A generic string editor for anything else (editing JSON representation of the value).\r\n editor_type = \"Generic\"\r\n values = field.values\r\n if isinstance(values, (tuple, list)) and len(values) > 0:\r\n editor_type = \"Select\"\r\n values = [jsonify_value(field, json_choice) for json_choice in values]\r\n elif isinstance(field, Integer):\r\n editor_type = \"Integer\"\r\n elif isinstance(field, Float):\r\n editor_type = \"Float\"\r\n elif isinstance(field, List):\r\n editor_type = \"List\"\r\n elif isinstance(field, Dict):\r\n editor_type = \"Dict\"\r\n elif isinstance(field, RelativeTime):\r\n editor_type = \"RelativeTime\"\r\n metadata_fields[field.name]['type'] = editor_type\r\n metadata_fields[field.name]['options'] = [] if values is None else values\r\n\r\n return metadata_fields", "def _get_fields(\n annotations: Dict[str, Union[Annotation, Input, Output]]\n ) -> Dict[str, Union[Annotation, Input, Output]]:\n annotation_fields = OrderedDict()\n for name, annotation in annotations.items():\n # Skip return type\n if name == \"return\":\n continue\n # Handle EnumMeta annotation\n if isinstance(annotation, EnumMeta):\n from .enum_input import EnumInput\n\n annotation = EnumInput(type=\"string\", enum=annotation)\n # Handle Group annotation\n if is_group(annotation):\n annotation: GroupInput = copy.deepcopy(getattr(annotation, IOConstants.GROUP_ATTR_NAME))\n # Try creating annotation by type when got like 'param: int'\n if not _is_dsl_type_cls(annotation) and not _is_dsl_types(annotation):\n origin_annotation = annotation\n annotation: Input = _get_annotation_cls_by_type(annotation, raise_error=False)\n if not annotation:\n msg = f\"Unsupported annotation type {origin_annotation!r} for parameter {name!r}.\"\n raise UserErrorException(msg)\n annotation_fields[name] = annotation\n return annotation_fields", "def get_fields(self):\r\n return self.fields", "def annotations(self):\n\t\tif self._record is not None:\n\t\t return self._record.annotations\n\t\telse:\n\t\t return {}", "def get_overview_annotations() -> dict:\n return {}", "def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in 
self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise", "def getFields(context, interface=None, annotation=None):\n if interface is None:\n domain_model = proxy.removeSecurityProxy(context.domain_model)\n interface = utils.get_derived_table_schema(domain_model)\n if annotation is None:\n annotation = utils.get_descriptor(interface)\n for field_name in annotation.listing_columns:\n yield interface[field_name]\n # !+FIELD_KEYERROR(mr, jul-2012) throws a KeyError when field_name is \n # not part of the interface e.g. if we use a \"field property\" that is \n # implemented as a domain_model.{property}.", "def report_start_end(request):\n\n report = request.GET.get('report_id')\n lang = request.GET.get('language',None)\n usecase = request.session['usecase']\n data = get_fields_from_json()\n json_keys_to_display = data['fields']\n json_keys_to_display.extend(['journal','authors','year','volume'])\n json_keys_to_ann = data['fields_to_ann']\n json_keys = (data['all_fields'])\n\n language = request.GET.get('language',request.session['language'])\n request_auto = request.GET.get('ns_id',None)\n if request.session['mode'] == 'Robot' or (request_auto is not None and request_auto == 'Robot' and request.session['institute'] != 'PUBMED'):\n # In this case we require automatic annotation: the keys to annotate change\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n with open(os.path.join(workpath,'./automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_keys = data['total_fields'][usecase]\n json_keys_to_ann = data['extract_fields'][usecase]\n for el in json_keys_to_ann:\n if el in json_keys_to_display:\n json_keys_to_display.remove(el)\n\n json_keys.extend(['journal', 'authors', 'year', 'volume', 'abstract', 'title'])\n json_keys_to_ann.extend(['abstract', 'title'])\n if lang is not None:\n language = lang\n json_dict = report_get_start_end(json_keys,json_keys_to_ann,report,language)\n # print(json_dict)\n return JsonResponse(json_dict)" ]
[ "0.5912422", "0.5710874", "0.5643516", "0.5519478", "0.5514924", "0.541801", "0.5411552", "0.5356129", "0.52762175", "0.5274392", "0.5206856", "0.51891625", "0.5185114", "0.51487356", "0.51449263", "0.5080195", "0.5075947", "0.50657296", "0.50644743", "0.50619006", "0.5053078", "0.50467515", "0.50438195", "0.5041999", "0.5000167", "0.4996557", "0.49899897", "0.49653322", "0.49461505", "0.49451053" ]
0.63069195
0
This view creates the HttpResponse object with the CSV example files; these are the examples the user can download.
def download_examples(request):
    file_required = request.GET.get('token',None)
    path = ''
    workpath = os.path.dirname(os.path.abspath(__file__))  # Returns the Path your .py file is in
    if file_required == 'reports':
        path = os.path.join(workpath, './static/examples/report.csv')
    elif file_required == 'concepts':
        path = os.path.join(workpath, './static/examples/concept.csv')
    elif file_required == 'labels':
        path = os.path.join(workpath, './static/examples/labels.csv')
    elif file_required == 'pubmed':
        path = os.path.join(workpath, './static/examples/pubmed.csv')
    content = open(path,'r')
    return HttpResponse(content, content_type='text/csv')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_download_view(request):\n logging.info(\" CSV file download is working\")\n now = datetime.now()\n timestamp = now.strftime(\"%Y_%m_%d\")\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"results_' + \\\n GLOBAL_VARIABLE.get_host_name()+'_'+timestamp+'.csv\"'\n\n writer = csv.writer(response)\n list_of_cd = list(GLOBAL_VARIABLE.get_current_data())\n\n for i in range(10):\n rows = [sub_list[i] for sub_list in list_of_cd]\n writer.writerow(rows)\n\n return response", "def download_templates(request):\n\n file_required = request.GET.get('token',None)\n path = ''\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n\n if file_required == 'reports':\n path = os.path.join(workpath, './static/templates/report.csv')\n\n elif file_required == 'concepts':\n path = os.path.join(workpath, './static/templates/concept.csv')\n\n elif file_required == 'labels':\n path = os.path.join(workpath, './static/templates/labels.csv')\n\n elif file_required == 'pubmed':\n path = os.path.join(workpath, './static/templates/pubmed.csv')\n\n content = open(path,'r')\n return HttpResponse(content, content_type='text/csv')", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response", "def dataset_constructor_csv_file_upload(request):\n if request.method == \"POST\":\n relation_support_dataset = 
request.FILES['csv_file']\n handle_uploaded_file(relation_support_dataset, 'temp/cntr_csv_file.csv')\n df = pd.read_csv('temp/cntr_csv_file.csv')\n ind = {}\n data = []\n for i, row in df.iterrows():\n if row['reldescription'] not in ind:\n data.append({'name':row['reldescription'], 'examples':[]})\n ind[row['reldescription']] = len(data) - 1\n data[ind[row['reldescription']]]['examples'].append({'head':row['head'], 'tail':row['tail'], 'sentence':row['sentence']})\n return HttpResponse(\n json.dumps({'num_rels':len(data), 'num_exs':len(data[0]['examples']), 'data':data}),\n content_type=\"application/json\"\n )", "def downloadResponse(request, formcode=None):\n if formcode !=None:\n response = HttpResponse(content_type='text/csv')\n responses = Response.objects.filter(form_id=formcode)\n writer = csv.writer(response)\n writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])\n for r in responses:\n user = User.objects.get(id=r.user_id)\n writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])\n\n response['Content-Disposition'] = 'attachment; filename=\"response.csv\"'\n return response \n return render(request, 'download.html')", "def get(self, request, **_kwargs):\n self.check_access()\n response = HttpResponse(content_type='text/csv')\n filename = \"team-membership_{}_{}_{}.csv\".format(\n self.course.id.org, self.course.id.course, self.course.id.run\n )\n response['Content-Disposition'] = f'attachment; filename=\"{filename}\"'\n load_team_membership_csv(self.course, response)\n return response", "def dwn_analysis_csv(request):\n data = []\n for i in results:\n data.append((i['sentence'], i['head'], i['tail'], i['pred_relation'], i['sent'], i['conf']))\n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def download_ground_truths(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path1 = os.path.join(workpath, './static/temp/temp.csv')\n path2 = os.path.join(workpath, './static/BioC/temp_files/to_download.csv')\n if os.path.exists(path1):\n os.remove(path1)\n if os.path.exists(path2):\n os.remove(path2)\n username = request.session['username']\n inst = request.GET.get('institute',None)\n if inst == '':\n inst = None\n else:\n inst = str(inst)\n use = request.GET.get('usec',None)\n if use == '':\n use = None\n else:\n use = str(use)\n report_type = request.GET.get('report_type',None)\n if report_type == '':\n report_type = None\n annotation_mode = request.GET.get('mode',None)\n if annotation_mode == '':\n annotation_mode = None\n lang = request.GET.get('lang',None)\n if lang == '':\n lang = None\n else:\n lang = str(lang)\n batch = request.GET.get('batch','') # added 22/10/2021\n if batch == '' or batch == 'all':\n batch = None\n else:\n batch = int(batch)\n\n all = request.GET.get('all_gt',None)\n action = request.GET.get('action',None)\n format = request.GET.get('format',None)\n json_resp = {}\n json_resp['ground_truth'] = []\n if format == 'json' or all =='all' :\n json_resp = create_json_to_download(report_type,action,username,use,annotation_mode,inst,lang,all,batch)\n return JsonResponse(json_resp)\n\n elif format == 'csv':\n response = HttpResponse(content_type='text/csv')\n resp = create_csv_to_download(report_type,annotation_mode,username,use,inst,lang,action,response,batch)\n return resp\n\n elif 
format == 'biocxml':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n if report_type == 'pubmed':\n json_keys_to_display = ['year','authors','volume','journal']\n json_keys_to_ann = ['title','abstract']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'xml',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')\n\n elif format == 'biocjson':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'json',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')", "def test_get_students_features_csv(self):\r\n url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url + '/csv', {})\r\n self.assertEqual(response['Content-Type'], 'text/csv')", "def dwn_rel_sup_csv(request):\n i = int(request.GET.get('i'))\n \n return FileResponse(open('temp/relation_support_datasets/relation_support_dataset_{}_{}.csv'.format(i, request.user.username),'rb'))", "def create_csv_response(filename, header, datarows):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'\\\r\n .format(filename)\r\n csvwriter = csv.writer(\r\n response,\r\n dialect='excel',\r\n quotechar='\"',\r\n quoting=csv.QUOTE_ALL)\r\n\r\n csvwriter.writerow(header)\r\n for datarow in datarows:\r\n encoded_row = [unicode(s).encode('utf-8') for s in datarow]\r\n csvwriter.writerow(encoded_row)\r\n return response", "def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def post(self, request, *args, **kwargs):\n create_media_folder_if_not_exists()\n delete_csv_before_request()\n try:\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n file = serializer.save()\n df = pd.read_csv(file.csv.path)\n df = get_dummies(df)\n df.to_csv(os.path.join(MEDIA_ROOT, 'modified.csv'), index=False)\n modified = Csv.objects.create(csv='modified.csv')\n\n # response = HttpResponse(modified.csv, content_type='application/csv')\n # response['Content-Disposition'] = 'inline; filename=' + os.path.basename(str(modified.csv))\n return FileResponse(modified.csv) # response\n\n # return Response({\"file\": b\"\".join(modified.csv).decode(\"utf-8\")}, status=status.HTTP_200_OK)\n # return Response({'result': 'ok' }, status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'result': 'ERROR ' + str(e)}, status=status.HTTP_400_BAD_REQUEST)", "def download_bank_details(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"bank_details.csv\"'\n\n writer = 
csv.writer(response)\n \n writer.writerow([\n 's/n',\n 'account_number',\n 'account_name',\n 'recipient_code',\n 'bank_name',\n 'student_name',\n 'date_added'\n ])\n \n count = 0\n for bank in StudentBankDetail.objects.filter(month=batch_date):\n count +=1\n writer.writerow([\n count,\n str(bank.account_number),\n str(bank.account_name),\n str(bank.recipient_code),\n str(bank.bank.bank_name),\n str(bank.student.name),\n datetime.strftime(bank.date_added, '%d-%m-%Y')\n ])\n \n\n\n return response", "def csvdata():\n return render_template(\"data.html\")", "def do_GET(self):\n self.send_response(200)\n self.end_headers()\n self.wfile.write(json.dumps(sample_files_list_response).encode())", "def csv(request):\n if request.method == 'POST':\n form = CSVUploadForm(request.POST, request.FILES)\n if form.is_valid():\n fund_bot = FundBot(csv_file=request.FILES['csv_file'])\n filename = '%s-banner-iii.csv' % datetime.datetime.today().strftime('%Y-%m-%d')\n response = HttpResponse(mimetype=\"text/csv\")\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n \n csv_response = fund_bot.process(response)\n new_log = FundProcessLog(substitutions=int(fund_bot.substitutions))\n new_log.save()\n return csv_response\n else:\n return direct_to_template(request,\n 'vendors/iii/csv.html',\n {'form':CSVUploadForm()})", "def get_overall_csv(request, cur_course_user):\n course = cur_course_user.course\n assessments = models.Assessment.objects.filter(course=course).order_by('id')\n\n # Create the HttpResponse object with the appropriate CSV header.\n response = http.HttpResponse(content_type='text/csv')\n\n filename = \"%s-scores.csv\" % course.name\n # Replace spaces in the course name with dashes and convert to lower case\n filename = filename.replace(' ', '-').lower()\n\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n fieldnames=['Last Name', 'First Name', 'ID', 'Email']\n for assessment in assessments:\n fieldnames.append(assessment.name)\n if hasattr(assessment, 'homework'):\n fieldnames.append('Late days for %s' % assessment.name)\n\n writer = csv.DictWriter(response, fieldnames=fieldnames)\n\n course_users = models.CourseUser.objects.filter(course=course,\n privilege=models.CourseUser.STUDENT).order_by('user__last_name', 'user__first_name')\n\n writer.writeheader()\n\n for course_user in course_users:\n user = course_user.user\n\n row = {\n 'Last Name': user.last_name,\n 'First Name': user.first_name,\n 'ID': user.student_id,\n 'Email': user.email\n }\n\n for assessment in assessments:\n submission = models.Submission.objects.filter(group_members=course_user, assessment=assessment, last=True)\n\n if submission.count() == 0:\n row[assessment.name] = 'Not Found'\n else:\n submission = submission[0]\n row[assessment.name] = submission.points if submission.graded else 'ungraded'\n\n if hasattr(assessment, 'homework'):\n diff = submission.time - submission.assessment.homework.soft_deadline\n late_days = diff.total_seconds() / 24.0 / 60.0 / 60.0\n late_days = max(0, math.ceil(late_days))\n\n row['Late days for %s' % assessment.name] = late_days\n\n writer.writerow(row)\n\n return response", "def make_response(header, data, format, name, encoding=None):\n if format == 'csv':\n formatter = CSVformatter(encoding)\n mimetype = 'application/csv'\n elif format == 'xls':\n formatter = CSVformatter(encoding)\n mimetype = 'application/xls'\n else:\n raise Exception(\"Unknown format: %s\" % (format,))\n\n resp = HttpResponse(generator(header, data, formatter), 
mimetype=mimetype)\n resp['Content-Disposition'] = 'attachment; filename=%s.%s' % (name, format)\n return resp", "def download_all_reports(request):\n\n request_body_json = json.loads(request.body)\n report_list = request_body_json['report_list']\n mode = request_body_json['format']\n action = request_body_json['action']\n annot = request_body_json['annotation_mode']\n\n if annot == 'Manual':\n annot = 'Human'\n elif annot == 'Automatic':\n annot = 'Robot'\n\n try:\n response = HttpResponse(content_type='text/csv')\n resp = download_report_gt(report_list, action, annot, mode, response)\n if mode == 'biocxml' or mode == 'biocjson':\n return HttpResponse(resp, content_type='application/xml')\n elif mode == 'csv':\n return resp\n elif mode == 'json':\n return JsonResponse(resp)\n\n except Exception as e:\n print(e)\n json_error = {'error': e}\n return JsonResponse(json_error)", "def _generate_examples(self, files):\n idx = 0\n for filename in files:\n with open(filename) as file:\n for line in file:\n yield idx, {\"text\": line}\n idx += 1", "def csv_response(filename, header, rows):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)\r\n writer = csv.writer(response, dialect='excel', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n # In practice, there should not be non-ascii data in this query,\r\n # but trying to do the right thing anyway.\r\n encoded = [unicode(s).encode('utf-8') for s in header]\r\n writer.writerow(encoded)\r\n for row in rows:\r\n encoded = [unicode(s).encode('utf-8') for s in row]\r\n writer.writerow(encoded)\r\n return response", "def create_explanations_csv():\n with open('output/' + dataset_name + '_' + model_name + '.csv', mode='w', newline='') as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(\n [\"index\", \"original text\", \"true class\", \"decoded text\", \"black box prediction\",\n \"decision tree prediction\", \"fidelity\", \"exemplars\", \"counter exemplars\", \"top exemplar words\",\n \"top counter exemplar words\"])\n for i in range(len(idx)):\n writer.writerow(\n [idx[i], X_original[i], y_original[i], final_decoded_sentences[i][0], bbpreds[i], dtpreds[i],\n fidelities[i], exemplars[i], counter_exemplars[i], top_exemplar_words_dict_list[i],\n top_counter_exemplar_words_dict_list[i]])", "def download_and_prepare_dmipy_example_dataset(self):\r\n subject_ID = 100307\r\n self.download_subject(subject_ID)\r\n self.prepare_example_slice(subject_ID)", "def get_csv(request, cur_course_user, assessment_id):\n assessment = shortcuts.get_object_or_404(models.Assessment, pk=assessment_id)\n\n # Create the HttpResponse object with the appropriate CSV header.\n response = http.HttpResponse(content_type='text/csv')\n\n filename = \"%s-scores.csv\" % assessment.name\n # Replace spaces in the assessment name with dashes and convert to lower case\n filename = filename.replace(' ', '-').lower()\n\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n question_parts = assessment.get_prefetched_question_parts().order_by('-question_number')\n num_questions = assessment.get_num_questions()\n\n fieldnames=['Last Name', 'First Name', 'ID', 'Email', 'Total Score']\n if hasattr(assessment, 'homework'):\n fieldnames.append('Finalized?')\n fieldnames.append('Submission time')\n fieldnames.append('Late days')\n\n for i in range(num_questions):\n fieldnames.append('Question %d' % (i + 1))\n\n writer = csv.DictWriter(response, 
fieldnames=fieldnames)\n\n submissions = assessment.get_prefetched_submissions().order_by('course_user__user__last_name',\n 'course_user__user__first_name')\n\n writer.writeheader()\n\n for submission in submissions:\n for course_user in submission.group_members.all():\n user = course_user.user\n score = submission.points if submission.graded else 'ungraded'\n\n row = {\n 'Last Name': user.last_name,\n 'First Name': user.first_name,\n 'ID': user.student_id,\n 'Email': user.email,\n 'Total Score': score\n }\n\n if hasattr(assessment, 'homework'):\n cur_timezone = pytz.timezone(assessment.course.get_timezone_string())\n local_time = timezone.localtime(submission.time, timezone=cur_timezone)\n row['Submission time'] = local_time.strftime('%m/%d/%Y %I:%M %p')\n\n diff = submission.time - submission.assessment.homework.soft_deadline\n late_days = diff.total_seconds() / 24.0 / 60.0 / 60.0\n late_days = max(0, math.ceil(late_days))\n row['Late days'] = late_days\n\n row['Finalized?'] = 'Yes' if submission.is_finalized() else 'No'\n\n for i in range(num_questions):\n if submission.is_question_graded(i + 1):\n row['Question %d' % (i + 1)] = submission.get_question_points(i + 1)\n else:\n row['Question %d' % (i + 1)] = 'ungraded'\n writer.writerow(row)\n\n return response", "def test_staff_csv(self):\r\n\r\n self._setstaff_login()\r\n self._add_edx4edx()\r\n\r\n def_ms = modulestore()\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n CourseStaffRole(course.id).add_users(self.user)\r\n\r\n response = self.client.post(reverse('sysadmin_staffing'),\r\n {'action': 'get_staff_csv', })\r\n self.assertIn('attachment', response['Content-Disposition'])\r\n self.assertEqual('text/csv', response['Content-Type'])\r\n columns = [_('course_id'), _('role'), _('username'),\r\n _('email'), _('full_name'), ]\r\n self.assertIn(','.join('\"' + c + '\"' for c in columns),\r\n response.content)\r\n\r\n self._rm_edx4edx()", "def simulation_export(request, simulation):\n\n seed = np.random.randint(10000)\n dir = '{0}/website_files/exports/{1}'.format(settings.BASE_DIR, seed)\n os.makedirs(dir)\n\n files_names = []\n\n files_names.append(object_export_save(simulation, 'centroid', dir))\n files_names.append(object_export_save(simulation, 'crossing', dir))\n files_names.append(object_export_save(simulation, 'link', dir))\n files_names.append(object_export_save(simulation, 'function', dir))\n files_names.append(public_transit_export_save(simulation, dir))\n files_names.append(pricing_export_save(simulation, dir))\n\n\n demandsegments = get_query('demandsegment', simulation)\n for demandsegment in demandsegments:\n files_names.append(matrix_export_save(simulation, demandsegment, dir))\n\n\n\n #Need to add parameters file here\n\n zipname = '{0}'.format(str(simulation))\n\n s = BytesIO()\n\n file = zipfile.ZipFile(s, 'w')\n\n for f in files_names:\n # Calculate path for file in zip\n fdir, fname = os.path.split(f)\n zip_path = os.path.join(zipname, fname)\n\n # Add file, at correct path\n file.write(f, zip_path)\n\n file.close()\n\n # Grab ZIP file from in-memory, make response with correct MIME-type\n response = HttpResponse(s.getvalue())\n response['content_type'] = 'application/x-zip-compressed'\n # ..and correct content-disposition\n response['Content-Disposition'] = 'attachment; filename={0}.zip'.format(str(simulation))\n\n shutil.rmtree(dir, ignore_errors=True)\n\n return response", "def initialize_response(self, filename):\n key = 'Content-Disposition'\n self.response = 
HttpResponse(content_type='text/csv')\n self.response[key] = f'attachment; filename=\"{filename}\"'\n self.writer = UnicodeCsvWriter(self.response)", "def send_csv_reply(self, request, result, tags):\n request.setHeader('Content-disposition', 'attachment; filename=%s.csv' % \n result[0]['uuid'])\n if tags[0][0]:\n tags = tags[0][1][0][0]\n else:\n tags = None\n self.write_one_stream(request, \n result[0], \n tags)\n \n request.finish()" ]
[ "0.66388816", "0.6486266", "0.6369911", "0.6286953", "0.62677735", "0.6229694", "0.6140815", "0.60950977", "0.60798424", "0.6048974", "0.588191", "0.5857587", "0.5844419", "0.58400095", "0.58131593", "0.5644796", "0.5629658", "0.5619012", "0.5589778", "0.55788064", "0.55754256", "0.55678886", "0.5552659", "0.5528432", "0.55255556", "0.5497241", "0.5442896", "0.54304147", "0.5423516", "0.54053414" ]
0.8027104
0
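A minimal usage sketch for the token-dispatching CSV view stored in the record above, exercising it directly with Django's RequestFactory. The import path and the presence of ./static/examples/report.csv next to the views module are assumptions for illustration, not part of the dataset.

# Minimal sketch, assuming `download_examples` is importable and the example CSV exists.
from django.test import RequestFactory

from myapp.views import download_examples  # hypothetical import path

factory = RequestFactory()
request = factory.get('/download_examples/', {'token': 'reports'})
response = download_examples(request)

# The view streams the CSV back with a text/csv content type.
assert response.status_code == 200
assert response['Content-Type'].startswith('text/csv')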
This view creates the HttpResponse object with the appropriate CSV header; these are the templates the user can download.
def download_templates(request):
    file_required = request.GET.get('token',None)
    path = ''
    workpath = os.path.dirname(os.path.abspath(__file__))  # Returns the Path your .py file is in
    if file_required == 'reports':
        path = os.path.join(workpath, './static/templates/report.csv')
    elif file_required == 'concepts':
        path = os.path.join(workpath, './static/templates/concept.csv')
    elif file_required == 'labels':
        path = os.path.join(workpath, './static/templates/labels.csv')
    elif file_required == 'pubmed':
        path = os.path.join(workpath, './static/templates/pubmed.csv')
    content = open(path,'r')
    return HttpResponse(content, content_type='text/csv')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_download_view(request):\n logging.info(\" CSV file download is working\")\n now = datetime.now()\n timestamp = now.strftime(\"%Y_%m_%d\")\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"results_' + \\\n GLOBAL_VARIABLE.get_host_name()+'_'+timestamp+'.csv\"'\n\n writer = csv.writer(response)\n list_of_cd = list(GLOBAL_VARIABLE.get_current_data())\n\n for i in range(10):\n rows = [sub_list[i] for sub_list in list_of_cd]\n writer.writerow(rows)\n\n return response", "def download_bank_details(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"bank_details.csv\"'\n\n writer = csv.writer(response)\n \n writer.writerow([\n 's/n',\n 'account_number',\n 'account_name',\n 'recipient_code',\n 'bank_name',\n 'student_name',\n 'date_added'\n ])\n \n count = 0\n for bank in StudentBankDetail.objects.filter(month=batch_date):\n count +=1\n writer.writerow([\n count,\n str(bank.account_number),\n str(bank.account_name),\n str(bank.recipient_code),\n str(bank.bank.bank_name),\n str(bank.student.name),\n datetime.strftime(bank.date_added, '%d-%m-%Y')\n ])\n \n\n\n return response", "def downloadResponse(request, formcode=None):\n if formcode !=None:\n response = HttpResponse(content_type='text/csv')\n responses = Response.objects.filter(form_id=formcode)\n writer = csv.writer(response)\n writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])\n for r in responses:\n user = User.objects.get(id=r.user_id)\n writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])\n\n response['Content-Disposition'] = 'attachment; filename=\"response.csv\"'\n return response \n return render(request, 'download.html')", "def csv_response(filename, header, rows):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)\r\n writer = csv.writer(response, dialect='excel', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n # In practice, there should not be non-ascii data in this query,\r\n # but trying to do the right thing anyway.\r\n encoded = [unicode(s).encode('utf-8') for s in header]\r\n writer.writerow(encoded)\r\n for row in rows:\r\n encoded = [unicode(s).encode('utf-8') for s in row]\r\n writer.writerow(encoded)\r\n return response", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def create_csv_response(filename, header, datarows):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'\\\r\n .format(filename)\r\n csvwriter = csv.writer(\r\n response,\r\n dialect='excel',\r\n quotechar='\"',\r\n quoting=csv.QUOTE_ALL)\r\n\r\n csvwriter.writerow(header)\r\n for datarow in datarows:\r\n encoded_row = [unicode(s).encode('utf-8') for s in datarow]\r\n csvwriter.writerow(encoded_row)\r\n return response", "def make_response(header, data, format, name, encoding=None):\n if format == 'csv':\n formatter = CSVformatter(encoding)\n mimetype = 'application/csv'\n elif format == 'xls':\n formatter = CSVformatter(encoding)\n mimetype = 'application/xls'\n else:\n raise Exception(\"Unknown format: %s\" % (format,))\n\n resp = HttpResponse(generator(header, data, formatter), mimetype=mimetype)\n resp['Content-Disposition'] = 'attachment; filename=%s.%s' % (name, format)\n return resp", "def return_csv(self, filename, header, data):\r\n\r\n csv_file = StringIO.StringIO()\r\n writer = 
csv.writer(csv_file, dialect='excel', quotechar='\"',\r\n quoting=csv.QUOTE_ALL)\r\n\r\n writer.writerow(header)\r\n\r\n # Setup streaming of the data\r\n def read_and_flush():\r\n \"\"\"Read and clear buffer for optimization\"\"\"\r\n csv_file.seek(0)\r\n csv_data = csv_file.read()\r\n csv_file.seek(0)\r\n csv_file.truncate()\r\n return csv_data\r\n\r\n def csv_data():\r\n \"\"\"Generator for handling potentially large CSVs\"\"\"\r\n for row in data:\r\n writer.writerow(row)\r\n csv_data = read_and_flush()\r\n yield csv_data\r\n response = HttpResponse(csv_data(), mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\r\n filename)\r\n return response", "def create_csv(request):\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output = io.StringIO()\n\n headers = []\n if income_history:\n for i in income_history[0]:\n if i != 'income_history_id':\n headers.append(i)\n\n writer = csv.DictWriter(output, dialect='excel', quoting=csv.QUOTE_ALL, fieldnames=headers)\n writer.writeheader()\n\n if income_history:\n for entry in income_history:\n del entry['income_history_id']\n writer.writerow(entry)\n\n response = file_streaming_response('text/csv', 'income_history.csv', output)\n return response", "def download_examples(request):\n\n file_required = request.GET.get('token',None)\n path = ''\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n if file_required == 'reports':\n path = os.path.join(workpath, './static/examples/report.csv')\n\n elif file_required == 'concepts':\n path = os.path.join(workpath, './static/examples/concept.csv')\n\n elif file_required == 'labels':\n path = os.path.join(workpath, './static/examples/labels.csv')\n\n elif file_required == 'pubmed':\n path = os.path.join(workpath, './static/examples/pubmed.csv')\n\n content = open(path,'r')\n return HttpResponse(content, content_type='text/csv')", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscrits%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n person_list = Person.objects.all()\n\n table = ExportPersonTable(person_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n row.append(value.encode('utf8'))\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def initialize_response(self, filename):\n key = 'Content-Disposition'\n self.response = HttpResponse(content_type='text/csv')\n self.response[key] = f'attachment; filename=\"{filename}\"'\n self.writer = UnicodeCsvWriter(self.response)", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscripcions-tallers-%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n regtaller_list = TallerRegistration.objects.all()\n\n table = 
ExportTallerRegistrationTable(regtaller_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n if isinstance(value, basestring):\n row.append(value.encode('utf8'))\n else:\n row.append(value)\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def get(self, request, **_kwargs):\n self.check_access()\n response = HttpResponse(content_type='text/csv')\n filename = \"team-membership_{}_{}_{}.csv\".format(\n self.course.id.org, self.course.id.course, self.course.id.run\n )\n response['Content-Disposition'] = f'attachment; filename=\"{filename}\"'\n load_team_membership_csv(self.course, response)\n return response", "def csv(request):\n if request.method == 'POST':\n form = CSVUploadForm(request.POST, request.FILES)\n if form.is_valid():\n fund_bot = FundBot(csv_file=request.FILES['csv_file'])\n filename = '%s-banner-iii.csv' % datetime.datetime.today().strftime('%Y-%m-%d')\n response = HttpResponse(mimetype=\"text/csv\")\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n \n csv_response = fund_bot.process(response)\n new_log = FundProcessLog(substitutions=int(fund_bot.substitutions))\n new_log.save()\n return csv_response\n else:\n return direct_to_template(request,\n 'vendors/iii/csv.html',\n {'form':CSVUploadForm()})", "def csvdata():\n return render_template(\"data.html\")", "def csv_response(rows, filename=\"export.csv\"):\n\t# Unfortunately Flask doesn't let you output response as an IO Stream, so you have\n\t# buffer the entire response to a string first.\n\tsi = StringIO.StringIO()\n\tcw = csv.writer(si)\n\tcw.writerow(header)\n\tfor row in rows:\n\t\tcw.writerow()\n\toutput = make_response(si.getvalue())\n\toutput.headers[\"Content-Disposition\"] = \"attachment; filename=%s\" % filename\n\toutput.headers[\"Content-type\"] = \"text/csv\"\n\treturn output", "def csv_report(request):\r\n if not _can_download_report(request.user):\r\n return HttpResponseForbidden(_('You do not have permission to view this page.'))\r\n\r\n if request.method == 'POST':\r\n start_date = request.POST.get('start_date', '')\r\n end_date = request.POST.get('end_date', '')\r\n start_letter = request.POST.get('start_letter', '')\r\n end_letter = request.POST.get('end_letter', '')\r\n report_type = request.POST.get('requested_report', '')\r\n try:\r\n start_date = _get_date_from_str(start_date) + datetime.timedelta(days=0)\r\n end_date = _get_date_from_str(end_date) + datetime.timedelta(days=1)\r\n except ValueError:\r\n # Error case: there was a badly formatted user-input date string\r\n return _render_report_form(start_date, end_date, start_letter, end_letter, report_type, date_fmt_error=True)\r\n\r\n report = initialize_report(report_type, start_date, end_date, start_letter, end_letter)\r\n items = report.rows()\r\n\r\n response = HttpResponse(mimetype='text/csv')\r\n filename = \"purchases_report_{}.csv\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d-%H-%M-%S\"))\r\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(filename)\r\n report.write_csv(response)\r\n return response\r\n\r\n elif request.method == 'GET':\r\n end_date = 
datetime.datetime.now(pytz.UTC)\r\n start_date = end_date - datetime.timedelta(days=30)\r\n start_letter = \"\"\r\n end_letter = \"\"\r\n return _render_report_form(start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\"), start_letter, end_letter, report_type=\"\")\r\n\r\n else:\r\n return HttpResponseBadRequest(\"HTTP Method Not Supported\")", "def render_to_response(self, context, **response_kwargs):\n if not self.request.user.is_authenticated:\n # Do not allow to get a good response\n return nlogin(self.request)\n elif 'Csv' in self.request.GET.get('submit_type', ''):\n \"\"\" Provide CSV response\"\"\"\n return export_csv(self.get_qs(), 'begrippen')\n else:\n return super(PartListView, self).render_to_response(context, **response_kwargs)", "def send_csv_reply(self, request, result, tags):\n request.setHeader('Content-disposition', 'attachment; filename=%s.csv' % \n result[0]['uuid'])\n if tags[0][0]:\n tags = tags[0][1][0][0]\n else:\n tags = None\n self.write_one_stream(request, \n result[0], \n tags)\n \n request.finish()", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def export_to_csv(self, request, queryset):\n fields = self.get_table_fields()\n field_names = [field.name for field in fields]\n field_verbose_names = [field.verbose_name.encode(\n 'utf-8'\n ) for field in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; \\\nfilename=%s.csv' % unicode(self.model._meta).replace('.', '_')\n\n writer = csv.writer(response)\n writer.writerow(field_verbose_names)\n for obj in queryset:\n writer.writerow([unicode(getattr(obj, field)).encode(\n \"utf-8\",\n \"replace\"\n ) for field in field_names])\n return response", "def render_to_response(self, context, **response_kwargs):\n if not self.request.user.is_authenticated:\n # Do not allow to get a good response\n return nlogin(self.request)\n elif 'Csv' in self.request.GET.get('submit_type', ''):\n \"\"\" Provide CSV response\"\"\"\n return export_csv(self.get_qs(), 'begrippen')\n else:\n return super(TextListView, self).render_to_response(context, **response_kwargs)", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response, delimiter=';')\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n values = []\n for field in field_names:\n value = (getattr(obj, field))\n if callable(value):\n try:\n value = value() or ''\n except:\n value = 'Error retrieving value'\n if value is None:\n value = ''\n values.append(unicode(value).encode('utf-8'))\n writer.writerow(values)\n #writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def 
csv_export(self,\n states=None,\n fields=None,\n filenamebase='projects',\n delimiter=',',\n newline='\\r\\n',\n ):\n \n if fields is None:\n fields = self.fields()\n \n out = StringIO()\n out.write(delimiter.join(fields) + newline)\n\n for project in self.data():\n values = []\n for field in project:\n text = field['text']\n if type(text) is UnicodeType:\n text = text.encode('utf8')\n value = CSV_TEMPLATE % text\n values.append(value)\n out.write(delimiter.join(values) + newline)\n \n value = out.getvalue()\n out.close()\n\n timestamp = datetime.today().strftime(\"%Y%m%d%H%M\")\n filename = filenamebase + timestamp + '.csv'\n \n self.request.RESPONSE.setHeader('Content-Type', 'application/x-msexcel')\n self.request.RESPONSE.setHeader(\"Content-Disposition\", \n \"inline;filename=%s\"%filename)\n\n return value", "def shops_procurement_email_csv(request):\n\n Order.objects.all().delete()\n Product.objects.all().delete()\n\n procurements = Procurement.objects.all()\n\n if procurements:\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=procurement_%s.csv' % procurement_id\n\n for procurement in procurements:\n\n writer = csv.writer(response)\n\n writer.writerow([\n 'Vendor',\n 'Product',\n 'Variant',\n 'Quantity',\n 'Grams'])\n\n order_by_args = [\n 'product_variant__product__vendor',\n 'product_variant', ]\n procurement_items = procurement.procurementitem_set.all().order_by(*order_by_args)\n\n for procurement_item in procurement_items:\n writer.writerow([\n procurement_item.product_variant.product.vendor,\n str(procurement_item.product_variant.product),\n str(procurement_item.product_variant.option1),\n str((procurement_item.order_units) or ''),\n str((procurement_item.order_weight) or '')])\n\n return response", "def export_repayment_csv(request):\n import csv\n from django.utils.encoding import smart_str\n # response = HttpResponse(content_type='text/csv')\n # response['Content-Disposition'] = 'attachment; filename=Repayment_report.csv'\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").iterator()\n # writer = csv.writer(response, csv.excel)\n # response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n def stream():\n buffer_ = StringIO()\n writer = csv.writer(buffer_)\n writer.writerow([\n smart_str(u\"FIRST NAME\"),\n smart_str(u\"LAST NAME\"),\n 
smart_str(u\"USERNAME\"),\n smart_str(u\"EMAIL\"),\n smart_str(u\"DATE\"),\n smart_str(u\"NAME OF PROJECT\"),\n smart_str(u\"DONATION AMOUNT\"),\n smart_str(u\"REPAYMENT AMOUNT\"),\n\n ])\n\n for payment in repayments:\n writer.writerow([\n smart_str(payment.user.user.first_name),\n smart_str(payment.user.user.last_name),\n smart_str(payment.user.user.username),\n smart_str(payment.user.user.email),\n smart_str(payment.created_at),\n smart_str(payment.project.title),\n smart_str(round(\n Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2)),\n smart_str(round(payment.amount, 2)),\n ])\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n # Create the streaming response object with the appropriate CSV header.\n response = StreamingHttpResponse(stream(), content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Repayment_report.csv\"'\n return response", "def return_csv(func, datatable, file_pointer=None):\r\n if file_pointer is None:\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'.format(func)\r\n else:\r\n response = file_pointer\r\n writer = csv.writer(response, dialect='excel', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n encoded_row = [unicode(s).encode('utf-8') for s in datatable['header']]\r\n writer.writerow(encoded_row)\r\n for datarow in datatable['data']:\r\n # 's' here may be an integer, float (eg score) or string (eg student name)\r\n encoded_row = [\r\n # If s is already a UTF-8 string, trying to make a unicode\r\n # object out of it will fail unless we pass in an encoding to\r\n # the constructor. But we can't do that across the board,\r\n # because s is often a numeric type. So just do this.\r\n s if isinstance(s, str) else unicode(s).encode('utf-8')\r\n for s in datarow\r\n ]\r\n writer.writerow(encoded_row)\r\n return response", "def DownloadRingtoneData(request):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename=ringtones.csv'\r\n\r\n writer = csv.DictWriter(response, models.Ringtone.CSV_FILEDS)\r\n # Hack. 
Write the header first.\r\n d = {}\r\n for k in models.Ringtone.CSV_FILEDS:\r\n d[k] = k\r\n writer.writerow(d)\r\n for r in models.Ringtone.all():\r\n writer.writerow(r.DumpToCSVRow())\r\n return response", "def export_any_dataset(request, *fields, queryset, filename, csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n headers, rows = render_queryset_as_data(*fields, queryset=queryset)\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(headers)\n for row in rows:\n writer.writerow(row)\n\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n\n writer.write_headers_from_strings(headers)\n for row in rows:\n writer.writerow(row)\n writer.apply_autofit()\n\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response" ]
[ "0.77301335", "0.73666567", "0.7355991", "0.7228909", "0.7226983", "0.7217648", "0.7094669", "0.70587486", "0.7015085", "0.7014507", "0.7010828", "0.70096016", "0.6967033", "0.69572055", "0.69189924", "0.6718752", "0.66366225", "0.6557932", "0.64799553", "0.6463194", "0.6458684", "0.64370936", "0.64055645", "0.63928705", "0.6391644", "0.6369948", "0.6366872", "0.6305472", "0.62988484", "0.6279091" ]
0.75247663
1
This view returns the list of all the keys found in the report files (other than institute, usecase, id_report, language) that the admin has just inserted to update the database; only keys that have never been detected before are returned.
def get_keys_from_csv_update(request):
    reports = []
    json_resp = {}
    for filename, file in request.FILES.items():
        if filename.startswith('reports'):
            reports.append(file)
        elif filename.startswith('pubmed'):
            reports.append(file)
    keys, uses = get_keys_csv_update(reports)
    json_resp['keys'] = keys
    json_resp['uses'] = list(uses)
    # print('CHIAVI',keys)
    return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keys(request):\n\n keys=[]\n reports = Report.objects.all().exclude(institute = 'PUBMED')\n for report in reports:\n json_rep = report.report_json\n for el in json_rep.keys():\n if el not in keys:\n keys.append(el)\n json_resp = {'keys':keys}\n return JsonResponse(json_resp)", "def list_documents(self, report_type: Type, key: str = None) -> List[str]:\n documents = []\n collection = self.client.collection(f'{report_type}').list_documents()\n for document in collection:\n if key:\n if document.id == key:\n for _document in document.get().to_dict():\n documents.append(_document)\n else:\n documents.append(document.id)\n\n return documents", "def get_keys(weat_db):\n import updater\n keys = updater.list_keys(weat_db, verbose=False)\n return keys", "def _FindKeyFiles(self):\r\n \r\n if self.__fCachedFiles is not None:\r\n return self.__fCachedFiles\r\n \r\n app = wingapi.gApplication\r\n proj = app.GetProject()\r\n files = proj.GetAllFiles()\r\n manage_files = []\r\n settings_files = []\r\n for fn in files:\r\n if os.path.basename(fn) == 'manage.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n manage_files.append(fn)\r\n elif os.path.basename(fn) == 'settings.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n settings_files.append(fn)\r\n\r\n pairs = []\r\n for manage_file in manage_files:\r\n for settings_file in settings_files:\r\n manage_dir = os.path.dirname(manage_file)\r\n settings_dir = os.path.dirname(settings_file)\r\n if manage_dir == settings_dir:\r\n pairs.append((manage_file, settings_file))\r\n if len(pairs) > 1:\r\n app.SetStatusMessage(\"Warning: Multiple manage.py/settings.py pairs found in project\")\r\n \r\n if len(pairs) > 0:\r\n self.__fCachedFiles = pairs[0]\r\n else:\r\n self.__fCachedFiles = (None, None)\r\n \r\n return self.__fCachedFiles", "def keynames(self):\r\n \r\n infile=open(self._datafile, 'r')\r\n if self._resultfile: self._resultfile.write(\"Keys in datafile: \"+self._datafile+'\\n')\r\n else: print (\"Keys in datafile: \"+self._datafile+'\\n')\r\n for tmpc in infile:\r\n for i in range(0, len(tmpc)):\r\n if tmpc[i:i+1]=='#': break\r\n elif tmpc[i:i+1]==' ':\r\n if self._resultfile: self._resultfile.write(tmpc[0:i]+'\\n')\r\n else: print tmpc[0:i]\r\n break\r\n if self._resultfile: self._resultfile.write(tmpc[0:i]+'\\n')\r\n else: print tmpc[0:i]+'\\n'", "def _get_report_keys(self, text, key_format=r'\\:\\w+\\:'):\n return [i.replace(\":\", \"\") for i in re.findall(r'\\:\\w+\\:', text)]", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "def list_key_values_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection for matching incident_id\n return_json = [] # type: ignore\n context = []\n found = False\n cursor = COLLECTION.find({}, {'_id': False})\n if cursor is None:\n # Collection doesn't exist - thus no records\n return_json = None # type: ignore\n else:\n # Iterate, collecting any name/value pairs associated with the incident\n for i in cursor:\n if incident in i:\n found = True\n return_json.append({\n 'Key': i[incident]['key'],\n 'Value': i[incident]['value']\n })\n context.append({\n 'Incident': incident,\n 'Key': i[incident]['key'],\n 'Value': i[incident]['value']\n })\n\n if not found:\n # Means no records were found with that incident_id\n # Discard empty return_json\n return_json = None # type: ignore\n\n human_readable = 
tableToMarkdown(f'The key/value paires stored in incident {incident}', return_json)\n ec = {'MongoDB.Incident(val.Key === obj.Key)': context}\n # Return a useful status\n return human_readable, ec, {}", "def get_data(request):\n\n json_resp = {}\n # reports = Report.objects.filter(name = UseCase.objects.get(name=request.session['usecase']),institute = request.session['institute'],language = request.session['language'])\n\n json_resp['reports'] = []\n institute = request.GET.get('institute',request.session['institute'])\n usecase = request.GET.get('usecase',request.session['usecase'])\n print(usecase)\n language = request.GET.get('language',request.session['language'])\n ns_human = NameSpace.objects.get(ns_id='Human')\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n user_robot = User.objects.get(username='Robot_user', ns_id=ns_robot)\n # usec = UseCase.objects.get(name = usecase)\n # reports = Report.objects.filter(name = usec,institute = institute, language = language).values('id_report')\n # gt_report = GroundTruthLogFile.objects.filter(language = language).exclude(username = user_robot,id_report__in=reports).order_by('id_report').distinct('id_report')\n cursor = connection.cursor()\n cursor.execute(\"SELECT r.id_report,r.language,r.report_json,r.name,r.institute,r.batch,COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND r.institute = %s AND r.language = %s AND g.username != %s GROUP BY (r.id_report,r.language,r.report_json,r.name,r.institute,r.batch)\",[usecase,institute,language,'Robot_user'])\n gt_report_ids = []\n indice = 0\n st = time.time()\n for el in cursor.fetchall():\n\n # report = Report.objects.get(language = language, id_report = el.id_report_id)\n gt_report_ids.append(el[0])\n # print(str(indice))\n indice +=1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 1\n gt_robot = 0\n\n rep = json.loads(el[2])\n new_rep = {}\n for key in rep.keys():\n nkey = key+ '_0'\n new_rep[nkey] = rep[key]\n\n total = el[6]\n\n new_rep['usecase'] = usecase\n new_rep['id_report_not_hashed'] = rep.get('report_id',el[0])\n new_rep['id_report'] = el[0]\n new_rep['institute'] = institute\n new_rep['language'] = language\n new_rep['batch'] = el[5]\n\n json_resp['reports'].append({'total':total, 'report':new_rep,'id_report':el[0], 'language':language})\n\n usec = UseCase.objects.get(name = usecase)\n reports = Report.objects.filter(institute = institute,language = language,name = usec).exclude(id_report__in=gt_report_ids)\n # print(reports.count())\n indice = 0\n st = time.time()\n for el in reports:\n report = el\n # print(str(indice))\n indice += 1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 0\n gt_robot = 0\n\n rep = report.report_json\n new_rep = {}\n for key in rep.keys():\n nkey = key + '_0'\n new_rep[nkey] = rep[key]\n\n total = gt_human + gt_robot\n\n new_rep['usecase'] = report.name_id\n new_rep['id_report_not_hashed'] = rep.get('report_id', report.id_report)\n new_rep['id_report'] = report.id_report\n new_rep['institute'] = report.institute\n new_rep['language'] = report.language\n new_rep['batch'] = report.batch\n\n json_resp['reports'].append(\n {'total': total, 'report': new_rep, 'id_report': 
report.id_report, 'language': report.language})\n # print('elaboro1',str(end1-st1))\n tot = time.time()\n print('totale',str(tot-st))\n\n return JsonResponse(json_resp,safe=False)", "def _get_keys(self, listOfKeys):\n return self._keys", "def print_keys_existing(self):\n\t\tfor key in self.cache:\n\t\t\tprint(key)", "def AllKeys(self) -> _n_0_t_1[str]:", "def get_keys_and_uses_from_csv(request):\n\n labels = []\n pubmed = []\n reports = []\n concepts = []\n json_resp = {}\n type_selected = ''\n for filename, file in request.FILES.items():\n if filename.startswith('reports'):\n type_selected = 'reports'\n reports.append(file)\n if filename.startswith('pubmed'):\n type_selected = 'pubmed'\n reports.append(file)\n if filename.startswith('labels'):\n type_selected = 'labels'\n reports.append(file)\n if filename.startswith('concepts'):\n type_selected = 'concepts'\n reports.append(file)\n\n keys,uses,final_uses = get_keys_and_uses_csv(reports)\n json_resp['keys'] = keys\n # print(uses)\n # print(type(uses))\n #\n uses = list(map(lambda x: x.lower(), uses))\n final_uses = list(map(lambda x: x.lower(), final_uses))\n json_resp['uses'] = list(uses)\n # print(json_resp['uses'])\n return JsonResponse(json_resp)", "def test_overall_report_keys():\n keys = overall_data.keys()\n assert('banner_report' in keys)\n assert('rewarded_report' in keys)\n assert('interstitial_report' in keys)\n assert('overall_report' in keys)", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = 
(notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.key_dict.values()", "def show_k():\n\n page = request.args.get('page', 1, type=int)\n knowledges_ids = Knowledge.query.order_by(Knowledge.id.asc()).paginate(\n page, current_app.config['PAGE_ITEMS'], False)\n\n k = \"myK000\"\n\n knowledges_list = [(f'{k}{i.id}' if (i.id < 10) else f'{\"myK00\"}{i.id}'\n if(i.id < 100) else f'{\"myK0\"}{i.id}', i.description) for i in knowledges_ids.items]\n\n verK = True\n fileDir = os.path.dirname(os.path.realpath('__file__'))\n\n # me tengo que meter a la ruta base/cyber_role y ejecutar este endpoint\n file_json = 'cyber_role/KSAT_JSON/Knowledges.json'\n\n if not isfile(join(fileDir, file_json)):\n file_json = 'KSAT_JSON/Knowledges.json'\n\n with open(file_json) as file:\n # Obtenemos el json del fichero\n data = json.load(file)\n\n equivalencia_nist = {}\n # ya tenemos el diccionario del nist, original\n values = list(data.values())\n keys = list(data.keys())\n\n for i in knowledges_ids.items:\n if i.description in values:\n equivalencia_nist[i.id] = keys[values.index(i.description)]\n\n\n return render_template('general/ksat.html', title='Knowledges',\n lista_K=knowledges_ids, l_K=knowledges_list,\n l_eq=list(equivalencia_nist.values()), verK=verK)", "def admin_evaluate_reports(request):\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n \n admin = auth.get_user(request)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelmember in PanelMember.objects.filter(Q(status = 'F') | Q(status = 'Z')).filter(feedback_at = 'A'):\n thesis = panelmember.thesis\n dict = {}\n dict['title'] = thesis.title\n dict['student_full_name'] = thesis.student.first_name + \" \" + thesis.student.last_name\n dict['report'] = panelmember.feedback_with_referee_details\n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n dict['referee_name'] = panelmember.referee.user.first_name + ' ' + panelmember.referee.user.last_name\n dict['referee_id'] = panelmember.referee.id\n all_thesis.append(dict)\n \n return render(request, 'app/admin/view_finalReports.html', {\n 'title':'Final Reports',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n })\n elif request.method == \"POST\":\n form = PanelMember2Form(request.POST, request.FILES)\n \n\n thesis = int(request.POST['thesis'])\n referee = int(request.POST['referee'])\n \n if form.is_valid() and validate_pdf(request.FILES['feedback_without_referee_details']):\n referee = Referee.objects.get(id = referee)\n thesis = Thesis.objects.get(id = thesis)\n panelmember = PanelMember.objects.get(thesis = thesis,referee = referee)\n panelmember.feedback_at = 'G'\n \n time = str(datetime.datetime.now())\n timestamp = ''\n for i in time:\n if not (i == ':' or i == '-'):\n timestamp += i\n request.FILES['feedback_without_referee_details'].name = \"Evaluation_Report_\"+thesis.student.user.username+\"_\"+timestamp+\".pdf\"\n \n panelmember.feedback_without_referee_details = request.FILES['feedback_without_referee_details']\n panelmember.save()\n\n total_feedbacks = PanelMember.objects.filter(thesis = thesis, feedback_at = 'G').count()\n if total_feedbacks == thesis.indian_referees_required + thesis.foreign_referees_required:\n _update_student_status(thesis, 
STATUS_ID_THESIS_FEEDBACKS_RECEIVED) \n\n # send notification to all guide\n send_notification_to_all_guides(admin, thesis, \"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name)\n # email\n subject = \"[Feed Back reports] of the Thesis titled\" + thesis.title\n content = \"<br>Dear Sir/Madam,</br><br></br><br></br>\"+\"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name +'. Please Check the PhD Portal for more details.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n \n email = []\n\n for thesisGuide in ThesisGuide.objects.filter(thesis = thesis):\n receiver = Faculty.objects.get(user = thesisGuide.guide.user)\n email.append(receiver.email)\n\n send_email_task.delay(email, subject, content)\n return redirect(reverse(admin_evaluate_reports))\n else:\n return redirect(reverse(URL_BAD_REQUEST))\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def keys(self):\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']", "def keys(self):\n keys = set()\n with pd.HDFStore(self.rootpath, mode=\"r\") as hdf:\n hdf5_keys = hdf.keys()\n\n for key in hdf5_keys:\n kp = key.split(\"/\")\n if len(kp) == 5:\n print(kp, len(kp))\n keys.add(kp[4])\n return list(keys)", "def get_all_keys(self):\n return self.psettings.allKeys()", "def apikeys(request):\n display = get_boolean_value(request.GET.get('display', False))\n\n return render(request, 'gui/profile/profile_api_keys_list.html', {\n 'user': request.user,\n 'display_keys': display\n })", "def list_user_keys(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/keys\" % self.url_index_name, self.client.timeout)", "async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)", "def get_keys(self, file_name):\n\n nc = Dataset(file_name)\n keylist = []\n for key in nc.variables.keys():\n if ((not key == \"time\") and (not key == \"grid\")):\n keylist.append(key)\n\n nc.close()\n return keylist", "def get_keys(show: bool = False) -> pd.DataFrame:\n\n current_user = get_current_user()\n current_keys = {}\n\n for k, _ in current_user.credentials.get_fields().items():\n field_value = current_user.credentials.get_value(field=k)\n if field_value and field_value != \"REPLACE_ME\":\n current_keys[k] = field_value\n\n if current_keys:\n df = pd.DataFrame.from_dict(current_keys, orient=\"index\")\n df.index.name = \"API\"\n df = df.rename(columns={0: \"Key\"})\n if show:\n return df\n df.loc[:, \"Key\"] = \"*******\"\n return df\n\n return pd.DataFrame()", "def keys(self):\n return", "def keys(self, data, installer_context):", "def keys(self, installer_context):\n return self.spec.keys(self.data, installer_context)", "def keys(self):\n return DiscoDBInquiry(super(DiscoDB, self).keys)" ]
[ "0.71772546", "0.5747411", "0.5723134", "0.569637", "0.5561608", "0.54670197", "0.5361327", "0.5307448", "0.53016967", "0.5248564", "0.5241497", "0.52275854", "0.5220743", "0.52100676", "0.5197569", "0.5185162", "0.5140861", "0.5136076", "0.51293594", "0.51133555", "0.51117575", "0.5082436", "0.50735945", "0.5068384", "0.50618815", "0.5059695", "0.50551623", "0.5037052", "0.50327843", "0.50305027" ]
0.6515069
1
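As an aside on the row layout: the metadata field of these rows declares a triplet objective over the query, document, and negatives fields. The sketch below is an illustration only — row_to_triplets and the row dict are assumed names, not part of the dataset — showing how such a row could be unpacked into (anchor, positive, negative) triplets; the document_score, document_rank, and negative_scores fields could then be used to filter or weight the mined negatives.

# Hypothetical helper (assumption, not shipped with the dataset): unpack one row
# whose metadata declares objective.triplet = [["query", "document", "negatives"]]
# into (anchor, positive, negative) triplets for contrastive training.
def row_to_triplets(row):
    triplets = []
    for anchor_field, positive_field, negatives_field in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_field]            # the natural-language query
        positive = row[positive_field]        # the matching code snippet
        for negative in row[negatives_field]: # hard negatives mined for this query
            triplets.append((anchor, positive, negative))
    return triplets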
This view returns the ground truth associated with a specific user, action, and report.
def get_user_ground_truth(request):
    user = request.GET.get('user',None)
    action = request.GET.get('action',None)
    mode = request.GET.get('mode',None)
    report = request.GET.get('report',None)
    language = request.GET.get('language',request.session['language'])
    mode_obj = NameSpace.objects.get(ns_id=mode)
    report = Report.objects.get(id_report = report, language = language)
    gt = get_user_gt(user,mode_obj,report,language,action)
    return JsonResponse(gt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reports_from_action(request):\n\n username = request.session['username']\n mode1 = request.session['mode']\n mode = NameSpace.objects.get(ns_id=mode1)\n language = request.session['language']\n report_to_ret = []\n action = request.GET.get('action',None)\n user = User.objects.get(username = username,ns_id=mode)\n gt = GroundTruthLogFile.objects.filter(username = user,ns_id=mode, language = language, gt_type = action).order_by('-insertion_time')\n if gt.exists():\n if mode1 == 'Human':\n for element in gt:\n val = (element.id_report_id,element.insertion_time.replace(tzinfo=timezone.utc).astimezone(tz=None))\n report_to_ret.append(val)\n\n elif mode1 == 'Robot':\n user_rob = User.objects.get(username = 'Robot_user',ns_id = mode)\n for el in gt:\n # gt_rob = GroundTruthLogFile.objects.get(id_report = el.id_report_id, language = language, gt_type = el.gt_type,ns_id=mode, username=user_rob)\n # if el.insertion_time != gt_rob.insertion_time:\n val = (el.id_report_id, el.insertion_time.replace(tzinfo=timezone.utc).astimezone(tz=None))\n report_to_ret.append(val)\n\n jsonDict = {}\n jsonDict['reports_presence'] = report_to_ret\n # print(jsonDict)\n return JsonResponse(jsonDict)", "def get_presence_robot_user(request):\n\n id_report = request.GET.get('id_report', None)\n language = request.GET.get('language', None)\n use = request.GET.get('usecase', None)\n rep = request.GET.get('report_type', None)\n json_resp = {'auto_annotation_count': 0}\n cursor = connection.cursor()\n\n reports_list = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports_list = request_body_json['reports']\n\n if id_report is not None and language is not None:\n\n usecase = Report.objects.get(id_report=id_report, language=language)\n use = usecase.name_id\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s\",\n ['Robot_user', str(use)])\n ans = cursor.fetchone()[0]\n json_resp = {'auto_annotation_count': (ans)}\n\n elif use is not None and rep is not None:\n # print(rep)\n if rep == 'reports':\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s and r.name = %s and r.institute != %s\",\n ['Robot', 'Robot_user', str(use), 'PUBMED'])\n\n elif rep == 'pubmed':\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s and r.institute = %s\",\n ['Robot_user', str(use), 'PUBMED'])\n\n ans = cursor.fetchone()[0]\n if ans > 0:\n json_resp = {'auto_annotation_count': ans}\n # print(json_resp)\n elif reports_list is not None:\n report_list = json.loads(reports_list)\n # print(report_list)\n usecase_list = []\n for rep in report_list:\n\n if rep['usecase'] not in usecase_list:\n usecase_list.append(rep['usecase'])\n for u in usecase_list:\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s\",\n ['Robot_user', str(u)])\n ans = cursor.fetchone()[0]\n if ans > 0:\n json_resp = {'auto_annotation_count': ans}\n else:\n json_resp = {'auto_annotation_count': 0}\n\n elif use is None and reports_list is None and id_report is None and 
language is None:\n robot = NameSpace.objects.get(ns_id='Robot')\n gt = GroundTruthLogFile.objects.filter(ns_id=robot)\n json_resp = {'auto_annotation_count': gt.count()}\n\n print(json_resp)\n return JsonResponse(json_resp)", "def download_all_ground_truths(request):\n\n json_resp = {}\n json_resp['ground_truth'] = []\n cursor = connection.cursor()\n mode = request.GET.get('gt_mode',None)\n if mode is None:\n human = NameSpace.objects.get(ns_id = 'Human')\n robot = NameSpace.objects.get(ns_id = 'Robot')\n gt_human = GroundTruthLogFile.objects.filter(ns_id = human)\n agent = User.objects.get(ns_id = robot,username = 'Robot_user')\n gt_robot = GroundTruthLogFile.objects.filter(ns_id = robot,username = agent)\n for el in gt_human:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n for el in gt_robot:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n cursor.execute(\"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",['Robot','Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n elif mode.lower() == 'automatic':\n cursor.execute(\n \"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",\n ['Robot', 'Robot_user'])\n\n #CAMBIO\n # cursor.execute(\n # \"SELECT g.gt_json FROM ground_truth_log_file AS g INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.gt_type = gg.gt_type AND g.id_report = gg.id_report AND g.ns_id = gg.ns_id WHERE g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time\",\n # ['Robot', 'Robot_user', 'Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n return JsonResponse(json_resp)", "def check_presence_exa_conc_lab(request):\n\n # reports = request.GET.get('reports',None)\n rep = request.GET.get('id_report',None)\n language = request.GET.get('language',None)\n usecase = request.GET.get('usecase',None)\n reports = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports = request_body_json['reports']\n if rep is not None and language is not None:\n report = Report.objects.get(id_report = rep,language = language)\n usecase = report.name_id\n # print(usecase)\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n print('bool',bool)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n elif usecase is not None:\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n\n # labels = []\n # concepts = []\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n # labels.append(bool[0])\n # concepts.append(bool[1])\n # if False in labels:\n # json_resp['labels'] = False\n # else:\n # json_resp['labels'] = True\n #\n # if False in concepts:\n # json_resp['concepts'] = False\n # else:\n # json_resp['concepts'] = True\n elif 
reports is not None:\n report_list = json.loads(reports)\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n usecases = []\n for rep in report_list:\n # rep = json.loads(rep)\n if rep['usecase'] not in usecases:\n usecases.append(rep['usecase'])\n labels = []\n concepts = []\n for u in usecases:\n # print(u)\n json_resp = {}\n if u in ['colon', 'uterine cervix', 'lung']:\n bool = check_exa_lab_conc_only(u)\n else:\n bool = [False, False]\n\n labels.append(bool[0])\n concepts.append(bool[1])\n if False in labels:\n json_resp['labels'] = False\n else:\n json_resp['labels'] = True\n\n if False in concepts:\n json_resp['concepts'] = False\n else:\n json_resp['concepts'] = True\n\n else:\n json_resp={'error':'a usecase is needed'}\n\n print(json_resp)\n return JsonResponse(json_resp)", "def get_gt_action_based(request):\n\n action = request.GET.get('action',None)\n ns = request.GET.get('annotation_mode',None)\n\n if ns == 'Manual':\n ns = 'Human'\n elif ns == 'Automatic':\n ns = 'Robot'\n gts = GroundTruthLogFile.objects.filter(gt_type=action)\n\n if ns is not None:\n ns_id = NameSpace.objects.get(ns_id = ns)\n gts = GroundTruthLogFile.objects.filter(ns_id = ns_id, gt_type = action)\n\n json_resp = {'count':gts.count()}\n return JsonResponse(json_resp)", "def check_auto_presence_for_configuration(request):\n\n report_type = request.GET.get('report_type',None)\n usecase = request.GET.get('usecase',None)\n language = request.GET.get('language',None)\n institute = request.GET.get('institute',None)\n batch = request.GET.get('batch',None)\n languages = ['English','english']\n # print('BATCH',str(batch))\n use = UseCase.objects.get(name=usecase)\n json_resp = {}\n mode = NameSpace.objects.get(ns_id = 'Robot')\n user = User.objects.get(ns_id = mode, username='Robot_user')\n\n if report_type == 'pubmed':\n cursor = connection.cursor()\n cursor.execute(\"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute=%s AND r.language in %s AND r.name = %s AND r.batch = %s\",['Robot','Robot_user','PUBMED',tuple(languages),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n\n elif report_type == 'reports':\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute!=%s AND r.institute = %s AND r.language = %s AND r.name = %s AND r.batch = %s\",\n ['Robot', 'Robot_user', 'PUBMED',str(institute),str(language),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n print(json_resp)\n return JsonResponse(json_resp)", "def check_gt_existence(request):\n\n\n action = request.GET.get('action',None)\n id_report = request.GET.get('id_report',None)\n language = request.GET.get('language',None)\n username = request.session['username']\n mode = request.session['mode']\n cursor = connection.cursor()\n cursor.execute(\"SELECT COUNT(*) FROM ground_truth_log_file where ns_id = %s and username = %s and language = %s and id_report = %s and gt_type = %s\",[mode,username,language,id_report,action])\n count = cursor.fetchone()[0]\n json_resp = {'count':count}\n return JsonResponse(json_resp)", "def get_data(request):\n\n json_resp = {}\n # reports = Report.objects.filter(name = 
UseCase.objects.get(name=request.session['usecase']),institute = request.session['institute'],language = request.session['language'])\n\n json_resp['reports'] = []\n institute = request.GET.get('institute',request.session['institute'])\n usecase = request.GET.get('usecase',request.session['usecase'])\n print(usecase)\n language = request.GET.get('language',request.session['language'])\n ns_human = NameSpace.objects.get(ns_id='Human')\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n user_robot = User.objects.get(username='Robot_user', ns_id=ns_robot)\n # usec = UseCase.objects.get(name = usecase)\n # reports = Report.objects.filter(name = usec,institute = institute, language = language).values('id_report')\n # gt_report = GroundTruthLogFile.objects.filter(language = language).exclude(username = user_robot,id_report__in=reports).order_by('id_report').distinct('id_report')\n cursor = connection.cursor()\n cursor.execute(\"SELECT r.id_report,r.language,r.report_json,r.name,r.institute,r.batch,COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND r.institute = %s AND r.language = %s AND g.username != %s GROUP BY (r.id_report,r.language,r.report_json,r.name,r.institute,r.batch)\",[usecase,institute,language,'Robot_user'])\n gt_report_ids = []\n indice = 0\n st = time.time()\n for el in cursor.fetchall():\n\n # report = Report.objects.get(language = language, id_report = el.id_report_id)\n gt_report_ids.append(el[0])\n # print(str(indice))\n indice +=1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 1\n gt_robot = 0\n\n rep = json.loads(el[2])\n new_rep = {}\n for key in rep.keys():\n nkey = key+ '_0'\n new_rep[nkey] = rep[key]\n\n total = el[6]\n\n new_rep['usecase'] = usecase\n new_rep['id_report_not_hashed'] = rep.get('report_id',el[0])\n new_rep['id_report'] = el[0]\n new_rep['institute'] = institute\n new_rep['language'] = language\n new_rep['batch'] = el[5]\n\n json_resp['reports'].append({'total':total, 'report':new_rep,'id_report':el[0], 'language':language})\n\n usec = UseCase.objects.get(name = usecase)\n reports = Report.objects.filter(institute = institute,language = language,name = usec).exclude(id_report__in=gt_report_ids)\n # print(reports.count())\n indice = 0\n st = time.time()\n for el in reports:\n report = el\n # print(str(indice))\n indice += 1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 0\n gt_robot = 0\n\n rep = report.report_json\n new_rep = {}\n for key in rep.keys():\n nkey = key + '_0'\n new_rep[nkey] = rep[key]\n\n total = gt_human + gt_robot\n\n new_rep['usecase'] = report.name_id\n new_rep['id_report_not_hashed'] = rep.get('report_id', report.id_report)\n new_rep['id_report'] = report.id_report\n new_rep['institute'] = report.institute\n new_rep['language'] = report.language\n new_rep['batch'] = report.batch\n\n json_resp['reports'].append(\n {'total': total, 'report': new_rep, 'id_report': report.id_report, 'language': report.language})\n # print('elaboro1',str(end1-st1))\n tot = time.time()\n print('totale',str(tot-st))\n\n return JsonResponse(json_resp,safe=False)", "def get_gt_list(request):\n\n groundTruths = 0\n json_resp = {}\n username 
=request.GET.get('username',None)\n ins = request.GET.get('inst',None)\n lang = request.GET.get('lang',None)\n use = request.GET.get('use',None)\n action = request.GET.get('action',None)\n token = request.GET.get('token',None)\n reptype = request.GET.get('reptype',None)\n languages = ['English','english']\n annotation_mode = request.GET.get('annotation_mode',None)\n if ins == '':\n ins = None\n if use == '':\n use = None\n if lang == '':\n lang = None\n if reptype == '':\n reptype = 'reports'\n if token == 'all':\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n ns_human = NameSpace.objects.get(ns_id='Human')\n rob_user = User.objects.get(username='Robot_user',ns_id=ns_robot)\n list_gt = GroundTruthLogFile.objects.filter(username = rob_user).count() + GroundTruthLogFile.objects.filter(ns_id=ns_human).count()\n groundTruths = list_gt\n gt_rob = GroundTruthLogFile.objects.filter(ns_id=ns_robot,username = rob_user)\n\n i = 0\n # print(groundTruths)\n for el in gt_rob:\n gts = GroundTruthLogFile.objects.filter(ns_id=ns_robot,gt_type = el.gt_type,id_report = el.id_report_id,language = el.language).exclude(insertion_time = el.insertion_time)\n gts_count = gts.count()\n # print('count: '+str(i)+' '+str(gts.count()))\n i = i+1\n groundTruths = groundTruths + gts_count\n\n\n else:\n with connection.cursor() as cursor:\n if reptype == 'reports':\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s and r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n # CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type = gg.gt_type WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute != %s\",\n # [ins, use, lang, action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n else:\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s and r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n #CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file 
AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type = gg.gt_type WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute = %s\",\n # [use, tuple(languages), action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n # groundTruths = cursor.fetchone()[0]\n\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n\n\n\n\n json_resp['ground_truths'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_test_report(request, **kwargs): \n\t\n #Fetching the details of the selected event\n test_list = sidecar.events.test_report(project_id=kwargs['test_id'])\n report_list = []\n\t\n #Creating the list for the report\n for tests in test_list._logs:\n\tjson_test = json.loads(tests['data'])\n\ttests['success'] = json_test['success'] \n\ttests['time'] = json_test['time']\n\ttests['test_cases'] = json_test['test_cases']\n\treport_list.append(tests)\n\n #Making the context and sending to template\n context = {\n \"page_title\": _(\"Test Results\"),\n \"tests\": report_list\n }\n return render(request, 'rally_dashboard/events/test_detail.html', context)", "def generate_report(self):\n if self.submission_metadata:\n return self._submission_allowed()[1]", "def download_ground_truths(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path1 = os.path.join(workpath, './static/temp/temp.csv')\n path2 = os.path.join(workpath, './static/BioC/temp_files/to_download.csv')\n if os.path.exists(path1):\n os.remove(path1)\n if os.path.exists(path2):\n os.remove(path2)\n username = request.session['username']\n inst = request.GET.get('institute',None)\n if inst == '':\n inst = None\n else:\n inst = str(inst)\n use = request.GET.get('usec',None)\n if use == '':\n use = None\n else:\n use = str(use)\n report_type = request.GET.get('report_type',None)\n if report_type == '':\n report_type = None\n annotation_mode = request.GET.get('mode',None)\n if annotation_mode == '':\n annotation_mode = None\n lang = request.GET.get('lang',None)\n if lang == '':\n lang = None\n else:\n lang = str(lang)\n batch = request.GET.get('batch','') # added 22/10/2021\n if batch == '' or batch == 'all':\n batch = None\n else:\n batch = int(batch)\n\n all = request.GET.get('all_gt',None)\n action = request.GET.get('action',None)\n format = request.GET.get('format',None)\n json_resp = {}\n json_resp['ground_truth'] = []\n if format == 'json' or all =='all' :\n json_resp = create_json_to_download(report_type,action,username,use,annotation_mode,inst,lang,all,batch)\n return JsonResponse(json_resp)\n\n elif format == 'csv':\n response = HttpResponse(content_type='text/csv')\n resp = create_csv_to_download(report_type,annotation_mode,username,use,inst,lang,action,response,batch)\n return resp\n\n elif format == 'biocxml':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n if report_type == 
'pubmed':\n json_keys_to_display = ['year','authors','volume','journal']\n json_keys_to_ann = ['title','abstract']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'xml',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')\n\n elif format == 'biocjson':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'json',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')", "def test_get_all_reports_made_by_a_user(self):\n client = APIClient()\n response = client.get(reverse(\n 'authentication:user-article-reports'),**self.header_user2)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def display_report(request, **kwargs):\n\n #Getting the report of the tests \n try:\n outputStr = sidecar.events.test_logs(project_id=kwargs['project_id'])\n outputStr = outputStr.results\n except Exception, e:\n outputStr = \"Updating the logs...\"\n \n #Making the output\n context = {\n \"page_title\": _(\"Test Report\"),\n \"test_report\": outputStr\n }\n return render(request, 'rally_dashboard/events/view_report.html', context)", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def admin_evaluate_reports(request):\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n \n admin 
= auth.get_user(request)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelmember in PanelMember.objects.filter(Q(status = 'F') | Q(status = 'Z')).filter(feedback_at = 'A'):\n thesis = panelmember.thesis\n dict = {}\n dict['title'] = thesis.title\n dict['student_full_name'] = thesis.student.first_name + \" \" + thesis.student.last_name\n dict['report'] = panelmember.feedback_with_referee_details\n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n dict['referee_name'] = panelmember.referee.user.first_name + ' ' + panelmember.referee.user.last_name\n dict['referee_id'] = panelmember.referee.id\n all_thesis.append(dict)\n \n return render(request, 'app/admin/view_finalReports.html', {\n 'title':'Final Reports',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n })\n elif request.method == \"POST\":\n form = PanelMember2Form(request.POST, request.FILES)\n \n\n thesis = int(request.POST['thesis'])\n referee = int(request.POST['referee'])\n \n if form.is_valid() and validate_pdf(request.FILES['feedback_without_referee_details']):\n referee = Referee.objects.get(id = referee)\n thesis = Thesis.objects.get(id = thesis)\n panelmember = PanelMember.objects.get(thesis = thesis,referee = referee)\n panelmember.feedback_at = 'G'\n \n time = str(datetime.datetime.now())\n timestamp = ''\n for i in time:\n if not (i == ':' or i == '-'):\n timestamp += i\n request.FILES['feedback_without_referee_details'].name = \"Evaluation_Report_\"+thesis.student.user.username+\"_\"+timestamp+\".pdf\"\n \n panelmember.feedback_without_referee_details = request.FILES['feedback_without_referee_details']\n panelmember.save()\n\n total_feedbacks = PanelMember.objects.filter(thesis = thesis, feedback_at = 'G').count()\n if total_feedbacks == thesis.indian_referees_required + thesis.foreign_referees_required:\n _update_student_status(thesis, STATUS_ID_THESIS_FEEDBACKS_RECEIVED) \n\n # send notification to all guide\n send_notification_to_all_guides(admin, thesis, \"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name)\n # email\n subject = \"[Feed Back reports] of the Thesis titled\" + thesis.title\n content = \"<br>Dear Sir/Madam,</br><br></br><br></br>\"+\"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name +'. 
Please Check the PhD Portal for more details.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n \n email = []\n\n for thesisGuide in ThesisGuide.objects.filter(thesis = thesis):\n receiver = Faculty.objects.get(user = thesisGuide.guide.user)\n email.append(receiver.email)\n\n send_email_task.delay(email, subject, content)\n return redirect(reverse(admin_evaluate_reports))\n else:\n return redirect(reverse(URL_BAD_REQUEST))\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def get(self, request):\n rule = self.get_object()\n return success(rule.summary())", "def index(request):\n context = {}\n faculty_instance = Faculty.objects.filter(id=request.user.id).first()\n student_instance = Student.objects.filter(id=request.user.id).first()\n lab_instance = LabAssistant.objects.filter(id=request.user.id).first()\n if faculty_instance:\n context = 'faculty'\n elif student_instance:\n context = 'student'\n elif lab_instance:\n context = 'assistant'\n else:\n context = 'none'\n\n return render(request, 'home.html', {'user_type': context})", "def get_users_list(request):\n\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n lista = []\n reports = request_body_json['reports']\n action = request_body_json['action']\n for rep in reports:\n r = Report.objects.get(id_report=rep[0], language=rep[1])\n users = GroundTruthLogFile.objects.filter(id_report=r, language=r.language).order_by('username').distinct(\n 'username').values('username')\n if action is not None:\n users = GroundTruthLogFile.objects.filter(gt_type=action, id_report=r, language=r.language).order_by(\n 'username').distinct(\n 'username').values('username')\n\n for name in users:\n if name['username'] != 'Robot_user' and name['username'] not in lista:\n lista.append(name['username'])\n return JsonResponse(lista, safe=False)\n\n users = User.objects.all().values('username')\n user_black_list = ['Robot_user']\n list_type = request.GET.get('list_type',None) # identifica il filtro su utenti che hanno almeno una annotazione\n id_report = request.GET.get('id_report',None)\n action = request.GET.get('action',None)\n language = request.GET.get('language',None)\n reports = request.GET.getlist('reports',None)\n lista = []\n if id_report is not None and language is not None:\n r = Report.objects.get(id_report = id_report,language = language)\n users = GroundTruthLogFile.objects.filter(id_report = r, language = r.language).order_by('username').distinct('username').values('username')\n if action is not None:\n users = GroundTruthLogFile.objects.filter(gt_type = action,id_report=r, language=r.language).order_by('username').distinct(\n 'username').values('username')\n\n for name in users:\n if name['username'] != 'Robot_user' and name['username'] not in lista:\n lista.append(name['username'])\n return JsonResponse(lista, safe=False)\n\n users_obj = User.objects.all()\n if list_type is not None:\n for name in users_obj:\n if name.username not in user_black_list and name.username not in lista:\n us = User.objects.get(username = name,ns_id = name.ns_id)\n gt = GroundTruthLogFile.objects.filter(username = us)\n if gt.exists():\n lista.append(name.username)\n # elif reports is not None and action is not None:\n # for rep in reports:\n # r = Report.objects.get(id_report=rep[0], language=rep[1])\n # users = GroundTruthLogFile.objects.filter(id_report=r, language=r.language).order_by('username').distinct(\n # 'username').values('username')\n # if action is not None:\n # users = GroundTruthLogFile.objects.filter(gt_type=action, 
id_report=r, language=r.language).order_by(\n # 'username').distinct(\n # 'username').values('username')\n #\n # for name in users:\n # if name['username'] != 'Robot_user' and name['username'] not in lista:\n # lista.append(name['username'])\n # return JsonResponse(lista, safe=False)\n else:\n for name in users:\n if name['username'] not in user_black_list and name['username'] not in lista:\n lista.append(name['username'])\n return JsonResponse(lista,safe=False)", "def reported_by(self, user):\n return Report.objects.filter(recipe=self, chef=user).exists()", "def training_report_view(request, application_slug):\n return training_report(request, application_slug, attach=False)", "def analysis_view(request):\n return render(request, \"tracker/analysis.html\")", "def pyp_reports(request):\n student_id = int(get_from_matchdict('id', request.matchdict))\n\n pdf = get_from_matchdict('pdf', request.matchdict)\n check = request.params.get('check')\n if check and check.lower() == 'true':\n check = True\n else:\n check = False\n\n internal_check = request.params.get('internal_check')\n\n mb_user = request.session.get('mb_user', None)\n if not mb_user:\n # FIXME: Need to re-do it\n pass\n # api_token = request.params.get('api_token')\n # if not api_token or api_token != gns.config.managebac.api_token:\n # return HTTPForbidden()\n elif mb_user.type.startswith('Advisor') or mb_user.type == 'Account Admins':\n # let them in\n pass\n else:\n return HTTPForbidden()\n\n term_id = gns.config.managebac.current_term_id\n with DBSession() as session:\n try:\n rep_statement = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id,\n # PrimaryReport.homeroom_comment!=''\n )\n stu_statement = session.query(Students).filter_by(id=student_id)\n student = stu_statement.one()\n report = rep_statement.one()\n gns.tutorial(\"Got the target student\",edit=(stu_statement, '.sql'))\n gns.tutorial(\"Got Primary report with course information\", edit=(rep_statement, '.sql'))\n except NoResultFound:\n if pdf:\n # raw_input('no report entry for this student: {} with term_id {}'.format(student_id, term_id))\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n except MultipleResultsFound:\n print(\"Issue with database!\")\n raise HTTPInternalServerError(\"Issue with database!\")\n\n title = u\"IGB International School (June 2016): Student Report for {} {}\".format(student.first_name, student.last_name)\n\n # This bit is the only manual info that isn't on managebac\n uoi_table = {\n -1: {\n # ey sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"Playing and learning together enables us to come to new understandings.\"),\n 2: dict(title=\"Sharing The Planet\", central_idea=\"Our lives are interconnected with living things.\"),\n # ey sem 2\n 3: dict(title=\"How the World Works\", central_idea=\"Water is all around us and has many uses.\"),\n 4: dict(title=\"How We Express Ourselves\", central_idea=\"Stories inform, provoke us and provide enjoyment.\"),\n },\n 0: {\n # kg sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"We are part of a community who work, learn, and play together\"),\n 2: dict(title=\"How We Organise Ourselves\", central_idea=\"Communities create systems to fullfill a need.\"),\n 3: dict(title=\"Where We Are in Place and Time\", central_idea=\"Shelters look different and serve a purpose.\"),\n\n # kg sem 2\n 4: dict(title=\"Sharing the 
Planet\", central_idea=\"People's choices and actions impact the environment and their community.\"),\n 5: dict(title=\"How the World Works\", central_idea=\"Our body and man made resources help protect us from the natural environment.\"),\n 6: dict(title=\"How We Express Ourselves\", central_idea=\"An audience can be engaged through performance.\")\n },\n 1: {\n # gr1 sem 1\n 1: dict(title=\"How we organize ourselves\", central_idea=\"Humans use tools and strategies to understand and organise their environment.\"),\n 2: dict(title=\"Who We Are\", central_idea=\"Games provide us with opportunities to develop an understanding of ourselves and others.\"),\n 3: dict(title=\"How We Express Ourselves\", central_idea=\"Celebrations are an opportunity to reflect and appreciate cultures and beliefs.\"),\n # gr1 sem 2\n 4: dict(title=\"How the World Works\", central_idea=\"Machines make a difference to the way we live our lives.\"),\n 5: dict(title=\"Sharing the Planet\", central_idea=\"Water is essential to life and is a limited resource to many.\"),\n 6: dict(title=\"Where We Are in Place and Time\", central_idea=\"Clocks are a universal measurement tool of time that have had an impact in the past and the present.\"),\n },\n 2: {\n # gr2 sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"With rights come responsibilities.\"),\n 2: dict(title=\"How We Express Ourselves\", central_idea=\"Cultures tell stories in different ways and for different reasons.\"),\n 3: dict(title=\"How We Organize Ourselves\", central_idea=\"Number system provide a common language we can use to make sense of the world.\"),\n # gr2 sem 2\n 4: dict(title=\"Sharing The Planet\", central_idea=\"Plants sustain life on earth and we have a responsible role to play\"),\n 5: dict(title=\"Where we are in Place and Time\", central_idea=\"Influence can change people and their environment.\"),\n 6: dict(title=\"How the World Works\", central_idea=\"Forces are a vital part of our survival.\"),\n },\n 3: {\n # gr3 sem 1\n 1: dict(title=\"How We Organise Ourselves\", central_idea=\"Communication connects people.\"),\n 2: dict(title=\"Sharing the Planet\", central_idea=\"People can conserve the world's resources through responsible behaviours\"),\n 3: dict(title=\"Where We are in Place and Time\", central_idea=\"Innovations from past civilizations have an influence on the present\"),\n # gr3 sem 2\n 4: dict(title=\"How the World Works\", central_idea=\"Safe structures are designed and built for purpose and consider the environment and materials.\"),\n 5: dict(title=\"Who We Are\", central_idea=\"Communication connects people and communities.\"),\n 6: dict(title=\"How We Express Ourselves\", central_idea=\"Nature can inspire people to express their creativity.\"),\n },\n 4: {\n # gr4 sem 1\n 1: dict(title=\"How We Express Ourselves\", central_idea=\"Media influences how we think and the choices we make.\"),\n 2: dict(title=\"Sharing the Planet\", central_idea=\"Organisms rely on one another to balance ecosystems.\"),\n 3: dict(title=\"How we Organise Ourselves\", central_idea=\"Societies establish systems for trade and commerce to meet needs and wants.\"),\n # gr4 sem 2\n 4: dict(title=\"Where We Are in Place and Time\", central_idea=\"The quest for understanding has led to exploration and discovery.\"),\n 5: dict(title=\"How The World Works\", central_idea=\"Earth has formed over time and is still changing.\"),\n 6: dict(title=\"Who We Are\", central_idea=\"People's beliefs influence their actions.\"),\n },\n 5: {\n # gr5 sem 1\n 1: 
dict(title=\"How we Organise Ourselves\", central_idea=\"All societies have rules and reasons for these rules.\"),\n 2: dict(title=\"Where We Are in Place and Time\", central_idea=\"Malaysia's cultural diversity has been shaped by its history.\"),\n 3: dict(title=\"How the World Works\", central_idea=\"Changes to matter can be of a chemical and/or physical nature.\"),\n # gr5 sem 2\n 4: dict(title=\"Sharing The Planet\", central_idea=\"The choices we make during moments of conflict affect our relationships\"),\n 5: dict(title=\"How We Express Ourselves: Exhibition\", central_idea=\"Artists seek to evoke an emotional response from their audience.\"),\n 6: dict(title=\"Who We Are\", central_idea=\"External and internal factors cause changes in our lives\"),\n },\n }\n\n chinese_teachers = {\n 10792613: [11203970, 10836999, 10912649, 10863230, 11544715, 11707916, 11609996, 11707918, 11708046, 10912651, 11707928, 11274137, 11707932, 11707934, 11204000, 11204641, 11204001, 11708067, 11270692, 11707940, 11204385, 11563304, 11204008, 11153068, 11573550, 11707952, 10882225, 11204017, 11707957, 10834618, 10866874, 11080380, 10893375, 11707840, 11190340, 10834630, 11611847, 10834633, 10834636, 11693517, 11707984, 11203923, 11707859, 10834645, 10834648, 10834649, 10834651, 11707870, 11182305, 11203938, 11200870, 10973671, 11707882, 11708014, 11203950, 11203952, 11708018, 11203954, 10882162, 11633398, 11707900, 11538429, 11124222, 11135103, 11737995, 11621139, 11707870, 10882159], # xiaopiong\n 11256632: [11204609, 10836994, 11707907, 11135108, 10836999, 11135112, 10837001, 11203979, 10865037, 11707924, 11621141, 11203988, 11204377, 11173915, 10913691, 11204637, 10856823, 11204383, 11204640, 11707939, 11204392, 11614634, 11364525, 10882226, 11204660, 11190071, 10834616, 10834617, 11464377, 10866873, 10866876, 10834621, 10834622, 10866877, 10856636, 11578945, 11611841, 10893379, 10834628, 10834625, 11611847, 10834635, 10834640, 10834642, 10834643, 11930324, 11707860, 11203926, 11707990, 11426392, 11502297, 11578839, 11707869, 11708005, 10834661, 11203946, 11324785, 11124210, 10863222, 11124215, 10856824, 11203961, 10856826, 11124219, 11204605, 11707902, 10986488], # nancy\n }\n\n students_chinese_teachers = {}\n\n for teacher_id, student_ids in chinese_teachers.items():\n with DBSession() as session:\n teacher = session.query(Teachers).filter_by(id=teacher_id).one()\n for this_student in student_ids:\n students_chinese_teachers[this_student] = teacher\n\n bahasa_teachers = {\n 10872708: [10908165, 10856828],\n }\n students_bahasa_teachers = {}\n for teacher_id, student_ids in bahasa_teachers.items():\n with DBSession() as session:\n teacher = session.query(Teachers).filter_by(id=teacher_id).one()\n for this_student in student_ids:\n students_bahasa_teachers[this_student] = teacher\n\n if 'Grade' in report.course.name or 'Kindergarten' in report.course.name:\n which_folder = 'grades'\n template = 'frontend:elem_reports/templates/student_pyp_report.pt'\n\n with DBSession() as session:\n try:\n rep_statement = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n options(joinedload('sections')).\\\n options(joinedload('sections.learning_outcomes')).\\\n options(joinedload('sections.teachers')).\\\n options(joinedload('sections.strands')).\\\n options(joinedload('teacher')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id\n )\n att_statement = session.query(Absences).filter_by(term_id=term_id, student_id=student_id)\n\n attendance = 
att_statement.one()\n report = rep_statement.one()\n\n gns.tutorial(\"Got K-5 report info with joined information\", edit=(rep_statement, '.sql'), banner=True)\n except NoResultFound:\n if pdf:\n # raw_input(\"No K-5 report entry\")\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n\n subject_rank = {\n 'language': 0,\n 'mathematics': 1,\n 'unit of inquiry 1': 2,\n 'unit of inquiry 2': 3,\n 'unit of inquiry 3': 4,\n 'unit of inquiry 4': 4.1,\n 'unit of inquiry 5': 4.2,\n 'unit of inquiry 6': 4.3,\n 'art': 5,\n 'music': 6,\n 'physical education': 7,\n 'bahasa melayu': 8,\n 'chinese': 9,\n 'host nation': 10,\n 'self-management': 10000\n }\n report.sections = sorted([section for section in report.sections if subject_rank.get(section.name.lower(), 10001) < 10000], key=lambda x: subject_rank.get(x.name.lower(), 1000))\n report.sections = [section for section in report.sections if section.comment]\n\n # Only output sections that have any data in them\n # Comment out during development\n # report.sections = [section for section in report.sections if section.comment]\n\n if 'Kindergarten' in report.course.grade:\n grade_norm = 0\n else:\n grade_norm = int(re.sub(\"[^0-9]\", \"\", report.course.grade))\n\n rotate_list = [0, 1, 2, 5, 9]\n pagination_list = [0, 1, 4, 7, 10]\n\n for section in report.sections:\n section.rank = subject_rank.get(section.name.lower())\n report.sections = [s for s in report.sections if s.rank not in [4.1, 4.2, 4.3]] # skip\n\n gns.tutorial(\"Formatting each subject area in this order: {}\".format(\", \".join([r.name for r in report.sections])), banner=True)\n for section in report.sections:\n # Substitute the correct Chinese teachers based on manual info above\n # Do first so all subsequent operations take place properly\n if section.rank == 9 and student.id in students_chinese_teachers:\n section.teachers = [students_chinese_teachers.get(student.id)]\n\n if section.rank == 8 and student.id in students_bahasa_teachers:\n # Host Nations? 
and Bahasa mixed up maybe?\n section.teachers = [students_bahasa_teachers.get(student.id)]\n\n section.append_uoi_table = section.rank == 4\n section.display_rotated = section.rank in rotate_list\n\n if section.rank in [2]:\n section.organization_header = 'Units of Inquiry'\n section.name_after = \"\"\n elif section.rank in [3, 4]:\n section.organization_header = 'skip'\n section.name_after = \"\"\n else:\n section.organization_header = section.name + ' (' + \" & \".join([s.first_name + ' ' + s.last_name for s in section.teachers]) + ')'\n section.name_after = \"\"\n\n # Set the unit title if it needs to be\n if section.rank in [2, 3, 4, 4.1, 4.2, 4.3]:\n which_uoi = int(re.sub(\"[^0-9]\", \"\", section.name))\n section.name = uoi_table.get(grade_norm)[which_uoi]['title']\n\n # Determine pagination\n if section.rank in pagination_list: # TODO What about more than two inquiry units?\n section.pagination = True\n else:\n section.pagination = False\n\n section.learning_outcomes = sorted(section.learning_outcomes, key=lambda x: x.which)\n\n # Standardize the headings\n if section.rank in [2, 3, 4, 4.1, 4.2, 4.3]:\n section.name = section.name.title()\n section.name_after = uoi_table.get(grade_norm)[which_uoi]['central_idea']\n\n en_dash = u'\\u2013'\n for outcome in section.learning_outcomes:\n\n if section.rank in [2, 3, 4]:\n # Unit of inquiry\n outcome.heading = \"\"\n\n elif section.rank not in [0, 1]:\n outcome.heading = \"\" # blank\n\n else:\n # If it's a subject that we care to keep the data, standardize the format:\n outcome.heading = outcome.heading.replace(en_dash, '-')\n match = re.match('(.*)-', outcome.heading)\n if match:\n outcome.heading = match.group(1).strip()\n\n # Evaluates and adds data to items\n old_heading = None\n for outcome in section.learning_outcomes:\n\n if outcome.heading != old_heading:\n # Mark that indicates we need to evaluate\n\n if section.rank in [0, 1]:\n # Determine the effort assigned by the teacher for this\n effort = [s.selection for s in section.strands if s.label_titled.startswith(outcome.heading)]\n effort = effort[0] if len(effort) == 1 else (effort[0] if len(set(effort)) == 1 else \"<?>\")\n else:\n effort = [s.selection for s in section.strands if s.selection]\n effort = effort[0] if len(set(effort)) == 1 else str(effort)\n outcome.effort = {'G': \"Good\", 'N': \"Needs Improvement\", 'O': \"Outstanding\"}.get(effort, None)\n\n if not outcome.effort and internal_check:\n # Raise a problem here\n raise ReportIncomplete('something') # FIXME: There is no report incomplete exception\n\n old_heading = outcome.heading\n\n if not outcome.selection and internal_check:\n raise ReportIncomplete('something')\n gns.tutorial(\"Completed formatting of {} section\".format(section.name))\n\n report.sections = [s for s in report.sections if s.rank not in [4.1, 4.2, 4.3]] # skip\n\n elif 'Early' in report.course.name:\n which_folder = 'early_years'\n template = 'frontend:elem_reports/templates/student_pyp_ey_report.pt'\n\n # 1/2: semeseter\n # 0/1: early years\n\n ey_report_indicators = {\n 1: {\n 0: [\n {'name': 'Listening & Speaking', 'content': 'Learners show an understanding of the value of speaking and listening to communicate. They are using language to name their environment, to get to know each other, to initiate and explore relationships, to question and inquire.'},\n {'name': 'Viewing & Presenting', 'content': 'Learners show an understanding that the world around them is full of visual language that conveys meaning. 
They are able to interpret and respond to visual texts. They are extending and using visual language in more purposeful ways.'},\n {'name': 'Reading & Writing', 'content': 'Learners show an understanding that print represents the real or the imagined world. They have a concept of a \"book\", and an awareness of some of its structural elements. They use visual cues to recall sounds and the words they are \"reading\" to construct meaning.'},\n ],\n 1: [\n {'name': 'Number', 'content': 'Learners will understand that numbers are used for many different purposes in the real world. They will develop an understanding of one-to-one correspondence, be able to count and use number words and numerals to represent quantities.'},\n {'name': 'Shape and Space', 'content': 'Learners will develop an understanding that shapes have characteristics that can be described and compared.'},\n {'name': 'Pattern', 'content': 'Learners will develop an understanding that patterns and sequences occur in everyday situations. They will be able to identify and extend patterns in various ways.'},\n {'name': 'Measurement', 'content': 'Learners will develop an understanding of how measurement involves the comparison of objects and ordering.They will be able to identify and compare attributes of real objects.'},\n {'name': 'Data', 'content': 'Learners will develop an understanding of how the collection and organization of information helps to make sense of the world. They will sort and label objects by attributes and discuss information represented in graphs including pictographs and tally marks.'}\n ]\n },\n 2: {\n 0: [\n {'name': 'Listening & Speaking', 'content': 'Learners will show an understanding of the value of speaking and listening to communicate. They will use language to name their environment, to get to know each other, to initiate and explore relationships, to question and inquire.'},\n {'name': 'Viewing & Presenting', 'content': 'Learners will show an understanding that the world around them is full of visual language that conveys meaning. They will interpret and respond to visual texts. They will be extending and using visual language in more purposeful ways.'},\n {'name': 'Reading & Writing', 'content': 'Learners will show an understanding that print represents the real or the imagined world. They will develop the concept of a &ldquo;book&rdquo;, and an awareness of some of its structural elements. They will use visual cues to recall sounds and the words they are &ldquo;reading&rdquo; to construct meaning.'},\n ],\n 1: [\n {'name': 'Number', 'content': 'Learners will understand that numbers are used for many different purposes in the real world. They will develop an understanding of one-to-one correspondence, be able to count and use number words and numerals to represent quantities.'},\n {'name': 'Shape and Space', 'content': 'Learners will understand and use common language to describe paths, regions and boundaries of their immediate environment.'},\n {'name': 'Pattern', 'content': 'Learners will understand that patterns and sequences occur in everyday situations. They will be able to identify, describe, extend and create patterns in various ways.'},\n {'name': 'Measurement', 'content': 'Learners will develop an understanding of how measurement involves the comparison of objects and the ordering and sequencing of events. 
They will be able to identify, compare and describe attributes of real objects as well as describe and sequence familiar events in their daily routine.'},\n {'name': 'Data', 'content': 'Learners will develop an understanding of how the collection and organization of information helps to make sense of the world. They will sort and label objects by attributes and discuss information represented in graphs including pictographs and tally marks. The learners will discuss chance in daily events.'},\n ],\n },\n }\n with DBSession() as session:\n try: \n report = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n options(joinedload('sections')).\\\n options(joinedload('sections.learning_outcomes')).\\\n options(joinedload('sections.teachers')).\\\n options(joinedload('teacher')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id,\n ).one()\n student = session.query(Students).filter_by(id=student_id).one()\n attendance = session.query(Absences).filter_by(term_id=term_id, student_id=student_id).one()\n except NoResultFound:\n if pdf:\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n\n subject_rank = {\n 'self-management': -1,\n 'language': 0,\n 'mathematics': 1,\n 'unit of inquiry 1': 2,\n 'unit of inquiry 2': 3,\n 'unit of inquiry 3': 4,\n 'unit of inquiry 4': 4.1,\n 'unit of inquiry 5': 4.2,\n 'unit of inquiry 6': 4.3,\n 'art': 5,\n 'music': 6,\n 'physical education': 7,\n 'bahasa melayu': 8,\n 'chinese': 9,\n 'host nation': 10\n }\n\n report.sections = sorted([section for section in report.sections if subject_rank.get(section.name.lower()) < 10000], key=lambda x: subject_rank.get(x.name.lower(), 1000))\n # report.sections = report_sections\n # Filter out the un-needed units of inquiry\n # report.sections = [s for s in report.sections if s.rank <= 1 or (s.rank >= 4 and s.rank not in [4,4.1])]\n\n\n # Only output sections that have any data in them\n # Comment out during development\n # report.sections = [section for section in report.sections if section.comment and subject_rank.get(section.name.lower()) not in [2, 3]]\n\n grade_norm = -1\n\n pagination_list = [0, 3, 7, 10]\n\n for section in report.sections:\n\n section.rank = subject_rank.get(section.name.lower())\n\n if section.rank == -1:\n # blurb for self-management\n section.blurb = \"<i><p>Within the PYP, the approaches to learning skill of self management encompasses the development of gross and fine motor skills, spatial awareness, safety, healthy lifestyles, codes of behaviour and informed choices. </p><p>In an Early Years context these are reflected through the play based approach to teaching and learning. Reporting about self management in Early Years focuses on the whole child, stressing the importance of developing independence, social and emotional skills such as making relationships, managing feelings and behaviour, self confidence and self awareness. In addition the development of physical skills (moving and handling, health and self care) are highlighted as well. 
</p></i>\"\n else:\n section.blurb = \"\"\n\n if section.rank in [0, 1]: # Could be Lanugage & Maths, set up the report indicators\n ey = int('Early Years 1' in report.course.name) + 1\n section.report_indicators = ey_report_indicators[ey][section.rank] # change this to 2 later\n else:\n section.report_indicators = None\n\n # Substitute the correct Chinese teachers based on manual info above\n if section.rank == 9 and student.id in students_chinese_teachers:\n section.teachers = [students_chinese_teachers.get(student.id)]\n\n if section.rank in [999999]: # Turn this off\n section.organization_header = \"Units of Inquiry\"\n section.name_after = \"\"\n elif section.rank in [4, 4.1]:\n section.organization_header = 'skip'\n section.name_after = \"\"\n else:\n section.organization_header = None\n section.name_after = ' (' + \" & \".join([s.first_name + ' ' + s.last_name for s in section.teachers]) + ')'\n\n if section.rank in [2, 3, 4, 4.1, 4.2,4.3,4.4]:\n which_uoi = int(re.sub(\"[^0-9]\", \"\", section.name))\n section.name = uoi_table.get(grade_norm)[which_uoi]['title']\n section.name_after = \"\"\n\n # Determine pagination\n if section.rank in pagination_list: #TODO What about more than two inquiry units?\n section.pagination = True\n else:\n section.pagination = False\n\n if section.rank in [2, 3, 4, 4.1, 4.2,4.3,4.4]:\n section.name = section.name.title() \n section.name_after = uoi_table.get(grade_norm)[which_uoi]['central_idea']\n\n section.learning_outcomes = sorted(section.learning_outcomes, key=lambda x: x.which)\n\n # ey sections\n report.sections = [s for s in report.sections if s.rank not in [4, 4.1]]\n\n\n options={\n 'quiet': '',\n 'disable-javascript': '',\n 'encoding': 'utf-8',\n 'header-html': 'http://igbisportal.vagrant:6543/header-html',\n 'header-spacing': '5',\n\n\n 'footer-html': 'http://igbisportal.vagrant:6543/footer-html?student_id={}'.format(student.id),\n\n 'print-media-type': '',\n\n 'margin-left': '3mm',\n 'margin-right': '3mm',\n 'margin-bottom': '10mm'\n }\n\n\n if check:\n stu = student.first_nickname_last_studentid\n message = []\n for s in report.sections:\n if not s.teachers:\n message.append(\"No teacher assigned in {}\".format(s.name))\n #raise HTTPNotFound(\"##No teacher assigned for {} in {}##\".format(stu, s.name))\n if not s.comment:\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} missing {} comment'.format(teachers, s.name))\n #raise HTTPNotFound('##{} missing {} comment for {}##'.format(teachers, s.name, stu))\n\n if s.learning_outcomes and not 'Early' in report.course.name:\n\n if s.overall_comment == 'N/A':\n for o in s.learning_outcomes:\n if hasattr(o, 'effort') and not o.effort:\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter {} effort for {}'.format(teachers, o.heading, s.name))\n # raise HTTPNotFound()\n if not o.selection:\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter {} indication for {}'.format(teachers, o.heading, s.name))\n # raise HTTPNotFound('##{} did not enter indication for {} in {}##'.format(teachers, s.name, stu))\n\n elif s.overall_comment == '':\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter effort for single subject {}'.format(teachers, s.name)) \n\n if message:\n raise HTTPNotFound('##\\n({}) {}:\\n\\t{}##'.format(student.grade, student.first_nickname_last_studentid, \"\\n\\t\".join(message)))\n\n raise HTTPFound()\n\n with 
DBSession() as session:\n try:\n record = session.query(db.table.PrimaryReportLastUpdated).filter(db.table.PrimaryReportLastUpdated.student_id == student.id).one()\n last_updated = record.timestamp\n last_updated_date = last_updated.strftime(gns.config.reports.last_updated_format)\n except NoResultFound:\n last_updated_date = '<Unknown>'\n except MultipleResultsFound:\n last_updated_date = '<Internal DB Error: Multiple results found>'\n\n if pdf:\n result = render(template,\n dict(\n title=title,\n report=report,\n student=student,\n attendance=attendance,\n pdf=True,\n download_url=\"\",\n link_to_mb=\"\",\n last_updated=\"\",\n ),\n request=request)\n import pdfkit # import here because installation on server is hard\n\n prefix_file_name = '{}/pdf-downloads/{}/{}-Grade{}-{}-[{}]-'.format(\n gns.config.paths.home,\n which_folder,\n '55048',\n grade_norm,\n student.first_name + '-' + student.last_name,\n student.student_id\n )\n\n full_file = '{}({}).pdf'.format(prefix_file_name, last_updated_date)\n\n for _file in glob.glob(\"{}.*\".format(prefix_file_name)):\n # Remove any old stuff still lingering in there\n if _file != full_file:\n os.remove(_file)\n\n path = '{}/pdf-downloads/{}/{}-Grade{}-{}-[{}]-({}).pdf'.format(\n gns.config.paths.home,\n which_folder,\n '55048',\n grade_norm,\n student.first_name + '-' + student.last_name,\n student.student_id,\n last_updated_date\n )\n\n gns.tutorial(\"Sending to pdfkit, also saving to {path}\".format(path=path), edit=(result, '.pretty'), banner=True)\n try:\n pdffile = pdfkit.from_string(result, path, options=options) # render as HTML and return as a string\n except OSError as err:\n return HTTPInternalServerError(\"Problem with file? {}\".format(err))\n\n pdffile # not used\n if pdf.lower() == \"download\":\n content_type = \"application/octet-stream\"\n\n response = FileResponse(path, request=request, content_type=content_type)\n response.content_disposition = u\"attachment; filename={}.pdf\".format(title)\n return response\n\n else:\n content_type = \"application/pdf\"\n response = FileResponse(path, request=request, content_type=content_type, charset='utf-8')\n return response\n\n else:\n # Check when it was last updated\n\n if gns.tutorial_on:\n import pkg_resources\n package, filename = template.split(\":\")\n abspath = pkg_resources.resource_filename(*template.split(\":\"))\n from chameleon import PageTemplateFile\n template_file = PageTemplateFile(abspath)\n gns.tutorial(\"Loaded the template\", edit=(template_file.read(), '.html'), banner=True)\n result = render(template,\n dict(\n title=title,\n report=report,\n student=student,\n attendance=attendance,\n pdf=False,\n download_url=\"/students/{}/pyp_report/download/\".format(student.id),\n link_to_mb=\"https://igbis.managebac.com/classes/{}/pyp-gradebook/tasks/term_grades?student={}&term={}\".format(report.course.id, student.id, gns.config.managebac.current_term_id),\n last_updated=last_updated_date,\n ),\n request=request\n )\n response = Response(result)\n return response", "def show(request, pk, ck):\n\n project_container = get_object_or_404(ProjectContainer, id=pk)\n coding = get_object_or_404(CodingProject, id=ck)\n\n user = get_user(request)\n coder = Person.objects.using('datatracker').get(id=coding.coder)\n if project_container.code_request is None:\n mentor = coder\n else:\n mentor = Person.objects.using('datatracker').get(id=project_container.code_request.mentor)\n\n # According to model areas and working groups should come from documents\n tags = []\n keys = []\n areas = 
[]\n if project_container.docs:\n keys = filter(None, project_container.docs.split(';'))\n docs = list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('name', 'document__group__name',\n 'document__group__parent__name'))\n for name, gname, gparentname in docs:\n if gparentname:\n if gparentname not in areas:\n areas.append(gparentname) # use acronym?\n else:\n areas.append(gname)\n tags += coding.tags.all()\n\n if not areas:\n areas = [constants.STRING_NONE]\n if not tags:\n tags = [constants.STRING_NONE]\n\n return render_page(request, constants.TEMPLATE_MATCHES_SHOW, {\n 'projectcontainer': project_container,\n 'coding': coding,\n 'areas': areas,\n 'tags': tags,\n 'docs': docs,\n 'coder': coder,\n 'mentor': mentor,\n 'owner': user,\n 'list_template': constants.TEMPLATE_MATCHES_LIST\n })", "def get_single_user_contest_info(self, user_id=None):\n\n # retrieve info \n\treturn Point.objects.values('metric__name').filter(user__id=user_id, contest__contest_type=F('user__account__contest_type'), contest__status=project_constants.CONTEST_ACTIVE).annotate(total_points=Sum('points'), total_votes = Count('points'))", "def contains(request, action=None):\n\n username = request.session.get('username', False)\n mode1 = request.session.get('mode', False)\n mode = NameSpace.objects.get(ns_id=mode1)\n\n error_json = {\"Error\": \"No user authenticated\"}\n\n if (username):\n response_json = {}\n if request.method == 'GET':\n\n \"\"\"GET request: it returns a list of concepts the user inserted about that report \"\"\"\n\n report = request.GET.get('report_id')\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report1 = Report.objects.get(id_report=report, language = language)\n auto_required = request.GET.get('ns_id',None)\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n response_json = get_user_gt(user_get,mode_1,report1,language,'concepts')\n # print('concetti',response_json)\n return JsonResponse(response_json)\n\n elif request.method == 'POST' and action.lower() == 'insert':\n\n \"\"\" POST request: insert new concepts in the database\"\"\"\n\n request_body_json = json.loads(request.body)\n concepts_list = request_body_json['concepts_list']\n language = request_body_json['language']\n report = request_body_json['report_id']\n report1 = Report.objects.get(id_report=report)\n\n username = request.session.get('username', False)\n user1 = User.objects.get(username=username,ns_id=mode)\n usecase = request.session.get('usecase',False)\n type = 'concepts'\n\n if report is not None and concepts_list is not None:\n user = username\n count = 0\n already_inserted_list = []\n try:\n with transaction.atomic():\n for concept in concepts_list:\n concept = json.loads(concept)\n concept_url = concept['concept_url']\n semantic_area = concept['semantic_area']\n if not check_concept_report_existance(report, concept_url, user,mode, semantic_area,language):\n # Insert a new record\n if populate_contains_table(report, concept_url, user,mode, semantic_area,language):\n count += 1\n else:\n error_json = {\"error message\": \"insert in table 'contains' failed\"}\n return JsonResponse(error_json)\n else:\n already_inserted_list.append(concept)\n jsonDict = serialize_gt(type, usecase, username, report,language,mode)\n GroundTruthLogFile.objects.create(username=user1, 
id_report=report1,ns_id=mode,\n language = language, gt_json=jsonDict,\n gt_type=type, insertion_time=Now())\n except Exception as error:\n print(error)\n print('rolled back')\n\n if count == len(concepts_list):\n response_json = {\"message\": \"All concepts inserted successfully\"}\n else:\n response_json = {\"message\": \"Some concepts have been already inserted: [\"+ \", \".join(already_inserted_list)+\"]\"}\n else:\n response_json = {\"error\": \"Missing data\"}\n\n elif request.method == 'POST' and action.lower() == 'update':\n\n \"\"\" POST request: update the concepts that already exist in the database, a new ground truth is created \n if needed.\"\"\"\n\n request_body_json = json.loads(request.body)\n concepts_list = request_body_json['concepts_list']\n report = request_body_json['report_id']\n language = request_body_json['language']\n report1 = Report.objects.get(id_report = report,language = language)\n username = request.session.get('username',False)\n user1 = User.objects.get(username = username,ns_id=mode)\n usecase = request.session.get('usecase',False)\n type = 'concepts'\n if report is not None and concepts_list is not None:\n user = username\n count = 0\n rows = Contains.objects.filter(username = user1,ns_id=mode, id_report = report1, language = language)\n if rows.exists() and len(concepts_list) == 0:\n if mode1 == 'Human':\n with transaction.atomic():\n json_response=delete_contains_record(report1, language, None,mode, user, None)\n return JsonResponse(json_response,safe=False)\n else:\n # json_response = {'message': 'Robot mode, rows can not be deleted'}\n print('RESTORE')\n json_response = restore_robot_annotation(report1,'concepts',user1)\n return JsonResponse(json_response)\n elif not rows.exists() and len(concepts_list) == 0:\n json_response = {'message':'nothing to do'}\n return JsonResponse(json_response)\n if len(concepts_list) == 0:\n json_response = {'message': 'Nothing to do'}\n return JsonResponse(json_response)\n update = True\n if rows.exists():\n if rows.count() == len(concepts_list):\n for concept in concepts_list:\n concept_url = concept['concept_url']\n semantic_area = concept['semantic_area']\n concept_model = Concept.objects.get(concept_url = concept_url)\n concepts = Contains.objects.filter(name=semantic_area, username = user1,ns_id=mode, id_report = report1, language = language, concept_url = concept_model)\n if concepts.exists():\n update = False\n else:\n update = True\n break\n\n # Delete previous data for the specified user and report\n if update == True:\n try:\n with transaction.atomic():\n js = delete_contains_record(report1,language, None, mode,user, None)\n # Insert new data\n for concept in concepts_list:\n # Insert a new record\n concept_url = concept['concept_url']\n semantic_area = concept['semantic_area']\n if populate_contains_table(report, concept_url, user, mode,semantic_area,language):\n count += 1\n else:\n error_json = {\"error message\": \"insert in table 'contains' failed\"}\n return JsonResponse(error_json)\n jsonDict = serialize_gt(type, usecase, username, report,language,mode)\n if GroundTruthLogFile.objects.filter(username=user1, ns_id=mode,id_report=report1,language = language, gt_type=type).exists():\n GroundTruthLogFile.objects.filter(username=user1,ns_id=mode, id_report=report1, language=language,gt_type=type).delete()\n\n GroundTruthLogFile.objects.create(username=user1,ns_id=mode, id_report=report1,\n gt_json=jsonDict,language = language,\n gt_type=type, insertion_time=Now())\n\n except Exception as error:\n 
print(error)\n print('rolled back')\n\n if count == len(concepts_list):\n response_json = {\"message\": \"Update successfull\"}\n else:\n response_json = {\"error\": \"Update unsuccessfull\"}\n else:\n try:\n with transaction.atomic():\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='concepts', username=user,\n ns_id=mode,\n id_report=report1,\n language=language).exists():\n\n js = serialize_gt('concepts', usecase, username, report1.id_report, language,\n mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(),\n username=user1, ns_id=mode, id_report=report1,\n language=language, gt_type='concepts')\n ass = Contains.objects.filter(username=user1, id_report=report1,\n language=language,\n ns_id=mode).values('name',\n 'concept_url')\n for el in ass:\n sem = SemanticArea.objects.get(name=el['name'])\n concept_u = Concept.objects.get(concept_url=el['concept_url'])\n Contains.objects.filter(username=user1, id_report=report1,\n language=language,\n ns_id=mode, name=sem,\n concept_url=concept_u).delete()\n Contains.objects.create(username=user1, ns_id=mode, id_report=report1,\n language=language, name=sem, concept_url=concept_u,\n insertion_time=Now())\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n elif mode1 == 'Robot':\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode,\n id_report=report1, language=language,\n gt_type='concepts')\n # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina\n gt = GroundTruthLogFile.objects.filter(username=user1, ns_id=mode,\n id_report=report1,\n language=language,\n gt_type='concepts')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n\n # js = gt[0].gt_json\n js = serialize_gt('concepts', usecase, username, report1.id_report, language,\n mode)\n\n GroundTruthLogFile.objects.filter(username=user1, ns_id=mode, id_report=report1,\n language=language,\n gt_type='concepts').delete()\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(),\n username=user1, ns_id=mode, id_report=report1,\n language=language, gt_type='concepts')\n ass = Contains.objects.filter(username=user1, id_report=report1,\n language=language,\n ns_id=mode).values('name',\n 'concept_url')\n for el in ass:\n sem = SemanticArea.objects.get(name=el['name'])\n concept_u = Concept.objects.get(concept_url=el['concept_url'])\n Contains.objects.filter(username=user1, id_report=report1,\n language=language,\n ns_id=mode, name=sem,\n concept_url=concept_u).delete()\n Contains.objects.create(username=user1, ns_id=mode, id_report=report1,\n language=language, name=sem, concept_url=concept_u,\n insertion_time=Now())\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred trying to save your ground truth.'}\n return JsonResponse(json_response, status=500)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)\n else:\n response_json = {\"error\": \"Missing data\"}\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\" POST request: delete the concepts the user associated to a specific report \"\"\"\n\n request_body_json = json.loads(request.body)\n report = request_body_json['report_id']\n language = request_body_json['language']\n username = request.session.get('username', False)\n user1 = 
User.objects.get(username=username,ns_id=mode)\n report1 = Report.objects.get(id_report = report,language = language)\n with transaction.atomic():\n if report is not None and language is not None:\n if mode1 == 'Human':\n response_json = delete_contains_record(report, language, None,mode, user1, None)\n else:\n print('RESTORE')\n response_json = restore_robot_annotation(report1, 'concepts', user1)\n\n\n else:\n response_json = {\"Error\": \"Missing data\"}\n\n return JsonResponse(response_json)\n\n else:\n return JsonResponse(error_json)", "def project_report(request, **kwargs):\n\n #Creating the command for the logs \n print(\"in the project_report ...........................................\")\n outputStr = \"Updating the logs...\"\n #Making the output\n context = {\n \"page_title\": _(\"Test Details\"),\n \"test_lists\": 'report_list', #tests_list\n \"log_data\": outputStr\n }\n return render(request, 'rally_dashboard/events/test_logs.html', context)", "def referee_evaluate_thesis(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n\n \n user = auth.get_user(request)\n referee = Referee.objects.get(user = user)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelMember in PanelMember.objects.filter(referee = referee).filter(status = 'A'):\n thesis = panelMember.thesis\n dict = {}\n dict['title'] = thesis.title\n\n dict['student_full_name'] = thesis.student.first_name + ' ' + thesis.student.last_name\n dict['synopsis'] = thesis.synopsis\n dict['thesis'] = thesis.thesis\n dict['keywords'] = []\n\n if panelMember.answer_for_questions == True:\n if thesis.thesis_modifications == \"NULL\" or thesis.thesis_modifications == \"\":\n dict['thesis_modifications'] = None\n else:\n dict['thesis_modifications'] = thesis.thesis_modifications\n else:\n dict['thesis_modifications'] = None\n\n\n for keys in ThesisKeyword.objects.filter(thesis = thesis):\n dict['keywords'].append((IEEEKeyword.objects.get(id = keys.keyword.id)).keyword)\n \n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n \n all_thesis.append(dict)\n return render(\n request,\n 'app/referee/evaluate_thesis.html',\n {\n 'title':'Evaluate Thesis',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n }\n )\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def get(self, request, report_type):\n\n # Verify if the coordinator is correctly logged in.\n if not request.session.get('session', False) or not request.session['type'] == 'coordinator':\n return render(request, self.template_login)\n\n template = ''\n context = {}\n\n # Depending the option given return de requested reports.\n if report_type == 'teacher_report':\n template, context = self.teacher_report(request)\n elif report_type == 'career_teachers_report':\n template, context = self.career_teachers_report(request)\n elif report_type == 'career_teachers_excel':\n return self.career_teachers_excel(request)\n\n # If there is a great request render the PDF's, otheway redirect to the reports view.\n if template and context:\n return render_to_pdf_response(request, template, context)\n\n return redirect('/evaluations/career_results/32/47740/#reportes')", "def _render_reported(self) -> dict:\n logging.debug(f\"Fetching reported bugs for {self.user.display_name}\")\n reported = defaultdict(list)\n tasks = self.user.searchTasks(\n bug_reporter=self.user, status=self.status, created_since=self.since\n )\n tasks = [LPWrap(t) for t in tasks]\n for t in tasks:\n if 
in_window(self.window, t.bug.date_created):\n reported[t.bug_target_name].append(\n {t.bug.id: t.title,}\n )\n return reported" ]
[ "0.6772657", "0.6000003", "0.59264755", "0.5880304", "0.57433313", "0.5722247", "0.56435704", "0.5437227", "0.5424971", "0.54028565", "0.5310623", "0.5304719", "0.5268187", "0.5193207", "0.51647246", "0.51642805", "0.51576793", "0.515311", "0.5151735", "0.5143959", "0.5133754", "0.512587", "0.5104086", "0.509844", "0.5083222", "0.50794935", "0.5059842", "0.50493026", "0.50325316", "0.50265414" ]
0.69742775
0
This view returns the list of batches associated with a use case
def get_batch_list(request): json_resp = {} json_resp['batch_list'] = [] usecase = request.GET.get('usecase',None) # print(usecase) if usecase is None: batch = Report.objects.all().exclude(institute='PUBMED').values('batch') else: use_obj = UseCase.objects.get(name=usecase) batch = Report.objects.filter(name=use_obj).exclude(institute = 'PUBMED').values('batch') for el in batch: if el['batch'] not in json_resp['batch_list']: json_resp['batch_list'].append( el['batch']) # print(json_resp['batch_list']) json_resp['batch_list'] = sorted(json_resp['batch_list']) # print(json_resp) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_batches(request):\n\n template = 'batch_list.html'\n\n context = {\n 'invalid_due_date': request.GET.get('invalid_due_date')\n }\n\n try:\n get_batches(request, context)\n except Exception as e:\n context['error'] = '{} {}'.format(e, traceback.format_exc())\n\n # TODO: GO PAF - Start\n context['go_funder1'] = go_funder.objects.get(funder_code='1')\n context['go_funder2'] = go_funder.objects.get(funder_code='2')\n context['go_funder3'] = go_funder.objects.get(funder_code='3')\n context['go_funder4'] = go_funder.objects.get(funder_code='4')\n context['go_funder5'] = go_funder.objects.get(funder_code='5')\n # TODO: GO PAF - End\n\n return render(request, template, context)", "def view_batches() -> str:\r\n tank_possibilities = [\"Albert\", \"Brigadier\", \"Camilla\", \"Dylon\", \"Emily\",\r\n \"Florence\", \"Gertrude\", \"Harry\", \"R2D2\",\r\n \"No Tank Needed\"]\r\n return render_template(\"view_batches.html\",\r\n batch_output=current_brewings,\r\n tank_options=tank_possibilities)", "def get_auto_anno_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n languages = ['English','english']\n batch = Report.objects.filter(name=use_obj,language__in = languages).exclude(institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_auto_anno_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n languages = ['English', 'english']\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,language__in = languages,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def batches(self):\n return [self.get_batch(i) for i in range(self.num_batches)]", "def batch(self):\n return self._client.batch()", "def retag_all_batches(apps, schema_editor):\n pass", "def get_batches(auth, base_url='https://api.cratejoy.com/v1/'):\n \n batch_endpoint = '{}shipment_batches/'.format(base_url)\n\n resp = requests.get(\n batch_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(batch_endpoint,\n resp.status_code))\n print(resp.content)", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response", "def batches(set_name):\n global num_batches, 
args, ds_sizes \n # num_batches = how many batches in each dataset(train, valid, test)\n # ds_sizes = dataset_sizes \n for b in range(num_batches[set_name]):\n bi = b * args.batch_size # one batch mul batch_size \n bj = (b + 1) * args.batch_size \n if b == num_batches[set_name] - 1:\n bj = ds_sizes[set_name] # maybe only remainer set\n yield bi, bj", "def test_get_batch_statistics_request(self):\n self.trans_details.get_batch_statistics(\n batch_id = 123456,\n )", "def sfdcGetBatches(**kwargs):\n api_ver = kwargs.get('api_ver', '')\n session_id = kwargs.get('session_id', '')\n instance = kwargs.get('instance', '')\n job_id = kwargs.get('job_id', '')\n sfdcXml = kwargs.get('sfdcXml', {})\n\n url = sfdcXml.get('check', {}).get('batchList', {}).get('url', '')\n\n headers = {'X-SFDC-Session': session_id}\n url = url.format(instance=instance, api_ver=api_ver,\\\n job_id=job_id)\n\n resp = requests.post(url=url, headers=headers)\n dictResp = xmltodict.parse(resp.text)\n\n batch_ids = [batch['id'] for batch\\\n in dictResp['batchInfoList']['batchInfo']]\n\n return batch_ids", "def batches_list(project='batch', n_batches=5):\n\tbatches_links = [(project, i+1, f\"Batch {i+1}\") for i in range(n_batches)]\n\treturn batches_links", "def get_model_list(batch: int = 1):\n batch = str(batch)\n err_msg = \"Batch {} is not yet optimized.\".format(batch)\n assert batch in MODEL_ZOO_BATCH_NGC.keys(), err_msg\n return list(MODEL_ZOO_BATCH_NGC[batch].keys())", "def batch_list(session):\n weigh_events_sq = batch_events_by_type(session, \"weigh\").subquery(\"weigh_events\")\n propagate_events_sq = batch_events_by_type(session, \"propagate\").subquery(\n \"propagate_events\"\n )\n transfer_events_sq = batch_events_by_type(session, \"transfer\").subquery(\n \"transfer_events\"\n )\n harvest_events_sq = batch_events_by_type(session, \"harvest\").subquery(\n \"harvest_events\"\n )\n harvest_sq = harvest_with_unit_yield(session).subquery(\"harvest_with_unit_yield\")\n locations_sq = locations_with_extras(session).subquery(\"locations\")\n\n query = (\n session.query(\n BatchClass.id.label(\"batch_id\"),\n BatchClass.tray_size,\n BatchClass.number_of_trays,\n CropTypeClass.id.label(\"crop_type_id\"),\n CropTypeClass.name.label(\"crop_type_name\"),\n weigh_events_sq.c.event_time.label(\"weigh_time\"),\n propagate_events_sq.c.event_time.label(\"propagate_time\"),\n transfer_events_sq.c.event_time.label(\"transfer_time\"),\n harvest_events_sq.c.event_time.label(\"harvest_time\"),\n transfer_events_sq.c.next_action_time.label(\"expected_harvest_time\"),\n locations_sq.c.id.label(\"location_id\"),\n locations_sq.c.zone,\n locations_sq.c.aisle,\n locations_sq.c.column,\n locations_sq.c.shelf,\n locations_sq.c.summary.label(\"location_summary\"),\n harvest_sq.c.yield_per_sqm,\n harvest_sq.c.crop_yield,\n harvest_sq.c.waste_disease,\n harvest_sq.c.waste_defect,\n harvest_sq.c.over_production,\n (harvest_events_sq.c.event_time - transfer_events_sq.c.event_time).label(\n \"grow_time\"\n ),\n case(\n [\n (harvest_events_sq.c.event_time != None, \"harvest\"),\n (transfer_events_sq.c.event_time != None, \"transfer\"),\n (propagate_events_sq.c.event_time != None, \"propagate\"),\n (weigh_events_sq.c.event_time != None, \"weigh\"),\n ],\n else_=None,\n ).label(\"last_event\"),\n )\n .join(CropTypeClass, CropTypeClass.id == BatchClass.crop_type_id)\n # We inner join on weigh_events, because if the batch doesn't have a weigh event\n # it doesn't really exist, but outer join on the others since they are optional.\n 
.join(weigh_events_sq, weigh_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(propagate_events_sq, propagate_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(transfer_events_sq, transfer_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(harvest_events_sq, harvest_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(locations_sq, locations_sq.c.id == transfer_events_sq.c.location_id)\n .outerjoin(harvest_sq, harvest_sq.c.batch_event_id == harvest_events_sq.c.id)\n )\n return query", "def get_batch(self):\n return self.batch", "def test_get_settled_batch_list(self):\n self.trans_details.get_settled_batch_list(\n include_statistics = True,\n )\n\n self.trans_details.get_settled_batch_list(\n first_settlement_date=u\"2011-01-01T01:00:00\",\n )\n\n self.trans_details.get_settled_batch_list(\n last_settlement_date=u\"2011-01-01T01:00:00\",\n )\n\n # all three together\n self.trans_details.get_settled_batch_list(\n include_statistics = True,\n first_settlement_date=u\"2011-01-01T01:00:00\",\n last_settlement_date=u\"2011-01-02T01:00:00\"\n )", "def get_batch(self, batch_kwargs, batch_parameters=None) -> None:\n raise NotImplementedError", "def produce_query_batches(self):\n pass", "def get(self, request, format=None):\n benchmarkmodels = BenchmarkModel.objects.all()\n serializer = BenchmarkModelListSerializer(benchmarkmodels, many=True)\n return Response(serializer.data)", "def get_batch_runs(self):\n return self.batch_runs", "def get_cf_batch(self, cf_name, keyspace_name=None):\n return self._Get_CF_Batch(cf_name=cf_name, keyspace_name=keyspace_name)", "def RecordBatches(\n self, options: dataset_options.RecordBatchesOptions\n ) -> Iterator[pa.RecordBatch]:", "def get_samples(paths_to_batches_info, google_bucket_id, sublist=None):\n paths_to_samples = pd.read_excel(paths_to_batches_info, index_col=0)\n df_list = []\n\n for tsca_id, paths in paths_to_samples.iterrows():\n if sublist is not None and tsca_id not in sublist:\n continue\n # Make data Firecloud-compatible\n batch_data = prepare_batch_samples_for_metadata_export(paths.path_to_samples_info, tsca_id, google_bucket_id)\n df_list.append(batch_data)\n\n all_samples = pd.concat(df_list, axis=0)\n \n # Add cohort codes to data\n cohort_formatted_names = pd.read_table('cohort_files/cohort_names_dictionary.txt', header=None, names=['Collection', 'cohort_code'])\n all_samples = pd.merge(all_samples, cohort_formatted_names, on='Collection', how='left')\n return all_samples", "def get_plant_batches(db_path: str) -> List[PlantBatch]:\n plant_batches: List[PlantBatch] = []\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'batches.db'))\n cur: Cursor = conn.cursor()\n\n for row in cur.execute('SELECT Plant, Location, Tray, n_trays, planting_time FROM batches'):\n # print('\\n\\n')\n # for i in row:\n # print(f\"{type(i)}: {i}\")\n\n batch: PlantBatch = parse_batch_db_entry(row)\n\n plant_batches.append(batch)\n\n cur.close()\n conn.close()\n return plant_batches", "def view_batch(request, id):\n\n bh_rec = BatchHeaders.objects.get(id=id)\n\n context = {\n 'count': bh_rec.total_count,\n 'amount': bh_rec.total_amount,\n 'status': bh_rec.status.text_code,\n 'due_date': bh_rec.due_date,\n 'created': bh_rec.created,\n 'batch_id': bh_rec.id,\n 'batch_ref': bh_rec.reference,\n 'success': request.GET.get('success'),\n 'sent': bh_rec.sent,\n 'uk_due_date': bh_rec.due_date.strftime('%d/%m/%Y'),\n 'uk_call_date': bh_rec.call_date.strftime('%d/%m/%Y'),\n 'funder_code': bh_rec.funder.funder_code\n }\n\n if bh_rec.status == OPEN:\n\n try:\n 
batch_lock = BatchLock.objects.filter(batch_header=bh_rec, released__isnull=True).order_by('-id')[0]\n except:\n batch_lock = False\n\n if batch_lock:\n context['batch_lock'] = batch_lock\n context['batch_lock_check'] = True\n else:\n batch_lock_session_id = str(uuid.uuid1())\n batch_lock = BatchLock(batch_header=bh_rec, user=request.user, session_id=batch_lock_session_id)\n batch_lock.save()\n\n context['batch_lock'] = batch_lock\n\n resync = False\n url = reverse('core_dd_drawdowns:view_batch', args=[id])\n\n if request.META.get('HTTP_REFERER'):\n if not re.search(url, request.META['HTTP_REFERER']):\n resync = True\n else:\n resync = True\n\n if resync:\n resync_drawdowns_with_dd_history(bh_rec.reference)\n\n else:\n context['history'] = True\n\n query = {\n 'batch_header': bh_rec\n }\n for k in ('agreement_id__contains', 'amount', 'ddi_status', 'status'):\n if request.GET.get(k):\n query[k] = request.GET[k]\n context['filter'] = query\n\n if query.get('ddi_status'):\n if query['ddi_status'] == 'No Setup':\n del(query['ddi_status'])\n query['dd_reference__isnull'] = True\n else:\n query['ddi_status'] = ncf_dd_status_text.objects.get(dd_text_description=query.get('ddi_status'))\n\n recs = DrawDown.objects.filter(**query)\n\n context['filtered_count'] = recs.count()\n\n paginator = Paginator(recs, 10)\n page = request.GET.get('page')\n try:\n pub = paginator.page(page)\n except PageNotAnInteger:\n pub = paginator.page(1)\n except EmptyPage:\n pub = paginator.page(paginator.num_pages)\n\n context['records'] = pub\n\n # TODO: GO PAF - Start\n context['go_funder1'] = go_funder.objects.get(funder_code='1')\n context['go_funder2'] = go_funder.objects.get(funder_code='2')\n context['go_funder3'] = go_funder.objects.get(funder_code='3')\n context['go_funder4'] = go_funder.objects.get(funder_code='4')\n context['go_funder5'] = go_funder.objects.get(funder_code='5')\n # TODO: GO PAF - End\n\n context.update({'forecast': forecast_prediction(bh_rec.reference)})\n\n if query.get('ddi_status'):\n query['ddi_status'] = '{}'.format(query['ddi_status'])\n\n context['query'] = query\n\n return render(request, 'batch_screen.html', context)", "def get_batches(self, k=5):\n indexes = [x for x in range(len(self))]\n np.random.shuffle(indexes)\n s = 0\n size = int(np.ceil(len(indexes) / k))\n batches = []\n while s < len(indexes):\n batches += [indexes[s:s + size]]\n s = s + size\n return batches", "def get_instances(self) -> List[Instance]:\n big_bench_task: Dict = BIGBenchScenario.download_and_get_task(self.output_path, self.task, self.subtask)\n\n # From https://github.com/google/BIG-bench/blob/main/docs/doc.md#json-schema,\n # \"keywords\", \"description\" and \"examples\" are all required fields for a BIG-bench task.\n # keywords: \"A list of strings, where each string contains a separate keyword describing the task\"\n self.tags = big_bench_task[\"keywords\"]\n\n # description: \"A plaintext description of the task, suitable for a non-expert to perform the task and\n # potentially generate new examples.\"\n # Append the task, subtask and task-specific description from BIG-bench to `description`.\n self.description = (\n f\"{self.description} Task: {self.task} \"\n f\"{f'Subtask: {self.subtask} ' if self.subtask else ''} \"\n f\"Description: {big_bench_task['description']}\"\n )\n\n # examples: \"A list of dicts\"\n examples: List[Dict] = big_bench_task[\"examples\"]\n # Before splitting the data, shuffle the examples with a fixed seed for reproducibility.\n random.seed(0)\n random.shuffle(examples)\n\n # 
BIG-bench split the data according to\n # https://github.com/google/BIG-bench/blob/main/bigbench/bbseqio/README.md#splits:\n # all: This contains all the examples.\n # validation: This contains 20% of the examples or at least 16 examples.\n # train: All examples that are not in the validation split (generally 80% of the examples)\n # For few-shot eval, use the all split.\n #\n # TODO: I'm not sure what they mean by \"for few-shot eval, use the all split.\"\n # Does that mean they don't draw in-context examples from a separate train split?\n #\n # We split the data as follows:\n # test: This contains 20% of the examples or at least 16 examples.\n # validation: Same size as the test split.\n # train: Remaining examples, not in the test and validation splits.\n total_examples: int = len(examples)\n num_test_examples: int = max(int(0.2 * total_examples), BIGBenchScenario.MIN_TEST_EXAMPLES)\n num_train_examples: int = total_examples - num_test_examples * 2\n\n # Build `Instance`s from `examples`.\n instances: List[Instance] = []\n for i, example in enumerate(examples):\n # Build references.\n references: List[Reference]\n\n # Each example has \"input\" and either \"target_scores\" or \"target\".\n if \"target_scores\" in example:\n # For \"target_scores\", BIG-bench compares target scores against the model's predicted probabilities:\n # \"The example score is then the target score (as specified in the target_scores dict) of the target\n # that received the highest probability. Scores are averaged across examples. Conventional\n # multiple-choice accuracy can be achieved by assigning the correct target a score of 1, and\n # all incorrect targets a score of 0.\"\n # It seems all BIG-bench Lite tasks with target scores either have a target score\n # of 0 (incorrect answer) or 1 (correct answer).\n # So, for now, `Reference`s with the highest target score are correct.\n highest_score = max(example[\"target_scores\"].values())\n references = [\n Reference(Output(text=target), tags=[CORRECT_TAG] if score == highest_score else [])\n for target, score in example[\"target_scores\"].items()\n ]\n elif \"target\" in example:\n # All the outputs in \"target\" are correct e.g., {\"input\": \"1 + 1 = \", \"target\": [\"two\",\"2\"]}.\n # \"target\" can either be a list of correct values or a single correct value.\n targets: List[str] = example[\"target\"] if type(example[\"target\"]) == list else [example[\"target\"]]\n references = [Reference(Output(text=target), tags=[CORRECT_TAG]) for target in targets]\n else:\n raise ValueError(f\"Invalid example that doesn't have `target` or `target_scores` field: {example}\")\n\n # Get split based on current index `i`.\n split: str\n if i < num_train_examples:\n split = TRAIN_SPLIT\n elif num_train_examples <= i < num_train_examples + num_test_examples:\n split = TEST_SPLIT\n else:\n split = VALID_SPLIT\n\n instances.append(Instance(Input(text=example[\"input\"]), references, split=split))\n\n return instances", "def batch(self):\n return self._batch" ]
[ "0.65461797", "0.6110669", "0.60014474", "0.5702793", "0.56802934", "0.5664144", "0.56525695", "0.5584406", "0.5498826", "0.5481742", "0.5451509", "0.54278344", "0.53596485", "0.53492475", "0.53231066", "0.52532625", "0.52472556", "0.5156872", "0.5120976", "0.51208967", "0.510708", "0.5062218", "0.5057387", "0.50247747", "0.4983182", "0.49785578", "0.49729684", "0.4970825", "0.49560818", "0.49466684" ]
0.6295193
1
This view returns the list of batches associated with a use case whose reports are in English
def get_auto_anno_batch_list(request): json_resp = {} usecase = request.GET.get('usecase') # print(usecase) use_obj = UseCase.objects.get(name=usecase) json_resp['batch_list'] = [] languages = ['English','english'] batch = Report.objects.filter(name=use_obj,language__in = languages).exclude(institute = 'PUBMED').values('batch') for el in batch: if el['batch'] not in json_resp['batch_list']: json_resp['batch_list'].append( el['batch']) # print(json_resp['batch_list']) json_resp['batch_list'] = sorted(json_resp['batch_list']) # print(json_resp) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auto_anno_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n languages = ['English', 'english']\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,language__in = languages,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_batch_list(request):\n\n\n json_resp = {}\n json_resp['batch_list'] = []\n\n usecase = request.GET.get('usecase',None)\n # print(usecase)\n if usecase is None:\n batch = Report.objects.all().exclude(institute='PUBMED').values('batch')\n else:\n use_obj = UseCase.objects.get(name=usecase)\n batch = Report.objects.filter(name=use_obj).exclude(institute = 'PUBMED').values('batch')\n\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })", "def get_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_all(self, lang: str = None):\n pass", "def index(self):\n try:\n query = h.eagerload_morpheme_language_model(Session.query(MorphemeLanguageModel))\n query = h.add_order_by(query, dict(request.GET), self.query_builder)\n return h.add_pagination(query, dict(request.GET))\n except Invalid, e:\n response.status_int = 400\n return {'errors': e.unpack_errors()}", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n 
json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_report_translations(request):\n\n id_report = request.GET.get('id_report',None)\n if id_report is not None:\n languages = []\n lang = Report.objects.filter(id_report = id_report)\n for el in lang:\n if el.language not in languages:\n languages.append(el.language)\n\n json_resp = {}\n # print(languages)\n json_resp['languages'] = languages\n return JsonResponse(json_resp)", "def test_lang_subset_unlikely_language(en_multilingual):\n sentences = [\"你好\" * 200]\n docs = [Document([], text=text) for text in sentences]\n en_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"en\"]\n\n processor = en_multilingual.processors['langid']\n model = processor._model\n text_tensor = processor._text_to_tensor(sentences)\n en_idx = model.tag_to_idx['en']\n predictions = model(text_tensor)\n assert predictions[0, en_idx] < 0, \"If this test fails, then regardless of how unlikely it was, the model is predicting the input string is possibly English. 
Update the test by picking a different combination of languages & input\"", "def data_en(request):\n files = myFile.objects.order_by('name')\n context = {'files' : files}\n return render(request, 'sacms/data_en.html', context)", "def collect_english_cats(self):\n tf.logging.info('collecting english categories')\n self.english_cats = list(\n self.frames(filter_english=True, filter_category=True))", "async def get_multilingual(filename: str):\n query_result = {\"langList\": []}\n database = get_db()\n query_displayname = database.AQLQuery(\n query=main_queries.QUERY_MULTILINGUAL_LANGS,\n bindVars={\n \"filename\": filename\n },\n rawResults=True\n )\n query_result = {\"langList\": query_displayname.result[0]}\n return query_result", "def lm_train(data_dir, language, fn_LM):\r\n\r\n # TODO: Implement Function\r\n\r\n language_model, unigram, bigram = {}, {}, {}\r\n CKP = \"WEAREDELETINGEND\"\r\n pre_w = CKP\r\n for root, dirs, files in os.walk(data_dir, topdown=False):\r\n for name in files:\r\n if name.endswith(language):\r\n #print(\"reading \", name)\r\n filepath = os.path.join(data_dir, name)\r\n readingfile = open(filepath, \"r\")\r\n for line in readingfile:\r\n processed = preprocess(line, language)\r\n if len(processed) != 0:\r\n tokenList = processed.split()\r\n for w in tokenList:\r\n # ======================\r\n # for unigram structure\r\n # ======================\r\n # not exist yet, initialize it at count 1\r\n if w not in unigram.keys():\r\n unigram[w] = 1\r\n else:\r\n unigram[w] += 1\r\n\r\n # ======================\r\n # for bigram structure\r\n # ======================\r\n if pre_w not in bigram.keys():\r\n bigram[pre_w] = {} # building the first words level\r\n bigram[pre_w][w] = 1\r\n else:\r\n if w not in bigram[pre_w].keys():\r\n bigram[pre_w][w] = 1\r\n else:\r\n bigram[pre_w][w] += 1\r\n pre_w = w\r\n pre_w = CKP\r\n\r\n\r\n language_model[\"uni\"] = unigram\r\n bigram.pop(CKP)\r\n bigram.pop(\"SENTEND\")\r\n language_model[\"bi\"] = bigram\r\n\r\n #Save Model\r\n with open(fn_LM+'.pickle', 'wb') as handle:\r\n pickle.dump(language_model, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n return language_model", "def view_batches(request):\n\n template = 'batch_list.html'\n\n context = {\n 'invalid_due_date': request.GET.get('invalid_due_date')\n }\n\n try:\n get_batches(request, context)\n except Exception as e:\n context['error'] = '{} {}'.format(e, traceback.format_exc())\n\n # TODO: GO PAF - Start\n context['go_funder1'] = go_funder.objects.get(funder_code='1')\n context['go_funder2'] = go_funder.objects.get(funder_code='2')\n context['go_funder3'] = go_funder.objects.get(funder_code='3')\n context['go_funder4'] = go_funder.objects.get(funder_code='4')\n context['go_funder5'] = go_funder.objects.get(funder_code='5')\n # TODO: GO PAF - End\n\n return render(request, template, context)", "def fetchTranslation(self, language):\n pass", "def showTranslatedWithoutJoin(cls):\n print (\"ALL WORDS WITH TRANSLATIONS STORED IN DATABASE:\")\n for word1 in EnglishHelper.query(\"SELECT english_word FROM EnglishWords\", fetchAll=True):\n try:\n print word1[0],\" - \", (EnglishHelper.query(\"select polish_word from PolishWords where \"\n \" id_pl=(select id_pl from translations where \"\n \"id_eng = (select id_eng from EnglishWords \"\n \"where english_word = '%s'))\"%word1))[0].encode('utf-8')\n except:\n print \"There is no translation, sorry :(\"", "def snippets_by_language(request, slug):\n language = get_object_or_404(Language, slug__exact=slug)\n return 
list_detail.object_list(request,\n queryset=Snippet.objects.get_by_language(slug),\n extra_context={ 'object': language },\n template_name='cab/language_detail.html',\n **base_generic_dict)", "def list_straten_adapter(obj, request):\n naam = obj.label\n for name, language in obj.namen:\n if language == 'nl' and name:\n naam = name\n break\n return {\n 'id': obj.id,\n 'label': obj.label,\n 'naam': naam,\n 'status': {\n 'id': obj.status.id,\n 'naam': obj.status.naam,\n 'definitie': obj.status.definitie\n },\n }", "def get_langs(id):", "def _load_word_embedding(self, lang):\n dict_fold = 'train' # which fold of the data will be used to produce results\n if self.args.task == 'conneau' or self.args.task == 'xling':\n data_dir = os.path.join(self.args.data_dir, 'MUSE')\n lang_path = os.path.join(data_dir, 'wiki.' + lang + '.vec')\n elif self.args.task == 'dinu':\n data_dir = os.path.join(self.args.data_dir, 'dinu')\n lang_path = os.path.join(data_dir, 'embeddings', lang + '.emb.txt')\n elif self.args.task == 'zhang':\n order = [lang,trg]\n if lang == 'en':\n order = order[::-1]\n data_dir = os.path.join(self.args.home_dir,'pkg/UBiLexAT/data/','-'.join(order))\n lang_path = os.path.join(data_dir, 'word2vec.' + lang)\n\n langfile = open(lang_path, encoding=self.args.encoding, errors='surrogateescape')\n words, xs = embeddings.read(langfile, self.args.maxs)\n langfile.close()\n # Build word to index map\n word2ind = {word: i for i, word in enumerate(words)}\n\n return xs, words, word2ind", "def test_list_source_language(self):\n\n # check if documentalist has access to the list view\n self.login_documentalist()\n response = self.client.get('/languages/' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n response = self.client.get('/languages/')\n self.assertContains(response, \"português\")", "def pubmed_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n json_resp['annotated'] = 0\n json_resp['tot'] = 0\n json_resp['usecase'] = []\n languages = ['English','english']\n for el in usecases:\n use = el.name\n json_resp[use] = {}\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages,institute = 'PUBMED')\n for el in report:\n if el.batch not in batches:\n batches.append(el.batch)\n count_rep = report.count()\n\n if count_rep > 0:\n json_resp['usecase'].append(str(use))\n json_resp['tot'] = json_resp['tot'] + count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n json_resp['annotated'] = json_resp['annotated'] + groundTruths\n\n for batch in batches:\n\n json_resp[use][batch] = {}\n if batch == 'all' or batch is None:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED')\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s 
and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED',batch = batch)\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch,tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print('risposta',json_resp)\n return JsonResponse(json_resp)", "def get_language_features(self, queries, gram):\n queries_dim = queries.dim()\n\n if queries_dim==3:\n N = queries.size(0)\n M = queries.size(1)\n num_words = self.num_words[gram]\n queries = queries.view(-1, num_words) # resize (N,M,k) -> (N*M,k)\n\n language_feats = self.language_nets[self.gram_id[gram]](queries)\n\n if queries_dim==3:\n language_feats = language_feats.view(N, M, -1)\n\n return language_feats", "def translate(self, language=None):", "def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]", "def annotationlabel(request,action=None):\n\n username = request.session['username']\n mode1 = request.session['mode']\n auto_required = request.GET.get('ns_id', None)\n mode = NameSpace.objects.get(ns_id=mode1)\n\n # print('mode',mode1)\n usecase = request.session['usecase']\n # language = request.GET.get('language',request.session['language'])\n type = 'labels'\n\n if request.method == 'GET' and action.lower() == 'user_labels':\n\n \"\"\"GET request: given the report, the labels annotated by the user are returned\"\"\"\n\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report_id = request.GET.get('report_id')\n report1 = Report.objects.get(id_report = report_id,language = language)\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n json_dict = get_user_gt(user_get,mode_1,report1,language,'labels')\n return JsonResponse(json_dict,safe=False)\n\n elif request.method == 'GET' and action.lower() == 'all_labels':\n\n \"\"\" GET request: given the use case, all the labels associated to that usecase are returned. 
\"\"\"\n\n labels = AnnotationLabel.objects.filter(name=usecase).values('seq_number','label','annotation_mode')\n print(labels)\n json_dict = {}\n if len(labels) > 0:\n\n if mode1 == 'Human' or auto_required == 'Human':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Manual' in el['annotation_mode']:\n # if int(el['seq_number']) > count: # i primi 20 sono inseriti automaticamente\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n if mode1 == 'Robot' or auto_required == 'Robot':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Automatic' in el['annotation_mode']:\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n\n else:\n json_dict['labels'] = []\n\n json_dict['labels'] = sorted(json_dict['labels'], key=lambda json: json['seq_number'])\n print(json_dict)\n return JsonResponse(json_dict)\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are removed together with the\n associated groundtruth.\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting parameters.'}\n return json_response\n to_del = Associate.objects.filter(username=user, ns_id=mode, id_report=report1, language=language)\n if mode1 == 'Human':\n try:\n with transaction.atomic():\n\n if to_del.exists():\n json_response = delete_all_annotation(to_del, user, report1,language, type,mode)\n\n else:\n json_response = {'msg':'nothing to do'}\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred saving the ground_truth and the labels'}\n return JsonResponse(json_response)\n else:\n return JsonResponse(json_response)\n else:\n json_response = restore_robot_annotation(report1, 'labels', user)\n return JsonResponse(json_response)\n\n\n if request.method == 'POST' and action.lower() == 'insert':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are added in the database and a new \n JSON groundtruth is created. \"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n labels_to_save = request_body_json['labels']\n # In this case the user manually deletes all the labels (NOT WITH CLEAR BUTTON) and saves.\n if len(labels_to_save) == 0 and mode1 == 'Human':\n\n \"\"\"If there are not labels to save, if there is a ground truth saved in the database, this is removed,\n otherwise no action is performed. 
\"\"\"\n\n rows = Associate.objects.filter(username = user,ns_id=mode, id_report = report1, language = language)\n if rows.exists():\n try:\n with transaction.atomic():\n json_response = delete_all_annotation(rows,user,report1,language,type,mode)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred.'}\n return JsonResponse(json_response, status=500)\n else:\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'Nothing to save.'}\n return JsonResponse(json_response)\n\n if len(labels_to_save) == 0 and mode1 == 'Robot':\n\n \"\"\" If there are not labels to save and the name space is Robot no action is performed and the already \n existing ground-truth is kept \"\"\"\n to_del = Associate.objects.filter(id_report=report1, language=language, username=user, ns_id=mode)\n # print('RESTORE')\n json_response = restore_robot_annotation(report1, 'labels',user)\n return JsonResponse(json_response)\n\n update = True\n\n \"\"\" Check if the user's labels she inserted are as many as the rows already present in the db: \n if they are not: update the annotation: the old annotation is replaced with the new one\n if they are: check if the labels existing are those inserted, in this case nothing is done, otherwise \n the current groundtruth is updated. \"\"\"\n\n existing_rows = Associate.objects.filter(username = user,ns_id=mode, id_report =report1,language =language)\n if existing_rows.exists():\n if existing_rows.count() == len(labels_to_save):\n for label in labels_to_save:\n label1 = AnnotationLabel.objects.get(name=usecase, label=label['label'], seq_number=label['seq_number'])\n if not Associate.objects.filter(username=user,ns_id=mode, seq_number=label1.seq_number, label=label1,\n id_report=report1, language=language).exists():\n update = True\n break\n else:\n update = False\n if update == True:\n try:\n with transaction.atomic():\n # Remove all the existing labels inserted by the user for that report. 
The existing ground truth is kept untile the deletion is successful\n to_del = Associate.objects.filter(username=user,ns_id=mode, id_report=report1,language = language)\n delete_all_annotation(to_del,user,report1,language,type,mode)\n\n json_resp_labels = update_annotation_labels(labels_to_save,usecase,user,report1,language,mode)\n\n jsonDict = serialize_gt(type, usecase, username, report_id,language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language = language,\n gt_json=jsonDict, gt_type=type,insertion_time=Now())\n\n except (Exception) as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred saving the ground_truth '\n 'and the labels, the transaction rolledback'}\n return JsonResponse(json_response)\n\n else:\n return JsonResponse(json_resp_labels)\n else:\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='labels', username=user, ns_id=mode, id_report=report1,\n language=language).exists():\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n json_response = {'message': 'ok'}\n else:\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n\n elif mode1 == 'Robot':\n\n \"\"\" In this section the name space Robot is handled: If the user is in the AUTOMATIC MODE and the labels\n she inserts are those annotated by the algorithm, this means that she agrees with the annotation of the \n Robot user. 
The annotation does not change, only the insertion time is changed.\"\"\"\n\n try:\n with transaction.atomic():\n # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode,\n id_report=report1, language=language,\n gt_type='labels')\n\n gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels').delete()\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n except Exception as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred updating labels dates'}\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)", "def handler_unbabel_translations():\n jobs = []\n for item in db.stories.find({}):\n for lang in [l[0] for l in UNBABEL_API_LANGUAGES if l[0] != 'en']:\n uid = item.get('unbabel_uid_{}'.format(lang), None)\n if uid:\n jobs.append(get_unbabel_translation.s(uid, lang))\n job = group(jobs)\n job.apply_async()\n return job", "def test_language_sensitivity(self): \n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(get_wording_text('test_1'), lang)", "def test_english_gb_no_translation(self):\n resp = ResponseFactory(\n locale=u'en-GB',\n description=u'hello',\n translated_description=u''\n )\n\n # No new jobs should be generated\n eq_(len(resp.generate_translation_jobs()), 0)\n\n # Re-fetch from the db and make sure the description was copied over\n resp = Response.objects.get(id=resp.id)\n eq_(resp.description, resp.translated_description)", "def translate(lang):\n\n\tlangfilename = os.path.join(\"data\", \"translations\", lang + \".json\")\n\tif os.path.exists(langfilename):\n\t\twith open(langfilename, 'r') as langfile:\n\t\t\ttranslations = json.loads(langfile.read())\n\telse:\n\t\ttranslations = {}\n\n\twith open(os.path.join(\"data\", \"translations\", \"message_list.json\"), \"r\") as message_list_file:\n\t\tmessages = json.loads(message_list_file.read())\n\n\tcnt = 0\n\tfor m in messages:\n\t\tcnt += 1\n\t\t#if cnt > 15: break\n\t\tif not translations.get(m):\n\t\t\tprint 'translating: ' + m\n\t\t\tresponse = requests.get(\"\"\"https://www.googleapis.com/language/translate/v2\"\"\",\n\t\t\t\tparams = {\n\t\t\t\t\t\"key\": conf.google_api_key,\n\t\t\t\t\t\"source\": \"en\",\n\t\t\t\t\t\"target\": lang,\n\t\t\t\t\t\"q\": m\n\t\t\t\t}, verify=False)\n\n\t\t\tt = 
response.json[\"data\"][\"translations\"][0][\"translatedText\"] or m\n\t\t\ttranslations[m] = t.encode('utf-8')\n\n\t\t\twith open(langfilename, 'w') as langfile:\n\t\t\t\tlangfile.write(json.dumps(translations, indent=1, sort_keys=True))" ]
[ "0.6487992", "0.5999556", "0.58158106", "0.57203585", "0.5676394", "0.56275505", "0.5532938", "0.5520498", "0.5510358", "0.55094916", "0.5498899", "0.54472136", "0.53090847", "0.5303731", "0.52797216", "0.5278192", "0.52427185", "0.5226427", "0.5222747", "0.5220931", "0.521236", "0.5207087", "0.5200088", "0.51822114", "0.5177833", "0.5167159", "0.5156491", "0.51536214", "0.5128627", "0.51126736" ]
0.6674985
0
This view returns the list of batches associated with a PUBMED use case
def get_PUBMED_batch_list(request): json_resp = {} usecase = request.GET.get('usecase') # print(usecase) use_obj = UseCase.objects.get(name=usecase) json_resp['batch_list'] = [] batch = Report.objects.filter(name=use_obj,institute = 'PUBMED').values('batch') for el in batch: if el['batch'] not in json_resp['batch_list']: json_resp['batch_list'].append( el['batch']) # print(json_resp['batch_list']) json_resp['batch_list'] = sorted(json_resp['batch_list']) # print(json_resp) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_batches(request):\n\n template = 'batch_list.html'\n\n context = {\n 'invalid_due_date': request.GET.get('invalid_due_date')\n }\n\n try:\n get_batches(request, context)\n except Exception as e:\n context['error'] = '{} {}'.format(e, traceback.format_exc())\n\n # TODO: GO PAF - Start\n context['go_funder1'] = go_funder.objects.get(funder_code='1')\n context['go_funder2'] = go_funder.objects.get(funder_code='2')\n context['go_funder3'] = go_funder.objects.get(funder_code='3')\n context['go_funder4'] = go_funder.objects.get(funder_code='4')\n context['go_funder5'] = go_funder.objects.get(funder_code='5')\n # TODO: GO PAF - End\n\n return render(request, template, context)", "def get_auto_anno_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n languages = ['English', 'english']\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,language__in = languages,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def batch(self):\n return self._client.batch()", "def get_batch_list(request):\n\n\n json_resp = {}\n json_resp['batch_list'] = []\n\n usecase = request.GET.get('usecase',None)\n # print(usecase)\n if usecase is None:\n batch = Report.objects.all().exclude(institute='PUBMED').values('batch')\n else:\n use_obj = UseCase.objects.get(name=usecase)\n batch = Report.objects.filter(name=use_obj).exclude(institute = 'PUBMED').values('batch')\n\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def view_batches() -> str:\r\n tank_possibilities = [\"Albert\", \"Brigadier\", \"Camilla\", \"Dylon\", \"Emily\",\r\n \"Florence\", \"Gertrude\", \"Harry\", \"R2D2\",\r\n \"No Tank Needed\"]\r\n return render_template(\"view_batches.html\",\r\n batch_output=current_brewings,\r\n tank_options=tank_possibilities)", "def batches(self):\n return [self.get_batch(i) for i in range(self.num_batches)]", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response", "def retag_all_batches(apps, schema_editor):\n pass", "def get_batch(self):\n return self.batch", "def get_auto_anno_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n languages = ['English','english']\n batch = Report.objects.filter(name=use_obj,language__in = languages).exclude(institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def batches_list(project='batch', n_batches=5):\n\tbatches_links = [(project, i+1, f\"Batch {i+1}\") for i in range(n_batches)]\n\treturn batches_links", "def produce_query_batches(self):\n pass", "def 
get_batches(auth, base_url='https://api.cratejoy.com/v1/'):\n \n batch_endpoint = '{}shipment_batches/'.format(base_url)\n\n resp = requests.get(\n batch_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(batch_endpoint,\n resp.status_code))\n print(resp.content)", "def batch_list(session):\n weigh_events_sq = batch_events_by_type(session, \"weigh\").subquery(\"weigh_events\")\n propagate_events_sq = batch_events_by_type(session, \"propagate\").subquery(\n \"propagate_events\"\n )\n transfer_events_sq = batch_events_by_type(session, \"transfer\").subquery(\n \"transfer_events\"\n )\n harvest_events_sq = batch_events_by_type(session, \"harvest\").subquery(\n \"harvest_events\"\n )\n harvest_sq = harvest_with_unit_yield(session).subquery(\"harvest_with_unit_yield\")\n locations_sq = locations_with_extras(session).subquery(\"locations\")\n\n query = (\n session.query(\n BatchClass.id.label(\"batch_id\"),\n BatchClass.tray_size,\n BatchClass.number_of_trays,\n CropTypeClass.id.label(\"crop_type_id\"),\n CropTypeClass.name.label(\"crop_type_name\"),\n weigh_events_sq.c.event_time.label(\"weigh_time\"),\n propagate_events_sq.c.event_time.label(\"propagate_time\"),\n transfer_events_sq.c.event_time.label(\"transfer_time\"),\n harvest_events_sq.c.event_time.label(\"harvest_time\"),\n transfer_events_sq.c.next_action_time.label(\"expected_harvest_time\"),\n locations_sq.c.id.label(\"location_id\"),\n locations_sq.c.zone,\n locations_sq.c.aisle,\n locations_sq.c.column,\n locations_sq.c.shelf,\n locations_sq.c.summary.label(\"location_summary\"),\n harvest_sq.c.yield_per_sqm,\n harvest_sq.c.crop_yield,\n harvest_sq.c.waste_disease,\n harvest_sq.c.waste_defect,\n harvest_sq.c.over_production,\n (harvest_events_sq.c.event_time - transfer_events_sq.c.event_time).label(\n \"grow_time\"\n ),\n case(\n [\n (harvest_events_sq.c.event_time != None, \"harvest\"),\n (transfer_events_sq.c.event_time != None, \"transfer\"),\n (propagate_events_sq.c.event_time != None, \"propagate\"),\n (weigh_events_sq.c.event_time != None, \"weigh\"),\n ],\n else_=None,\n ).label(\"last_event\"),\n )\n .join(CropTypeClass, CropTypeClass.id == BatchClass.crop_type_id)\n # We inner join on weigh_events, because if the batch doesn't have a weigh event\n # it doesn't really exist, but outer join on the others since they are optional.\n .join(weigh_events_sq, weigh_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(propagate_events_sq, propagate_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(transfer_events_sq, transfer_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(harvest_events_sq, harvest_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(locations_sq, locations_sq.c.id == transfer_events_sq.c.location_id)\n .outerjoin(harvest_sq, harvest_sq.c.batch_event_id == harvest_events_sq.c.id)\n )\n return query", "def list(self, request):\n urls = {\n 'msg': 'Must use bulk_by_sample to get SCCmec Primer hits',\n }\n\n return Response(urls)", "def pp_all_batches(self):\n for batch in self.batch_headers:\n self.pp_batch(batch)", "def bulk_by_sample(self, request):\n if request.method == 'POST':\n validator = validate_list_of_ids(request.data, max_query=500)\n if validator['has_errors']:\n return Response({\n \"message\": validator['message'],\n \"data\": request.data\n })\n else:\n return self.formatted_response(get_sccmec_proteins_by_sample(\n request.data['ids'],\n request.user.pk\n ))", "def produce_query_batches(self):\n self.__generate_queries()\n return self.__bobs", "def 
sample_batch(self) -> List:\n return self.buffer.sample(self.batch_size)", "def get_batch_runs(self):\n return self.batch_runs", "def batch(self):\n return self._batch", "def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch", "def request_chunking(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"request_chunking\")", "def fetch_batch(self, phase):\n pass", "def batches(set_name):\n global num_batches, args, ds_sizes \n # num_batches = how many batches in each dataset(train, valid, test)\n # ds_sizes = dataset_sizes \n for b in range(num_batches[set_name]):\n bi = b * args.batch_size # one batch mul batch_size \n bj = (b + 1) * args.batch_size \n if b == num_batches[set_name] - 1:\n bj = ds_sizes[set_name] # maybe only remainer set\n yield bi, bj", "def get_events_batch() -> PayloadDictList:\n ...", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "def batches(self, batch_size, count):\n entries = self.entries()\n for _ in range(count):\n yield [next(entries) for _ in range(batch_size)]", "def get(self, request, work_batch_id):\n try:\n work_batch = WorkBatch.objects.get(pk=work_batch_id)\n except WorkBatch.DoesNotExist:\n raise ResourceDoesNotExist\n\n file_list = WorkBatchFile.objects.filter(\n work_batch=work_batch,\n ).select_related('file', 'dist').order_by('name')\n\n return self.paginate(\n request=request,\n queryset=file_list,\n order_by='name',\n paginator_cls=OffsetPaginator,\n on_results=lambda x: serialize(x, request.user),\n )", "def test_batch_accepting():\n client = create_client()\n message = types.PubsubMessage(data=b'foo')\n\n # At first, there are no batches, so this should return a new batch\n # which is also saved to the object.\n ante = len(client._batches)\n batch = client.batch('topic_name', message, autocommit=False)\n assert len(client._batches) == ante + 1\n assert batch is client._batches['topic_name']\n\n # A subsequent request should return the same batch.\n batch2 = client.batch('topic_name', message, autocommit=False)\n assert batch is batch2\n assert batch2 is client._batches['topic_name']" ]
[ "0.6342829", "0.6229871", "0.59056467", "0.58580834", "0.5704401", "0.5654031", "0.5634934", "0.5585", "0.5473519", "0.53536516", "0.53245217", "0.53240734", "0.52888346", "0.5250839", "0.52369195", "0.5228739", "0.52019703", "0.5178603", "0.5165569", "0.5162922", "0.51555914", "0.5149599", "0.5143663", "0.5126864", "0.5030711", "0.49995616", "0.498898", "0.4967965", "0.4949232", "0.4944052" ]
0.6636962
0
This view returns the list of batches associated with a PUBMED use case in the English language
def get_auto_anno_PUBMED_batch_list(request): json_resp = {} usecase = request.GET.get('usecase') # print(usecase) languages = ['English', 'english'] use_obj = UseCase.objects.get(name=usecase) json_resp['batch_list'] = [] batch = Report.objects.filter(name=use_obj,language__in = languages,institute = 'PUBMED').values('batch') for el in batch: if el['batch'] not in json_resp['batch_list']: json_resp['batch_list'].append( el['batch']) # print(json_resp['batch_list']) json_resp['batch_list'] = sorted(json_resp['batch_list']) # print(json_resp) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_auto_anno_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n languages = ['English','english']\n batch = Report.objects.filter(name=use_obj,language__in = languages).exclude(institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def view_batches(request):\n\n template = 'batch_list.html'\n\n context = {\n 'invalid_due_date': request.GET.get('invalid_due_date')\n }\n\n try:\n get_batches(request, context)\n except Exception as e:\n context['error'] = '{} {}'.format(e, traceback.format_exc())\n\n # TODO: GO PAF - Start\n context['go_funder1'] = go_funder.objects.get(funder_code='1')\n context['go_funder2'] = go_funder.objects.get(funder_code='2')\n context['go_funder3'] = go_funder.objects.get(funder_code='3')\n context['go_funder4'] = go_funder.objects.get(funder_code='4')\n context['go_funder5'] = go_funder.objects.get(funder_code='5')\n # TODO: GO PAF - End\n\n return render(request, template, context)", "def get_batch_list(request):\n\n\n json_resp = {}\n json_resp['batch_list'] = []\n\n usecase = request.GET.get('usecase',None)\n # print(usecase)\n if usecase is None:\n batch = Report.objects.all().exclude(institute='PUBMED').values('batch')\n else:\n use_obj = UseCase.objects.get(name=usecase)\n batch = Report.objects.filter(name=use_obj).exclude(institute = 'PUBMED').values('batch')\n\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def view_batches() -> str:\r\n tank_possibilities = [\"Albert\", \"Brigadier\", \"Camilla\", \"Dylon\", \"Emily\",\r\n \"Florence\", \"Gertrude\", \"Harry\", \"R2D2\",\r\n \"No Tank Needed\"]\r\n return render_template(\"view_batches.html\",\r\n batch_output=current_brewings,\r\n tank_options=tank_possibilities)", "def list(self, request):\n urls = {\n 'msg': 'Must use bulk_by_sample to get SCCmec Primer hits',\n }\n\n return Response(urls)", "def pubmed_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n json_resp['annotated'] = 0\n json_resp['tot'] = 0\n json_resp['usecase'] = []\n languages = ['English','english']\n for el in usecases:\n use = el.name\n json_resp[use] = {}\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages,institute = 'PUBMED')\n for el in report:\n if el.batch not in batches:\n batches.append(el.batch)\n count_rep = report.count()\n\n if count_rep > 0:\n 
json_resp['usecase'].append(str(use))\n json_resp['tot'] = json_resp['tot'] + count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n json_resp['annotated'] = json_resp['annotated'] + groundTruths\n\n for batch in batches:\n\n json_resp[use][batch] = {}\n if batch == 'all' or batch is None:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED')\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED',batch = batch)\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch,tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print('risposta',json_resp)\n return JsonResponse(json_resp)", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response", "def data_en(request):\n files = myFile.objects.order_by('name')\n context = {'files' : files}\n return render(request, 'sacms/data_en.html', context)", "def _batch_thm_embedding(self, thms: List[Text]) -> List[THM_EMB_TYPE]:\n # The checkpoint should have exactly one value in this collection.\n thms = self._thm_string_for_predictions(thms)\n embeddings = self._sess.run(\n fetches=self._graph.get_collection('thm_net'),\n feed_dict={self._graph.get_collection('thm_string')[0]: thms})[0]\n return embeddings", "def get_partlist(self):\n\n # Longdale check\n longdale_user = user_is_ingroup(self.request, \"longdale_user\")\n # REtrieve the correct queryset, sorted on the correct levels\n if longdale_user:\n qs = [prt for prt in Part.objects.all().order_by('corpus__lng', 'corpus__name', 'name')]\n else:\n longdale = \"Longdale\"\n qs = [prt for prt in Part.objects.exclude(Q(name__istartswith=longdale)).order_by('corpus__lng', 'corpus__name', 'name')]\n # REtrieve the correct queryset, sorted on the correct levels\n # qs = [prt for prt in Part.objects.all().order_by('corpus__lng', 'corpus__name', 'name')]\n 
# Start the output\n html = []\n # Initialize the variables whose changes are important\n lVars = [\"corpus_lng\", \"corpus_name\", \"name\"]\n lFuns = [Part.language, [\"corpus\", \"name\"], [\"name\"]]\n # Get a list of items containing 'first' and 'last' information\n lItem = get_item_list(lVars, lFuns, qs)\n # REturn this list\n return lItem", "def show_messages():\n\n messages = Message.query.all()\n # translation_list = [\"\"]\n\n for message in messages:\n # message.translation gives list of objects. All the translation for the \n # language. Here assgin it to one trans_text based on user's language\n # selection. \n message.translation = Translation.query.filter_by(language=g.user.language, \n message_id=message.message_id).first()\n\n return render_template(\"messages.html\", messages=messages, user=g.user)", "def list_messages(self):", "def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })", "def word_sets_list(request):\n if request.method == 'POST':\n word_sets = request.user.wordset_set.filter(pk__in=request.POST.getlist('word_set'))\n if request.POST.get('submit_action') == 'delete':\n word_sets.delete()\n elif request.POST.get('submit_action') == 'merge':\n target = word_sets[0]\n target.merge(word_sets[1:])\n elif request.POST.get('submit_action') == 'download_txt':\n file_content = generate_txt_file(word_sets)\n response = HttpResponse(file_content, content_type='text/plain; charset=utf-8')\n response['Content-Disposition'] = 'attachment; filename=\"words.txt\"'\n return response\n elif request.POST.get('submit_action') == 'download_email':\n file_content = generate_txt_file(word_sets)\n email = EmailMessage('Zestawy słówek', 'Plik znajduje się w załączniku',\n EMAIL_HOST_USER, [request.user.email],\n attachments=[('words.txt', file_content,\n 'text/plain; charset=utf-8')])\n email.send()\n return redirect(word_sets_list)\n\n word_sets = request.user.wordset_set.all().order_by('-pub_date')\n return render(request, 'word_sets_list.html', {'word_sets': word_sets})", "def fetch_batch(self, phase):\n pass", "def retag_all_batches(apps, schema_editor):\n pass", "def batch(self):\n return self._client.batch()", "def index(self):\n try:\n query = h.eagerload_morpheme_language_model(Session.query(MorphemeLanguageModel))\n query = h.add_order_by(query, dict(request.GET), self.query_builder)\n return h.add_pagination(query, dict(request.GET))\n except Invalid, e:\n response.status_int = 400\n return {'errors': e.unpack_errors()}", "def translations_max_hops(request):\n list = []\n translations_max_hops = StaticMicrotask.objects.all().filter(scoring_done=True).order_by('hop_count')[:5]\n \n for i in translations_max_hops:\n dict = {}\n dict['username'] = i.user\n dict['original_sentence'] = i.original_sentence\n dict['translated_sentence'] = i.translated_sentence\n dict['hops'] = i.hop_count\n list.append(dict)\n \n data = {'translations_max_hops':list}\n return render_to_response('my_admin_tools/menu/translations_max_hops.html',data,context_instance=RequestContext(request))", "def loadbatch():\n s=\"select * from tblbatch where status='1'\"\n c.execute(s)\n data=c.fetchall()\n return data", "def fetch_messages(self, label, size, startwith=0):\n messages = Room.objects.get(label=label).messages.order_by('-created')[startwith:size]\n messages = MessageSerializers(messages, many=True)\n return messages.data", "def report_missing_auto(request):\n\n usecases = 
UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def listing_messages(listing_id):\n Listing.query.get_or_404(listing_id)\n\n auth_username = get_jwt_identity()\n all_messages = Message.find_by_listing(listing_id, auth_username)\n print(\"ALL MESSAGES: \", all_messages)\n serialized = [message.serialize() for message in all_messages]\n return (jsonify(messages=serialized), 200)", "def batches_list(project='batch', n_batches=5):\n\tbatches_links = [(project, i+1, f\"Batch {i+1}\") for i in range(n_batches)]\n\treturn batches_links", "def handler_unbabel_translations():\n jobs = []\n for item in db.stories.find({}):\n for lang in [l[0] for l in UNBABEL_API_LANGUAGES if l[0] != 'en']:\n uid = item.get('unbabel_uid_{}'.format(lang), None)\n if uid:\n jobs.append(get_unbabel_translation.s(uid, lang))\n job = group(jobs)\n job.apply_async()\n return job", "def get_batch(self):\n return self.batch", "def list(request):\n files = PoFile.objects.all()\n return render_to_response('poeditor/list.html', {\n 'files' : files,\n }, context_instance=RequestContext(request))", "def preprocess(self, requests):\r\n input_batch = None\r\n for idx, data in enumerate(requests):\r\n text = data.get(\"data\")\r\n if text is None:\r\n text = data.get(\"body\")\r\n input_text = text.decode('utf-8')\r\n\r\n ################input处理\r\n question = input_text\r\n entity = self.NER(question)\r\n print('your question:{}\\nentity:{}'.format(question,entity))\r\n ################处理完毕\r\n return [entity]", "def list(self, jobguid=\"\", executionparams=None):" ]
[ "0.65641147", "0.6093252", "0.59606016", "0.5839453", "0.55629605", "0.5310953", "0.5296694", "0.51423275", "0.5020829", "0.49711797", "0.49675223", "0.496288", "0.49492076", "0.48867327", "0.48834974", "0.48540273", "0.48291108", "0.4824442", "0.48152086", "0.48104975", "0.4788306", "0.47848827", "0.47738683", "0.47413623", "0.4741174", "0.4738398", "0.47152135", "0.47133538", "0.4711155", "0.47021148" ]
0.6816767
0
This view handles the download of one or more reports' ground truths (including the GT based on majority vote).
def download_all_reports(request): request_body_json = json.loads(request.body) report_list = request_body_json['report_list'] mode = request_body_json['format'] action = request_body_json['action'] annot = request_body_json['annotation_mode'] if annot == 'Manual': annot = 'Human' elif annot == 'Automatic': annot = 'Robot' try: response = HttpResponse(content_type='text/csv') resp = download_report_gt(report_list, action, annot, mode, response) if mode == 'biocxml' or mode == 'biocjson': return HttpResponse(resp, content_type='application/xml') elif mode == 'csv': return resp elif mode == 'json': return JsonResponse(resp) except Exception as e: print(e) json_error = {'error': e} return JsonResponse(json_error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_all_ground_truths(request):\n\n json_resp = {}\n json_resp['ground_truth'] = []\n cursor = connection.cursor()\n mode = request.GET.get('gt_mode',None)\n if mode is None:\n human = NameSpace.objects.get(ns_id = 'Human')\n robot = NameSpace.objects.get(ns_id = 'Robot')\n gt_human = GroundTruthLogFile.objects.filter(ns_id = human)\n agent = User.objects.get(ns_id = robot,username = 'Robot_user')\n gt_robot = GroundTruthLogFile.objects.filter(ns_id = robot,username = agent)\n for el in gt_human:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n for el in gt_robot:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n cursor.execute(\"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",['Robot','Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n elif mode.lower() == 'automatic':\n cursor.execute(\n \"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",\n ['Robot', 'Robot_user'])\n\n #CAMBIO\n # cursor.execute(\n # \"SELECT g.gt_json FROM ground_truth_log_file AS g INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.gt_type = gg.gt_type AND g.id_report = gg.id_report AND g.ns_id = gg.ns_id WHERE g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time\",\n # ['Robot', 'Robot_user', 'Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n return JsonResponse(json_resp)", "def download_ground_truths(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path1 = os.path.join(workpath, './static/temp/temp.csv')\n path2 = os.path.join(workpath, './static/BioC/temp_files/to_download.csv')\n if os.path.exists(path1):\n os.remove(path1)\n if os.path.exists(path2):\n os.remove(path2)\n username = request.session['username']\n inst = request.GET.get('institute',None)\n if inst == '':\n inst = None\n else:\n inst = str(inst)\n use = request.GET.get('usec',None)\n if use == '':\n use = None\n else:\n use = str(use)\n report_type = request.GET.get('report_type',None)\n if report_type == '':\n report_type = None\n annotation_mode = request.GET.get('mode',None)\n if annotation_mode == '':\n annotation_mode = None\n lang = request.GET.get('lang',None)\n if lang == '':\n lang = None\n else:\n lang = str(lang)\n batch = request.GET.get('batch','') # added 22/10/2021\n if batch == '' or batch == 'all':\n batch = None\n else:\n batch = int(batch)\n\n all = request.GET.get('all_gt',None)\n action = request.GET.get('action',None)\n format = request.GET.get('format',None)\n json_resp = {}\n json_resp['ground_truth'] = []\n if format == 'json' or all =='all' :\n json_resp = create_json_to_download(report_type,action,username,use,annotation_mode,inst,lang,all,batch)\n return JsonResponse(json_resp)\n\n elif format == 'csv':\n response = HttpResponse(content_type='text/csv')\n resp = create_csv_to_download(report_type,annotation_mode,username,use,inst,lang,action,response,batch)\n return resp\n\n elif 
format == 'biocxml':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n if report_type == 'pubmed':\n json_keys_to_display = ['year','authors','volume','journal']\n json_keys_to_ann = ['title','abstract']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'xml',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')\n\n elif format == 'biocjson':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'json',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')", "def get_gt_list(request):\n\n groundTruths = 0\n json_resp = {}\n username =request.GET.get('username',None)\n ins = request.GET.get('inst',None)\n lang = request.GET.get('lang',None)\n use = request.GET.get('use',None)\n action = request.GET.get('action',None)\n token = request.GET.get('token',None)\n reptype = request.GET.get('reptype',None)\n languages = ['English','english']\n annotation_mode = request.GET.get('annotation_mode',None)\n if ins == '':\n ins = None\n if use == '':\n use = None\n if lang == '':\n lang = None\n if reptype == '':\n reptype = 'reports'\n if token == 'all':\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n ns_human = NameSpace.objects.get(ns_id='Human')\n rob_user = User.objects.get(username='Robot_user',ns_id=ns_robot)\n list_gt = GroundTruthLogFile.objects.filter(username = rob_user).count() + GroundTruthLogFile.objects.filter(ns_id=ns_human).count()\n groundTruths = list_gt\n gt_rob = GroundTruthLogFile.objects.filter(ns_id=ns_robot,username = rob_user)\n\n i = 0\n # print(groundTruths)\n for el in gt_rob:\n gts = GroundTruthLogFile.objects.filter(ns_id=ns_robot,gt_type = el.gt_type,id_report = el.id_report_id,language = el.language).exclude(insertion_time = el.insertion_time)\n gts_count = gts.count()\n # print('count: '+str(i)+' '+str(gts.count()))\n i = i+1\n groundTruths = groundTruths + gts_count\n\n\n else:\n with connection.cursor() as cursor:\n if reptype == 'reports':\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s and r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n # CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type = gg.gt_type WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute != %s\",\n # [ins, use, lang, action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON 
g.id_report = r.id_report AND g.language = r.language WHERE r.institute = COALESCE(%s,r.institute) AND r.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute != %s AND username = COALESCE(%s,g.username)\",\n [ins, use, lang, action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n else:\n if annotation_mode == 'Human':\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s and r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), action, 'Human','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n elif annotation_mode == 'Robot':\n #CAMBIO\n # cursor.execute(\n # \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.ns_id = gg.ns_id AND g.gt_type = gg.gt_type WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time AND r.institute = %s\",\n # [use, tuple(languages), action, 'Robot', 'Robot_user', 'Robot_user','PUBMED'])\n # groundTruths = cursor.fetchone()[0]\n\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language WHERE r.name = %s AND r.language in %s AND g.gt_type = %s AND g.ns_id = %s AND g.username != %s AND r.institute = %s AND username = COALESCE(%s,g.username)\",\n [use, tuple(languages), action, 'Robot', 'Robot_user','PUBMED',username])\n groundTruths = cursor.fetchone()[0]\n\n\n\n\n json_resp['ground_truths'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_reports_from_action(request):\n\n username = request.session['username']\n mode1 = request.session['mode']\n mode = NameSpace.objects.get(ns_id=mode1)\n language = request.session['language']\n report_to_ret = []\n action = request.GET.get('action',None)\n user = User.objects.get(username = username,ns_id=mode)\n gt = GroundTruthLogFile.objects.filter(username = user,ns_id=mode, language = language, gt_type = action).order_by('-insertion_time')\n if gt.exists():\n if mode1 == 'Human':\n for element in gt:\n val = (element.id_report_id,element.insertion_time.replace(tzinfo=timezone.utc).astimezone(tz=None))\n report_to_ret.append(val)\n\n elif mode1 == 'Robot':\n user_rob = User.objects.get(username = 'Robot_user',ns_id = mode)\n for el in gt:\n # gt_rob = GroundTruthLogFile.objects.get(id_report = el.id_report_id, language = language, gt_type = el.gt_type,ns_id=mode, username=user_rob)\n # if el.insertion_time != gt_rob.insertion_time:\n val = (el.id_report_id, el.insertion_time.replace(tzinfo=timezone.utc).astimezone(tz=None))\n report_to_ret.append(val)\n\n jsonDict = {}\n jsonDict['reports_presence'] = report_to_ret\n # print(jsonDict)\n return JsonResponse(jsonDict)", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n 
count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def pubmed_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n json_resp['annotated'] = 0\n json_resp['tot'] = 0\n json_resp['usecase'] = []\n languages = ['English','english']\n for el in usecases:\n use = el.name\n json_resp[use] = {}\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages,institute = 'PUBMED')\n for el in report:\n if el.batch not in batches:\n batches.append(el.batch)\n count_rep = report.count()\n\n if count_rep > 0:\n json_resp['usecase'].append(str(use))\n json_resp['tot'] = json_resp['tot'] + count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n json_resp['annotated'] = json_resp['annotated'] + groundTruths\n\n for batch in batches:\n\n json_resp[use][batch] = {}\n if batch == 'all' or batch is None:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED')\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n 
json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED',batch = batch)\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch,tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print('risposta',json_resp)\n return JsonResponse(json_resp)", "def get_data(request):\n\n json_resp = {}\n # reports = Report.objects.filter(name = UseCase.objects.get(name=request.session['usecase']),institute = request.session['institute'],language = request.session['language'])\n\n json_resp['reports'] = []\n institute = request.GET.get('institute',request.session['institute'])\n usecase = request.GET.get('usecase',request.session['usecase'])\n print(usecase)\n language = request.GET.get('language',request.session['language'])\n ns_human = NameSpace.objects.get(ns_id='Human')\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n user_robot = User.objects.get(username='Robot_user', ns_id=ns_robot)\n # usec = UseCase.objects.get(name = usecase)\n # reports = Report.objects.filter(name = usec,institute = institute, language = language).values('id_report')\n # gt_report = GroundTruthLogFile.objects.filter(language = language).exclude(username = user_robot,id_report__in=reports).order_by('id_report').distinct('id_report')\n cursor = connection.cursor()\n cursor.execute(\"SELECT r.id_report,r.language,r.report_json,r.name,r.institute,r.batch,COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND r.institute = %s AND r.language = %s AND g.username != %s GROUP BY (r.id_report,r.language,r.report_json,r.name,r.institute,r.batch)\",[usecase,institute,language,'Robot_user'])\n gt_report_ids = []\n indice = 0\n st = time.time()\n for el in cursor.fetchall():\n\n # report = Report.objects.get(language = language, id_report = el.id_report_id)\n gt_report_ids.append(el[0])\n # print(str(indice))\n indice +=1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 1\n gt_robot = 0\n\n rep = json.loads(el[2])\n new_rep = {}\n for key in rep.keys():\n nkey = key+ '_0'\n new_rep[nkey] = rep[key]\n\n total = el[6]\n\n new_rep['usecase'] = usecase\n new_rep['id_report_not_hashed'] = rep.get('report_id',el[0])\n new_rep['id_report'] = el[0]\n new_rep['institute'] = institute\n new_rep['language'] = language\n new_rep['batch'] = el[5]\n\n json_resp['reports'].append({'total':total, 'report':new_rep,'id_report':el[0], 'language':language})\n\n usec = UseCase.objects.get(name = usecase)\n reports = Report.objects.filter(institute = institute,language = language,name = usec).exclude(id_report__in=gt_report_ids)\n # print(reports.count())\n indice = 0\n st = time.time()\n for el in reports:\n report = el\n # print(str(indice))\n indice += 1\n 
# report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 0\n gt_robot = 0\n\n rep = report.report_json\n new_rep = {}\n for key in rep.keys():\n nkey = key + '_0'\n new_rep[nkey] = rep[key]\n\n total = gt_human + gt_robot\n\n new_rep['usecase'] = report.name_id\n new_rep['id_report_not_hashed'] = rep.get('report_id', report.id_report)\n new_rep['id_report'] = report.id_report\n new_rep['institute'] = report.institute\n new_rep['language'] = report.language\n new_rep['batch'] = report.batch\n\n json_resp['reports'].append(\n {'total': total, 'report': new_rep, 'id_report': report.id_report, 'language': report.language})\n # print('elaboro1',str(end1-st1))\n tot = time.time()\n print('totale',str(tot-st))\n\n return JsonResponse(json_resp,safe=False)", "def get_presence_robot_user(request):\n\n id_report = request.GET.get('id_report', None)\n language = request.GET.get('language', None)\n use = request.GET.get('usecase', None)\n rep = request.GET.get('report_type', None)\n json_resp = {'auto_annotation_count': 0}\n cursor = connection.cursor()\n\n reports_list = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports_list = request_body_json['reports']\n\n if id_report is not None and language is not None:\n\n usecase = Report.objects.get(id_report=id_report, language=language)\n use = usecase.name_id\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s\",\n ['Robot_user', str(use)])\n ans = cursor.fetchone()[0]\n json_resp = {'auto_annotation_count': (ans)}\n\n elif use is not None and rep is not None:\n # print(rep)\n if rep == 'reports':\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s and r.name = %s and r.institute != %s\",\n ['Robot', 'Robot_user', str(use), 'PUBMED'])\n\n elif rep == 'pubmed':\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s and r.institute = %s\",\n ['Robot_user', str(use), 'PUBMED'])\n\n ans = cursor.fetchone()[0]\n if ans > 0:\n json_resp = {'auto_annotation_count': ans}\n # print(json_resp)\n elif reports_list is not None:\n report_list = json.loads(reports_list)\n # print(report_list)\n usecase_list = []\n for rep in report_list:\n\n if rep['usecase'] not in usecase_list:\n usecase_list.append(rep['usecase'])\n for u in usecase_list:\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file as g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.username = %s and r.name = %s\",\n ['Robot_user', str(u)])\n ans = cursor.fetchone()[0]\n if ans > 0:\n json_resp = {'auto_annotation_count': ans}\n else:\n json_resp = {'auto_annotation_count': 0}\n\n elif use is None and reports_list is None and id_report is None and language is None:\n robot = NameSpace.objects.get(ns_id='Robot')\n gt = GroundTruthLogFile.objects.filter(ns_id=robot)\n json_resp = {'auto_annotation_count': gt.count()}\n\n print(json_resp)\n return JsonResponse(json_resp)", "def test_submission_download(client, two_challenge_sets):\n submission = 
SubmissionFactory(\n phase=two_challenge_sets.challenge_set_1.challenge.phase_set.get(),\n creator=two_challenge_sets.challenge_set_1.participant,\n )\n\n tests = [\n # (\n # image response + annotation response not test ground truth,\n # user\n # )\n (403, None),\n (403, two_challenge_sets.challenge_set_1.non_participant),\n (302, two_challenge_sets.challenge_set_1.participant),\n (403, two_challenge_sets.challenge_set_1.participant1),\n (302, two_challenge_sets.challenge_set_1.creator),\n (302, two_challenge_sets.challenge_set_1.admin),\n (403, two_challenge_sets.challenge_set_2.non_participant),\n (403, two_challenge_sets.challenge_set_2.participant),\n (403, two_challenge_sets.challenge_set_2.participant1),\n (403, two_challenge_sets.challenge_set_2.creator),\n (403, two_challenge_sets.challenge_set_2.admin),\n (302, two_challenge_sets.admin12),\n (403, two_challenge_sets.participant12),\n (302, two_challenge_sets.admin1participant2),\n ]\n\n for test in tests:\n response = get_view_for_user(\n url=submission.predictions_file.url, client=client, user=test[1]\n )\n assert response.status_code == test[0]", "def test_submission_download(client, two_challenge_sets):\n submission = SubmissionFactory(\n challenge=two_challenge_sets.challenge_set_1.challenge,\n creator=two_challenge_sets.challenge_set_1.participant,\n )\n\n tests = [\n # (\n # image response + annotation response not test ground truth,\n # user\n # )\n (403, None),\n (403, two_challenge_sets.challenge_set_1.non_participant),\n (403, two_challenge_sets.challenge_set_1.participant),\n (403, two_challenge_sets.challenge_set_1.participant1),\n (302, two_challenge_sets.challenge_set_1.creator),\n (302, two_challenge_sets.challenge_set_1.admin),\n (403, two_challenge_sets.challenge_set_2.non_participant),\n (403, two_challenge_sets.challenge_set_2.participant),\n (403, two_challenge_sets.challenge_set_2.participant1),\n (403, two_challenge_sets.challenge_set_2.creator),\n (403, two_challenge_sets.challenge_set_2.admin),\n (302, two_challenge_sets.admin12),\n (403, two_challenge_sets.participant12),\n (302, two_challenge_sets.admin1participant2),\n ]\n\n for test in tests:\n response = get_view_for_user(\n url=submission.predictions_file.url, client=client, user=test[1]\n )\n assert response.status_code == test[0]", "def download_answers(request: HttpRequest):\n\n # creation of the basic list to print in download_answers.html\n\n activities = Activity.objects.exclude(activity_type='course')\\\n .filter(id__in=Answer.objects.values_list(\"activity_id\", flat=True)\n .distinct()).exclude(id=0).values_list(\"name\", \"id\", \"parent_id\").order_by('id')\n sizeMaxActivity = max(map(lambda activity: len(activity[0]), activities))//1.3\n\n pls = PL.objects.filter(id__in=Answer.objects.values_list(\n \"pl_id\", flat=True).distinct()).values_list(\"name\", \"id\").order_by('id')\n sizeMaxPl = max(map(lambda pl: len(pl[0]), pls))//1.3\n\n courses = Activity.objects.values_list(\n \"name\", \"id\").filter(activity_type=\"course\").order_by('id')\n sizeMaxCourse = max(map(lambda course: len(course[0]), courses))//1.3\n\n tags = set()\n tags.update(['tag1', 'tag2', 'tag3', 'tag4'])\n\n parents = {id: Activity.objects.values_list('parent_id', 'name').get(\n id=id) for id in Activity.objects.values_list(\n \"parent_id\", flat=True).distinct().filter(parent_id__isnull=False)}\n\n students = list(map(lambda student: (student[0], split(\n \"[/_]\", student[1])[1]), Profile.objects.filter(role=4).values_list(\"user_id\", \"avatar\")))\n teachers = 
list(map(lambda teacher: (teacher[0], split(\n \"[/_]\", teacher[1])[1]), Profile.objects.filter(role=2).values_list(\"user_id\", \"avatar\")))\n\n if not request.user.is_staff:\n raise PermissionDenied\n if \"start\" in request.GET or \"end\" in request.GET:\n sql_request = Q() # creation of an emtpy request that we will feed gradually\n \n answers = Answer.objects.select_related(\"activity\", \"pl\", \"user\")\n\n # the differents boolean follow down, help us to know if elements\n # are present in the request and to calculate if\n # they're required in the final result or not\n \n startInRequest = \"start\" in request.GET and request.GET[\"start\"] != ''\n endInRequest = \"end\" in request.GET and request.GET[\"end\"] != ''\n\n plInRequest = \"pl\" in request.GET and request.GET['pl'].isnumeric() or \\\n \"pl2\" in request.GET and request.GET['pl2'].isnumeric()\n activityInRequest = \"activity\" in request.GET and request.GET['activity'].isnumeric() or \\\n \"activity2\" in request.GET and request.GET['activity2'].isnumeric()\n courseInRequest = \"course\" in request.GET and request.GET[\"course\"].isnumeric()\n\n maxInRequest = \"max\" in request.GET and request.GET[\"max\"].isnumeric()\n minInRequest = \"min\" in request.GET and request.GET[\"min\"].isnumeric()\n\n actifInRequest = \"actif\" in request.GET and request.GET[\"actif\"] == \"on\"\n\n limit = \"limit\" in request.GET and request.GET[\"limit\"].isnumeric()\n login = \"teacherLogin\" in request.GET and request.GET[\"teacherLogin\"].isnumeric() or \\\n \"studentLogin\" in request.GET and request.GET[\"studentLogin\"].isnumeric()\n\n\n # the lines below are adding the potential filters to sql_request\n\n sql_request = filter_by_date(startInRequest, endInRequest, request, sql_request)\n \n try:\n sql_request = filter_by_pl(plInRequest, request, sql_request)\n except (ValueError, PL.DoesNotExist):\n return HttpResponseNotFound(\"PL does not exist\")\n\n try:\n sql_request = filter_by_activity(activityInRequest, request, sql_request)\n except (ValueError, Activity.DoesNotExist):\n return HttpResponseNotFound(\"Activity does not exist\")\n\n try:\n sql_request = filter_by_course(courseInRequest, request, sql_request, parents)\n except (Activity.DoesNotExist, ValueError):\n return HttpResponseNotFound(\"Course does not exist\")\n\n \n try:\n sql_request = filter_by_login_or_role(login, request, sql_request, teachers, students)\n except (ValueError, Profile.DoesNotExist):\n return HttpResponseNotFound(\"User does not exist\")\n \n \n\n if \"exclude_grade\" in request.GET and request.GET[\"exclude_grade\"] == \"on\":\n sql_request &= ~Q(grade=None)\n\n # tags are complicated to retrieve so potentially to review with PLaTon\n # if \"tags[]\" in request.GET :\n # answers = answers.filter( tag__in = request.GET.getlist(\"tags[]\"))\n\n sql_request = filter_by_grade(minInRequest, maxInRequest, request, sql_request)\n \n if actifInRequest:\n sql_request &= Q(activity_id__in=Activity.objects.select_related(\n \"id\").all().filter(open=True).values_list(\"id\", flat=True))\n\n answers = answers.filter(sql_request) # application of the differents filter\n\n if answers.count() == 0:\n return HttpResponseBadRequest(\n \"There is no informations in our database linked to your request\", status=400)\n \n if limit:\n answers = answers[:int(request.GET[\"limit\"])]\n\n # creation of a dictionnary which will have the key 'id'\n # equal to the answer's id, and an other dictionnary in value\n # that will stock the informations about the user and the 
exercise\n # that he submits\n dic = dict()\n slice_size = 1_000\n\n\n for i in range(0, answers.count(), slice_size):\n for answer in answers[i: i + slice_size]:\n dic[answer.id] = {\n \"user\": answer.user.get_username(),\n \"seed\": answer.seed,\n \"date\": str(answer.date),\n \"grade\": answer.grade,\n \"pl_id\": answer.pl.id,\n \"pl_name\": answer.pl.name,\n \"include_answers\": answer.answers if \"include_answers\" in request.GET else None,\n \"enseignement\": PL.objects.all().values_list(\n \"rel_path\", flat=True).get(id=answer.pl.id).split('/')[0],\n \"tag\": answer.pl.json[\"tag\"].split(\"|\")\n if \"include_tag\" in request.GET and \"tag\" in answer.pl.json else None,\n }\n \n try:\n if answer.activity is None:\n answer.activity = Activity.objects.get(pl=answer.pl.id)\n except Activity.DoesNotExist:\n for value in [\"activity_id\", \"activity_name\", \"open\", \"cours\"]:\n dic[answer.id][value] = None\n else:\n dic[answer.id][\"activity_id\"] = answer.activity.id\n dic[answer.id][\"activity_name\"] = answer.activity.name\n dic[answer.id][\"open\"] = answer.activity.open\n course = find_course(parents, answer.activity.parent_id, answer.activity.name)\n dic[answer.id][\"course\"] = course[1]\n dic[answer.id][\"course_id\"] = course[0]\n \n stream = io.StringIO(json.dumps(dic))\n response = StreamingHttpResponse(stream, content_type=\"application/json\")\n response['Content-Disposition'] = 'attachment;filename=answers.json'\n return response\n # elif(\"course\" in request.GET):\n # # here receive of a requeste to filter the activity list if a course is selected\n # if(\"course\" in request.GET and request.GET['course'].isnumeric()):\n # course_id = int(request.GET['course'])\n # activities = filter((lambda activity:\n # find_course(parents, activity[2], activity[0])[0] == course_id\n # if activity[2] is not None else False ), activities)\n \n # elif(\"activity\" in request.GET and request.GET['activity'].isnumeric()):\n # pass\n\n \n \n return render(\n request,\n \"playexo/download_answers.html\",\n {\n 'activities': list(activities),\n 'pls': pls,\n 'students': students,\n 'teachers': teachers,\n 'tags': tags,\n 'courses': courses,\n 'sizeMaxActivity': sizeMaxActivity,\n 'sizeMaxCourse': sizeMaxCourse,\n 'sizeMaxPL': sizeMaxPl\n }\n )", "def survivor_reports(request):\n\n if request.method == 'GET':\n data = {}\n total_survivors = infected = non_infected = water = food = medication = ammunition = pointslost = 0\n for i in Survivor.objects.all():\n total_survivors += 1\n if i.infected is False:\n non_infected += 1\n water += i.water\n food += i.food\n medication += i.medication\n ammunition += i.ammunition\n if i.infected is True:\n infected += 1\n pointslost += (4 * i.water)\n pointslost += (3 * i.food)\n pointslost += (2 * i.medication)\n pointslost += (1 * i.ammunition)\n\n if total_survivors != 0:\n data['Percentage of infected survivors'] = str(round((infected/total_survivors), 2) * 100) + '%'\n data['Percentage of non-infected survivors'] = str(round((non_infected/total_survivors), 2) * 100) + '%'\n data['Average amount of water by survivor'] = round(water/non_infected,1)\n data['Average amount of food by survivor'] = round(food/non_infected,1)\n data['Average amount of medication by survivor'] = round(medication/non_infected,1)\n data['Average amount of ammunition by survivor'] = round(ammunition/non_infected,1)\n data['Points lost because of infected survivor'] = pointslost\n else:\n data['Percentage of infected survivors'] = '0.0%'\n data['Percentage of non-infected 
survivors'] = '0.0%'\n data['Average amount of water by survivor'] = 0\n data['Average amount of food by survivor'] = 0\n data['Average amount of medication by survivor'] = 0\n data['Average amount of ammunition by survivor'] = 0\n data['Points lost because of infected survivor'] = 0\n return Response(data, status=status.HTTP_200_OK)", "def get_test_report(request, **kwargs): \n\t\n #Fetching the details of the selected event\n test_list = sidecar.events.test_report(project_id=kwargs['test_id'])\n report_list = []\n\t\n #Creating the list for the report\n for tests in test_list._logs:\n\tjson_test = json.loads(tests['data'])\n\ttests['success'] = json_test['success'] \n\ttests['time'] = json_test['time']\n\ttests['test_cases'] = json_test['test_cases']\n\treport_list.append(tests)\n\n #Making the context and sending to template\n context = {\n \"page_title\": _(\"Test Results\"),\n \"tests\": report_list\n }\n return render(request, 'rally_dashboard/events/test_detail.html', context)", "def check_gt_existence(request):\n\n\n action = request.GET.get('action',None)\n id_report = request.GET.get('id_report',None)\n language = request.GET.get('language',None)\n username = request.session['username']\n mode = request.session['mode']\n cursor = connection.cursor()\n cursor.execute(\"SELECT COUNT(*) FROM ground_truth_log_file where ns_id = %s and username = %s and language = %s and id_report = %s and gt_type = %s\",[mode,username,language,id_report,action])\n count = cursor.fetchone()[0]\n json_resp = {'count':count}\n return JsonResponse(json_resp)", "def check_auto_presence_for_configuration(request):\n\n report_type = request.GET.get('report_type',None)\n usecase = request.GET.get('usecase',None)\n language = request.GET.get('language',None)\n institute = request.GET.get('institute',None)\n batch = request.GET.get('batch',None)\n languages = ['English','english']\n # print('BATCH',str(batch))\n use = UseCase.objects.get(name=usecase)\n json_resp = {}\n mode = NameSpace.objects.get(ns_id = 'Robot')\n user = User.objects.get(ns_id = mode, username='Robot_user')\n\n if report_type == 'pubmed':\n cursor = connection.cursor()\n cursor.execute(\"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute=%s AND r.language in %s AND r.name = %s AND r.batch = %s\",['Robot','Robot_user','PUBMED',tuple(languages),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n\n elif report_type == 'reports':\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM ground_truth_log_file AS g INNER JOIN report AS r ON r.id_report = g.id_report AND r.language = g.language WHERE g.ns_id = %s AND g.username = %s AND r.institute!=%s AND r.institute = %s AND r.language = %s AND r.name = %s AND r.batch = %s\",\n ['Robot', 'Robot_user', 'PUBMED',str(institute),str(language),str(usecase),int(batch)])\n reports = cursor.fetchone()[0]\n json_resp['count'] = reports\n print(json_resp)\n return JsonResponse(json_resp)", "def download_report(self, keywords, date='all', geo='all', geor='all', graph = 'all_csv', sort=0, scale=0, sa='N'):\n params = urllib.urlencode({\n 'q': \",\".join(keywords),\n 'date': date,\n 'graph': graph,\n 'geo': geo,\n 'geor': geor,\n 'sort': str(sort),\n 'scale': str(scale),\n 'sa': sa\n }) \n self.raw_data = self.opener.open('http://www.google.com/trends/viz?' 
+ params).read()[2::2]\n self._build_header_dictionary()", "def get_gt_action_based(request):\n\n action = request.GET.get('action',None)\n ns = request.GET.get('annotation_mode',None)\n\n if ns == 'Manual':\n ns = 'Human'\n elif ns == 'Automatic':\n ns = 'Robot'\n gts = GroundTruthLogFile.objects.filter(gt_type=action)\n\n if ns is not None:\n ns_id = NameSpace.objects.get(ns_id = ns)\n gts = GroundTruthLogFile.objects.filter(ns_id = ns_id, gt_type = action)\n\n json_resp = {'count':gts.count()}\n return JsonResponse(json_resp)", "def dwn_all_saved_results(request):\n \n sources = []\n for i in Source.objects.filter(user=request.user):\n sources.append((i.source_id, i.datetime_extracted.strftime('%d/%m/%Y %H:%M'), i.source))\n \n data = []\n for s, timee, s_name in sources:\n objs = ExtractedRelation.objects.filter(source=s)\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, timee, s_name, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Extraction Time', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/all_analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/all_analysis_results.csv','rb'))", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def test_get_all_reports(self):\n client = APIClient()\n response = client.get(reverse(\n 'articles:get-all-reports'),**self.header_user2)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def show_campaigns(request, utm_campaign, **kwargs):\n \n err_msg = ''\n try:\n err_msg = str(kwargs['kwargs']['err_msg'])\n except:\n pass\n \n test_type_override = ''\n try:\n test_type_override = MySQLdb._mysql.escape_string(request.POST['test_type_override'])\n \n if test_type_override == 'Banner':\n test_type_var = FDH._TESTTYPE_BANNER_\n elif test_type_override == 'Landing Page':\n test_type_var = FDH._TESTTYPE_LP_\n elif test_type_override == 'Banner and LP':\n test_type_var = FDH._TESTTYPE_BANNER_LP_\n \n except:\n test_type_var = ''\n pass\n \n try:\n \"\"\" Find the earliest and latest page views for a given campaign \"\"\"\n lptl = DL.LandingPageTableLoader()\n ccrml = DL.CiviCRMLoader()\n \n start_time = ccrml.get_earliest_donation(utm_campaign)\n end_time = ccrml.get_latest_donation(utm_campaign)\n \n one_step = lptl.is_one_step(start_time, end_time, utm_campaign) \n \n if not(one_step): \n start_time = lptl.get_earliest_campaign_view(utm_campaign)\n end_time = lptl.get_latest_campaign_view(utm_campaign) \n\n interval = 1\n \n \"\"\" Create reporting object to retrieve campaign data and write plots to image repo on disk \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n \n \"\"\" Produce analysis on the campaign view data \"\"\" \n ir.run(start_time, end_time, interval, 'views', 
utm_campaign, {}, one_step=one_step)\n \n \"\"\" \n ESTIMATE THE START AND END TIME OF THE CAMPAIGN\n ===============================================\n \n Search for the first instance when more than 10 views are observed over a sampling period\n \"\"\"\n \n col_names = ir._data_loader_.get_column_names()\n \n views_index = col_names.index('views')\n ts_index = col_names.index('ts')\n \n row_list = list(ir._data_loader_._results_) # copy the query results\n for row in row_list:\n if row[views_index] > 100:\n start_time_est = row[ts_index]\n break\n row_list.reverse()\n for row in row_list:\n if row[views_index] > 100:\n end_time_est = row[ts_index]\n break\n \n \n \"\"\"\n BUILD THE VISUALIZATION FOR THE TEST VIEWS OF THIS CAMAPAIGN\n ============================================================ \n \"\"\"\n \n \"\"\" Read the test name \"\"\"\n ttl = DL.TestTableLoader()\n row = ttl.get_test_row(utm_campaign)\n test_name = ttl.get_test_field(row ,'test_name')\n \n \"\"\" Regenerate the data using the estimated start and end times \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n ir.run(start_time_est, end_time_est, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" Determine the type of test (if not overridden) and retrieve the artifacts \"\"\"\n test_type, artifact_name_list = FDH.get_test_type(utm_campaign, start_time, end_time, DL.CampaignReportingLoader(query_type=''), test_type_var)\n \n return render_to_response('campaigns/show_campaigns.html', {'utm_campaign' : utm_campaign, 'test_name' : test_name, 'start_time' : start_time_est, 'end_time' : end_time_est, 'one_step' : one_step, \\\n 'artifacts' : artifact_name_list, 'test_type' : test_type, 'err_msg' : err_msg}, context_instance=RequestContext(request)) \n\n except Exception as inst:\n \n logging.error('Failed to correctly produce campaign diagnostics.')\n logging.error(type(inst))\n logging.error(inst.args)\n logging.error(inst)\n \n \"\"\" Return to the index page with an error \"\"\"\n err_msg = 'There is insufficient data to analyze this campaign: %s. Check to see if the <a href=\"/LML/\">impressions have been loaded</a>. 
<br><br>ERROR:<br><br>%s' % (utm_campaign, inst.__str__())\n \n return index(request, kwargs={'err_msg' : err_msg})", "def download_report():\n entities = get_names()\n save_csv(entities)", "def getReport(request):\n\n\t#parameters needed for different REST API's\n\tparams = {\n\t\t'rid':-1,\n\t\t'year':-1,\n\t\t'con_num':-1,\n\t\t'assign_num':-1,\n\t\t'item_num':-1,\n\t\t'wtype': -1,\n\t\t'payno': -1,\n\t\t'snap': 0, #default is 0 for snapshots (for now)\n\t\t'issue_date': -1,\n\t}\n\n\t#loop over the parameters and set them if they appear in the api url\n\tfor p in params:\n\t\tif p in request.GET:\n\t\t\tparams[p] = request.GET[p]\n\n\n\t#get the request session and load data\n\ts = requests.Session()\n\tif not isinstance(rgen.ReportGenerator.get_url(params), dict):\n\t\tresponse = s.get(rgen.ReportGenerator.get_url(params))\n\n\t\t#set the iterator and the content\n\t\tit = json.loads(response.content)\n\t\tcontent = json.loads(response.content)\n\t\t\n\t\t#while a next page exists, parse the api\n\t\tpageNum = 1\n\t\twhile \"next\" in it:\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params) + '?page=' + str(pageNum))\n\t\t\tit = json.loads(response.content)\n\t\t\tcontent[\"items\"].extend(it[\"items\"])\n\t\t\tpageNum += 1\n\n\telse:\n\t\t#if the url is a list\n\t\tcontent = {}\n\t\tfor part in rgen.ReportGenerator.get_url(params):\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part])\n\t\t\tit = json.loads(response.content)\n\t\t\t#content = {\"part1\":{\"items\":[]}, \"part2\":{\"items\":[]}, \"part3\":{\"items\":[]}}\n\t\t\t\n\t\t\tcontent[part] = {}\n\t\t\tcontent[part][\"items\"] = []\n\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\n\t\t\tpageNum = 1\n\t\t\twhile \"next\" in it:\n\t\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part] + '?page=' + str(pageNum))\n\t\t\t\tit = json.loads(response.content)\n\t\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\t\t\t\tpageNum += 1\n\t\n\t#set the file object to be returned as a download\n\tfile = HttpResponse(rgen.ReportGenerator.formExcel(content, params), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n\tif params[\"rid\"] == '70':\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + ' No.' 
+ params['issue_date'] + '.xlsx'\n\telse:\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + '.xlsx'\n\ts.close()\n\treturn file", "def download_report(ctx, report_ids):\n client = ctx.obj[\"client\"]\n for report_id in report_ids:\n try:\n report = Report(id=report_id)\n client.wait(report)\n client.results(report)\n result = client.download_results(report)\n click.secho(f\"Success {report.id}: {result}\", fg=\"green\")\n except Exception:\n traceback.print_exc()\n click.secho(f\"Failed {report_id}\", fg=\"red\")", "def admin_evaluate_reports(request):\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n \n admin = auth.get_user(request)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelmember in PanelMember.objects.filter(Q(status = 'F') | Q(status = 'Z')).filter(feedback_at = 'A'):\n thesis = panelmember.thesis\n dict = {}\n dict['title'] = thesis.title\n dict['student_full_name'] = thesis.student.first_name + \" \" + thesis.student.last_name\n dict['report'] = panelmember.feedback_with_referee_details\n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n dict['referee_name'] = panelmember.referee.user.first_name + ' ' + panelmember.referee.user.last_name\n dict['referee_id'] = panelmember.referee.id\n all_thesis.append(dict)\n \n return render(request, 'app/admin/view_finalReports.html', {\n 'title':'Final Reports',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n })\n elif request.method == \"POST\":\n form = PanelMember2Form(request.POST, request.FILES)\n \n\n thesis = int(request.POST['thesis'])\n referee = int(request.POST['referee'])\n \n if form.is_valid() and validate_pdf(request.FILES['feedback_without_referee_details']):\n referee = Referee.objects.get(id = referee)\n thesis = Thesis.objects.get(id = thesis)\n panelmember = PanelMember.objects.get(thesis = thesis,referee = referee)\n panelmember.feedback_at = 'G'\n \n time = str(datetime.datetime.now())\n timestamp = ''\n for i in time:\n if not (i == ':' or i == '-'):\n timestamp += i\n request.FILES['feedback_without_referee_details'].name = \"Evaluation_Report_\"+thesis.student.user.username+\"_\"+timestamp+\".pdf\"\n \n panelmember.feedback_without_referee_details = request.FILES['feedback_without_referee_details']\n panelmember.save()\n\n total_feedbacks = PanelMember.objects.filter(thesis = thesis, feedback_at = 'G').count()\n if total_feedbacks == thesis.indian_referees_required + thesis.foreign_referees_required:\n _update_student_status(thesis, STATUS_ID_THESIS_FEEDBACKS_RECEIVED) \n\n # send notification to all guide\n send_notification_to_all_guides(admin, thesis, \"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name)\n # email\n subject = \"[Feed Back reports] of the Thesis titled\" + thesis.title\n content = \"<br>Dear Sir/Madam,</br><br></br><br></br>\"+\"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name +'. 
Please Check the PhD Portal for more details.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n \n email = []\n\n for thesisGuide in ThesisGuide.objects.filter(thesis = thesis):\n receiver = Faculty.objects.get(user = thesisGuide.guide.user)\n email.append(receiver.email)\n\n send_email_task.delay(email, subject, content)\n return redirect(reverse(admin_evaluate_reports))\n else:\n return redirect(reverse(URL_BAD_REQUEST))\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def get_last_gt(request):\n\n username = request.session['username']\n mode1 = request.session['mode']\n mode = NameSpace.objects.get(ns_id=mode1)\n language = request.session['language']\n usecase = request.session['usecase']\n institute = request.session['institute']\n batch = request.session['batch']\n jsonDict = {}\n token = request.GET.get('configure',None)\n if token is None:\n gt_json = get_last_groundtruth(username,None,None,None,mode,batch)\n else:\n gt_json = get_last_groundtruth(username,usecase,language,institute,mode,batch)\n\n if gt_json is None:\n jsonDict['groundtruth'] = ''\n jsonDict['report'] = ''\n jsonDict['report_id'] = ''\n else:\n jsonDict['groundtruth'] = gt_json\n id_report = gt_json['id_report']\n language = gt_json['language']\n report = Report.objects.get(id_report=id_report, language=language)\n jsonDict['report'] = report.report_json\n jsonDict['report_id'] = id_report\n return JsonResponse(jsonDict)", "def check_presence_exa_conc_lab(request):\n\n # reports = request.GET.get('reports',None)\n rep = request.GET.get('id_report',None)\n language = request.GET.get('language',None)\n usecase = request.GET.get('usecase',None)\n reports = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports = request_body_json['reports']\n if rep is not None and language is not None:\n report = Report.objects.get(id_report = rep,language = language)\n usecase = report.name_id\n # print(usecase)\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n print('bool',bool)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n elif usecase is not None:\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n\n # labels = []\n # concepts = []\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n # labels.append(bool[0])\n # concepts.append(bool[1])\n # if False in labels:\n # json_resp['labels'] = False\n # else:\n # json_resp['labels'] = True\n #\n # if False in concepts:\n # json_resp['concepts'] = False\n # else:\n # json_resp['concepts'] = True\n elif reports is not None:\n report_list = json.loads(reports)\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n usecases = []\n for rep in report_list:\n # rep = json.loads(rep)\n if rep['usecase'] not in usecases:\n usecases.append(rep['usecase'])\n labels = []\n concepts = []\n for u in usecases:\n # print(u)\n json_resp = {}\n if u in ['colon', 'uterine cervix', 'lung']:\n bool = check_exa_lab_conc_only(u)\n else:\n bool = [False, False]\n\n labels.append(bool[0])\n concepts.append(bool[1])\n if False in labels:\n json_resp['labels'] = False\n else:\n json_resp['labels'] = True\n\n if False in concepts:\n json_resp['concepts'] = False\n else:\n json_resp['concepts'] = True\n\n else:\n json_resp={'error':'a usecase is 
needed'}\n\n print(json_resp)\n return JsonResponse(json_resp)", "def get_user_ground_truth(request):\n\n user = request.GET.get('user',None)\n action = request.GET.get('action',None)\n mode = request.GET.get('mode',None)\n report = request.GET.get('report',None)\n language = request.GET.get('language',request.session['language'])\n mode_obj = NameSpace.objects.get(ns_id=mode)\n report = Report.objects.get(id_report = report, language = language)\n gt = get_user_gt(user,mode_obj,report,language,action)\n return JsonResponse(gt)", "def results_overview():\n\n if request.method == \"POST\":\n # clean out all unwanted storage\n if _pool:\n _pool.terminate()\n del _drivers[:]\n \n return redirect(url_for('upload.upload'))\n\n # GET\n \n # Captures the global drivers variable\n global _drivers\n\n processType = g.processType\n coreFactor = g.coreFactor\n\n if processType == 'parallel':\n current_app.logger.info(time.ctime() + \"\\tProcess pool with %s processes initialized for execution\" % coreFactor)\n results = _pool.map(_get_result, _drivers)\n else:\n results = [_get_result(driver) for driver in _drivers]\n\n return render_template('checks/results_overview.html', results=results)", "def test_output_download(client):\n user1, user2 = UserFactory(), UserFactory()\n job = AlgorithmJobFactory(creator=user1)\n\n detection_interface = ComponentInterface(\n store_in_database=False,\n relative_path=\"detection_results.json\",\n slug=\"detection-results\",\n title=\"Detection Results\",\n kind=ComponentInterface.Kind.ANY,\n )\n detection_interface.save()\n job.algorithm_image.algorithm.outputs.add(detection_interface)\n\n output_civ = ComponentInterfaceValue.objects.create(\n interface=detection_interface\n )\n detection = {\n \"detected points\": [\n {\"type\": \"Point\", \"start\": [0, 1, 2], \"end\": [3, 4, 5]}\n ]\n }\n output_civ.file.save(\n \"detection_results.json\",\n ContentFile(\n bytes(json.dumps(detection, ensure_ascii=True, indent=2), \"utf-8\")\n ),\n )\n job.outputs.add(output_civ)\n\n tests = [\n (403, None),\n (302, user1),\n (403, user2),\n ]\n\n for test in tests:\n response = get_view_for_user(\n url=job.outputs.first().file.url, client=client, user=test[1]\n )\n assert response.status_code == test[0]" ]
[ "0.6838261", "0.65341705", "0.62299836", "0.6090916", "0.59187865", "0.5832907", "0.5767083", "0.5675658", "0.56663036", "0.5666215", "0.5635911", "0.5478633", "0.5468885", "0.54647183", "0.54031974", "0.5370372", "0.53674585", "0.5365349", "0.5343065", "0.53406143", "0.5329562", "0.53288096", "0.5326233", "0.53237927", "0.5314595", "0.5278548", "0.52725124", "0.52585316", "0.52462864", "0.52325463" ]
0.65963656
1
This view returns the usecases which have neither exa labels nor exa concepts
def get_uses_missing_exa(request):

    use_to_ret = {}
    use_to_ret['labels_present'] = []
    use_to_ret['concepts_present'] = []
    use_to_ret['labels_missing'] = []
    use_to_ret['concepts_missing'] = []
    uses = ['colon','uterine cervix','lung']
    for el in uses:
        usecase = UseCase.objects.get(name=el)
        presence = True
        if Report.objects.filter(name = usecase).count() > 0:
            if not AnnotationLabel.objects.filter(name = usecase, annotation_mode = 'Manual and Automatic').exists():
                use_to_ret['labels_missing'].append(el)
            else:
                use_to_ret['labels_present'].append(el)
            cursor = connection.cursor()
            cursor.execute("SELECT c.annotation_mode FROM concept AS c INNER JOIN concept_has_uc AS hc ON c.concept_url = hc.concept_url WHERE hc.name = %s",[str(el)])
            ans = cursor.fetchall()
            for concept in ans:
                if concept[0] != 'Manual and Automatic':
                    presence = False
                    break
            if len(ans) > 0:
                if presence == False:
                    use_to_ret['concepts_missing'].append(el)
                else:
                    use_to_ret['concepts_present'].append(el)
            else:
                use_to_ret['concepts_missing'].append(el)
    return JsonResponse(use_to_ret)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_presence_exa_conc_lab(request):\n\n # reports = request.GET.get('reports',None)\n rep = request.GET.get('id_report',None)\n language = request.GET.get('language',None)\n usecase = request.GET.get('usecase',None)\n reports = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports = request_body_json['reports']\n if rep is not None and language is not None:\n report = Report.objects.get(id_report = rep,language = language)\n usecase = report.name_id\n # print(usecase)\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n print('bool',bool)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n elif usecase is not None:\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n\n # labels = []\n # concepts = []\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n # labels.append(bool[0])\n # concepts.append(bool[1])\n # if False in labels:\n # json_resp['labels'] = False\n # else:\n # json_resp['labels'] = True\n #\n # if False in concepts:\n # json_resp['concepts'] = False\n # else:\n # json_resp['concepts'] = True\n elif reports is not None:\n report_list = json.loads(reports)\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n usecases = []\n for rep in report_list:\n # rep = json.loads(rep)\n if rep['usecase'] not in usecases:\n usecases.append(rep['usecase'])\n labels = []\n concepts = []\n for u in usecases:\n # print(u)\n json_resp = {}\n if u in ['colon', 'uterine cervix', 'lung']:\n bool = check_exa_lab_conc_only(u)\n else:\n bool = [False, False]\n\n labels.append(bool[0])\n concepts.append(bool[1])\n if False in labels:\n json_resp['labels'] = False\n else:\n json_resp['labels'] = True\n\n if False in concepts:\n json_resp['concepts'] = False\n else:\n json_resp['concepts'] = True\n\n else:\n json_resp={'error':'a usecase is needed'}\n\n print(json_resp)\n return JsonResponse(json_resp)", "def usecases(request):\n\n context = {\n\n }\n\n return render(request, 'hydraviewer/usecases.html', context)", "def get_presence_examode_concepts(request):\n\n json_resp = {}\n json_resp['concepts'] = get_presence_exa_concepts()\n json_resp['labels'] = get_presence_exa_labels()\n # print(json_resp)\n return JsonResponse(json_resp)", "def test_returns_all_studies_with_no_query(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))", "def test_returns_all_studies_with_no_query(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))", "def purposes(self):\n\n return ('train', 'enroll', 'probe')", "def test_text_classifier_get_details_all(self):\n pass", "def test_returns_all_studies_without_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))", "def 
test_context_data_with_valid_search_and_no_results(self):\n response = self.client.get(self.get_url(self.study.pk), {'description': 'test'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)", "def df_sample_concepts(self):\n return self.abundance_mat_mult(False)", "def fixture_fixture_intended_uses_example():\n test_example = IntendedUses(\n purpose_of_model=PURPOSE_OF_MODEL,\n intended_uses=INTENDED_USES,\n factors_affecting_model_efficiency=FACTORS_AFFECTING_MODEL_EFFICIENCY,\n risk_rating=RISK_RATING,\n explanations_for_risk_rating=EXPLANATIONS_FOR_RISK_RATING,\n )\n return test_example", "def test_context_data_with_valid_search_and_no_results(self):\n response = self.client.get(self.get_url(self.study.pk), {'description': 'test'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), models.SourceTrait.objects.all().count())\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values", "def test_returns_all_studies_with_unreviewed_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, 
source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits))\n for trait in source_traits2:\n self.assertNotIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits))\n for trait in source_traits2:\n self.assertNotIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def conc_view(request):\n\n usecase = request.session['usecase']\n mode = request.session['mode']\n auto_required = request.GET.get('ns_id',None)\n jsonDict = {}\n concepts = {}\n notEmpty = False\n jsonDict['concepts'] = []\n if mode == 'Human' or auto_required == 'Human':\n cursor = connection.cursor()\n cursor.execute(\"SELECT DISTINCT b.name FROM belong_to as b inner join concept_has_uc as ch on ch.concept_url = b.concept_url inner join concept as c on c.concept_url = ch.concept_url where ch.name = %s AND annotation_mode in %s\",[str(usecase),('Manual','Manual and Automatic')])\n ar = cursor.fetchall()\n areas = []\n for el in ar:\n areas.append(el[0])\n for area in areas:\n name = area\n concepts[name] = []\n concepts_list_final = get_concepts_by_usecase_area(usecase, name,'Human')\n for c in concepts_list_final:\n if c not in concepts[name]:\n concepts[name].append(c)\n notEmpty = True\n if notEmpty == True:\n jsonDict['concepts'] = concepts\n\n elif mode == 'Robot' or auto_required == 'Robot':\n with transaction.atomic():\n with connection.cursor() as cursor:\n\n areas = ['Diagnosis', 'Test', 'Procedure', 'Anatomical Location']\n for area in areas:\n concepts[area] = get_concepts_by_usecase_area(usecase, area, 'Robot')\n if len(concepts[area]) > 0:\n notEmpty = True\n if notEmpty == True:\n jsonDict['concepts'] = concepts\n print(concepts)\n\n return JsonResponse(jsonDict)", "def restrict2use_case(self, use_case, limit=1000):\r\n\r\n\t\tdisease = self.disease[use_case]\r\n\t\tsparql = \"PREFIX exa: <https://w3id.org/examode/ontology/> \" \\\r\n\t\t\t\"PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> \" \\\r\n\t\t\t\"PREFIX mondo: <http://purl.obolibrary.org/obo/MONDO_> \" \\\r\n\t\t\t\"PREFIX dcterms: <http://purl.org/dc/terms/> \"\\\r\n\t\t\t\"select ?iri ?iri_label ?iri_SNOMED_code ?iri_UMLS_code ?semantic_area ?semantic_area_label where { \" \\\r\n\t\t\t\"?iri rdfs:label ?iri_label ; 
exa:associatedDisease mondo:\" + disease + \". \" \\\r\n\t\t\t\"filter (langMatches( lang(?iri_label), 'en')). \" \\\r\n\t\t\t\"OPTIONAL {?iri exa:hasSNOMEDCode ?iri_SNOMED_code .} \" \\\r\n\t\t\t\"OPTIONAL {?iri dcterms:conformsTo ?iri_UMLS_code .} \" \\\r\n\t\t\t\"OPTIONAL {?iri exa:hasSemanticArea ?semantic_area . \" \\\r\n\t\t\t\"?semantic_area rdfs:label ?semantic_area_label . \" \\\r\n\t\t\t\"filter (langMatches( lang(?semantic_area_label), 'en')).} \" \\\r\n\t\t\t\"} \" \\\r\n\t\t\t\"limit \" + str(limit)\r\n\t\t# issue sparql query\r\n\t\tresultSet = self.ontology.query(query_object=sparql)\r\n\t\t# convert query output to DataFrame\r\n\t\tontology_dict = defaultdict(list)\r\n\t\tfor row in resultSet:\r\n\t\t\t# store entity as IRI\r\n\t\t\tontology_dict['iri'].append(str(row.iri))\r\n\t\t\t# store additional information associated w/ entity\r\n\t\t\tontology_dict['label'].append(str(row.iri_label))\r\n\t\t\tontology_dict['SNOMED'].append(str(row.iri_SNOMED_code) if row.iri_SNOMED_code is not None else None)\r\n\t\t\tontology_dict['UMLS'].append(str(row.iri_UMLS_code)if row.iri_UMLS_code is not None else None)\r\n\t\t\tontology_dict['semantic_area'].append(str(row.semantic_area))\r\n\t\t\tontology_dict['semantic_area_label'].append(str(row.semantic_area_label))\r\n\t\tif use_case == 'celiac':\r\n\t\t\t# Add negative result\r\n\t\t\t# store entity as IRI\r\n\t\t\tontology_dict['iri'].append('https://w3id.org/examode/ontology/NegativeResult')\r\n\t\t\t# store additional information associated w/ entity\r\n\t\t\tontology_dict['label'].append('Negative Result')\r\n\t\t\tontology_dict['SNOMED'].append('M-010100')\r\n\t\t\tontology_dict['UMLS'].append(None)\r\n\t\t\tontology_dict['semantic_area'].append('http://purl.obolibrary.org/obo/NCIT_C15220')\r\n\t\t\tontology_dict['semantic_area_label'].append('Diagnosis')\r\n\t\t\t# Add inconclusive result\r\n\t\t\t# store entity as IRI\r\n\t\t\tontology_dict['iri'].append('https://w3id.org/examode/ontology/InconclusiveOutcome')\r\n\t\t\t# store additional information associated w/ entity\r\n\t\t\tontology_dict['label'].append('Inconclusive Outcome')\r\n\t\t\tontology_dict['SNOMED'].append(None)\r\n\t\t\tontology_dict['UMLS'].append(None)\r\n\t\t\tontology_dict['semantic_area'].append('http://purl.obolibrary.org/obo/NCIT_C15220')\r\n\t\t\tontology_dict['semantic_area_label'].append('Diagnosis')\r\n\t\treturn pd.DataFrame(ontology_dict)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n study2 = factories.StudyFactory.create()\n datasets2 = factories.SourceDatasetFactory.create_batch(\n 5, source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only datasets from the correct study are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that the other study's datasets do not show up.\n self.assertEqual(len(returned_pks), len(self.source_datasets))\n for dataset in datasets2:\n self.assertNotIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertIn(dataset.i_id, returned_pks)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n study2 = factories.StudyFactory.create()\n datasets2 = factories.SourceDatasetFactory.create_batch(\n 5, source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only datasets from the correct 
study are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that the other study's datasets do not show up.\n self.assertEqual(len(returned_pks), len(self.source_datasets))\n for dataset in datasets2:\n self.assertNotIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertIn(dataset.i_id, returned_pks)", "def enseemble_results_extra(self, questions, topn):\n ok_vocab = self.get_vocabulary()\n new_vocab = [(w, self.model.wv.vocab[w]) for w in ok_vocab]\n new_vocab = {w.upper(): v for w, v in new_vocab}\n new_vocab = dict(new_vocab)\n\n results = []\n for line_no, line in enumerate(utils.smart_open(questions)):\n # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed\n line = utils.to_unicode(line)\n if line.startswith(': '):\n continue\n else:\n\n try:\n a, b, c, expected = [word.upper() for word in line.split()]\n except ValueError:\n logger.info(\"skipping invalid line #%i in %s\", line_no, questions)\n continue\n if a not in new_vocab or b not in new_vocab or c not in new_vocab or expected not in new_vocab:\n \"\"\"if a not in new_vocab:\n print(\"Dont know: \" + a)\n if b not in new_vocab:\n print(\"Dont know: \" + b)\n if c not in new_vocab:\n print(\"Dont know: \" + c)\n if expected not in new_vocab:\n print(\"Dont know: \" + expected)\n \"\"\"\n logger.debug(\"skipping line #%i with OOV words: %s\", line_no, line.strip())\n results.append(None)\n continue\n\n original_vocab = self.get_vocabulary()\n self.set_vocabulary(new_vocab)\n ignore = {a, b, c} # input words to be ignored\n #print('topn')\n #print(topn)\n # find the most likely prediction, ignoring OOV words and input words\n sims = self.most_similar(positive_words=[b, c], negative_words=[a], topn = topn)\n # print(\"sims\")\n #print(sims)\n self.set_vocabulary(original_vocab)\n inner_results = []\n for predict in sims:\n predicted = predict[0]\n predicted = predicted.upper()\n predicted_tuple = (predicted, predict[1])\n #print(predicted_tuple)\n inner_results.append(predicted_tuple)\n #print(predicted)\n results.append(inner_results)\n #print(results)\n\n return results", "def workbench_scenarios(): \n return [\n (\"TermsXBlock\",\n \"\"\"<terms/>\n \"\"\"),\n (\"Multiple TermsXBlock\",\n \"\"\"<vertical_demo>\n <terms/>\n <terms/>\n <terms/>\n </vertical_demo>\n \"\"\"),\n ]", "def test_index_view_with_no_questions(self):\r\n response = self.client.get(reverse('index'))\r\n self.assertEqual(response.status_code, 200)\r\n self.assertContains(response, \"No Eproject are available.\")\r\n self.assertQuerysetEqual(response.context['latest_question_list'], [])", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_get_vulnerability_occurrences_summary(self):\n pass", "def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]", "def test_no_data(self):\n response = self.client.get(reverse('education:demographics'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context.get(\"json_data\"), None)\n self.assertEqual(response.context.get(\"all_cohort\"), None)\n self.assertEqual(response.context.get(\"all_rate\"), None)\n for demo in State.GROUP_NAMES:\n 
self.assertEqual(response.context.get(demo+\"_cohort\"), None)\n self.assertEqual(response.context.get(demo+\"_rate\"), None)\n self.assertContains(response, \"Home\")\n self.assertContains(response, \"No Data Available\")\n self.assertNotContains(response, \"Students in 15-16 Cohort\")", "def drop_irrelevant_practices(df):\n\n is_relevant = df.groupby(\"practice\").value.any()\n return df[df.practice.isin(is_relevant[is_relevant == True].index)]" ]
[ "0.59388274", "0.58805186", "0.58260775", "0.53480375", "0.53480375", "0.53390783", "0.5233926", "0.5202363", "0.5183914", "0.5163577", "0.5127641", "0.5122592", "0.5121663", "0.5119788", "0.51088226", "0.5098412", "0.5095758", "0.5095758", "0.50584406", "0.50419766", "0.50284976", "0.50284976", "0.50271124", "0.5026035", "0.50199884", "0.5019369", "0.5011691", "0.50069654", "0.4997589", "0.49957287" ]
0.7342495
0
This view returns the languages available for a report
def get_report_translations(request): id_report = request.GET.get('id_report',None) if id_report is not None: languages = [] lang = Report.objects.filter(id_report = id_report) for el in lang: if el.language not in languages: languages.append(el.language) json_resp = {} # print(languages) json_resp['languages'] = languages return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def languages(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'languages')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def wikiLanguages():\n return languages", "def languages(self):\n\n return self._request('/languages')", "def languages():\n r = requests.get('http://translate.yandex.net/api/v1/tr.json/getLangs')\n return r.json['dirs']", "def available_languages(self):\n data = self._run(\n url_path=\"languages/available\"\n )\n return data['result'].get('languages', [])", "def getLanguages(self):\n return self.__getColumnData(Q_LANGUAGES, 'language')", "def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp", "def languages_display(self):\n is_draft = self.extended_object.publisher_is_draft\n node = self.extended_object.node\n current_and_descendant_nodes = node.__class__.get_tree(parent=node)\n\n course_runs = (\n CourseRun.objects.filter(\n direct_course__extended_object__node__in=current_and_descendant_nodes,\n direct_course__extended_object__publisher_is_draft=is_draft,\n )\n .exclude(catalog_visibility=\"hidden\")\n .only(\"languages\")\n )\n languages = list(\n {x for course_languages in course_runs for x in course_languages.languages}\n )\n instance = CourseRun(languages=languages)\n return instance.get_languages_display()", "def languages(self):\n return self._languages", "def get_langs(id):", "def get_all_languages():\n\tdef _get():\n\t\tif not frappe.db:\n\t\t\tfrappe.connect()\n\t\treturn frappe.db.sql_list('select name from tabLanguage')\n\treturn frappe.cache().get_value('languages', _get)", "def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })", "def get_languages(self):\n titles = Title.objects.filter(page=self)\n if not hasattr(self, \"languages_cache\"):\n languages = []\n for t in titles:\n if t.language not in languages:\n languages.append(t.language)\n self.languages_cache = languages\n return self.languages_cache", "def test_get_languages(self):\n languages = get_languages(self.edition_data[\"languages\"])\n self.assertEqual(languages, [\"English\"])", "def GetLanguages():\n return GetDataFromCsvFile('languages.csv')", "def get_languages():\n\n api = (api_name, 'languages')\n\n response = make_request(api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n logger.debug(\"response from spanglish languages: {}\".format(response))\n logger.debug(\"response statuscode from spanglish languages: {}\".format(status_code))\n\n click.echo(\"response message: %s \" % msg)", "def languages(self):\n return LanguageCodes.english_names", "def languages(self) -> localedata.LocaleDataDict:\n return self._data['languages']", "def Languages(self, default=[\"en\"]):\n return self.data.get('metadata', {}).get('languages', default)", "def get_localization(self):\n return self._request_data(\"/lokarria/localization\")", "def getLocales(self):\n pass", "def get_lang_to_export(self, cr, uid, external_session, context=None):\n\n if context is None:\n return []\n else:\n return context.get('lang_to_export') or [context.get('lang')]", "def getAvailableLanguages(self):\n url = \"http://www.youtube.com/api/timedtext?v=%s&type=list\" % self.video_id\n xml = urllib2.urlopen(url)\n tree = ET.parse(xml)\n root = 
tree.getroot()\n languages = {}\n for child in root:\n languages[child.attrib[\"lang_code\"]] = child.attrib[\"lang_translated\"]\n return languages", "def GetLanguages(cls):\n return sorted(cls._LANGUAGE_PER_TAG.items())", "def fetch_languages(self):\r\n \r\n # tokenize, clean and filter document tokens \r\n toks = [re.sub(r'[^a-zA-Z]','', tok.text.lower().strip()) for tok in self.doc]\r\n toks = [tok for tok in toks if len(tok)>1 and tok in LANGUAGES]\r\n toks = sorted(set(toks))\r\n \r\n return toks", "def allLocales(self):\n return util.parseLocales(urlopen(self.all_url).read())", "def get_localizations(request):\n return JsonResponse(get_all_objects(Localization, LocalizationSerializer), safe=False)", "def grepo(request):\n return {\n \"GREPO_LANGUAGES\": Language.objects.all().values_list(\"name\", flat=True)\n }", "def browserLanguages(request):\n fallback = []\n accepted = request.http_accept_language\n if accepted:\n # Extract the languages names from the string\n accepted = accepted.split(',')\n accepted = map(lambda x: x.split(';')[0], accepted)\n # Add base language for each sub language. If the user specified\n # a sub language like \"en-us\", we will try to to provide it or\n # a least the base language \"en\" in this case.\n for lang in accepted:\n lang = lang.lower()\n fallback.append(lang)\n if '-' in lang:\n baselang = lang.split('-')[0]\n fallback.append(baselang)\n return fallback", "def get_languages(self) -> dict:\n request_url = self.__API_URL.format(user=self._user,\n project=self._project)\n response = self._http.request('GET', request_url,\n headers=config.HEADERS)\n\n # Handle limits and wrong responses\n if response.status > 205:\n raise StatusError(status=response.status)\n\n return json.loads(response.data)" ]
[ "0.7279651", "0.72215253", "0.7170446", "0.6814636", "0.6712208", "0.66994166", "0.66713786", "0.6651076", "0.65438974", "0.6543734", "0.6529538", "0.65234035", "0.6458752", "0.64573747", "0.6453006", "0.6448716", "0.64473933", "0.64339", "0.64140487", "0.6352627", "0.633507", "0.6269988", "0.622862", "0.62006044", "0.618521", "0.61824125", "0.6148293", "0.61400306", "0.6102768", "0.61025584" ]
0.7563172
0
This view returns the usecases of medtag reports
def medtag_reports(request): json_resp = {} json_resp['usecase'] = [] reps = Report.objects.all() for r in reps: if not r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']: json_resp['usecase'].append(str(r.name_id)) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_medtag_reports(request):\n\n json_resp = {}\n json_resp['count'] = 0\n medtag_arts = Report.objects.all().exclude(institute = 'PUBMED')\n # for el in pubmed_arts:\n # if el.id_report.startswith('PUBMED'):\n json_resp['count'] = medtag_arts.count()\n return JsonResponse(json_resp,safe=False)", "def usecases(request):\n\n context = {\n\n }\n\n return render(request, 'hydraviewer/usecases.html', context)", "def pubmed_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n json_resp['annotated'] = 0\n json_resp['tot'] = 0\n json_resp['usecase'] = []\n languages = ['English','english']\n for el in usecases:\n use = el.name\n json_resp[use] = {}\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages,institute = 'PUBMED')\n for el in report:\n if el.batch not in batches:\n batches.append(el.batch)\n count_rep = report.count()\n\n if count_rep > 0:\n json_resp['usecase'].append(str(use))\n json_resp['tot'] = json_resp['tot'] + count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n json_resp['annotated'] = json_resp['annotated'] + groundTruths\n\n for batch in batches:\n\n json_resp[use][batch] = {}\n if batch == 'all' or batch is None:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED')\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED',batch = batch)\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch,tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print('risposta',json_resp)\n return JsonResponse(json_resp)", "def summary(request, tag=''):\n if tag:\n if tag.startswith(\"@\"):\n target = Target.objects.get(id=tag[1:])\n summaries_by_value, consfield_summaries = target.getSummaries()\n elif tag.startswith(\"~\"):\n # TODO: is there a sensible analogous summary for users,\n # or does it look completely different? 
\n pass\n else:\n # TODO: other log types\n pass\n\n # replace fieldnames in tag_groups with fieldsummaries in grouped_summaries\n grouped_summaries = [ ( gtuple[0], [ summaries_by_value.pop(t) for t in gtuple[1] if t in summaries_by_value])\n for gtuple in tag_groups ]\n # add misc consensus fields\n grouped_summaries.append(('other consensus fields',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()\n if summaries_by_value[k].is_consensus and summaries_by_value[k].fieldname ]))\n # add misc consensus labels\n grouped_summaries.append(('consensus labels',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()\n if summaries_by_value[k].is_consensus]))\n grouped_summaries[-1][-1].sort(lambda x,y:cmp(x.count,y.count))\n # add misc adhoc fields\n grouped_summaries.append(('adhoc fields',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()\n if summaries_by_value[k].fieldname ]))\n grouped_summaries[-1][-1].sort(lambda x,y:cmp(x.count,y.count))\n # add misc adhoc labels\n grouped_summaries.append(('adhoc labels',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()]))\n grouped_summaries[-1][-1].sort(lambda x,y:cmp(x.count,y.count))\n \n return render_to_response('summary.html',{'grouped_summaries':grouped_summaries, 'consfield_summaries':consfield_summaries, 'tag':tag})", "def get_stats_array_per_usecase(request):\n\n mode = request.GET.get('mode',None)\n usern = request.GET.get('member',request.session['username'])\n username = User.objects.get(username=usern, ns_id=mode)\n language = request.GET.get('language',request.session['language'])\n institute = request.GET.get('institute',request.session['institute'])\n batch = request.GET.get('batch',request.session['batch'])\n json_dict = {}\n js = {}\n js['original'] = {}\n js['percent'] = {}\n json_dict['medtag'] = get_array_per_usecase(username,mode,language,institute,batch)\n json_dict['pubmed'] = get_array_per_usecase_PUBMED(username,mode,language,institute,batch)\n\n\n # print(json_dict)\n return JsonResponse(json_dict)", "def pubmed_reports(request):\n\n json_resp = {}\n json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n\n if r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def index(request, error='', message='', tag=None, tag_id=None, comment_id=None, event_id=None, metric_id=None):\n if tag:\n metrics_to_plot = Metric.objects.filter(tags=tag)\n else:\n metrics_to_plot = Metric.objects.filter(source='appleweekly')\n\n traffic_to_plot = []\n for metric in metrics_to_plot:\n metric_traffic = list(Traffic.objects.filter(metric=metric))\n if metric_traffic:\n traffic_to_plot.append(metric_traffic)\n\n chart = False\n\n for m in metrics_to_plot:\n if m.source == 'appleweekly':\n try:\n #Import Apple weekly summary metrics, but just for one-time use - don't save in db.\n 
append = traffic_to_plot.append #Avoid re-calling the .append function in the middle of all those loops.\n for w in AppleWeeklySummary.merged.all():\n for field in AppleWeeklySummary._meta._fields(): #This grabs a list of field objects from the model specified as part of the stats app\n if field.verbose_name == m.appleweeklyfield: #Verbose name is specified as (\"verbose_name\") in stats/models/apple_summary.py\n append(Traffic(date=w.week_beginning, count=w.__dict__[field.name], metric=m))\n except:\n debug.onscreen('WARNING: Can\\'t find any Apple summary data. Have you imported it?')\n elif m.source == 'itu-collection-chart':\n try:\n #Add the first chartrecord of the day to traffic_to_plot\n dates = []\n chartrecords = ItuCollectionChartScan.objects.filter(itucollection=m.itucollection).order_by('date')\n for chartrecord in chartrecords:\n if chartrecord.date.date() not in dates:\n dates.append(chartrecord.date.date())\n for date in dates:\n chartrecords_day = []\n for chartrecord in chartrecords:\n if chartrecord.date.date() == date:\n chartrecords_day.append(chartrecord)\n traffic_to_plot.append(\n Traffic(date=date, count=(-1 * chartrecords_day[0].position), metric=m))\n chart = True\n except:\n error += 'Failed to process traffic for an itu-collection-chart.'\n elif m.source == 'itu-item-chart':\n try:\n #Add the first chartrecord of the day to traffic_to_plot\n dates = []\n chartrecords = ItuItemChartScan.objects.filter(ituitem=m.ituitem).order_by('date')\n for chartrecord in chartrecords:\n if chartrecord.date.date() not in dates:\n dates.append(chartrecord.date.date())\n for date in dates:\n chartrecords_day = []\n for chartrecord in chartrecords:\n if chartrecord.date.date() == date:\n chartrecords_day.append(chartrecord)\n traffic_to_plot.append(\n Traffic(date=date, count=(-1 * chartrecords_day[0].position), metric=m))\n chart = True\n except:\n error += 'Failed to process traffic for an itu-item-chart.'\n elif m.source =='itu-#tc':\n try:\n dates_processed = []\n for tc_scan in ItuScanLog.objects.filter(mode=2).order_by('time'):\n date = tc_scan.time.date()\n if date not in dates_processed:\n dates_processed.append(date)\n tc_count = ItuCollectionChartScan.objects.filter(scanlog=tc_scan,\n itucollection__institution=m.ituinstitution).count()\n traffic_to_plot.append(Traffic(date=date, count=tc_count, metric=m))\n except:\n error += 'Failed to process traffic for the # of collections in the top 200.'\n elif m.source =='itu-#ti':\n try:\n dates_processed = []\n for ti_scan in ItuScanLog.objects.filter(mode=3).order_by('time'):\n date = ti_scan.time.date()\n if date not in dates_processed:\n dates_processed.append(date)\n ti_count = ItuItemChartScan.objects.filter(scanlog=ti_scan,\n ituitem__institution=m.ituinstitution).count()\n traffic_to_plot.append(Traffic(date=date, count=ti_count, metric=m))\n except:\n error += 'Failed to process traffic for the # of collections in the top 200.'\n\n #NOTE: We do not need to handle the temporal range of comments and events since this is done automatically by Timeplot.\n\n from_itunes_u = Category.objects.get(description='From iTunes U')\n #Create comments in the feedback database if they don't already exist.\n for itu_comment in ItuComment.objects.filter(ituinstitution__name = 'Oxford University'):\n comment = Comment(\n date=itu_comment.date,\n time=datetime.time(0,0,0),\n source=itu_comment.itucollectionhistorical.name + ' - comment by ' + itu_comment.source,\n detail=itu_comment.detail,\n user_email='[email protected]',\n 
moderated=True,\n category=from_itunes_u,\n itu_source=itu_comment\n )\n if Comment.objects.filter(detail=itu_comment.detail).count() > 0:\n pass\n else:\n comment.save()\n if tag:\n comments_to_plot = Comment.objects.filter(moderated=True,tags=tag)\n events_to_plot = Event.objects.filter(moderated=True,tags=tag)\n else:\n comments_to_plot = Comment.objects.filter(moderated=True)\n events_to_plot = Event.objects.filter(moderated=True)\n\n categories_to_plot = []\n for category in comments_to_plot.values_list('category').distinct():\n categories_to_plot.append(Category.objects.get(id=category[0]))\n for category in events_to_plot.values_list('category').distinct():\n if Category.objects.get(id=category[0]) not in categories_to_plot:\n categories_to_plot.append(Category.objects.get(id=category[0]))\n\n return render_to_response('feedback/index.html', {\n 'metrics_to_plot': metrics_to_plot,\n 'metric_textfiles': create_metric_textfiles(traffic_to_plot,metrics_to_plot),\n 'categories_to_plot': categories_to_plot,\n 'comments_to_plot': comments_to_plot,\n 'events': events_to_plot,\n 'chart': chart,\n 'error': error,\n 'message': message,\n 'tag': tag, 'tag_id': tag_id, 'tags': Tag.objects.all(), 'comment_id': comment_id, 'event_id': event_id, 'metric_id': metric_id,\n }, context_instance=RequestContext(request))", "def odag():\r\n document.add_heading('OnDemand Application Generation (ODAG)', 1)\r\n\r\n odag_metrics = ['enabled',\r\n 'maxConcurrentRequests',\r\n 'logLevel']\r\n odag = get_qlik_sense.get_odag()\r\n num_of_metric = len(odag_metrics)\r\n table = document.add_table(rows=num_of_metric+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'details'\r\n\r\n for metric in range(len(odag_metrics)):\r\n row = table.rows[metric+1]\r\n row.cells[0].text = str(odag_metrics[metric])\r\n row.cells[1].text = str(odag[0][metric])\r\n document.add_page_break()", "def analysis_view(request):\n return render(request, \"tracker/analysis.html\")", "def summary_list(request):\r\n qs = Summary.objects.all()\r\n context = {'summary_records': qs}\r\n return render(request, 'weather/summary_list.html', context)", "def views(request):\n tag = Tag.objects.filter(name=\"Global\")\n gauges = Gauge.objects.filter(tags=tag)\n return render_to_response('dashboard/views.js',{'gauges': gauges} )", "def training_report_view(request, application_slug):\n return training_report(request, application_slug, attach=False)", "def get_uses_missing_exa(request):\n\n use_to_ret = {}\n use_to_ret['labels_present'] = []\n use_to_ret['concepts_present'] = []\n use_to_ret['labels_missing'] = []\n use_to_ret['concepts_missing'] = []\n uses = ['colon','uterine cervix','lung']\n for el in uses:\n usecase = UseCase.objects.get(name=el)\n presence = True\n if Report.objects.filter(name = usecase).count() > 0:\n if not AnnotationLabel.objects.filter(name = usecase, annotation_mode = 'Manual and Automatic').exists():\n use_to_ret['labels_missing'].append(el)\n else:\n use_to_ret['labels_present'].append(el)\n\n cursor = connection.cursor()\n cursor.execute(\"SELECT c.annotation_mode FROM concept AS c INNER JOIN concept_has_uc AS hc ON c.concept_url = hc.concept_url WHERE hc.name = %s\",[str(el)])\n ans = cursor.fetchall()\n for concept in ans:\n if concept[0] != 'Manual and Automatic':\n presence = False\n break\n if len(ans) > 0:\n if presence == False:\n use_to_ret['concepts_missing'].append(el)\n else:\n use_to_ret['concepts_present'].append(el)\n else:\n 
use_to_ret['concepts_missing'].append(el)\n\n return JsonResponse(use_to_ret)", "def performance_analysis(request):\n context = {}\n\n return render(request, 'classroom_main/performance_analysis.html', context)", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def meta(request):\r\n\r\n\tdatatype = request.GET.get('datatype', 'RNA')\r\n\ttissue = request.GET.get('tissue', 'brain')\r\n\tcategory = request.GET.get('category', 'region')\r\n\tgroup = request.GET.get('group', 'PFC')\r\n\tcomparison = request.GET.get('comparison', 'AD-vs-Control')\r\n\tfeature_symbols_in_interest = request.GET.get('features', '').split(' ')\r\n\tcollection_name = \"%s_%s_%s-%s_%s\" % (datatype,\r\n\t\t\t\t\t\t\t\t\t\t\ttissue,\r\n\t\t\t\t\t\t\t\t\t\t\tcategory,\r\n\t\t\t\t\t\t\t\t\t\t\tgroup,\r\n\t\t\t\t\t\t\t\t\t\t\tcomparison)\r\n\t\"\"\"\r\n\t\tWe should split POST[\"featureInput\"] here\r\n\t\"\"\"\r\n\t# import pdb; pdb.set_trace();\r\n\t# feature_symbols_in_interest = split_feature_input_to_list(request.POST[\"featureInput\"])\r\n\r\n\t# way_to_choose_probe = request.GET.get('way_to_choose_probe', 'fold change')\r\n\r\n\trecords = list(meta_stat_client.get_all_records(collection_name))\r\n\trecords_all_teststat = list(test_stat_client.get_all_records(collection_name))\r\n\trecord_sample_count = test_stat_client.get_all_sample_count(collection_name)\r\n\trecord_disease_state = test_stat_client.get_all_disease_state(collection_name)\r\n\trecord_all_datasets = test_stat_client.get_all_datasets(collection_name)\r\n\r\n\t# Turn into dataframe\r\n\trecords = 
pd.DataFrame(records)\r\n\trecords_all_teststat = pd.DataFrame(records_all_teststat)\r\n\r\n\t# Select features in interest\r\n\tfilt_ind = records['symb'].isin(feature_symbols_in_interest)\r\n\trecords_queried = records[filt_ind]\r\n\r\n\trecords_queried['entrez_gene_id'] = records_queried.apply(from_symbol_to_entrez_gene_id, axis=1)\r\n\t\t\t\t\r\n\r\n\t# Select top 10 by meta-p-value\r\n\trecords_top_10 = records.sort('pval', ascending=True).iloc[0:9, ]\r\n\t# records_top_10 = records.sort('pval', ascending=True)\r\n\r\n\trecords_top_10['entrez_gene_id'] = records_top_10.apply(from_symbol_to_entrez_gene_id, axis=1)\r\n\t\r\n\t# Get meta info for this collection\r\n\tmeta_df = pd.DataFrame(record_sample_count, index=['sample_count'], columns=record_all_datasets)\r\n\tmeta_df = pd.DataFrame.transpose(meta_df)\r\n\tmeta_df['state_1_count'] = pd.Series(record_disease_state).apply(sum)\r\n\tmeta_df['state_0_count'] = meta_df['sample_count'] - meta_df['state_1_count']\r\n\tsymbol_count_list = []\r\n\t\r\n\tfor dataset in record_all_datasets:\r\n\t\tsymb_count = records_all_teststat[records_all_teststat['dataset_accession'] == dataset].shape[0]\r\n\t\tsymbol_count_list.append(symb_count)\r\n\r\n\tmeta_df['feature_count'] = symbol_count_list\r\n\tmeta_df['dataset_accession'] = meta_df.index\r\n\t# import pdb;pdb.set_trace();\r\n\r\n\t# Add string ids\r\n\trecords_queried['string_id'] = from_single_symbol_to_string_id(records_queried['symb'])\r\n\t# import pdb;pdb.set_trace();\r\n\t# records_top_10['string_id'] = from_single_symbol_to_string_id(records_top_10['symb'])\r\n\t# import pdb;pdb.set_trace();\r\n\t\r\n\tunion_feature_count = records.shape[0]\r\n\tcheck_all_presence = lambda x : '?' not in x['eff']\r\n\t\r\n\tintersect_feature_count = sum(records.apply(check_all_presence, axis=1))\r\n\t\r\n\r\n\t# Output queried records to dictionary\r\n\tmeta_stat_queried = records_queried.to_dict(outtype='records')\r\n\tmeta_stat_top_10 = records_top_10.to_dict(outtype='records')\r\n\tmeta_info = meta_df.to_dict(outtype='records')\r\n\t# import pdb;pdb.set_trace();\r\n\r\n\treturn render(request, 'meta_stat.html',\r\n\t\t\t\t{\r\n\t\t\t\t\t'meta_stat_queried' : meta_stat_queried,\r\n\t\t\t\t\t'meta_stat_top_10' : meta_stat_top_10,\r\n\t\t\t\t\t'collection_name' : collection_name,\r\n\t\t\t\t\t'feature_string' : '+'.join(feature_symbols_in_interest),\r\n\t\t\t\t\t'meta_info' : meta_info,\r\n\t\t\t\t\t'union_feature_count' : union_feature_count,\r\n\t\t\t\t\t'intersect_feature_count' : intersect_feature_count\r\n\t\t\t\t})", "def overview(request):\n LOGGER.info('Rendering WMT16 HIT overview for user \"{0}\".'.format(\n request.user.username or \"Anonymous\"))\n \n # Re-initialise random number generator.\n seed(None)\n \n # Collect available language pairs for the current user.\n language_codes = set([x[0] for x in LANGUAGE_PAIR_CHOICES])\n language_pairs = request.user.groups.filter(name__in=language_codes)\n \n # Collect available annotation projects for the current user.\n annotation_projects = request.user.project_set.all()\n \n hit_data = []\n total = [0, 0, 0]\n\n for language_pair in language_pairs:\n for annotation_project in annotation_projects:\n hit = _compute_next_task_for_user(request.user, annotation_project, language_pair)\n user_status = HIT.compute_status_for_user(request.user, annotation_project, language_pair)\n for i in range(3):\n total[i] = total[i] + user_status[i]\n \n if hit:\n # Convert status seconds back into datetime.time instances.\n for i in range(2):\n user_status[i+1] = 
seconds_to_timedelta(int(user_status[i+1]))\n \n hit_data.append(\n (hit.get_language_pair_display(), hit.get_absolute_url(),\n hit.hit_id, user_status, annotation_project)\n )\n \n # Convert total seconds back into datetime.timedelta instances.\n total[1] = seconds_to_timedelta(int(total[2]) / float(int(total[0]) or 1))\n \n # Remove microseconds to get a nicer timedelta rendering in templates.\n total[1] = total[1] - timedelta(microseconds=total[1].microseconds)\n \n total[2] = seconds_to_timedelta(int(total[2]))\n \n groups = _identify_groups_for_user(request.user)\n group = None\n if len(groups) > 1:\n LOGGER.debug(u'User \"{0}\" assigned to multiple annotation groups: {1}'.format(\n request.user.username or u'Anonymous',\n u', '.join([x.name for x in groups]))\n )\n group = groups[0]\n \n if group is not None:\n group_name = group.name\n group_status = HIT.compute_status_for_group(group)\n for i in range(2):\n group_status[i+1] = seconds_to_timedelta(int(group_status[i+1]))\n \n else:\n group_status = None\n group_name = None\n \n LOGGER.debug(u'\\n\\nHIT data for user \"{0}\":\\n\\n{1}\\n'.format(\n request.user.username or \"Anonymous\",\n u'\\n'.join([u'{0}\\t{1}\\t{2}\\t{3}'.format(*x) for x in hit_data])))\n\n # Compute admin URL for super users.\n admin_url = None\n if request.user.is_superuser:\n admin_url = reverse('admin:index')\n \n dictionary = {\n 'active_page': \"OVERVIEW\",\n 'hit_data': hit_data,\n 'total': total,\n 'group_name': group_name,\n 'group_status': group_status,\n 'admin_url': admin_url,\n 'title': 'WMT16 Dashboard',\n 'annotation_groups': [x.name for x in groups],\n }\n dictionary.update(BASE_CONTEXT)\n \n LOGGER.info(dictionary.values())\n \n return render(request, 'wmt16/overview.html', dictionary)", "def summaryView(request):\n\n alert_errors = []\n alert_infos = []\n alert_filters = []\n\n runs = get_runs_from_request_filters(\n request, alert_errors, alert_infos, alert_filters\n )\n\n summary = SummaryReport(runs)\n\n context = {\n \"refs\": summary.reference_runs(),\n \"runs\": summary.runs_checked_per_type(),\n \"tk_maps\": summary.tracker_maps_per_type(),\n \"certified_runs\": summary.certified_runs_per_type(),\n \"sums\": summary.sum_of_quantities_per_type(),\n \"alert_errors\": alert_errors,\n \"alert_infos\": alert_infos,\n \"alert_filters\": alert_filters,\n }\n\n return render(request, \"certhelper/summary.html\", context)", "def test_get_derived_metric_tags(self):\n pass", "def test_get_vulnerability_occurrences_summary(self):\n pass", "def get_designs(self):", "def get_test_report(request, **kwargs): \n\t\n #Fetching the details of the selected event\n test_list = sidecar.events.test_report(project_id=kwargs['test_id'])\n report_list = []\n\t\n #Creating the list for the report\n for tests in test_list._logs:\n\tjson_test = json.loads(tests['data'])\n\ttests['success'] = json_test['success'] \n\ttests['time'] = json_test['time']\n\ttests['test_cases'] = json_test['test_cases']\n\treport_list.append(tests)\n\n #Making the context and sending to template\n context = {\n \"page_title\": _(\"Test Results\"),\n \"tests\": report_list\n }\n return render(request, 'rally_dashboard/events/test_detail.html', context)", "def get_data(request):\n\n json_resp = {}\n # reports = Report.objects.filter(name = UseCase.objects.get(name=request.session['usecase']),institute = request.session['institute'],language = request.session['language'])\n\n json_resp['reports'] = []\n institute = request.GET.get('institute',request.session['institute'])\n usecase = 
request.GET.get('usecase',request.session['usecase'])\n print(usecase)\n language = request.GET.get('language',request.session['language'])\n ns_human = NameSpace.objects.get(ns_id='Human')\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n user_robot = User.objects.get(username='Robot_user', ns_id=ns_robot)\n # usec = UseCase.objects.get(name = usecase)\n # reports = Report.objects.filter(name = usec,institute = institute, language = language).values('id_report')\n # gt_report = GroundTruthLogFile.objects.filter(language = language).exclude(username = user_robot,id_report__in=reports).order_by('id_report').distinct('id_report')\n cursor = connection.cursor()\n cursor.execute(\"SELECT r.id_report,r.language,r.report_json,r.name,r.institute,r.batch,COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND r.institute = %s AND r.language = %s AND g.username != %s GROUP BY (r.id_report,r.language,r.report_json,r.name,r.institute,r.batch)\",[usecase,institute,language,'Robot_user'])\n gt_report_ids = []\n indice = 0\n st = time.time()\n for el in cursor.fetchall():\n\n # report = Report.objects.get(language = language, id_report = el.id_report_id)\n gt_report_ids.append(el[0])\n # print(str(indice))\n indice +=1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 1\n gt_robot = 0\n\n rep = json.loads(el[2])\n new_rep = {}\n for key in rep.keys():\n nkey = key+ '_0'\n new_rep[nkey] = rep[key]\n\n total = el[6]\n\n new_rep['usecase'] = usecase\n new_rep['id_report_not_hashed'] = rep.get('report_id',el[0])\n new_rep['id_report'] = el[0]\n new_rep['institute'] = institute\n new_rep['language'] = language\n new_rep['batch'] = el[5]\n\n json_resp['reports'].append({'total':total, 'report':new_rep,'id_report':el[0], 'language':language})\n\n usec = UseCase.objects.get(name = usecase)\n reports = Report.objects.filter(institute = institute,language = language,name = usec).exclude(id_report__in=gt_report_ids)\n # print(reports.count())\n indice = 0\n st = time.time()\n for el in reports:\n report = el\n # print(str(indice))\n indice += 1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 0\n gt_robot = 0\n\n rep = report.report_json\n new_rep = {}\n for key in rep.keys():\n nkey = key + '_0'\n new_rep[nkey] = rep[key]\n\n total = gt_human + gt_robot\n\n new_rep['usecase'] = report.name_id\n new_rep['id_report_not_hashed'] = rep.get('report_id', report.id_report)\n new_rep['id_report'] = report.id_report\n new_rep['institute'] = report.institute\n new_rep['language'] = report.language\n new_rep['batch'] = report.batch\n\n json_resp['reports'].append(\n {'total': total, 'report': new_rep, 'id_report': report.id_report, 'language': report.language})\n # print('elaboro1',str(end1-st1))\n tot = time.time()\n print('totale',str(tot-st))\n\n return JsonResponse(json_resp,safe=False)", "def drought_ag_risk_map(request):\n \n view_center = [-105.2, 39.0]\n view_options = MVView(\n projection='EPSG:4326',\n center=view_center,\n zoom=7.0,\n maxZoom=12,\n minZoom=5\n )\n\n # TIGER state/county mapserver\n tiger_boundaries = MVLayer(\n source='TileArcGISRest',\n options={'url': 
'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/State_County/MapServer'},\n legend_title='States & Counties',\n layer_options={'visible':True,'opacity':0.2},\n legend_extent=[-112, 36.3, -98.5, 41.66]) \n \n ##### WMS Layers - Ryan\n usdm_legend = MVLegendImageClass(value='Drought Category',\n image_url='http://ndmc-001.unl.edu:8080/cgi-bin/mapserv.exe?map=/ms4w/apps/usdm/service/usdm_current_wms.map&version=1.3.0&service=WMS&request=GetLegendGraphic&sld_version=1.1.0&layer=usdm_current&format=image/png&STYLE=default')\n usdm_current = MVLayer(\n source='ImageWMS',\n options={'url': 'http://ndmc-001.unl.edu:8080/cgi-bin/mapserv.exe?',\n 'params': {'LAYERS':'usdm_current','FORMAT':'image/png','VERSION':'1.1.1','STYLES':'default','MAP':'/ms4w/apps/usdm/service/usdm_current_wms.map'}},\n layer_options={'visible':False,'opacity':0.25},\n legend_title='USDM',\n legend_classes=[usdm_legend],\n legend_extent=[-126, 24.5, -66.2, 49])\n \n # USGS Rest server for HUC watersheds \n watersheds = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://hydro.nationalmap.gov/arcgis/rest/services/wbd/MapServer'},\n legend_title='HUC Watersheds',\n layer_options={'visible':False,'opacity':0.4},\n legend_extent=[-112, 36.3, -98.5, 41.66])\n \n # Sector drought vulnerability county risk score maps -> from 2018 CO Drought Plan update\n vuln_legend = MVLegendImageClass(value='Risk Score',\n image_url='/static/tethys_gizmos/data/ag_vuln_legend.jpg')\n ag_vuln_kml = MVLayer(\n source='KML',\n options={'url': '/static/tethys_gizmos/data/CO_Ag_vuln_score_2018.kml'},\n layer_options={'visible':True,'opacity':0.75},\n legend_title='Ag Risk Score',\n feature_selection=True,\n legend_classes=[vuln_legend],\n legend_extent=[-109.5, 36.5, -101.5, 41.6])\n \n # Define GeoJSON layer\n # Data from CoCoRaHS Condition Monitoring: https://www.cocorahs.org/maps/conditionmonitoring/\n with open(como_cocorahs) as f:\n data = json.load(f)\n \n # the section below is grouping data by 'scalebar' drought condition\n # this is a work around for displaying each drought report classification with a unique colored icon\n data_sd = {}; data_md ={}; data_ml={}\n data_sd[u'type'] = data['type']; data_md[u'type'] = data['type']; data_ml[u'type'] = data['type']\n data_sd[u'features'] = [];data_md[u'features'] = [];data_ml[u'features'] = []\n for element in data['features']:\n if 'Severely Dry' in element['properties']['scalebar']:\n rdate = datetime.datetime.strptime(element['properties']['reportdate'][:10],\"%Y-%m-%d\")\n if rdate >= week20:\n data_sd[u'features'].append(element)\n if 'Moderately Dry' in element['properties']['scalebar']:\n rdate = datetime.datetime.strptime(element['properties']['reportdate'][:10],\"%Y-%m-%d\")\n if rdate >= week20:\n data_md[u'features'].append(element)\n if 'Mildly Dry' in element['properties']['scalebar']:\n rdate = datetime.datetime.strptime(element['properties']['reportdate'][:10],\"%Y-%m-%d\")\n if rdate >= week20:\n data_ml[u'features'].append(element)\n \n cocojson_sevdry = MVLayer(\n source='GeoJSON',\n options=data_sd,\n legend_title='CoCoRaHS Condition Monitor',\n legend_extent=[-112, 36.3, -98.5, 41.66],\n feature_selection=False,\n legend_classes=[MVLegendClass('point', 'Severely Dry', fill='#67000d')],\n layer_options={'style': {'image': {'circle': {'radius': 6,'fill': {'color': '#67000d'},'stroke': {'color': '#ffffff', 'width': 1},}}}})\n\n cocojson_moddry = MVLayer(\n source='GeoJSON',\n options=data_md,\n legend_title='',\n legend_extent=[-112, 36.3, -98.5, 41.66],\n 
feature_selection=False,\n legend_classes=[MVLegendClass('point', 'Moderately Dry', fill='#a8190d')],\n layer_options={'style': {'image': {'circle': {'radius': 6,'fill': {'color': '#a8190d'},'stroke': {'color': '#ffffff', 'width': 1},}}}})\n\n cocojson_mildry = MVLayer(\n source='GeoJSON',\n options=data_ml,\n legend_title='',\n legend_extent=[-112, 36.3, -98.5, 41.66],\n feature_selection=False,\n legend_classes=[MVLegendClass('point', 'Mildly Dry', fill='#f17d44')],\n layer_options={'style': {'image': {'circle': {'radius': 6,'fill': {'color': '#f17d44'},'stroke': {'color': '#ffffff', 'width': 1},}}}})\n\n \n # Define map view options\n drought_ag_risk_map_view_options = MapView(\n height='100%',\n width='100%',\n controls=['ZoomSlider', 'Rotate', 'ScaleLine', 'FullScreen',\n {'MousePosition': {'projection': 'EPSG:4326'}},\n {'ZoomToExtent': {'projection': 'EPSG:4326', 'extent': [-130, 22, -65, 54]}}],\n layers=[tiger_boundaries,cocojson_sevdry,cocojson_moddry,cocojson_mildry,ag_vuln_kml,usdm_current,watersheds],\n view=view_options,\n basemap='OpenStreetMap',\n legend=True\n )\n\n context = {\n 'drought_ag_risk_map_view_options':drought_ag_risk_map_view_options,\n }\n\n return render(request, 'co_drought/drought_ag_risk.html', context)", "def getMeasures():", "def all_bugs_chart(request):\n labels = []\n data = []\n\n queryset = Bug.objects.values('title', 'id').order_by('-created').exclude(status='Resolved').annotate(\n bug_votes=Count('votes'))[:5]\n for entry in queryset:\n labels.append(entry['title'])\n data.append(entry['bug_votes'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })", "def data_set_characterization(request):\n admin_approved = Phenotype.objects.filter(phenotypestatus__status='A')\n user_submitted = Phenotype.objects.filter(phenotypestatus__status='U')\n sample_files = SampleFile.objects.all()\n\n admin_approved_count = len(admin_approved)\n user_submitted_count = len(user_submitted)\n\n traits_by_chrom = SNP.objects.filter(Q(snpstatus__status='A') | Q(snpstatus__status='U')).values('chromosome_num').annotate(num_traits=Count('chromosome_num'))\n\n context = {'user_submitted_count':user_submitted_count,\n 'admin_approved_count':admin_approved_count,\n 'sample_files':sample_files,\n 'traits_by_chrom':traits_by_chrom}\n\n return render(request, 'SNP_Feature_View/data_set_characterization.html', context)", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.prefetch_related('writerinwork_set')\n qs = qs.prefetch_related('writers')\n qs = qs.prefetch_related('library_release__library')\n qs = qs.annotate(models.Count('cwr_exports', distinct=True))\n qs = qs.annotate(models.Count('recordings', distinct=True))\n return qs", "def measurements():\n measurements_for_displaying = db.session.query(Measurement).all()\n return render_template('measurement/measurements.html', measurements=measurements_for_displaying)" ]
[ "0.665116", "0.59123224", "0.55685633", "0.55224574", "0.5493389", "0.5459506", "0.54581726", "0.5311828", "0.530699", "0.52737117", "0.5272512", "0.527097", "0.52511895", "0.5246451", "0.5195243", "0.5169995", "0.51681095", "0.5164607", "0.51173073", "0.50936943", "0.5049706", "0.50459784", "0.501436", "0.49599352", "0.49533662", "0.49336955", "0.4921607", "0.49154752", "0.4891702", "0.48755807" ]
0.7262409
0
This view returns the usecases of pubmed reports
def pubmed_reports(request): json_resp = {} json_resp['usecase'] = [] reps = Report.objects.all() for r in reps: if r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']: json_resp['usecase'].append(str(r.name_id)) return JsonResponse(json_resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pubmed_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n json_resp['annotated'] = 0\n json_resp['tot'] = 0\n json_resp['usecase'] = []\n languages = ['English','english']\n for el in usecases:\n use = el.name\n json_resp[use] = {}\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages,institute = 'PUBMED')\n for el in report:\n if el.batch not in batches:\n batches.append(el.batch)\n count_rep = report.count()\n\n if count_rep > 0:\n json_resp['usecase'].append(str(use))\n json_resp['tot'] = json_resp['tot'] + count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n json_resp['annotated'] = json_resp['annotated'] + groundTruths\n\n for batch in batches:\n\n json_resp[use][batch] = {}\n if batch == 'all' or batch is None:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED')\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report = Report.objects.filter(name=use,language__in=languages, institute='PUBMED',batch = batch)\n count_rep = report.count()\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s AND institute = %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch,tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print('risposta',json_resp)\n return JsonResponse(json_resp)", "def usecases(request):\n\n context = {\n\n }\n\n return render(request, 'hydraviewer/usecases.html', context)", "def summaryView(request):\n\n alert_errors = []\n alert_infos = []\n alert_filters = []\n\n runs = get_runs_from_request_filters(\n request, alert_errors, alert_infos, alert_filters\n )\n\n summary = SummaryReport(runs)\n\n context = {\n \"refs\": summary.reference_runs(),\n \"runs\": summary.runs_checked_per_type(),\n \"tk_maps\": summary.tracker_maps_per_type(),\n \"certified_runs\": summary.certified_runs_per_type(),\n \"sums\": summary.sum_of_quantities_per_type(),\n \"alert_errors\": alert_errors,\n \"alert_infos\": alert_infos,\n \"alert_filters\": alert_filters,\n }\n\n return render(request, \"certhelper/summary.html\", context)", "def 
check_PUBMED_reports(request):\n\n json_resp = {}\n json_resp['count'] = 0\n pubmed_arts = Report.objects.filter(institute = 'PUBMED')\n for el in pubmed_arts:\n if el.id_report.startswith('PUBMED'):\n json_resp['count'] += 1\n return JsonResponse(json_resp,safe=False)", "def medtag_reports(request):\n\n json_resp = {}\n json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n if not r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def check_medtag_reports(request):\n\n json_resp = {}\n json_resp['count'] = 0\n medtag_arts = Report.objects.all().exclude(institute = 'PUBMED')\n # for el in pubmed_arts:\n # if el.id_report.startswith('PUBMED'):\n json_resp['count'] = medtag_arts.count()\n return JsonResponse(json_resp,safe=False)", "def summary_list(request):\r\n qs = Summary.objects.all()\r\n context = {'summary_records': qs}\r\n return render(request, 'weather/summary_list.html', context)", "def training_report_view(request, application_slug):\n return training_report(request, application_slug, attach=False)", "def survivor_reports(request):\n\n if request.method == 'GET':\n data = {}\n total_survivors = infected = non_infected = water = food = medication = ammunition = pointslost = 0\n for i in Survivor.objects.all():\n total_survivors += 1\n if i.infected is False:\n non_infected += 1\n water += i.water\n food += i.food\n medication += i.medication\n ammunition += i.ammunition\n if i.infected is True:\n infected += 1\n pointslost += (4 * i.water)\n pointslost += (3 * i.food)\n pointslost += (2 * i.medication)\n pointslost += (1 * i.ammunition)\n\n if total_survivors != 0:\n data['Percentage of infected survivors'] = str(round((infected/total_survivors), 2) * 100) + '%'\n data['Percentage of non-infected survivors'] = str(round((non_infected/total_survivors), 2) * 100) + '%'\n data['Average amount of water by survivor'] = round(water/non_infected,1)\n data['Average amount of food by survivor'] = round(food/non_infected,1)\n data['Average amount of medication by survivor'] = round(medication/non_infected,1)\n data['Average amount of ammunition by survivor'] = round(ammunition/non_infected,1)\n data['Points lost because of infected survivor'] = pointslost\n else:\n data['Percentage of infected survivors'] = '0.0%'\n data['Percentage of non-infected survivors'] = '0.0%'\n data['Average amount of water by survivor'] = 0\n data['Average amount of food by survivor'] = 0\n data['Average amount of medication by survivor'] = 0\n data['Average amount of ammunition by survivor'] = 0\n data['Points lost because of infected survivor'] = 0\n return Response(data, status=status.HTTP_200_OK)", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n 
batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def analysis_view(request):\n return render(request, \"tracker/analysis.html\")", "def get_reports(request):\n\n inst = request.GET.get('institute',None)\n use = request.GET.get('usec',None)\n print(use)\n lang = request.GET.get('lang',None)\n batch = request.GET.get('batch',None)\n all = request.GET.get('all',None)\n actual_report = request.GET.get('actual_report',None)\n if all == 'all':\n # All the reports are returned independently of the usecase, the language or institute.\n use_obj = UseCase.objects.get(name = use)\n reps = Report.objects.filter(institute = inst,name = use_obj,language = lang).values('id_report','report_json','language')\n json_resp = {}\n json_resp['report'] = []\n\n for report in reps:\n json_rep = {}\n json_rep['id_report'] = report['id_report']\n json_rep['language'] = report['language']\n json_rep['report_json'] = report['report_json']\n json_resp['report'].append(json_rep)\n return JsonResponse(json_resp)\n\n if(inst != None and use != None and lang != None and batch != None):\n\n \"\"\" It is used in the options modal: if the reuqired combination of institute, language and usecase has 0 reports\n associated, a message is returned. 
In this case this view returns the number of reports associated to a specific \n configuration required \"\"\"\n\n rep = Report.objects.filter(institute = inst, name = use, language = lang, batch = batch)\n json_count = {'count':len(rep)}\n return JsonResponse(json_count)\n\n usecase = request.session.get('usecase',None)\n mode1 = request.session.get('mode',None)\n mode = NameSpace.objects.get(ns_id=mode1)\n language = request.session.get('language',None)\n institute = request.session.get('institute',None)\n username = request.session['username']\n batch = request.session['batch']\n token = request.GET.get('configure',None) # This parameter is set when\n\n jsonError = {'error':'something wrong with params!'}\n if usecase is not None and language is not None and institute is not None and batch is not None:\n # Get the reports associated to the usecase, language and institute of the SESSION\n reports1 = Report.objects.filter(name = usecase, language = language, institute = institute,batch=batch)\n if mode1 == 'Robot':\n # gts_r = GroundTruthLogFile.objects.filter(language = language,ns_id=mode).values('id_report')\n # gts_r1 = GroundTruthLogFile.objects.filter(language=language, ns_id=mode).order_by(\n # 'id_report').distinct('id_report').values('id_report')\n # ids1 = [el['id_report'] for el in gts_r1]\n # print(len(ids1))\n gts_r1 = GroundTruthLogFile.objects.filter(id_report__in = reports1,language = language,ns_id=mode).order_by('id_report').distinct('id_report').values('id_report')\n ids = [el['id_report'] for el in gts_r1]\n # print(len(ids))\n # print(ids == ids1)\n # for el in gts_r1:\n # # if el['id_report'] not in ids and Report.objects.filter(language = language, id_report = el['id_report'], batch = batch).exists():\n # ids.append(el['id_report'])\n\n reports1 = Report.objects.filter(id_report__in=ids,name = usecase, language = language, institute = institute,batch = batch)\n\n json_resp = {}\n json_resp['report'] = []\n if reports1.exists():\n reports = reports1.values('id_report','report_json','language')\n for report in reports:\n json_rep = {}\n json_rep['id_report'] = report['id_report']\n json_rep['language'] = report['language']\n json_rep['report_json'] = report['report_json']\n json_resp['report'].append(json_rep)\n\n json_resp['report'].sort(key=lambda json: json['id_report'], reverse=False) # Reports are sorted by ID\n # json_resp['report'].sort(key=lambda json: json['report_json']['report_id'], reverse=False) # Reports are sorted by ID\n json_resp['index'] = 0\n\n if token is not None:\n # Get the last ground truth given the session parameters.\n gt = get_last_groundtruth(username, usecase, language, institute,mode,batch)\n else:\n # Get the last ground truth of the user.\n gt = get_last_groundtruth(username,None, None, None,mode,batch)\n\n if gt is not None:\n # The index is updated and it characterizes the first report of the list shown to the user.\n id_report = gt['id_report']\n use = gt['use_case']\n lang = gt['language']\n institute = gt['institute']\n report_json = Report.objects.get(id_report = id_report, name = use, language = lang, institute = institute)\n rep_json = report_json.report_json\n index = json_resp['report'].index({'id_report':id_report,'language':lang,'report_json':rep_json})\n json_resp['index'] = int(index)\n if actual_report is not None:\n index = json_resp['report'].index(actual_report)\n json_resp['index'] = int(index)\n\n return JsonResponse(json_resp)\n else:\n return JsonResponse(jsonError,status=500)", "def report(request):\n\tuser_docs 
= Document.objects.filter(user=User.objects.get(username=request.user.username))\n\t# breakpoint()\n\tdaily_uploads = user_docs.filter(created_at__day=timezone.now().strftime(\"%d\"))\n\tmonthly_uploads = user_docs.filter(created_at__month=timezone.now().strftime(\"%m\"))\n\tyearly_uploads = user_docs.filter(created_at__year=timezone.now().strftime(\"%Y\"))\n\n\tdaily_count = daily_uploads.count()\n\tmonthly_count = monthly_uploads.count()\n\tyearly_count = yearly_uploads.count()\n\t# breakpoint()\n\n\tif 'doc_name' in request.GET:\n\t\tpdf_list = user_docs.filter(name__icontains=request.GET['doc_name'])\n\telif 'month' in request.GET:\n\t\tpdf_list = user_docs.filter(created_at__month=request.GET['month'])\n\t\t# breakpoint()\n\telif 'year' in request.GET:\n\t\tpdf_list = user_docs.filter(created_at__year=request.GET['year'])\n\telif 'from' in request.GET and 'to' in request.GET:\n\t\t# breakpoint()\n\t\tpdf_list = user_docs.filter(created_at__range=[request.GET['from'],request.GET['to']])\n\n\telse:\n\t\tpdf_list = user_docs\n\tcontext = {'daily_count': daily_count, 'monthly_count': monthly_count, 'yearly_count': yearly_count, 'pdf_list':pdf_list}\n\n\treturn render(request, 'document_manager/report.html', context)", "def admin_evaluate_reports(request):\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n \n admin = auth.get_user(request)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelmember in PanelMember.objects.filter(Q(status = 'F') | Q(status = 'Z')).filter(feedback_at = 'A'):\n thesis = panelmember.thesis\n dict = {}\n dict['title'] = thesis.title\n dict['student_full_name'] = thesis.student.first_name + \" \" + thesis.student.last_name\n dict['report'] = panelmember.feedback_with_referee_details\n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n dict['referee_name'] = panelmember.referee.user.first_name + ' ' + panelmember.referee.user.last_name\n dict['referee_id'] = panelmember.referee.id\n all_thesis.append(dict)\n \n return render(request, 'app/admin/view_finalReports.html', {\n 'title':'Final Reports',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n })\n elif request.method == \"POST\":\n form = PanelMember2Form(request.POST, request.FILES)\n \n\n thesis = int(request.POST['thesis'])\n referee = int(request.POST['referee'])\n \n if form.is_valid() and validate_pdf(request.FILES['feedback_without_referee_details']):\n referee = Referee.objects.get(id = referee)\n thesis = Thesis.objects.get(id = thesis)\n panelmember = PanelMember.objects.get(thesis = thesis,referee = referee)\n panelmember.feedback_at = 'G'\n \n time = str(datetime.datetime.now())\n timestamp = ''\n for i in time:\n if not (i == ':' or i == '-'):\n timestamp += i\n request.FILES['feedback_without_referee_details'].name = \"Evaluation_Report_\"+thesis.student.user.username+\"_\"+timestamp+\".pdf\"\n \n panelmember.feedback_without_referee_details = request.FILES['feedback_without_referee_details']\n panelmember.save()\n\n total_feedbacks = PanelMember.objects.filter(thesis = thesis, feedback_at = 'G').count()\n if total_feedbacks == thesis.indian_referees_required + thesis.foreign_referees_required:\n _update_student_status(thesis, STATUS_ID_THESIS_FEEDBACKS_RECEIVED) \n\n # send notification to all guide\n send_notification_to_all_guides(admin, thesis, \"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name)\n # email\n subject = 
\"[Feed Back reports] of the Thesis titled\" + thesis.title\n content = \"<br>Dear Sir/Madam,</br><br></br><br></br>\"+\"A feedback report has been sent of student \" + thesis.student.first_name + \" \" + thesis.student.last_name +'. Please Check the PhD Portal for more details.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n \n email = []\n\n for thesisGuide in ThesisGuide.objects.filter(thesis = thesis):\n receiver = Faculty.objects.get(user = thesisGuide.guide.user)\n email.append(receiver.email)\n\n send_email_task.delay(email, subject, content)\n return redirect(reverse(admin_evaluate_reports))\n else:\n return redirect(reverse(URL_BAD_REQUEST))\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def data_set_characterization(request):\n admin_approved = Phenotype.objects.filter(phenotypestatus__status='A')\n user_submitted = Phenotype.objects.filter(phenotypestatus__status='U')\n sample_files = SampleFile.objects.all()\n\n admin_approved_count = len(admin_approved)\n user_submitted_count = len(user_submitted)\n\n traits_by_chrom = SNP.objects.filter(Q(snpstatus__status='A') | Q(snpstatus__status='U')).values('chromosome_num').annotate(num_traits=Count('chromosome_num'))\n\n context = {'user_submitted_count':user_submitted_count,\n 'admin_approved_count':admin_approved_count,\n 'sample_files':sample_files,\n 'traits_by_chrom':traits_by_chrom}\n\n return render(request, 'SNP_Feature_View/data_set_characterization.html', context)", "def list(self, request, scope=None):\n\n qs = self.get_queryset()\n if scope == 'summary':\n total = qs.aggregate(total=Sum('total_value'))['total'] or 0.0\n return Response({'total_investment': total}, status=200)\n else:\n serializer = self.get_serializer(qs, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def referee_evaluate_synopsis(request):\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n \n user = auth.get_user(request)\n referee = Referee.objects.get(user = user)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelMember in PanelMember.objects.filter(referee = referee).filter(status = 'S'):\n thesis = panelMember.thesis\n dict = {}\n dict['title'] = thesis.title\n dict['student_full_name'] = thesis.student.first_name + \" \" + thesis.student.last_name\n dict['synopsis'] = thesis.synopsis\n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n \n all_thesis.append(dict)\n \n return render(request, 'app/referee/evaluate_synopsis.html', {\n 'title':'Unevaluated PhD Synopsis',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n })\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def get_stats_array_per_usecase(request):\n\n mode = request.GET.get('mode',None)\n usern = request.GET.get('member',request.session['username'])\n username = User.objects.get(username=usern, ns_id=mode)\n language = request.GET.get('language',request.session['language'])\n institute = request.GET.get('institute',request.session['institute'])\n batch = request.GET.get('batch',request.session['batch'])\n json_dict = {}\n js = {}\n js['original'] = {}\n js['percent'] = {}\n json_dict['medtag'] = get_array_per_usecase(username,mode,language,institute,batch)\n json_dict['pubmed'] = get_array_per_usecase_PUBMED(username,mode,language,institute,batch)\n\n\n # print(json_dict)\n return JsonResponse(json_dict)", "def Reports(request):\n assert isinstance(request, HttpRequest)\n iscapable =False\n if request.user.username in 
get_librarians():\n iscapable=True;\n #getting books per each department\n booksperdepart={}\n borrowedperStudent={}\n ordersplacedbylibrairans={}\n \n books=get_valid_Books()\n invent=get_Inv()\n for k,v in books.items():\n if v.dpt_id.name not in booksperdepart.keys():\n booksperdepart[v.dpt_id.name]=v.invt.qty\n else:\n li =booksperdepart[v.dpt_id.name]\n li+=v.invt.qty\n booksperdepart[v.dpt_id.name]=li\n libmem =get_libmems()\n borrowed=get_Borrowed()\n for k,v in borrowed.items():\n composite=v.cwid.cwid.stu_name+\" - \"+v.cwid.cwid.stu_id\n if composite not in borrowedperStudent.keys():\n borrowedperStudent[composite]=1\n else:\n li =borrowedperStudent[composite]\n li+=1\n borrowedperStudent[composite]=li\n librianorders=get_LibrarianOrders()\n for k,v in librianorders.items():\n composite=v.lb_id.name+\" - \"+v.lb_id_id\n if composite not in ordersplacedbylibrairans.keys():\n ordersplacedbylibrairans[composite]=[list([v.i_id.i_id.title,v.qty,v.i_id.i_id.dpt_id.name,v.status])]\n else:\n li =ordersplacedbylibrairans[composite]\n li.append(list([v.i_id.i_id.title,v.qty,v.i_id.i_id.dpt_id.name,v.status]))\n ordersplacedbylibrairans[composite]=li\n\n \n\n\n \n\n return render(\n request,\n 'app/reports.html',\n {\n 'title':'Reports Page',\n 'perdptbks':list(zip(booksperdepart.keys(),booksperdepart.values())),\n 'peruserbks':list(zip(borrowedperStudent.keys(),borrowedperStudent.values())),\n 'perlibrarian':list(zip(ordersplacedbylibrairans.keys(),ordersplacedbylibrairans.values())),\n 'iscapable':iscapable,\n 'year':datetime.now().year,\n }\n )", "def get_test_report(request, **kwargs): \n\t\n #Fetching the details of the selected event\n test_list = sidecar.events.test_report(project_id=kwargs['test_id'])\n report_list = []\n\t\n #Creating the list for the report\n for tests in test_list._logs:\n\tjson_test = json.loads(tests['data'])\n\ttests['success'] = json_test['success'] \n\ttests['time'] = json_test['time']\n\ttests['test_cases'] = json_test['test_cases']\n\treport_list.append(tests)\n\n #Making the context and sending to template\n context = {\n \"page_title\": _(\"Test Results\"),\n \"tests\": report_list\n }\n return render(request, 'rally_dashboard/events/test_detail.html', context)", "def public_workouts(request):\n practices = Practice.objects.all()\n individuals = Individual.objects.all()\n workouts = Workout.objects.all()\n c = Context({\n 'workout_list': workouts,\n 'practice_list': practices,\n 'individuals_list': individuals,\n 'title': 'Community Workouts',\n })\n return render_to_response(\"workouts/list.html\", c,\n context_instance=RequestContext(request))", "def summary(request):\n\n # Generate counts of some of the main objects\n num_courses = models.Course.objects.all().count()\n num_quizzes = models.Quiz.objects.all().count()\n num_questions = models.Question.objects.count()\n num_students = models.User.objects.count()\n num_visits = request.session.get('num_visits', 1)\n request.session['num_visits'] = num_visits + 1\n\n context = {\n 'num_courses': num_courses,\n 'num_quizzes': num_quizzes,\n 'num_questions': num_questions,\n 'num_students': num_students,\n 'num_visits': num_visits,\n }\n\n return Response(context)", "def analysis():\n\n response_all_doctors_and_appointments = requests.post(server_url + 'doctor/all_doctors_and_all_appointments')\n doctors_and_appointments = response_all_doctors_and_appointments.json()\n\n return render_template('clerks/analysis.html', doctors_and_appointments=doctors_and_appointments)", "def pyp_reports(request):\n 
student_id = int(get_from_matchdict('id', request.matchdict))\n\n pdf = get_from_matchdict('pdf', request.matchdict)\n check = request.params.get('check')\n if check and check.lower() == 'true':\n check = True\n else:\n check = False\n\n internal_check = request.params.get('internal_check')\n\n mb_user = request.session.get('mb_user', None)\n if not mb_user:\n # FIXME: Need to re-do it\n pass\n # api_token = request.params.get('api_token')\n # if not api_token or api_token != gns.config.managebac.api_token:\n # return HTTPForbidden()\n elif mb_user.type.startswith('Advisor') or mb_user.type == 'Account Admins':\n # let them in\n pass\n else:\n return HTTPForbidden()\n\n term_id = gns.config.managebac.current_term_id\n with DBSession() as session:\n try:\n rep_statement = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id,\n # PrimaryReport.homeroom_comment!=''\n )\n stu_statement = session.query(Students).filter_by(id=student_id)\n student = stu_statement.one()\n report = rep_statement.one()\n gns.tutorial(\"Got the target student\",edit=(stu_statement, '.sql'))\n gns.tutorial(\"Got Primary report with course information\", edit=(rep_statement, '.sql'))\n except NoResultFound:\n if pdf:\n # raw_input('no report entry for this student: {} with term_id {}'.format(student_id, term_id))\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n except MultipleResultsFound:\n print(\"Issue with database!\")\n raise HTTPInternalServerError(\"Issue with database!\")\n\n title = u\"IGB International School (June 2016): Student Report for {} {}\".format(student.first_name, student.last_name)\n\n # This bit is the only manual info that isn't on managebac\n uoi_table = {\n -1: {\n # ey sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"Playing and learning together enables us to come to new understandings.\"),\n 2: dict(title=\"Sharing The Planet\", central_idea=\"Our lives are interconnected with living things.\"),\n # ey sem 2\n 3: dict(title=\"How the World Works\", central_idea=\"Water is all around us and has many uses.\"),\n 4: dict(title=\"How We Express Ourselves\", central_idea=\"Stories inform, provoke us and provide enjoyment.\"),\n },\n 0: {\n # kg sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"We are part of a community who work, learn, and play together\"),\n 2: dict(title=\"How We Organise Ourselves\", central_idea=\"Communities create systems to fullfill a need.\"),\n 3: dict(title=\"Where We Are in Place and Time\", central_idea=\"Shelters look different and serve a purpose.\"),\n\n # kg sem 2\n 4: dict(title=\"Sharing the Planet\", central_idea=\"People's choices and actions impact the environment and their community.\"),\n 5: dict(title=\"How the World Works\", central_idea=\"Our body and man made resources help protect us from the natural environment.\"),\n 6: dict(title=\"How We Express Ourselves\", central_idea=\"An audience can be engaged through performance.\")\n },\n 1: {\n # gr1 sem 1\n 1: dict(title=\"How we organize ourselves\", central_idea=\"Humans use tools and strategies to understand and organise their environment.\"),\n 2: dict(title=\"Who We Are\", central_idea=\"Games provide us with opportunities to develop an understanding of ourselves and others.\"),\n 3: dict(title=\"How We Express Ourselves\", central_idea=\"Celebrations are an opportunity to reflect and appreciate cultures and 
beliefs.\"),\n # gr1 sem 2\n 4: dict(title=\"How the World Works\", central_idea=\"Machines make a difference to the way we live our lives.\"),\n 5: dict(title=\"Sharing the Planet\", central_idea=\"Water is essential to life and is a limited resource to many.\"),\n 6: dict(title=\"Where We Are in Place and Time\", central_idea=\"Clocks are a universal measurement tool of time that have had an impact in the past and the present.\"),\n },\n 2: {\n # gr2 sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"With rights come responsibilities.\"),\n 2: dict(title=\"How We Express Ourselves\", central_idea=\"Cultures tell stories in different ways and for different reasons.\"),\n 3: dict(title=\"How We Organize Ourselves\", central_idea=\"Number system provide a common language we can use to make sense of the world.\"),\n # gr2 sem 2\n 4: dict(title=\"Sharing The Planet\", central_idea=\"Plants sustain life on earth and we have a responsible role to play\"),\n 5: dict(title=\"Where we are in Place and Time\", central_idea=\"Influence can change people and their environment.\"),\n 6: dict(title=\"How the World Works\", central_idea=\"Forces are a vital part of our survival.\"),\n },\n 3: {\n # gr3 sem 1\n 1: dict(title=\"How We Organise Ourselves\", central_idea=\"Communication connects people.\"),\n 2: dict(title=\"Sharing the Planet\", central_idea=\"People can conserve the world's resources through responsible behaviours\"),\n 3: dict(title=\"Where We are in Place and Time\", central_idea=\"Innovations from past civilizations have an influence on the present\"),\n # gr3 sem 2\n 4: dict(title=\"How the World Works\", central_idea=\"Safe structures are designed and built for purpose and consider the environment and materials.\"),\n 5: dict(title=\"Who We Are\", central_idea=\"Communication connects people and communities.\"),\n 6: dict(title=\"How We Express Ourselves\", central_idea=\"Nature can inspire people to express their creativity.\"),\n },\n 4: {\n # gr4 sem 1\n 1: dict(title=\"How We Express Ourselves\", central_idea=\"Media influences how we think and the choices we make.\"),\n 2: dict(title=\"Sharing the Planet\", central_idea=\"Organisms rely on one another to balance ecosystems.\"),\n 3: dict(title=\"How we Organise Ourselves\", central_idea=\"Societies establish systems for trade and commerce to meet needs and wants.\"),\n # gr4 sem 2\n 4: dict(title=\"Where We Are in Place and Time\", central_idea=\"The quest for understanding has led to exploration and discovery.\"),\n 5: dict(title=\"How The World Works\", central_idea=\"Earth has formed over time and is still changing.\"),\n 6: dict(title=\"Who We Are\", central_idea=\"People's beliefs influence their actions.\"),\n },\n 5: {\n # gr5 sem 1\n 1: dict(title=\"How we Organise Ourselves\", central_idea=\"All societies have rules and reasons for these rules.\"),\n 2: dict(title=\"Where We Are in Place and Time\", central_idea=\"Malaysia's cultural diversity has been shaped by its history.\"),\n 3: dict(title=\"How the World Works\", central_idea=\"Changes to matter can be of a chemical and/or physical nature.\"),\n # gr5 sem 2\n 4: dict(title=\"Sharing The Planet\", central_idea=\"The choices we make during moments of conflict affect our relationships\"),\n 5: dict(title=\"How We Express Ourselves: Exhibition\", central_idea=\"Artists seek to evoke an emotional response from their audience.\"),\n 6: dict(title=\"Who We Are\", central_idea=\"External and internal factors cause changes in our lives\"),\n },\n }\n\n chinese_teachers = 
{\n 10792613: [11203970, 10836999, 10912649, 10863230, 11544715, 11707916, 11609996, 11707918, 11708046, 10912651, 11707928, 11274137, 11707932, 11707934, 11204000, 11204641, 11204001, 11708067, 11270692, 11707940, 11204385, 11563304, 11204008, 11153068, 11573550, 11707952, 10882225, 11204017, 11707957, 10834618, 10866874, 11080380, 10893375, 11707840, 11190340, 10834630, 11611847, 10834633, 10834636, 11693517, 11707984, 11203923, 11707859, 10834645, 10834648, 10834649, 10834651, 11707870, 11182305, 11203938, 11200870, 10973671, 11707882, 11708014, 11203950, 11203952, 11708018, 11203954, 10882162, 11633398, 11707900, 11538429, 11124222, 11135103, 11737995, 11621139, 11707870, 10882159], # xiaopiong\n 11256632: [11204609, 10836994, 11707907, 11135108, 10836999, 11135112, 10837001, 11203979, 10865037, 11707924, 11621141, 11203988, 11204377, 11173915, 10913691, 11204637, 10856823, 11204383, 11204640, 11707939, 11204392, 11614634, 11364525, 10882226, 11204660, 11190071, 10834616, 10834617, 11464377, 10866873, 10866876, 10834621, 10834622, 10866877, 10856636, 11578945, 11611841, 10893379, 10834628, 10834625, 11611847, 10834635, 10834640, 10834642, 10834643, 11930324, 11707860, 11203926, 11707990, 11426392, 11502297, 11578839, 11707869, 11708005, 10834661, 11203946, 11324785, 11124210, 10863222, 11124215, 10856824, 11203961, 10856826, 11124219, 11204605, 11707902, 10986488], # nancy\n }\n\n students_chinese_teachers = {}\n\n for teacher_id, student_ids in chinese_teachers.items():\n with DBSession() as session:\n teacher = session.query(Teachers).filter_by(id=teacher_id).one()\n for this_student in student_ids:\n students_chinese_teachers[this_student] = teacher\n\n bahasa_teachers = {\n 10872708: [10908165, 10856828],\n }\n students_bahasa_teachers = {}\n for teacher_id, student_ids in bahasa_teachers.items():\n with DBSession() as session:\n teacher = session.query(Teachers).filter_by(id=teacher_id).one()\n for this_student in student_ids:\n students_bahasa_teachers[this_student] = teacher\n\n if 'Grade' in report.course.name or 'Kindergarten' in report.course.name:\n which_folder = 'grades'\n template = 'frontend:elem_reports/templates/student_pyp_report.pt'\n\n with DBSession() as session:\n try:\n rep_statement = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n options(joinedload('sections')).\\\n options(joinedload('sections.learning_outcomes')).\\\n options(joinedload('sections.teachers')).\\\n options(joinedload('sections.strands')).\\\n options(joinedload('teacher')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id\n )\n att_statement = session.query(Absences).filter_by(term_id=term_id, student_id=student_id)\n\n attendance = att_statement.one()\n report = rep_statement.one()\n\n gns.tutorial(\"Got K-5 report info with joined information\", edit=(rep_statement, '.sql'), banner=True)\n except NoResultFound:\n if pdf:\n # raw_input(\"No K-5 report entry\")\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n\n subject_rank = {\n 'language': 0,\n 'mathematics': 1,\n 'unit of inquiry 1': 2,\n 'unit of inquiry 2': 3,\n 'unit of inquiry 3': 4,\n 'unit of inquiry 4': 4.1,\n 'unit of inquiry 5': 4.2,\n 'unit of inquiry 6': 4.3,\n 'art': 5,\n 'music': 6,\n 'physical education': 7,\n 'bahasa melayu': 8,\n 'chinese': 9,\n 'host nation': 10,\n 'self-management': 10000\n }\n report.sections = sorted([section for section in report.sections if 
subject_rank.get(section.name.lower(), 10001) < 10000], key=lambda x: subject_rank.get(x.name.lower(), 1000))\n report.sections = [section for section in report.sections if section.comment]\n\n # Only output sections that have any data in them\n # Comment out during development\n # report.sections = [section for section in report.sections if section.comment]\n\n if 'Kindergarten' in report.course.grade:\n grade_norm = 0\n else:\n grade_norm = int(re.sub(\"[^0-9]\", \"\", report.course.grade))\n\n rotate_list = [0, 1, 2, 5, 9]\n pagination_list = [0, 1, 4, 7, 10]\n\n for section in report.sections:\n section.rank = subject_rank.get(section.name.lower())\n report.sections = [s for s in report.sections if s.rank not in [4.1, 4.2, 4.3]] # skip\n\n gns.tutorial(\"Formatting each subject area in this order: {}\".format(\", \".join([r.name for r in report.sections])), banner=True)\n for section in report.sections:\n # Substitute the correct Chinese teachers based on manual info above\n # Do first so all subsequent operations take place properly\n if section.rank == 9 and student.id in students_chinese_teachers:\n section.teachers = [students_chinese_teachers.get(student.id)]\n\n if section.rank == 8 and student.id in students_bahasa_teachers:\n # Host Nations? and Bahasa mixed up maybe?\n section.teachers = [students_bahasa_teachers.get(student.id)]\n\n section.append_uoi_table = section.rank == 4\n section.display_rotated = section.rank in rotate_list\n\n if section.rank in [2]:\n section.organization_header = 'Units of Inquiry'\n section.name_after = \"\"\n elif section.rank in [3, 4]:\n section.organization_header = 'skip'\n section.name_after = \"\"\n else:\n section.organization_header = section.name + ' (' + \" & \".join([s.first_name + ' ' + s.last_name for s in section.teachers]) + ')'\n section.name_after = \"\"\n\n # Set the unit title if it needs to be\n if section.rank in [2, 3, 4, 4.1, 4.2, 4.3]:\n which_uoi = int(re.sub(\"[^0-9]\", \"\", section.name))\n section.name = uoi_table.get(grade_norm)[which_uoi]['title']\n\n # Determine pagination\n if section.rank in pagination_list: # TODO What about more than two inquiry units?\n section.pagination = True\n else:\n section.pagination = False\n\n section.learning_outcomes = sorted(section.learning_outcomes, key=lambda x: x.which)\n\n # Standardize the headings\n if section.rank in [2, 3, 4, 4.1, 4.2, 4.3]:\n section.name = section.name.title()\n section.name_after = uoi_table.get(grade_norm)[which_uoi]['central_idea']\n\n en_dash = u'\\u2013'\n for outcome in section.learning_outcomes:\n\n if section.rank in [2, 3, 4]:\n # Unit of inquiry\n outcome.heading = \"\"\n\n elif section.rank not in [0, 1]:\n outcome.heading = \"\" # blank\n\n else:\n # If it's a subject that we care to keep the data, standardize the format:\n outcome.heading = outcome.heading.replace(en_dash, '-')\n match = re.match('(.*)-', outcome.heading)\n if match:\n outcome.heading = match.group(1).strip()\n\n # Evaluates and adds data to items\n old_heading = None\n for outcome in section.learning_outcomes:\n\n if outcome.heading != old_heading:\n # Mark that indicates we need to evaluate\n\n if section.rank in [0, 1]:\n # Determine the effort assigned by the teacher for this\n effort = [s.selection for s in section.strands if s.label_titled.startswith(outcome.heading)]\n effort = effort[0] if len(effort) == 1 else (effort[0] if len(set(effort)) == 1 else \"<?>\")\n else:\n effort = [s.selection for s in section.strands if s.selection]\n effort = effort[0] if 
len(set(effort)) == 1 else str(effort)\n outcome.effort = {'G': \"Good\", 'N': \"Needs Improvement\", 'O': \"Outstanding\"}.get(effort, None)\n\n if not outcome.effort and internal_check:\n # Raise a problem here\n raise ReportIncomplete('something') # FIXME: There is no report incomplete exception\n\n old_heading = outcome.heading\n\n if not outcome.selection and internal_check:\n raise ReportIncomplete('something')\n gns.tutorial(\"Completed formatting of {} section\".format(section.name))\n\n report.sections = [s for s in report.sections if s.rank not in [4.1, 4.2, 4.3]] # skip\n\n elif 'Early' in report.course.name:\n which_folder = 'early_years'\n template = 'frontend:elem_reports/templates/student_pyp_ey_report.pt'\n\n # 1/2: semeseter\n # 0/1: early years\n\n ey_report_indicators = {\n 1: {\n 0: [\n {'name': 'Listening & Speaking', 'content': 'Learners show an understanding of the value of speaking and listening to communicate. They are using language to name their environment, to get to know each other, to initiate and explore relationships, to question and inquire.'},\n {'name': 'Viewing & Presenting', 'content': 'Learners show an understanding that the world around them is full of visual language that conveys meaning. They are able to interpret and respond to visual texts. They are extending and using visual language in more purposeful ways.'},\n {'name': 'Reading & Writing', 'content': 'Learners show an understanding that print represents the real or the imagined world. They have a concept of a \"book\", and an awareness of some of its structural elements. They use visual cues to recall sounds and the words they are \"reading\" to construct meaning.'},\n ],\n 1: [\n {'name': 'Number', 'content': 'Learners will understand that numbers are used for many different purposes in the real world. They will develop an understanding of one-to-one correspondence, be able to count and use number words and numerals to represent quantities.'},\n {'name': 'Shape and Space', 'content': 'Learners will develop an understanding that shapes have characteristics that can be described and compared.'},\n {'name': 'Pattern', 'content': 'Learners will develop an understanding that patterns and sequences occur in everyday situations. They will be able to identify and extend patterns in various ways.'},\n {'name': 'Measurement', 'content': 'Learners will develop an understanding of how measurement involves the comparison of objects and ordering.They will be able to identify and compare attributes of real objects.'},\n {'name': 'Data', 'content': 'Learners will develop an understanding of how the collection and organization of information helps to make sense of the world. They will sort and label objects by attributes and discuss information represented in graphs including pictographs and tally marks.'}\n ]\n },\n 2: {\n 0: [\n {'name': 'Listening & Speaking', 'content': 'Learners will show an understanding of the value of speaking and listening to communicate. They will use language to name their environment, to get to know each other, to initiate and explore relationships, to question and inquire.'},\n {'name': 'Viewing & Presenting', 'content': 'Learners will show an understanding that the world around them is full of visual language that conveys meaning. They will interpret and respond to visual texts. 
They will be extending and using visual language in more purposeful ways.'},\n {'name': 'Reading & Writing', 'content': 'Learners will show an understanding that print represents the real or the imagined world. They will develop the concept of a &ldquo;book&rdquo;, and an awareness of some of its structural elements. They will use visual cues to recall sounds and the words they are &ldquo;reading&rdquo; to construct meaning.'},\n ],\n 1: [\n {'name': 'Number', 'content': 'Learners will understand that numbers are used for many different purposes in the real world. They will develop an understanding of one-to-one correspondence, be able to count and use number words and numerals to represent quantities.'},\n {'name': 'Shape and Space', 'content': 'Learners will understand and use common language to describe paths, regions and boundaries of their immediate environment.'},\n {'name': 'Pattern', 'content': 'Learners will understand that patterns and sequences occur in everyday situations. They will be able to identify, describe, extend and create patterns in various ways.'},\n {'name': 'Measurement', 'content': 'Learners will develop an understanding of how measurement involves the comparison of objects and the ordering and sequencing of events. They will be able to identify, compare and describe attributes of real objects as well as describe and sequence familiar events in their daily routine.'},\n {'name': 'Data', 'content': 'Learners will develop an understanding of how the collection and organization of information helps to make sense of the world. They will sort and label objects by attributes and discuss information represented in graphs including pictographs and tally marks. The learners will discuss chance in daily events.'},\n ],\n },\n }\n with DBSession() as session:\n try: \n report = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n options(joinedload('sections')).\\\n options(joinedload('sections.learning_outcomes')).\\\n options(joinedload('sections.teachers')).\\\n options(joinedload('teacher')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id,\n ).one()\n student = session.query(Students).filter_by(id=student_id).one()\n attendance = session.query(Absences).filter_by(term_id=term_id, student_id=student_id).one()\n except NoResultFound:\n if pdf:\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n\n subject_rank = {\n 'self-management': -1,\n 'language': 0,\n 'mathematics': 1,\n 'unit of inquiry 1': 2,\n 'unit of inquiry 2': 3,\n 'unit of inquiry 3': 4,\n 'unit of inquiry 4': 4.1,\n 'unit of inquiry 5': 4.2,\n 'unit of inquiry 6': 4.3,\n 'art': 5,\n 'music': 6,\n 'physical education': 7,\n 'bahasa melayu': 8,\n 'chinese': 9,\n 'host nation': 10\n }\n\n report.sections = sorted([section for section in report.sections if subject_rank.get(section.name.lower()) < 10000], key=lambda x: subject_rank.get(x.name.lower(), 1000))\n # report.sections = report_sections\n # Filter out the un-needed units of inquiry\n # report.sections = [s for s in report.sections if s.rank <= 1 or (s.rank >= 4 and s.rank not in [4,4.1])]\n\n\n # Only output sections that have any data in them\n # Comment out during development\n # report.sections = [section for section in report.sections if section.comment and subject_rank.get(section.name.lower()) not in [2, 3]]\n\n grade_norm = -1\n\n pagination_list = [0, 3, 7, 10]\n\n for section in report.sections:\n\n section.rank = 
subject_rank.get(section.name.lower())\n\n if section.rank == -1:\n # blurb for self-management\n section.blurb = \"<i><p>Within the PYP, the approaches to learning skill of self management encompasses the development of gross and fine motor skills, spatial awareness, safety, healthy lifestyles, codes of behaviour and informed choices. </p><p>In an Early Years context these are reflected through the play based approach to teaching and learning. Reporting about self management in Early Years focuses on the whole child, stressing the importance of developing independence, social and emotional skills such as making relationships, managing feelings and behaviour, self confidence and self awareness. In addition the development of physical skills (moving and handling, health and self care) are highlighted as well. </p></i>\"\n else:\n section.blurb = \"\"\n\n if section.rank in [0, 1]: # Could be Lanugage & Maths, set up the report indicators\n ey = int('Early Years 1' in report.course.name) + 1\n section.report_indicators = ey_report_indicators[ey][section.rank] # change this to 2 later\n else:\n section.report_indicators = None\n\n # Substitute the correct Chinese teachers based on manual info above\n if section.rank == 9 and student.id in students_chinese_teachers:\n section.teachers = [students_chinese_teachers.get(student.id)]\n\n if section.rank in [999999]: # Turn this off\n section.organization_header = \"Units of Inquiry\"\n section.name_after = \"\"\n elif section.rank in [4, 4.1]:\n section.organization_header = 'skip'\n section.name_after = \"\"\n else:\n section.organization_header = None\n section.name_after = ' (' + \" & \".join([s.first_name + ' ' + s.last_name for s in section.teachers]) + ')'\n\n if section.rank in [2, 3, 4, 4.1, 4.2,4.3,4.4]:\n which_uoi = int(re.sub(\"[^0-9]\", \"\", section.name))\n section.name = uoi_table.get(grade_norm)[which_uoi]['title']\n section.name_after = \"\"\n\n # Determine pagination\n if section.rank in pagination_list: #TODO What about more than two inquiry units?\n section.pagination = True\n else:\n section.pagination = False\n\n if section.rank in [2, 3, 4, 4.1, 4.2,4.3,4.4]:\n section.name = section.name.title() \n section.name_after = uoi_table.get(grade_norm)[which_uoi]['central_idea']\n\n section.learning_outcomes = sorted(section.learning_outcomes, key=lambda x: x.which)\n\n # ey sections\n report.sections = [s for s in report.sections if s.rank not in [4, 4.1]]\n\n\n options={\n 'quiet': '',\n 'disable-javascript': '',\n 'encoding': 'utf-8',\n 'header-html': 'http://igbisportal.vagrant:6543/header-html',\n 'header-spacing': '5',\n\n\n 'footer-html': 'http://igbisportal.vagrant:6543/footer-html?student_id={}'.format(student.id),\n\n 'print-media-type': '',\n\n 'margin-left': '3mm',\n 'margin-right': '3mm',\n 'margin-bottom': '10mm'\n }\n\n\n if check:\n stu = student.first_nickname_last_studentid\n message = []\n for s in report.sections:\n if not s.teachers:\n message.append(\"No teacher assigned in {}\".format(s.name))\n #raise HTTPNotFound(\"##No teacher assigned for {} in {}##\".format(stu, s.name))\n if not s.comment:\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} missing {} comment'.format(teachers, s.name))\n #raise HTTPNotFound('##{} missing {} comment for {}##'.format(teachers, s.name, stu))\n\n if s.learning_outcomes and not 'Early' in report.course.name:\n\n if s.overall_comment == 'N/A':\n for o in s.learning_outcomes:\n if hasattr(o, 'effort') and not o.effort:\n teachers = 
\",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter {} effort for {}'.format(teachers, o.heading, s.name))\n # raise HTTPNotFound()\n if not o.selection:\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter {} indication for {}'.format(teachers, o.heading, s.name))\n # raise HTTPNotFound('##{} did not enter indication for {} in {}##'.format(teachers, s.name, stu))\n\n elif s.overall_comment == '':\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter effort for single subject {}'.format(teachers, s.name)) \n\n if message:\n raise HTTPNotFound('##\\n({}) {}:\\n\\t{}##'.format(student.grade, student.first_nickname_last_studentid, \"\\n\\t\".join(message)))\n\n raise HTTPFound()\n\n with DBSession() as session:\n try:\n record = session.query(db.table.PrimaryReportLastUpdated).filter(db.table.PrimaryReportLastUpdated.student_id == student.id).one()\n last_updated = record.timestamp\n last_updated_date = last_updated.strftime(gns.config.reports.last_updated_format)\n except NoResultFound:\n last_updated_date = '<Unknown>'\n except MultipleResultsFound:\n last_updated_date = '<Internal DB Error: Multiple results found>'\n\n if pdf:\n result = render(template,\n dict(\n title=title,\n report=report,\n student=student,\n attendance=attendance,\n pdf=True,\n download_url=\"\",\n link_to_mb=\"\",\n last_updated=\"\",\n ),\n request=request)\n import pdfkit # import here because installation on server is hard\n\n prefix_file_name = '{}/pdf-downloads/{}/{}-Grade{}-{}-[{}]-'.format(\n gns.config.paths.home,\n which_folder,\n '55048',\n grade_norm,\n student.first_name + '-' + student.last_name,\n student.student_id\n )\n\n full_file = '{}({}).pdf'.format(prefix_file_name, last_updated_date)\n\n for _file in glob.glob(\"{}.*\".format(prefix_file_name)):\n # Remove any old stuff still lingering in there\n if _file != full_file:\n os.remove(_file)\n\n path = '{}/pdf-downloads/{}/{}-Grade{}-{}-[{}]-({}).pdf'.format(\n gns.config.paths.home,\n which_folder,\n '55048',\n grade_norm,\n student.first_name + '-' + student.last_name,\n student.student_id,\n last_updated_date\n )\n\n gns.tutorial(\"Sending to pdfkit, also saving to {path}\".format(path=path), edit=(result, '.pretty'), banner=True)\n try:\n pdffile = pdfkit.from_string(result, path, options=options) # render as HTML and return as a string\n except OSError as err:\n return HTTPInternalServerError(\"Problem with file? 
{}\".format(err))\n\n pdffile # not used\n if pdf.lower() == \"download\":\n content_type = \"application/octet-stream\"\n\n response = FileResponse(path, request=request, content_type=content_type)\n response.content_disposition = u\"attachment; filename={}.pdf\".format(title)\n return response\n\n else:\n content_type = \"application/pdf\"\n response = FileResponse(path, request=request, content_type=content_type, charset='utf-8')\n return response\n\n else:\n # Check when it was last updated\n\n if gns.tutorial_on:\n import pkg_resources\n package, filename = template.split(\":\")\n abspath = pkg_resources.resource_filename(*template.split(\":\"))\n from chameleon import PageTemplateFile\n template_file = PageTemplateFile(abspath)\n gns.tutorial(\"Loaded the template\", edit=(template_file.read(), '.html'), banner=True)\n result = render(template,\n dict(\n title=title,\n report=report,\n student=student,\n attendance=attendance,\n pdf=False,\n download_url=\"/students/{}/pyp_report/download/\".format(student.id),\n link_to_mb=\"https://igbis.managebac.com/classes/{}/pyp-gradebook/tasks/term_grades?student={}&term={}\".format(report.course.id, student.id, gns.config.managebac.current_term_id),\n last_updated=last_updated_date,\n ),\n request=request\n )\n response = Response(result)\n return response", "def referee_evaluate_thesis(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n\n \n user = auth.get_user(request)\n referee = Referee.objects.get(user = user)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelMember in PanelMember.objects.filter(referee = referee).filter(status = 'A'):\n thesis = panelMember.thesis\n dict = {}\n dict['title'] = thesis.title\n\n dict['student_full_name'] = thesis.student.first_name + ' ' + thesis.student.last_name\n dict['synopsis'] = thesis.synopsis\n dict['thesis'] = thesis.thesis\n dict['keywords'] = []\n\n if panelMember.answer_for_questions == True:\n if thesis.thesis_modifications == \"NULL\" or thesis.thesis_modifications == \"\":\n dict['thesis_modifications'] = None\n else:\n dict['thesis_modifications'] = thesis.thesis_modifications\n else:\n dict['thesis_modifications'] = None\n\n\n for keys in ThesisKeyword.objects.filter(thesis = thesis):\n dict['keywords'].append((IEEEKeyword.objects.get(id = keys.keyword.id)).keyword)\n \n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n \n all_thesis.append(dict)\n return render(\n request,\n 'app/referee/evaluate_thesis.html',\n {\n 'title':'Evaluate Thesis',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n }\n )\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def achieve_viewall(request):\n is_loggedin, username = get_session_variables(request)\n contrib_list = []\n article_list = []\n gsoc_list = []\n speaker_list = []\n intern_list = []\n contest_participant_list = []\n icpc_participants_regional_list = []\n icpc_participants_final_list = []\n\n contrib_list_all = Contribution.objects.all()\n contrib_list = Contribution.objects.all()[:5]\n article_list = Article.objects.all()[:5]\n gsoc_list = Gsoc.objects.all()[:5]\n speaker_list = Speaker.objects.all()[:5]\n intern_list = Intern.objects.all()[:5]\n contest_list = Contest_won.objects.all()[:5]\n\n \n contrib_org = {}\n if contrib_list_all:\n for contrib in contrib_list_all:\n if contrib.org_name not in contrib_org.keys():\n contrib_org[contrib.org_name] = 0\n\n for contrib in contrib_list:\n 
contrib_org[contrib.org_name] += 1\n\n if contest_list:\t\n contest_participant_list = []\n\tfor contest_won_obj in contest_list:\t\n\t c_id = contest_won_obj.contest_id\n\t c_p_objs = Contest_won_participant.objects.filter(contest_id = c_id)\n\t contest_participant_list.extend(c_p_objs)\n \n icpc_list_regionals = ACM_ICPC_detail.objects.filter(level='regional').order_by('ranking')[:2]\n if icpc_list_regionals:\n for icpc_obj in icpc_list_regionals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n icpc_participants_regional_list.append(icpc_participant_list)\n\n icpc_list_finals = ACM_ICPC_detail.objects.filter(level='finals').order_by('ranking')[:2]\n if icpc_list_finals:\n for icpc_obj in icpc_list_finals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n icpc_participants_final_list.append(icpc_participant_list)\n\n return render_to_response('achievement/achievement_viewall.html',\\\n\t\t{'username':username, \\\n 'is_loggedin':is_loggedin, \\\n 'contrib_list':contrib_list, \\\n 'contrib_org':contrib_org,\\\n 'article_list':article_list, \\\n 'gsoc_list':gsoc_list, \\\n 'speaker_list':speaker_list, \\\n 'intern_list':intern_list, \\\n 'contest_list':contest_list, \\\n 'contest_participant_list':contest_participant_list, \\\n 'icpc_participants_final_list':icpc_participants_final_list, \\\n 'icpc_participants_regional_list':icpc_participants_regional_list}, \\\n RequestContext(request))", "def display_report(request, **kwargs):\n\n #Getting the report of the tests \n try:\n outputStr = sidecar.events.test_logs(project_id=kwargs['project_id'])\n outputStr = outputStr.results\n except Exception, e:\n outputStr = \"Updating the logs...\"\n \n #Making the output\n context = {\n \"page_title\": _(\"Test Report\"),\n \"test_report\": outputStr\n }\n return render(request, 'rally_dashboard/events/view_report.html', context)", "def performance_analysis(request):\n context = {}\n\n return render(request, 'classroom_main/performance_analysis.html', context)", "def get_data(request):\n\n json_resp = {}\n # reports = Report.objects.filter(name = UseCase.objects.get(name=request.session['usecase']),institute = request.session['institute'],language = request.session['language'])\n\n json_resp['reports'] = []\n institute = request.GET.get('institute',request.session['institute'])\n usecase = request.GET.get('usecase',request.session['usecase'])\n print(usecase)\n language = request.GET.get('language',request.session['language'])\n ns_human = NameSpace.objects.get(ns_id='Human')\n ns_robot = NameSpace.objects.get(ns_id='Robot')\n user_robot = User.objects.get(username='Robot_user', ns_id=ns_robot)\n # usec = UseCase.objects.get(name = usecase)\n # reports = Report.objects.filter(name = usec,institute = institute, language = language).values('id_report')\n # gt_report = GroundTruthLogFile.objects.filter(language = 
language).exclude(username = user_robot,id_report__in=reports).order_by('id_report').distinct('id_report')\n cursor = connection.cursor()\n cursor.execute(\"SELECT r.id_report,r.language,r.report_json,r.name,r.institute,r.batch,COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND r.institute = %s AND r.language = %s AND g.username != %s GROUP BY (r.id_report,r.language,r.report_json,r.name,r.institute,r.batch)\",[usecase,institute,language,'Robot_user'])\n gt_report_ids = []\n indice = 0\n st = time.time()\n for el in cursor.fetchall():\n\n # report = Report.objects.get(language = language, id_report = el.id_report_id)\n gt_report_ids.append(el[0])\n # print(str(indice))\n indice +=1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 1\n gt_robot = 0\n\n rep = json.loads(el[2])\n new_rep = {}\n for key in rep.keys():\n nkey = key+ '_0'\n new_rep[nkey] = rep[key]\n\n total = el[6]\n\n new_rep['usecase'] = usecase\n new_rep['id_report_not_hashed'] = rep.get('report_id',el[0])\n new_rep['id_report'] = el[0]\n new_rep['institute'] = institute\n new_rep['language'] = language\n new_rep['batch'] = el[5]\n\n json_resp['reports'].append({'total':total, 'report':new_rep,'id_report':el[0], 'language':language})\n\n usec = UseCase.objects.get(name = usecase)\n reports = Report.objects.filter(institute = institute,language = language,name = usec).exclude(id_report__in=gt_report_ids)\n # print(reports.count())\n indice = 0\n st = time.time()\n for el in reports:\n report = el\n # print(str(indice))\n indice += 1\n # report = Report.objects.get(id_report=el.id_report, language=el.language)\n # language = report.language\n\n # user_robot = User.objects.get(username = 'Robot_user',ns_id = ns_robot)\n gt_human = 0\n gt_robot = 0\n\n rep = report.report_json\n new_rep = {}\n for key in rep.keys():\n nkey = key + '_0'\n new_rep[nkey] = rep[key]\n\n total = gt_human + gt_robot\n\n new_rep['usecase'] = report.name_id\n new_rep['id_report_not_hashed'] = rep.get('report_id', report.id_report)\n new_rep['id_report'] = report.id_report\n new_rep['institute'] = report.institute\n new_rep['language'] = report.language\n new_rep['batch'] = report.batch\n\n json_resp['reports'].append(\n {'total': total, 'report': new_rep, 'id_report': report.id_report, 'language': report.language})\n # print('elaboro1',str(end1-st1))\n tot = time.time()\n print('totale',str(tot-st))\n\n return JsonResponse(json_resp,safe=False)" ]
[ "0.6889685", "0.67816097", "0.6578456", "0.6462005", "0.6354941", "0.60960084", "0.6039205", "0.5924308", "0.59200716", "0.58842385", "0.58725315", "0.58610183", "0.58609855", "0.5834699", "0.5742797", "0.5734736", "0.572304", "0.56792724", "0.5666692", "0.56550825", "0.5648603", "0.56460035", "0.5644065", "0.5629163", "0.56243056", "0.5606261", "0.56049025", "0.5569737", "0.55219495", "0.5519488" ]
0.7125116
0
Returns the fuzz target of |benchmark|
def get_fuzz_target(benchmark): # Do this because of OSS-Fuzz-on-demand. # TODO(metzman): Use classes to mock a benchmark config for # OSS_FUZZ_ON_DEMAND. return benchmark_config.get_config(benchmark).get( 'fuzz_target', environment.get('FUZZ_TARGET'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fuzzer_benchmark_key(fuzzer: str, benchmark: str):\n return fuzzer + ' ' + benchmark", "def get_benchmark(self, benchmark):\n\t\tif not isinstance(benchmark, str) and not callable(benchmark): return benchmark\n\t\telif benchmark in self.classes:\treturn self.classes[benchmark]()\n\t\traise TypeError('Passed benchmark is not defined!')", "def findBenchFromDevice(self, device):\n return device.bench", "def fuzz():\n if FUZZ:\n time.sleep(random.random())", "def get_fuzzer(self, *, catalog: Optional[str] = None) -> BaseFuzzer:\n return self.get_fuzzer_cls(catalog=catalog)()", "def benchmark_result(self):\n return self._benchmark_id", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def oss_fuzz_benchmark(fs):\n benchmark_config_contents = yaml.dump(OSS_FUZZ_BENCHMARK_CONFIG)\n benchmark_config_file = os.path.join(utils.ROOT_DIR, 'benchmarks',\n OSS_FUZZ_BENCHMARK_NAME,\n 'benchmark.yaml')\n fs.create_file(benchmark_config_file, contents=benchmark_config_contents)\n return OSS_FUZZ_BENCHMARK_NAME", "def findBenchFromInstrument(self, instrument):\n return instrument.bench", "def get_benchmark(client):\n r = client.get(config.API_PATH() + '/benchmarks')\n benchmarks = json.loads(r.data)\n return benchmarks['benchmarks'][0]['id']", "def get_fan_target(self):\n return self.__fan_target", "def fuzz(self):\n self.total_mutant_index = 0\n self.total_num_mutations = self.num_mutations()\n\n self._main_fuzz_loop(self._iterate_protocol())", "def umbrae(self, benchmark: np.ndarray = None):\n return self.mbrae(benchmark) / (1 - self.mbrae(benchmark))", "def get_fuzzy_match(object, answer, threshold=80):\n answer_phrase = generate_ngrams(answer)\n if answer_phrase:\n best_match = [fuzz.ratio(object, phr) for phr in answer_phrase]\n if np.max(best_match)>threshold:\n return np.max(best_match), answer_phrase[np.argmax(best_match)]\n else:\n return 0,''\n else:\n return 0, ''", "def get_builder_image_url(benchmark, fuzzer, docker_registry):\n return f'{docker_registry}/builders/{fuzzer}/{benchmark}'", "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def fuzz(binary, sample, verbose,loop_limit, prog):\n # PLACEHOLDER\n # Check magic bytes / struct of sample input -> make best guess for input format\n # This should be moved into mutation logic -> this is a shortcut for midpoint\n sample_processed = SampleParser(sample)\n \n try:\n # data: that one plaintext file\n # ASCII text: plaintext\n # JSON data: json\n # CSV text: csv\n # HTML document, ASCII text: xml2\n mutations = { # walmart switch statement\n 'JSON' : lambda sample_processed:JsonMutator(sample_processed.data, min=2, max=10),\n 'CSV': lambda sample_processed:CsvMutator(sample_processed.csv(), min=2, max=10),\n 'JFIF': lambda sample_processed:JpgMutator(sample_processed.jpg(), min=2, max=10),\n 'XML': lambda sample_processed:XmlMutator(sample_processed.xml(), min=2, max=10),\n 'HTML document, ASCII text': lambda sample_processed:XmlMutator(sample_processed.xml(), min=2, max=10)\n }[sample_processed.guess](sample_processed)\n except KeyError as e:\n print('Unmatched data type: {}, defaulting to generic mutator'.format(e))\n mutations = Mutator(sample_processed.data)\n # need a default: ascii\n except Exception as e:\n print(\"mutator fucked up: {}\".format(e))\n \n print('Running fuzzer with a {} second limit...'.format(TIME_LIMIT))\n\n # nevermind\n 
strategy = mutations.complex_mutate\n \n # Loop for whole timelimit \n # In future - try multiple strategies in time limit\n cov = float(0)\n last = 0\n while(1):\n prog.iterations += 1 \n if (prog.iterations - last >= loop_limit):\n prog.reinit_breakpoints\n cov = float(0)\n mutations.reset()\n\n # in future, call parent method -> give me a mutation.. \n current_input = strategy()\n\n # Spawn process - should be stopped after exec. \n pid, status = prog.spawn_process(stdout=False)\n prog.getregs()\n # Now that the process has been spawned, we can populate the breakpoints\n prog.populate_breakpoints()\n if verbose:\n print(current_input)\n print(\"coverage: {}, this run: {}\".format(prog.coverage(), cov))\n print(\"pid {}\".format(pid))\n #prog.breakpoint_status()\n\n # Start the process proper \n prog.cont()\n prog.send(current_input) \n\n # simulate EOF \n prog.close_input() \n # why in the everloving fuck does RESIZING A TERMINAL alter the behaviour of waitpid ????????\n # sigwinch. thats why. \n \n if prog.coverage() > cov:\n cov = prog.coverage()\n mutations.add_pop(current_input)\n last = prog.iterations\n # Wait for something to happen. \n while(1):\n # sigsegv doesn't count as a termination signal.\n # since it gets caught by ptrace (only sigkill goes through ptrace) \n # WSTOPSIG == 11 == SIGSEGV -> segfault\n\n \n\n pid, status = prog.wait()\n if(os.WIFSTOPPED(status) and (os.WSTOPSIG(status) == signal.SIGSEGV)):\n # Placeholder -> Need to create file with crash input and integrate \n # fuzzing engine. \n\n # Update stats\n prog.getregs()\n prog.crash_eips.append(prog.registers.eip) \n #if verbose:\n # print(\"Input crashed program with signal: {}\".format(os.WSTOPSIG(status)))\n\n with open(\"bad.txt\", \"ab+\") as f:\n # write the byte string\n # since most formats have newlines in them\n f.write(str(current_input).encode(\"unicode-escape\") + b\"\\n\")\n break\n # we have hit one of our basic block breakpoints\n elif(os.WIFSTOPPED(status) and (os.WSTOPSIG(status) == signal.SIGTRAP)):\n # we need to decrement eip, replace the breakpoint with its saved value\n prog.restore_current_bp() \n\n elif(os.WIFEXITED(status)):\n break\n\n #prog.step()\n prog.cont()", "def GuessTargets(self, target_name):\n return difflib.get_close_matches(target_name, self.GetTargets(), 10, 0.4)", "def calculate_appropriate_target(self):\n pass", "def calculate_appropriate_target(self):\n pass", "def fuzz(text):\r\n\r\n return ' '.join([fuzz_word(word) for word in text.split()])", "def defuzzification(self):\n self.price = fuzz.defuzz(self.psf,self.fuzzy_output, 'som')", "def get_perf(target, output, mask):\n\n\toutput = np.stack(output, axis=0)\n\tmk = mask*np.reshape(target[:,:,0] == 0, (par['num_time_steps'], par['batch_size']))\n\n\ttarget = np.argmax(target, axis = 2)\n\toutput = np.argmax(output, axis = 2)\n\n\treturn np.sum(np.float32(target == output)*np.squeeze(mk))/np.sum(mk)", "def benchmark_selection(self):\n return self._benchmark_selection", "def run_libfuzzer_engine(tool_name, target_name, arguments, testcase_path,\n output_path, timeout):\n arguments = list(arguments)\n if environment.is_trusted_host():\n from clusterfuzz._internal.bot.untrusted_runner import tasks_host\n\n # TODO(ochang): Remove hardcode.\n return tasks_host.process_testcase('libFuzzer', tool_name, target_name,\n arguments, testcase_path, output_path,\n timeout)\n\n target_path = engine_common.find_fuzzer_path(\n environment.get_value('BUILD_DIR'), target_name)\n if not target_path:\n return engine.ReproduceResult([], 0, 
0, '')\n\n engine_impl = libfuzzer_engine.Engine()\n if tool_name == 'minimize':\n func = engine_impl.minimize_testcase\n else:\n assert tool_name == 'cleanse'\n func = engine_impl.cleanse\n\n return func(target_path, arguments, testcase_path, output_path, timeout)", "def get_type(benchmark):\n # TODO(metzman): Use classes to mock a benchmark config for\n # OSS_FUZZ_ON_DEMAND.\n default_value = os.getenv('EXPERIMENT_TYPE', BenchmarkType.CODE.value)\n return benchmark_config.get_config(benchmark).get('type', default_value)", "def double_fuzz(self, original,fl):\n result = self.r.choice(self.mutators)(self, original,fl)\n return self.r.choice(self.mutators)(self, result,fl)", "def _fuzz_current_case(self, path):\n target = self.targets[0]\n\n self.pause() # only pauses conditionally\n\n message_path = \"->\".join([self.nodes[e.dst].name for e in path])\n\n if self.fuzz_node.mutant.name:\n primitive_under_test = self.fuzz_node.mutant.name\n else:\n primitive_under_test = 'no-name'\n\n test_case_name = \"{0}.{1}.{2}\".format(message_path, primitive_under_test, self.fuzz_node.mutant_index)\n\n self._fuzz_data_logger.open_test_case(\"{0}: {1}\".format(self.total_mutant_index, test_case_name),\n name=test_case_name, index=self.total_mutant_index)\n\n self._fuzz_data_logger.log_info(\n \"Type: %s. Default value: %s. Case %d of %d overall.\" % (\n type(self.fuzz_node.mutant).__name__,\n repr(self.fuzz_node.mutant.original_value),\n self.total_mutant_index,\n self.total_num_mutations))\n\n if target.procmon:\n self._fuzz_data_logger.open_test_step('Calling procmon pre_send()')\n target.procmon.pre_send(self.total_mutant_index)\n\n if target.netmon:\n self._fuzz_data_logger.open_test_step('Calling netmon pre_send()')\n target.netmon.pre_send(self.total_mutant_index)\n\n target.open()\n\n self.pre_send(target)\n\n try:\n for e in path[:-1]:\n node = self.nodes[e.dst]\n callback_data = self._callback_current_node(node=node, edge=e)\n self._fuzz_data_logger.open_test_step(\"Transmit Prep Node '{0}'\".format(node.name))\n self.transmit_normal(target, node, e, callback_data=callback_data)\n\n callback_data = self._callback_current_node(node=self.fuzz_node, edge=path[-1])\n except sex.BoofuzzTargetConnectionReset:\n # TODO: Switch _ignore_connection_reset for _ignore_transmission_error, or provide retry mechanism\n if self._ignore_connection_reset:\n self._fuzz_data_logger.log_info(\"Target connection reset.\")\n else:\n self._fuzz_data_logger.log_fail(\"Target connection reset.\")\n except sex.BoofuzzTargetConnectionAborted as e:\n # TODO: Switch _ignore_connection_aborted for _ignore_transmission_error, or provide retry mechanism\n if self._ignore_connection_aborted:\n self._fuzz_data_logger.log_info(\"Target connection lost (socket error: {0} {1}): You may have a \"\n \"network issue, or an issue with firewalls or anti-virus. Try \"\n \"disabling your firewall.\"\n .format(e.socket_errno, e.socket_errmsg))\n else:\n self._fuzz_data_logger.log_fail(\"Target connection lost (socket error: {0} {1}): You may have a \"\n \"network issue, or an issue with firewalls or anti-virus. 
Try \"\n \"disabling your firewall.\"\n .format(e.socket_errno, e.socket_errmsg))\n\n self._fuzz_data_logger.open_test_step(\"Fuzzing Node '{0}'\".format(self.fuzz_node.name))\n self.transmit_fuzz(target, self.fuzz_node, path[-1], callback_data=callback_data)\n target.close()\n\n if not self._check_for_passively_detected_failures(target=target):\n self._fuzz_data_logger.open_test_step(\"Calling post_send function:\")\n try:\n self.post_send(target=target, fuzz_data_logger=self._fuzz_data_logger, session=self, sock=target)\n except sex.BoofuzzTargetConnectionReset:\n self._fuzz_data_logger.log_fail(\n \"Target connection reset -- considered a failure case when triggered from post_send\")\n except sex.BoofuzzTargetConnectionAborted as e:\n self._fuzz_data_logger.log_info(\"Target connection lost (socket error: {0} {1}): You may have a \"\n \"network issue, or an issue with firewalls or anti-virus. Try \"\n \"disabling your firewall.\"\n .format(e.socket_errno, e.socket_errmsg))\n pass\n except sex.BoofuzzTargetConnectionFailedError:\n self._fuzz_data_logger.log_fail(\n \"Cannot connect to target; target presumed down.\"\n \" Note: Normally a failure should be detected, and the target reset.\"\n \" This error may mean you have no restart method configured, or your error\"\n \" detection is not working.\")\n except Exception:\n self._fuzz_data_logger.log_fail(\n \"Custom post_send method raised uncaught Exception.\" + traceback.format_exc())\n finally:\n target.close()\n self._check_procmon_failures(target=target)\n\n self._fuzz_data_logger.open_test_step(\"Sleep between tests.\")\n self._fuzz_data_logger.log_info(\"sleeping for %f seconds\" % self.sleep_time)\n time.sleep(self.sleep_time)\n\n self._process_failures(target=target)\n\n self._stop_netmon(target=target)\n\n self.export_file()", "def test_make_benchmark_single_ll():\n benchmark = llvm.make_benchmark(INVALID_IR_PATH)\n assert str(benchmark.uri).startswith(\"benchmark://user-v0/\")\n assert benchmark.uri.scheme == \"benchmark\"\n assert benchmark.uri.dataset == \"user-v0\"", "def reference_benchmark_result(self):\n return self._reference_id" ]
[ "0.6541956", "0.62079185", "0.57205427", "0.5588023", "0.55567586", "0.543854", "0.54338294", "0.53948295", "0.5363798", "0.532333", "0.5268365", "0.5229397", "0.5206228", "0.51935446", "0.51308554", "0.5129264", "0.5088721", "0.5079886", "0.5032553", "0.5032553", "0.50242513", "0.5016661", "0.50026", "0.4977756", "0.49376056", "0.49229285", "0.4918181", "0.49130705", "0.49082592", "0.49063617" ]
0.8522614
0
Returns the project of |benchmark|
def get_project(benchmark):
    return benchmark_config.get_config(benchmark)['project']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benchmark_result(self):\n return self._benchmark_id", "def benchmark_selection(self):\n return self._benchmark_selection", "def get_benchmark(client):\n r = client.get(config.API_PATH() + '/benchmarks')\n benchmarks = json.loads(r.data)\n return benchmarks['benchmarks'][0]['id']", "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test", "def get_benchmark(self, benchmark):\n\t\tif not isinstance(benchmark, str) and not callable(benchmark): return benchmark\n\t\telif benchmark in self.classes:\treturn self.classes[benchmark]()\n\t\traise TypeError('Passed benchmark is not defined!')", "def findBenchFromInstrument(self, instrument):\n return instrument.bench", "def retrieve( self, benchmark, extraLabel='' ):\n if benchmark.reference is ReferenceBenchmark.SP:\n idx = np.argmax( self.sps )\n else:\n # Get reference for operation:\n if benchmark.reference is ReferenceBenchmark.Pd:\n ref = self.pds\n elif benchmark.reference is ReferenceBenchmark.Pf:\n ref = self.pfs\n delta = ref - benchmark.refVal\n idx = np.argmin( np.abs( delta ) )\n return PerformancePoint( name=extraLabel + benchmark.name\n , sp=self.sps[ idx ]\n , pd=self.pds[ idx ]\n , pf=self.pfs[idx]\n , thres=self.thresholds[idx]\n )", "def findBenchFromDevice(self, device):\n return device.bench", "def reference_benchmark_result(self):\n return self._reference_id", "def umbrae(self, benchmark: np.ndarray = None):\n return self.mbrae(benchmark) / (1 - self.mbrae(benchmark))", "def get_project(self, i):\r\n return self.__projects[i]", "def project(self, v):\n return v", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def _get_gcd_project():\n return os.getenv(environment_vars.GCD_DATASET)", "def project():", "def project():", "def project():" ]
[ "0.6512932", "0.6404195", "0.634007", "0.62146837", "0.6029168", "0.59504235", "0.5943799", "0.5894067", "0.581235", "0.5640167", "0.54331803", "0.53937525", "0.53754103", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.53711575", "0.5320741", "0.5305155", "0.5305155", "0.5305155" ]
0.81019706
0
Returns the type of |benchmark|
def get_type(benchmark):
    # TODO(metzman): Use classes to mock a benchmark config for
    # OSS_FUZZ_ON_DEMAND.
    default_value = os.getenv('EXPERIMENT_TYPE', BenchmarkType.CODE.value)
    return benchmark_config.get_config(benchmark).get('type', default_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_benchmark(self, benchmark):\n\t\tif not isinstance(benchmark, str) and not callable(benchmark): return benchmark\n\t\telif benchmark in self.classes:\treturn self.classes[benchmark]()\n\t\traise TypeError('Passed benchmark is not defined!')", "def validate_type(benchmark):\n benchmark_type = get_type(benchmark)\n if benchmark_type not in BENCHMARK_TYPE_STRS:\n logs.error('%s has an invalid benchmark type %s, must be one of %s',\n benchmark, benchmark_type, BENCHMARK_TYPE_STRS)\n return False\n return True", "def benchmark_result(self):\n return self._benchmark_id", "def get_benchmark(client):\n r = client.get(config.API_PATH() + '/benchmarks')\n benchmarks = json.loads(r.data)\n return benchmarks['benchmarks'][0]['id']", "def get_testbench_name(self, tb_type: str) -> str:\n return f'{self._meas_name}_TB_{tb_type}'", "def get_perfcount_type(self):\n return self._perfcount_type", "def get_testbench_specs(self, tb_type: str) -> Dict[str, Any]:\n return self._specs['testbenches'][tb_type]", "def _getStatisticType(self, statistic):\n\n instructions = simplejson.loads(statistic.instructions_json)\n return instructions['type']", "def __get_dataset_type(dataset):\n op_type = None\n if isinstance(dataset, de.ShuffleDataset):\n op_type = OpName.SHUFFLE\n elif isinstance(dataset, de.MindDataset):\n op_type = OpName.MINDRECORD\n elif isinstance(dataset, de.BatchDataset):\n op_type = OpName.BATCH\n elif isinstance(dataset, de.SyncWaitDataset):\n op_type = OpName.BARRIER\n elif isinstance(dataset, de.ZipDataset):\n op_type = OpName.ZIP\n elif isinstance(dataset, de.ConcatDataset):\n op_type = OpName.CONCAT\n elif isinstance(dataset, de.MapDataset):\n op_type = OpName.MAP\n elif isinstance(dataset, de.FilterDataset):\n op_type = OpName.FILTER\n elif isinstance(dataset, de.RepeatDataset):\n op_type = OpName.REPEAT\n elif isinstance(dataset, de.SkipDataset):\n op_type = OpName.SKIP\n elif isinstance(dataset, de.TakeDataset):\n op_type = OpName.TAKE\n elif isinstance(dataset, de.ImageFolderDatasetV2):\n op_type = OpName.IMAGEFOLDER\n elif isinstance(dataset, de.GeneratorDataset):\n op_type = OpName.GENERATOR\n elif isinstance(dataset, de.TransferDataset):\n op_type = OpName.DEVICEQUEUE\n elif isinstance(dataset, de.RenameDataset):\n op_type = OpName.RENAME\n elif isinstance(dataset, de.TFRecordDataset):\n op_type = OpName.TFREADER\n elif isinstance(dataset, de.ProjectDataset):\n op_type = OpName.PROJECT\n elif isinstance(dataset, de.MnistDataset):\n op_type = OpName.MNIST\n elif isinstance(dataset, de.ManifestDataset):\n op_type = OpName.MANIFEST\n elif isinstance(dataset, de.VOCDataset):\n op_type = OpName.VOC\n elif isinstance(dataset, de.Cifar10Dataset):\n op_type = OpName.CIFAR10\n elif isinstance(dataset, de.Cifar100Dataset):\n op_type = OpName.CIFAR100\n elif isinstance(dataset, de.CelebADataset):\n op_type = OpName.CELEBA\n elif isinstance(dataset, de.RandomDataset):\n op_type = OpName.RANDOMDATA\n elif isinstance(dataset, de.TextFileDataset):\n op_type = OpName.TEXTFILE\n else:\n raise ValueError(\"Unsupported DatasetOp\")\n\n return op_type", "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def unit_type(self) -> str:", "def type(name):", "def benchmark_selection(self):\n return self._benchmark_selection", "def type_():\n pass", "def type(self):\n return self._instrument_type", "def get_benchmark_specification(benchmark = 'FSI1'):\n if benchmark == 'FSI1':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n 
rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 0.2\n T_end = 60.0\n result = \"results-FSI1/\"\n elif benchmark == 'FSI2':\n rho_s = Constant(1e04)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 1.0\n T_end = 15.0\n result = \"results-FSI2/\"\t\t\n elif benchmark == 'FSI3':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(2e06)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 2.0\n T_end = 20.0\n result = \"results-FSI3/\"\t\t\n else:\n raise ValueError('\"{}\" is a wrong name for problem specification.'.format(benchmark))\n v_max = Constant(1.5*U) # mean velocity to maximum velocity \n # (we have parabolic profile)\n E_s = Constant(2*mu_s*(1+nu_s))\n lambda_s = Constant((nu_s*E_s)/((1+nu_s)*(1-2*nu_s)))\n mu_f = Constant(nu_f*rho_f)\n return v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result", "def test_type(self):\n return self._test_type", "def get_test_type(self):\n return self.test_type", "def type(self):\n # easy enough\n return self._dataset._pyre_id.type", "def compare_performance(self):\n\n if self.label_type == \"categorical\":\n self._eval_classifier()\n\n elif self.label_type == \"numerical\":\n self._eval_regressor()\n\n return self.performance_comparison", "def criterion_type(self) -> str:\n return pulumi.get(self, \"criterion_type\")", "def criterion_type(self) -> str:\n return pulumi.get(self, \"criterion_type\")", "def sample_type(self) -> str:\n return self._sample_type", "def _type(self) -> str:\n ...", "def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test", "def type(cls):", "def create_benchmark(self, benchmark):\n self.crd_client.create_namespaced_custom_object(\n group=\"ripsaw.cloudbulldozer.io\",\n version=\"v1alpha1\",\n namespace=benchmark[\"metadata\"][\"namespace\"],\n plural=\"benchmarks\",\n body=benchmark,\n )", "def get_engine_type(self):", "def _get_measure_class(measure_number, year=config.get('calculation.measures_year')):\n return MEASURE_NUMBER_TO_CLASS[year][measure_number]['measure_type']", "def TMVAType(name):\n from ROOT import TMVA\n name = name.lower()\n if name.find('bdt') >= 0:\n return TMVA.Types.kBDT\n elif name.find('llh') >= 0:\n return TMVA.Types.kLikelihood\n else:\n raise ValueError('Unsupported TMVA classifier type')" ]
[ "0.7427512", "0.6534945", "0.6310375", "0.63042027", "0.6122598", "0.6042788", "0.58986205", "0.58470327", "0.58307236", "0.58241946", "0.5813828", "0.5782032", "0.5728832", "0.570958", "0.5697104", "0.56663436", "0.56439966", "0.5572599", "0.5552849", "0.5550577", "0.5532445", "0.5532445", "0.55131143", "0.5511789", "0.5510949", "0.54829085", "0.5481124", "0.5477772", "0.54583263", "0.54505855" ]
0.8139171
0
Get the URL of the docker runner image for fuzzing the benchmark with fuzzer.
def get_runner_image_url(experiment, benchmark, fuzzer, docker_registry):
    tag = 'latest' if environment.get('LOCAL_EXPERIMENT') else experiment
    return f'{docker_registry}/runners/{fuzzer}/{benchmark}:{tag}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_builder_image_url(benchmark, fuzzer, docker_registry):\n return f'{docker_registry}/builders/{fuzzer}/{benchmark}'", "def _to_dockerfile_url(image):\n path = \"/\".join((image.platform, image.release, image.architecture, \"Dockerfile\"))\n return git.get_github_blob_url(path, ref=f\"v{image.version}\")", "def get_fuzz_target(benchmark):\n # Do this because of OSS-Fuzz-on-demand.\n # TODO(metzman): Use classes to mock a benchmark config for\n # OSS_FUZZ_ON_DEMAND.\n return benchmark_config.get_config(benchmark).get(\n 'fuzz_target', environment.get('FUZZ_TARGET'))", "def get_image_name():\n try:\n return os.environ['AIRFLOW_IMAGE']\n except KeyError:\n raise Exception(\"Please provide docker image name to pytest using environment variable AIRFLOW_IMAGE\")", "def getBuildbotURL():", "def get_rule_container_image_uri(name, region):\n if name is not None and name.startswith(\"DetailedProfilerProcessingJobConfig\"):\n # should have the format like \"123456789012.dkr.ecr.us-west-2.amazonaws.com/detailed-profiler-processing:latest\"\n return image_uris.retrieve(detailed_framework_name, region)\n\n return image_uris.retrieve(framework_name, region)", "def docker_runner_factory(image):\n\n mapping = {\n 'gunicorn': GunicornDockerRunner,\n 'redis': RedisDockerRunner,\n 'consul': ConsulDockerRunner,\n 'postgres': PostgresDockerRunner,\n 'registrator': RegistratorDockerRunner,\n 'solr': SolrDockerRunner\n }\n\n for key in mapping:\n if key in image:\n return mapping[key]\n\n return DockerRunner", "def get_buildbot_url():\n return \"http://10.45.4.98:8001/\"", "def _ensure_image(testkit_path, branch_name, artifacts_path):\n # Construct Docker image name from branch name\n image_name = \"runner:%s\" % branch_name\n image_path = os.path.join(testkit_path, \"runner_image\")\n docker.build_and_tag(image_name, image_path, log_path=artifacts_path)\n\n return image_name", "def _get_dockerfiles_for_test() -> str:\n project_root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n dockerfiles_dir = os.path.join(project_root_dir, \"dockerfiles\")\n if sys.version_info[0:2] == (3, 6):\n return os.path.join(dockerfiles_dir, \"centos7.Dockerfile\")\n elif sys.version_info[0:2] == (3, 9):\n return os.path.join(dockerfiles_dir, \"rocky8.Dockerfile\")\n else:\n raise Exception(\n \"Running the tests with INMANTA_TEST_INFRA_SETUP=true is only supported using a python3.6 or python3.9 venv\"\n )", "def get_image_registry_url(self, image_name):\n c = self._oc_command([\"get\", \"is\", image_name,\n \"--output=jsonpath=\\'{ .status.dockerImageRepository }\\'\"])\n try:\n internal_registry_name = run_cmd(c, return_output=True)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"oc get is failed: %s\" % ex)\n\n logger.info(\"Image registry url: %s\", internal_registry_name)\n\n return internal_registry_name.replace(\"'\", \"\").replace('\"', '')", "def _get_image(runtime):\n return \"{}:{}\".format(LambdaContainer._IMAGE_REPO_NAME, runtime)", "def _dockerfile(self):\n return self.config.get('docker', {}).get('dockerfile', 'Dockerfile')", "def get_image_url():", "def dockerfile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"dockerfile\")", "def get_flower_url(self):\n return \"http://{0}:{1}\".format(self.get_head_node_ip(), self.FLOWER_PORT)", "def _GetBuildBotUrl(builder_host, builder_port):\n if (builder_host == BISECT_BUILDER_HOST and\n builder_port == BISECT_BUILDER_PORT):\n return TRY_SERVER_URL\n else:\n return 'http://%s:%s' % (builder_host, 
builder_port)", "def dockerfile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dockerfile\")", "def get_base_docker_image(docker_file):\n with open(docker_file) as f:\n from_line = next(\n line for line in f.read().split(\"\\n\") if line.startswith(\"FROM\")\n )\n _from, base_image = from_line.split()\n return base_image", "def url(self):\r\n return \"{}/container/{}\".format(BASE_URL, self.unit_locator)", "def ping_docker():\n with Docker('unittest-36', image='python:3.6') as tun:\n return tun.call(python_version)[:2]", "def docker_image_name(self):\n raise NotImplementedError", "def dockerfile_dir(self):\n return self._dockerfile_dir", "def docker_worker():", "def http_service(docker_ip: Any, docker_services: Any) -> Any:\n # `port_for` takes a container port and returns the corresponding host port\n port = docker_services.port_for(\"fdk-baseregistries-publisher\", HOST_PORT)\n url = \"http://{}:{}\".format(docker_ip, port)\n docker_services.wait_until_responsive(\n timeout=30.0, pause=0.1, check=lambda: is_responsive(url)\n )\n return url", "def url(self, name):\n return '%s/%s' % (self.container_url, name)", "def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"", "def build_view_run_url(contest_id: int, run_id: int) -> str:\n parse_url = urllib.parse.urlparse(config.EJUDGE_PROXY_BASE_URL)\n new_path = str(pathlib.PurePosixPath(parse_url.path, f'c{contest_id}', f'r{run_id}'))\n parse_url = parse_url._replace(path=new_path)\n return parse_url.geturl()", "def pyfunc_serve_from_docker_image(image_name, host_port, extra_args=None):\n env = dict(os.environ)\n env.update(LC_ALL=\"en_US.UTF-8\", LANG=\"en_US.UTF-8\")\n scoring_cmd = [\"docker\", \"run\", \"-p\", \"%s:8080\" % host_port, image_name]\n if extra_args is not None:\n scoring_cmd += extra_args\n return _start_scoring_proc(cmd=scoring_cmd, env=env)", "def build_nighthawk_benchmark_image_from_source(manager: source_manager.SourceManager) -> None:\n # TODO: Inject the builder object into this method\n builder = nighthawk_builder.NightHawkBuilder(manager)\n builder.build_nighthawk_benchmark_image()" ]
[ "0.8196786", "0.6458271", "0.5928085", "0.5869728", "0.5578148", "0.55031866", "0.5449406", "0.54289633", "0.5422988", "0.53856117", "0.5382283", "0.534175", "0.5297771", "0.5248548", "0.5238496", "0.5210609", "0.5198708", "0.519582", "0.5189414", "0.512294", "0.51160717", "0.51065534", "0.5106042", "0.50968045", "0.5094282", "0.50328517", "0.49583447", "0.49534687", "0.4936412", "0.4935533" ]
0.87715006
0