query            string  lengths 9 to 3.4k
document         string  lengths 9 to 87.4k
metadata         dict
negatives        list    lengths 4 to 101
negative_scores  list    lengths 4 to 101
document_score   string  lengths 3 to 10
document_rank    string  102 classes
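The columns above follow the usual retrieval-triplet layout: a natural-language query, one positive code document, and a list of hard-negative code snippets with scores. A minimal loading sketch with the Hugging Face datasets library is shown below; the dataset identifier is a placeholder, not the real name.

from datasets import load_dataset

# Minimal sketch: load the split and inspect one row of the triplet-style schema.
# "your-org/your-dataset" is a placeholder identifier, not the actual dataset name.
ds = load_dataset("your-org/your-dataset", split="train")

row = ds[0]
print(row["query"])            # natural-language description of the target code
print(row["document"][:200])   # the positive code snippet (string)
print(len(row["negatives"]))   # 4 to 101 hard-negative code snippets
print(row["document_score"], row["document_rank"])

An example row follows: first the query, then the positive document, then the objective specification and the negatives.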
Build the model with args.inn_depth many layers. If args.inn_glow is true, then each layer includes 1x1 convolutions.
def build_fc_inn(
    args: ClusterArgs, input_shape: Tuple[int, ...], level_depth: Optional[int] = None
) -> layers.Bijector:
    input_dim = input_shape[0]
    level_depth = level_depth or args.inn_level_depth

    chain: List[layers.Bijector] = [layers.Flatten()]
    for i in range(level_depth):
        if args.inn_batch_norm:
            chain += [layers.MovingBatchNorm1d(input_dim, bn_lag=args.inn_bn_lag)]
        if args.inn_glow:
            chain += [layers.InvertibleLinear(input_dim)]
        chain += [
            layers.MaskedCouplingLayer(
                input_dim=input_dim,
                hidden_dims=args.inn_coupling_depth * [args.inn_coupling_channels],
                mask_type="alternate",
                swap=(i % 2 == 0) and not args.inn_glow,
                scaling=args.inn_scaling,
            )
        ]

    # one last mixing of the channels
    if args.inn_glow:
        chain += [layers.InvertibleLinear(input_dim)]
    else:
        chain += [layers.RandomPermutation(input_dim)]

    return layers.BijectorChain(chain)
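A hedged usage sketch of the document above: the fields on the args object mirror the ones build_fc_inn reads, but the SimpleNamespace stand-in and every concrete value are illustrative assumptions, not the original ClusterArgs configuration, and the bijector layers module from the source repository must be in scope.

from types import SimpleNamespace

# Stand-in for ClusterArgs with only the fields that build_fc_inn reads.
# All values here are illustrative assumptions, not the original configuration.
args = SimpleNamespace(
    inn_level_depth=4,          # coupling blocks per level
    inn_batch_norm=True,        # insert MovingBatchNorm1d before each block
    inn_bn_lag=0.0,
    inn_glow=True,              # invertible linear (1x1) mixing instead of permutations
    inn_coupling_depth=2,       # hidden layers inside each coupling network
    inn_coupling_channels=256,  # width of those hidden layers
    inn_scaling="none",         # placeholder; valid options depend on the layers library
)

# Build a fully connected INN over flattened 784-dimensional inputs.
inn = build_fc_inn(args, input_shape=(784,))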
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def build(self, hp):\n\n model = Sequential()\n model.add(Conv2D(filters=hp.Choice('num_filters_0', values=[8, 16, 32, 64]),\n kernel_size=hp.Choice('kernel_size_0', values=[3, 4, 5]),\n activation=hp.Choice('activation_0', values=['relu', 'tanh']),\n input_shape=self.input_shape))\n\n for i in range(hp.Int('num_layers', 1, 3)):\n model.add(Conv2D(filters=hp.Choice('num_filters_%d' % (i + 1), values=[8, 16, 32, 64]),\n kernel_size=hp.Choice('kernel_size_%d' % (i + 1), values=[3, 4, 5]),\n activation=hp.Choice('activation_%d' % (i + 1), values=['relu', 'tanh'])))\n model.add(Flatten())\n model.add(Dense(N_zern))\n model.summary()\n\n model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-3, 5e-4, 1e-4])),\n loss='mean_squared_error')\n return model", "def build_cnn(input_var=None, w_init=None, n_layers=(4, 2, 1), n_filters_first=32, imsize=32, n_colors=3):\n weights = [] # Keeps the weights for all layers\n count = 0\n # If no initial weight is given, initialize with GlorotUniform\n if w_init is None:\n w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)\n # Input layer\n network = InputLayer(shape=(None, n_colors, imsize, imsize),\n input_var=input_var)\n for i, s in enumerate(n_layers):\n for l in range(s):\n network = Conv2DLayer(network, num_filters=n_filters_first * (2 ** i), filter_size=(3, 3),\n W=w_init[count], pad='same')\n count += 1\n weights.append(network.W)\n network = MaxPool2DLayer(network, pool_size=(2, 2))\n return network, weights", "def build_model(top_layer, input_shape):\n top_layer.built = True\n for layer in top_layer._layers:\n layer.built = True\n\n # fix batch norm building without calling build ... 
see:\n # https://github.com/tensorflow/tensorflow/blob/d3b421bc5c86b4dcce8470721c6e24055a4b3ef1/tensorflow/python/keras/layers/normalization.py#L985\n if isinstance(layer, tf.keras.layers.BatchNormalization):\n ndims = len(input_shape)\n # Convert axis to list and resolve negatives\n if isinstance(layer.axis, int):\n layer.axis = [layer.axis]\n for idx, x in enumerate(layer.axis):\n if x < 0:\n layer.axis[idx] = ndims + x\n\n # build model recursively\n if hasattr(layer, \"layers\"):\n build_model(layer, input_shape)", "def build_model(self, constructor, args):\n dims = {'en': 300, 'es': 50}\n dists = {'en': 'Normal',\n 'es': 'Normal',}\n z_dim = args.model_args.get('z_dim', 64)\n h_dim = args.model_args.get('h_dim', 64)\n n_layers = args.model_args.get('n_layers', 3)\n gauss_out = (args.model != 'MultiDKS') \n encoders = {'en': models.common.DeepGaussianMLP(dims['en'], z_dim, h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(dims['es'], z_dim, h_dim, n_layers)}\n decoders = {'en': models.common.DeepGaussianMLP(z_dim, dims['en'], h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(z_dim, dims['es'], h_dim, n_layers)}\n custom_mods = [m for m in ['en', 'es'] if m in args.modalities]\n model = constructor(args.modalities,\n dims=(dims[m] for m in args.modalities),\n dists=[dists[m] for m in args.modalities],\n encoders={m: encoders[m] for m in custom_mods},\n decoders={m: decoders[m] for m in custom_mods},\n z_dim=z_dim, h_dim=h_dim,\n device=args.device, **args.model_args)\n return model", "def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n self.build_encoder()\n self.build_fc()\n self.build_loss()\n self.build_global_step()", "def _build_model(self):\n \n #convolutional part\n conv_inputs = keras.Input(shape = self._state_shape[0])\n c1 = layers.Conv2D(filters = 4, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu')(conv_inputs)\n c2 = layers.Conv2D(filters = 8, kernel_size = 2, strides = (1,1), padding = \"same\", activation = 'relu')(c1)\n flat = layers.Flatten()(c2)\n\n\n #current green phase layer\n # phase_inputs = keras.Input(shape = (self._state_shape[1],))\n \n #elapsed green time layer\n elapsed_time_inputs = keras.Input(shape = (self._state_shape[2],))\n \n \n #combine elapsed time and green time layer\n # combined_green = layers.concatenate([phase_inputs, elapsed_time_inputs])\n # green_dense = layers.Dense(10, activation='relu')(elapsed_time_inputs)\n \n #combine green layer with conv layer\n all_combined = layers.concatenate([elapsed_time_inputs, flat])\n dense = layers.Dense(32, activation='relu')(all_combined)\n dense = layers.Dense(16, activation='relu')(dense)\n outputs = layers.Dense(self._output_dim, activation='linear')(dense)\n \n model = keras.Model(inputs = [conv_inputs, elapsed_time_inputs], outputs = outputs, name='simple_CNN') \n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n \n return model", "def build(width, height, depth, classes):\n model = Sequential()\n inputShape = (height, width, depth)\n # Variable chanDim is set to -1 if the order of the inputShape is (height, width, depth)\n # meaning the depth of the channel comes last in the triple\n chanDim = -1\n\n if K.image_data_format == \"channel_first\":\n inputShape = (depth, height, width)\n # if the channel is first in the triple (depth, height, width) we set chanDim to 1\n # Batch normalization layers use the channel dimension in the process, that is why we specficy the order\n chanDim = 1\n\n # The first set of 
CONV -> RELU where after each we apply BN layers to avoid overfitting\n # and a POOL -> DO that also help in reducing overfitting and increase the classification accuracy\n # First set of CONV -> RELU -> BN use 32 filters each with 3x3 shape\n # The consecutive CONV -> RELU -> BN layers allow the network to learn more rich features, which\n # is a common practice when training deeper CNNs, before applying POOL layer to reduce the spatial dimensions\n # of the input image\n # Then we apply POOL layer with a size of 2x2, and since we do not provide explicitly stride, keras asumes 2x2 S\n # Finally, a DROPOUT layer with a probabliy of 25%\n model.add(Conv2D(32, (3, 3), padding=\"same\", input_shape=inputShape))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(32, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # The second set of CONV -> RELU -> BN layers now learn 64 filters with 3x3 shape\n # It is common to increase the number of filters as the spatial input size decreases deeper in the network.\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # We add flatten layer to flatten the output of the previous layer\n # Then we add the only FC layer (512 nodes) with a RELU activation and a BN\n # Further applying a DO layer with p = 0.5\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n # Finally a softmax classifier\n model.add(Dense(classes))\n model.add(Activation(\"softmax\"))\n\n return model", "def build_model():\n model_weights = np.load(WEIGHTS_PATH, encoding='latin1').item()\n model = Sequential()\n model.add(InputLayer(batch_input_shape=(1, None, 1)))\n\n filter_parameters = [\n {'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n for x in filter_parameters:\n model.add(ZeroPadding1D(padding=x['padding']))\n model.add(Conv1D(x['num_filters'],\n kernel_size=x['kernel_size'],\n strides=x['conv_strides'],\n padding='valid'))\n weights = model_weights[x['name']]['weights'].reshape(model.layers[-1].get_weights()[0].shape)\n biases = model_weights[x['name']]['biases']\n\n model.layers[-1].set_weights([weights, biases])\n\n if 'conv8' not in x['name']:\n gamma = model_weights[x['name']]['gamma']\n beta = 
model_weights[x['name']]['beta']\n mean = model_weights[x['name']]['mean']\n var = model_weights[x['name']]['var']\n\n model.add(BatchNormalization())\n model.layers[-1].set_weights([gamma, beta, mean, var])\n model.add(Activation('relu'))\n if 'pool_size' in x:\n model.add(MaxPooling1D(pool_size=x['pool_size'],\n strides=x['pool_strides'],\n padding='valid'))\n\n #\n return Model(inputs=model.input, outputs=model.get_layer('activation_7').output)", "def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths", "def mobile_unet(\n inputs, \n depth=3,\n filters=64,\n out_channels=None,\n out_activation=None,\n kernel_initializer=None,\n dtype=None,\n name=None,\n seed=None,\n):\n\n name_scope = name\n if name_scope:\n name = lambda n: '{}/{}'.format(name_scope, n)\n else:\n name = lambda n: n\n kernel_initializer = kernel_initializer_generator(\n kernel_initializer=kernel_initializer,\n seed=seed,\n )\n\n x = inputs\n levels = []\n\n x = k.layers.Conv2D(\n filters=filters//2,\n kernel_size=3, \n padding='same', \n activation='relu',\n kernel_initializer=next(kernel_initializer),\n name=name('convdw00'),\n )(x)\n\n x = k.layers.SeparableConv2D(\n filters=filters, \n kernel_size=3, \n padding='same', \n activation='relu',\n depthwise_initializer=next(kernel_initializer),\n pointwise_initializer=next(kernel_initializer),\n name=name('convdw01'),\n )(x)\n\n for i in range(1, depth+1):\n levels.append(x)\n\n x = k.layers.SeparableConv2D(\n filters=filters*2**i, \n kernel_size=3, \n strides=2,\n padding='same', \n activation='relu',\n depthwise_initializer=next(kernel_initializer),\n pointwise_initializer=next(kernel_initializer),\n name=name('convdw{}0'.format(i)),\n )(x)\n\n x = k.layers.SeparableConv2D(\n filters=filters*2**i, \n kernel_size=3, \n padding='same', \n activation='relu',\n depthwise_initializer=next(kernel_initializer),\n pointwise_initializer=next(kernel_initializer),\n name=name('convdw{}1'.format(i)),\n )(x)\n \n for i in range(depth-1, -1, -1):\n x = k.layers.UpSampling2D(interpolation='bilinear', name=name('up{}0'.format(i)))(x)\n\n x = k.layers.SeparableConv2D(\n filters=filters*2**i,\n kernel_size=3,\n padding='same',\n activation='relu',\n depthwise_initializer=next(kernel_initializer),\n pointwise_initializer=next(kernel_initializer),\n name=name('up{}1'.format(i)),\n )(x)\n\n x = k.layers.Concatenate(name=name('concat{}'.format(i)))([x, levels.pop()])\n\n for j in range(2):\n x = k.layers.SeparableConv2D(\n filters=filters*2**i, \n kernel_size=3, \n padding='same', \n activation='relu',\n depthwise_initializer=next(kernel_initializer),\n pointwise_initializer=next(kernel_initializer),\n name=name('convuw{}{}'.format(i,j)),\n )(x)\n\n if out_channels:\n x = k.layers.Conv2D(\n filters=out_channels,\n kernel_size=1, \n activation=out_activation,\n kernel_initializer=next(kernel_initializer),\n name=name('convout'),\n )(x)\n \n return x", "def _build(self):\n with tf.variable_scope (self.name + '_architecutre') as scope:\n images_square = unflatten_layer ( self.images )\n visualize_images(images_square)\n\n # Conv Layer 1\n conv1_out, params = conv_2d_layer ( input = 
images_square,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'enc_conv_1',\n visualize = True )\n process_params(params, name = self.name)\n e1_params = params\n pool1_out = max_pool_2d_layer ( input = conv1_out, name = 'enc_pool_1')\n # lrn1_out = local_response_normalization_layer (pool1_out, name = 'lrn_1' )\n\n # Conv Layer 2\n conv2_out, params = conv_2d_layer ( input = pool1_out,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'enc_conv_2' )\n process_params(params, name = self.name)\n e2_params = params\n pool2_out = max_pool_2d_layer ( input = conv2_out, name = 'enc_pool_2')\n # lrn2_out = local_response_normalization_layer (pool2_out, name = 'lrn_2' )\n\n flattened = flatten_layer(pool2_out)\n\n # Dropout Layer 1 \n flattened_dropout = dropout_layer ( input = flattened,\n prob = self.dropout_prob,\n name = 'enc_dropout_1') \n\n # Dot Product Layer 1\n fc1_out, params = dot_product_layer ( input = flattened_dropout,\n neurons = HIDDEN_1,\n name = 'enc_dot_1')\n process_params(params, name = self.name)\n e3_params = params \n\n # Dropout Layer 2 \n fc1_out_dropout = dropout_layer ( input = fc1_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_2')\n # Dot Product Layer 2\n fc2_out, params = dot_product_layer ( input = fc1_out_dropout, \n neurons = HIDDEN_2,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n e4_params = params \n\n # Dropout Layer 3 \n fc2_out_dropout = dropout_layer ( input = fc2_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_3')\n \n # Dot Product Layer 2\n self.codeword, params = dot_product_layer ( input = fc2_out_dropout, \n neurons = CODEWORD_LENGTH,\n activation = CODE_ACTIVATION,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n process_codeword_normalization_regularizer(self.codeword, \n coeff = AUTOENCODER_CODEWORD_COEFF,\n name = self.name)\n e5_params = params \n # tf.summary.histogram('codewords', self.codeword)\n # self.hash = threshold_layer ( input = self.codeword,\n # name = 'hash')\n # process_hash_regularizer(self.codeword, coeff = AUTOENCODER_HASH_COEFF,\n # name = self.name)\n\n # Decoder ... 
\n decoder_1_out, params = dot_product_layer ( input = self.codeword, \n neurons = HIDDEN_2,\n params = [tf.transpose(e5_params[0]), None],\n name = 'decoder_dot_1')\n d1_params = params\n process_params([params[1]], name = self.name)\n \n dec_1_out_dropout = dropout_layer ( input = decoder_1_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_1')\n\n decoder_2_out, params = dot_product_layer ( input = dec_1_out_dropout, \n neurons = HIDDEN_1,\n params = [tf.transpose(e4_params[0]), None],\n name = 'decoder_dot_2')\n d2_params = params\n process_params([params[1]], name = self.name)\n \n # dropout 2\n dec_2_out_dropout = dropout_layer ( input = decoder_2_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_2')\n\n decoder_3_out, params = dot_product_layer ( input = dec_2_out_dropout, \n neurons = 1250,\n params = [tf.transpose(e3_params[0]), None],\n name = 'decoder_dot_3')\n d3_params = params\n process_params([params[1]], name = self.name)\n\n # DeConv Layer 1\n # The output shapes need to be changed according to architecture.\n\n dec_3_square = unflatten_layer ( decoder_3_out, channels = CONV_2_N )\n upsample_1 = upsampling_layer (dec_3_square, size = (10,10), name = 'dec_upsampling_1')\n\n deconv1_out, params = deconv_2d_layer ( input = upsample_1,\n neurons = CONV_1_N,\n filter_size = CONV_2_FILT,\n output_shape = (12,12),\n # n_outs = MINI_BATCH_SIZE,\n stride = (1,1,1,1), \n params = [e2_params[0], None], \n name = 'dec_deconv_1' )\n\n process_params([params[1]], name = self.name)\n d4_params = params\n\n # DeConv Layer 2\n upsample_2 = upsampling_layer (deconv1_out, size = (24,24), name = 'dec_upsampling_2')\n decoded_images_square, params = deconv_2d_layer ( input = upsample_2,\n neurons = 1,\n filter_size = CONV_1_FILT,\n stride = (1,1,1,1),\n output_shape = (28,28),\n # n_outs = MINI_BATCH_SIZE, \n params = [e1_params[0], None], \n activation = 'tanh', \n name = 'dec_deconv_2' )\n \n process_params([params[1]], name = self.name)\n d5_params = params \n \n self.decoded = flatten_layer (decoded_images_square, in_shp = [-1, 28, 28, 1])\n visualize_images(decoded_images_square, name = 'decoded')\n # This is because transpose don't initialize.\n self.params = [ [e5_params[0], d1_params[1] ],\n [e4_params[0], d2_params[1] ],\n [e3_params[0], d3_params[1] ],\n [e2_params[0], d4_params[1] ],\n [e1_params[0], d5_params[1] ] ]\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + '_decoder_error') as scope:\n reconstruction_error = rmse(self.images, self.decoded) \n tf.add_to_collection( self.name + '_objectives', reconstruction_error ) \n tf.summary.scalar('reconstruction_error', reconstruction_error)\n\n self._cook_optimizer( \n lr = AUTOENCODER_LR, \n optimizer = AUTOENCODER_OPTIMIZER,\n l1_coeff = AUTOENCODER_L1_COEFF,\n l2_coeff = AUTOENCODER_WEIGHT_DECAY_COEFF)", "def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self", "def _build_model(self, image_input_source, encoder_input_source, dropout_toggle):\n\t\t# We have to match this output size.\n\t\tbatch, input_height, input_width, input_depth = image_input_source.get_shape().as_list()\n\t\n\t\tfilter_sizes = [64, 64, 64] # Like VGG net, except made by a stupid person.\n\t\n\t\t# Convolutional ops will go here.\n\t\tc0, wc0, bc0 = self._build_conv(image_input_source, [3, 3, input_depth, filter_sizes[0]], [1, 1, 1, 1], activate=False)\n\t\tc1 = self._build_max_pool(c0, [1, 2, 2, 1], [1, 2, 2, 
1])\n\t\tc2, wc2, bc2 = self._build_conv(self._build_dropout(c1, dropout_toggle), [3, 3, filter_sizes[0], filter_sizes[1]], [1, 1, 1, 1])\n\t\tc3 = self._build_max_pool(c2, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tc4, wc4, bc4 = self._build_conv(self._build_dropout(c3, dropout_toggle), [3, 3, filter_sizes[1], filter_sizes[2]], [1, 1, 1, 1])\n\t\tc5 = self._build_max_pool(c4, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tconv_output = c5\n\t\n\t\t# Transition to FC layers.\n\t\tpre_flat_shape = conv_output.get_shape().as_list()\n\t\tflatten = tf.reshape(conv_output, [-1, pre_flat_shape[1]*pre_flat_shape[2]*pre_flat_shape[3]])\n\t\n\t\t# Dense connections\n\t\tfc0, wf0, bf0 = self._build_fc(flatten, 512)\n\t\tfc1, wf1, bf1 = self._build_fc(fc0, 512)\n\t\tfc2, wf2, bf2 = self._build_fc(self._build_dropout(fc1, dropout_toggle), self.REPRESENTATION_SIZE)\n\t\tfc_out = fc2\n\t\n\t\t# Output point and our encoder mix-in.\n\t\tmu_output, wmu, bmu = self._build_fc(fc_out, self.REPRESENTATION_SIZE)\n\t\tz_output, wz, bz = self._build_fc(fc_out, self.REPRESENTATION_SIZE)\n\t\tencoded_output = tf.random_normal(mean=mu_output, stddev=z_output, shape=z_output.get_shape()) #tf.nn.softmax(fc_out)\n\t\tencoded_input = self._build_dropout(encoder_input_source + encoded_output, dropout_toggle) # Mix input and enc.\n\t\tencoded_input.set_shape(encoded_output.get_shape()) # Otherwise we can't ascertain the size.\n\t\n\t\t# More dense connections on the offset.\n\t\tdfc2, dwf2, dbf2 = self._build_fc(encoded_input, 512, weight=tf.transpose(wf2), bias=tf.transpose(bf1))\n\t\tdfc1, dwf1, dbf1 = self._build_fc(dfc2, 512, weight=tf.transpose(wf1), bias=tf.transpose(bf0))\n\t\tdfc0, dwf0, dbf0 = self._build_fc(self._build_dropout(dfc1, dropout_toggle), flatten.get_shape().as_list()[-1], weight=tf.transpose(wf0))\n\t\n\t\t# Expand for more convolutional operations.\n\t\tunflatten = tf.reshape(dfc0, [-1, pre_flat_shape[1], pre_flat_shape[2], pre_flat_shape[3]]) #pre_flat_shape)\n\t\n\t\t# More convolutions here.\n\t\tdc5 = self._build_unpool(unflatten, [1, 2, 2, 1])\n\t\tdc4, wdc4, bdc4 = self._build_deconv(self._build_dropout(dc5, dropout_toggle), c3.get_shape().as_list(), [3, 3, filter_sizes[1], filter_sizes[2]], [1, 1, 1, 1])\n\t\tdc3 = self._build_unpool(dc4, [1, 2, 2, 1])\n\t\tdc2, wdc2, bdc2 = self._build_deconv(self._build_dropout(dc3, dropout_toggle), c1.get_shape().as_list(), [3, 3, filter_sizes[0], filter_sizes[1]], [1, 1, 1, 1])\n\t\tdc1 = self._build_unpool(dc2, [1, 2, 2, 1])\n\t\tdc0, wdc0, bdc0 = self._build_deconv(dc1, [batch, input_height, input_width, input_depth], [3, 3, input_depth, filter_sizes[0]], [1, 1, 1, 1], activate=False)\n\t\tdeconv_output = dc0\n\t\n\t\t# Return result + encoder output\n\t\treturn deconv_output, encoded_output", "def build_model(self):\n\n input_placeholder = Input(shape = self.input_shape)\n x = ZeroPadding2D((3, 3))(input_placeholder)\n\n # Stage 1\n x = self.main_path_block(x, 64, (7, 7), 'valid', 'conv1', 'bn_conv1', 'relu', (2, 2))\n x = MaxPooling2D((3, 3), strides = (2, 2))(x)\n\n # Stage 2\n x = self.convolutional_block(x, 3, [64, 64, 256], 2, 'a', 1)\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'b')\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'c')\n\n # Stage 3\n x = self.convolutional_block(x, 3, [128, 128, 512], 3, 'a', 2)\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'b')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'c')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'd')\n\n # Stage 4\n x = self.convolutional_block(x, 3, [256, 256, 1024], 4, 'a', 2)\n x 
= self.identity_block(x, 3, [256, 256, 1024], 4, 'b')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'c')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'd')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'e')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'f')\n\n # Stage 5\n x = self.convolutional_block(x, 3, [512, 512, 2048], 5, 'a', 2)\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'b')\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'c')\n \n # Average Pooling Layer\n x = AveragePooling2D((2, 2), name = 'avg_pool')(x)\n \n # Fully Connected Layer\n x = Flatten()(x)\n x = Dense(\n self.classes,\n activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet50')", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def build_net(self, inputs):\n with tf.variable_scope(self._scope, self._scope, [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.batch_norm],\n outputs_collections=end_points_collection):\n net = slim.conv2d(inputs, 32, 3, 1, scope='conv1')\n net = slim.conv2d(net, 32, 3, 1, scope='conv2')\n\n net = slim.conv2d(net, 64, 3, 1, scope='conv3')\n net = slim.conv2d(net, 64, 3, 1, scope='conv4')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool1')\n\n net = slim.conv2d(net, 128, 3, 1, scope='conv5')\n net = slim.conv2d(net, 128, 3, 1, scope='conv6')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool2')\n\n net = slim.conv2d(net, 256, 3, scope='conv7')\n net = slim.conv2d(net, 256, 3, scope='conv8')\n\n net = slim.max_pool2d(net, 2, [2, 1], scope='pool3')\n\n net = slim.conv2d(net, 512, 3, scope='conv9')\n net = slim.conv2d(net, 512, 3, scope='conv10')\n\n net = slim.max_pool2d(net, 2, [1, 1], scope='pool4')\n\n net = slim.conv2d(net, 512, 2, padding='VALID', scope='conv11')\n\n net = slim.dropout(net, keep_prob=0.5)\n\n self.end_points = utils.convert_collection_to_dict(end_points_collection)\n self.net = net", "def sweep_image_model():\n for c1 in [4, 8, 16]:\n for c2 in [2, 4]:\n for c3 in [2, 4]:\n for c4 in [1, 2]:\n flags = flag_reader.read_flag()\n print(c1)\n flags.channel_list = c1 * np.array([1, 
c2, c2*c3, c2*c3*c4])\n print('channel list = ', flags.channel_list)\n flags.last_dim = flags.channel_list[-1]\n flags.model_name = flags.data_set + '_channel_' + str(flags.channel_list).replace('[','').replace(']','').replace(' ','_') + \\\n '_dim_last_' + str(flags.last_dim) + '_ind_' + str(flags.comp_ind) + \\\n '_lr_{}_decay_{}_reg_{}_bs_{}'.format(flags.lr, flags.lr_decay_rate, flags.reg_scale, flags.batch_size)\n print(flags.model_name)\n training_from_flag(flags)", "def build_model(self):\n states = layers.Input(shape=(self.state_size,), name='states')\n actions = layers.Input(shape=(self.action_size,), name='actions')\n\n # Hidden Layers for state pathway\n net_states = layers.Dense(units=320, kernel_regularizer=regularizers.l2(0.01), activation='relu')(states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=160, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=80, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=40, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n\n # Hidden Layer for action pathway\n net_actions = layers.Dense(units=320, kernel_regularizer=regularizers.l2(0.01), activation='relu')(actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=160, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=80, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=40, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n\n # Combine state and action pathways\n net = layers.Add()([net_states, net_actions])\n net = layers.Activation('relu')(net)\n\n # Final Output layer\n Q_values = layers.Dense(units=1, name='q_values')(net)\n\n # Create a Keras Model\n self.model = models.Model(inputs=[states, actions], outputs=Q_values)\n\n # Define Optimizer and Compile the Model\n optimizer = optimizers.Adam(lr=0.0001)\n self.model.compile(optimizer=optimizer, loss='mse')\n\n # Action Gradients (derivative of Q_Value\n action_gradient = K.gradients(Q_values, actions)\n\n # Function to fetch action gradients\n self.get_action_gradients = K.function(\n inputs=[*self.model.input, K.learning_phase()],\n outputs=action_gradient\n )", "def __init__(self, layers, input_size):\n super(ConvNetMaker, self).__init__()\n self.conv_layers = []\n self.fc_layers = []\n # h, w, d = 32, 32, 3\n h, w, d = input_size, input_size, 3\n previous_layer_filter_count = 3\n previous_layer_size = h * w * d\n num_fc_layers_remained = len([1 for l in layers if l.startswith('FC')])\n for layer in layers:\n if layer.startswith('Conv'):\n filter_count = 
int(layer[4:])\n self.conv_layers += [\n nn.Conv2d(previous_layer_filter_count,\n filter_count,\n kernel_size=3,\n padding=1),\n nn.BatchNorm2d(filter_count),\n nn.ReLU(inplace=True)\n ]\n\n previous_layer_filter_count = filter_count\n d = filter_count\n previous_layer_size = h * w * d\n elif layer.startswith('MaxPool'):\n self.conv_layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n h, w = int(h / 2.0), int(w / 2.0)\n previous_layer_size = h * w * d\n elif layer.startswith('FC'):\n num_fc_layers_remained -= 1\n current_layer_size = int(layer[2:])\n if num_fc_layers_remained == 0:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size)]\n else:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size),\n nn.ReLU(inplace=True)]\n previous_layer_size = current_layer_size\n\n conv_layers = self.conv_layers\n fc_layers = self.fc_layers\n self.conv_layers = nn.Sequential(*conv_layers)\n self.fc_layers = nn.Sequential(*fc_layers)", "def build_cnn(self):\n model = Sequential()\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Flatten())\n model.add(Dense(len(ACTIONS), activation = 'linear'))\n model.compile(loss = 'mse', optimizer = Adam(lr = alpha))\n\n return model", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def build_layers(self):\n raise NotImplementedError", "def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = 
AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def 
build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def build(imageWidth, imageHeight, imageDepth, classesNumber, finalAct=\"sigmoid\"):\n\n # inizializzo il modello come sequenziale\n model = Sequential()\n inputShape = (imageHeight, imageWidth, imageDepth)\n chanDim = -1\n\n # Primo blocco Conv2D, Relu, Normalization, MaxPool\n # Utilizzo 32 filtri 3*3\n model.add(Conv2D(filters=32, kernel_size=(3, 3), padding=\"same\", input_shape=inputShape))\n # con attivazione Rectified Linear Unit\n model.add(Activation(\"relu\"))\n # applico una batch normalization\n model.add(BatchNormalization(axis=chanDim))\n # un MaxPooling 3*3\n model.add(MaxPooling2D(pool_size=(3, 3)))\n # ed un 25% di dropout per ridurre overfitting\n model.add(Dropout(0.25))\n\n # Secondo blocco\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # Terzo blocco\n model.add(Conv2D(128, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(128, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # Passo ai Fully Connected Layers\n # Trasformo il modello in un vettore\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Activation(\"sigmoid\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n # Infine utilizzo l'attivazione per la rete\n model.add(Dense(classesNumber))\n model.add(Activation(finalAct))\n\n return model", "def model_builder(loaded_bottleneck_model, top_layer):\n input1 = layers.Input(loaded_bottleneck_model.layers[0].input_shape[1:])\n weights_0 = loaded_bottleneck_model.layers[0].get_weights()\n # layer 1\n input_layer = loaded_bottleneck_model.layers[1]\n weights_1 = input_layer.get_weights()\n conv1d_1 = layer_from_config(copy_layer(input_layer))(input1)\n # layer 2\n input_layer = loaded_bottleneck_model.layers[2]\n weights_2 = input_layer.get_weights()\n conv1d_2 = layer_from_config(copy_layer(input_layer))(conv1d_1)\n # layer 3\n input_layer = loaded_bottleneck_model.layers[3]\n print(input_layer.get_config())\n weights_3 = input_layer.get_weights()\n add_1 = layers.Add()([conv1d_1, conv1d_2])\n # layer 4\n input_layer = loaded_bottleneck_model.layers[4]\n weights_4 = input_layer.get_weights()\n conv1d_3 = layer_from_config(copy_layer(input_layer))(add_1)\n # layer 5\n input_layer = loaded_bottleneck_model.layers[5]\n weights_5 = input_layer.get_weights()\n print(input_layer.get_config())\n add_2 = layers.Add()([add_1, conv1d_3])\n # layer 6\n input_layer = loaded_bottleneck_model.layers[6]\n weights_6 = input_layer.get_weights()\n conv1d_4 = layer_from_config(copy_layer(input_layer))(add_2)\n # layer 7\n input_layer = 
loaded_bottleneck_model.layers[7]\n weights_7 = input_layer.get_weights()\n print(input_layer.get_config())\n add_3 = layers.Add()([add_2, conv1d_4])\n # layer 8\n input_layer = loaded_bottleneck_model.layers[8]\n weights_8 = input_layer.get_weights()\n conv1d_5 = layer_from_config(copy_layer(input_layer))(add_3)\n # layer 9\n input_layer = loaded_bottleneck_model.layers[9]\n weights_9 = input_layer.get_weights()\n print(input_layer.get_config())\n add_4 = layers.Add()([add_3, conv1d_5])\n # layer 10\n input_layer = loaded_bottleneck_model.layers[10]\n weights_10 = input_layer.get_weights()\n conv1d_6 = layer_from_config(copy_layer(input_layer))(add_4)\n # layer 11\n input_layer = loaded_bottleneck_model.layers[11]\n weights_11 = input_layer.get_weights()\n print(input_layer.get_config())\n add_5 = layers.Add()([add_4, conv1d_6])\n # layer 12\n input_layer = loaded_bottleneck_model.layers[12]\n weights_12 = input_layer.get_weights()\n conv1d_7 = layer_from_config(copy_layer(input_layer))(add_5)\n # layer 13\n input_layer = loaded_bottleneck_model.layers[13]\n weights_13 = input_layer.get_weights()\n print(input_layer.get_config())\n add_6 = layers.Add()([add_5, conv1d_7])\n # layer 14\n input_layer = loaded_bottleneck_model.layers[14]\n weights_14 = input_layer.get_weights()\n conv1d_8 = layer_from_config(copy_layer(input_layer))(add_6)\n # layer 15\n input_layer = loaded_bottleneck_model.layers[15]\n weights_15 = input_layer.get_weights()\n print(input_layer.get_config())\n add_7 = layers.Add()([add_6, conv1d_8])\n # layer 16\n input_layer = loaded_bottleneck_model.layers[16]\n weights_16 = input_layer.get_weights()\n conv1d_9 = layer_from_config(copy_layer(input_layer))(add_7)\n # layer 17\n input_layer = loaded_bottleneck_model.layers[17]\n weights_17 = input_layer.get_weights()\n print(input_layer.get_config())\n add_8 = layers.Add()([add_7, conv1d_9])\n # layer 18\n input_layer = loaded_bottleneck_model.layers[18]\n weights_18 = input_layer.get_weights()\n conv1d_10 = layer_from_config(copy_layer(input_layer))(add_8)\n # layer 19\n input_layer = loaded_bottleneck_model.layers[19]\n weights_19 = input_layer.get_weights()\n print(input_layer.get_config())\n add_9 = layers.Add()([add_8, conv1d_10])\n # layer 19_slice\n lambda_layer = layers.Lambda(slice, output_shape=slice_output_shape)(add_9)\n lambda_layer.trainable = False\n # layer 19_padding\n padding_layer = layers.Lambda(\n pad, output_shape=pad_output_shape)(lambda_layer)\n padding_layer.trainable = False\n # layer 19_reshape_0\n reshaping_layer_0 = layers.Lambda(\n reshape_0, output_shape=reshape_output_shape_0)(padding_layer)\n reshaping_layer_0.trainable = False\n pooling_layer = layers.MaxPooling2D(pool_size=(\n 15, 1), strides=None, padding=\"same\")(reshaping_layer_0)\n pooling_layer.trainable = False\n reshaping_layer = layers.Lambda(\n reshape, output_shape=reshape_output_shape)(pooling_layer)\n reshaping_layer.trainable = False\n # layer 20\n input_layer = top_layer.layers[0]\n weights_20 = input_layer.get_weights()\n lr = layer_from_config(copy_layer(input_layer))(reshaping_layer)\n # weights\n new_model = Model(inputs=input1, outputs=lr)\n bpnk_model = Model(inputs=input1, outputs=add_9)\n new_model.layers[0].set_weights(weights_0)\n new_model.layers[1].set_weights(weights_1)\n new_model.layers[2].set_weights(weights_2)\n new_model.layers[3].set_weights(weights_3)\n new_model.layers[4].set_weights(weights_4)\n new_model.layers[5].set_weights(weights_5)\n new_model.layers[6].set_weights(weights_6)\n 
new_model.layers[7].set_weights(weights_7)\n new_model.layers[8].set_weights(weights_8)\n new_model.layers[9].set_weights(weights_9)\n new_model.layers[10].set_weights(weights_10)\n new_model.layers[11].set_weights(weights_11)\n new_model.layers[12].set_weights(weights_12)\n new_model.layers[13].set_weights(weights_13)\n new_model.layers[14].set_weights(weights_14)\n new_model.layers[15].set_weights(weights_15)\n new_model.layers[16].set_weights(weights_16)\n new_model.layers[17].set_weights(weights_17)\n new_model.layers[18].set_weights(weights_18)\n new_model.layers[19].set_weights(weights_19)\n # for i in range(20,26):\n # new_model.layers[i].set_weights(weights)\n new_model.layers[-1].set_weights(weights_20)\n\n bpnk_model.layers[0].set_weights(weights_0)\n bpnk_model.layers[1].set_weights(weights_1)\n bpnk_model.layers[2].set_weights(weights_2)\n bpnk_model.layers[3].set_weights(weights_3)\n bpnk_model.layers[4].set_weights(weights_4)\n bpnk_model.layers[5].set_weights(weights_5)\n bpnk_model.layers[6].set_weights(weights_6)\n bpnk_model.layers[7].set_weights(weights_7)\n bpnk_model.layers[8].set_weights(weights_8)\n bpnk_model.layers[9].set_weights(weights_9)\n bpnk_model.layers[10].set_weights(weights_10)\n bpnk_model.layers[11].set_weights(weights_11)\n bpnk_model.layers[12].set_weights(weights_12)\n bpnk_model.layers[13].set_weights(weights_13)\n bpnk_model.layers[14].set_weights(weights_14)\n bpnk_model.layers[15].set_weights(weights_15)\n bpnk_model.layers[16].set_weights(weights_16)\n bpnk_model.layers[17].set_weights(weights_17)\n bpnk_model.layers[18].set_weights(weights_18)\n bpnk_model.layers[19].set_weights(weights_19)\n\n new_model.compile(optimizer=\"Adam\", loss=\"mean_squared_error\")\n bpnk_model.compile(optimizer=\"Adam\", loss=\"mean_squared_error\")\n return new_model, bpnk_model", "def build(width, height, depth, classes, stages, filters, include_top, pooling,\n reg=1e-3, bnEps=2e-5, bnMom=0.0):\n inputShape = (height, width, depth)\n chanDim = -1\n\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n inputs = Input(shape=inputShape)\n\n\n # block 1 (initial conv block)\n x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)\n x = Conv2D(64, (7,7), use_bias=False, strides=(2,2),\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(reg))(x)\n x = BatchNormalization(axis=chanDim, name=\"bn_conv1\")(x)\n x = Activation(\"relu\")(x)\n x = ZeroPadding2D(padding=((1,1), (1,1)), name=\"pool1_pad\")(x)\n x = MaxPooling2D(3, strides=2)(x)\n\n for i in range(0, len(stages)):\n stride = (1,1) if i == 0 else (2,2) # block 2 (projection block) w stride(1,1)\n\n print(\"Stage {}, Stride={}\".format(i, stride))\n x = SEResNet.residual_module(x, filters[i+1], stride,\n chanDim=chanDim, red=True, bnEps=bnEps, bnMom=bnMom)\n for j in range(0, stages[i] + 1): #stacking res block to each depth layer\n x = SEResNet.residual_module(x, filters[i+1], stride=(1,1),\n chanDim=chanDim, bnEps=bnEps,\n bnMom=bnMom)\n x = BatchNormalization(axis=chanDim, epsilon=bnEps,\n momentum=bnMom)(x)\n x = Activation(\"relu\")(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, use_bias=False, kernel_regularizer=l2(reg),\n activation='softmax')(x)\n else:\n if pooling == 'avg':\n print(\"Adding average pool\")\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(inputs=inputs, outputs=x, name=\"SEResNet\")\n return model", "def build_generator(self):\n 
noise_shape = (self.dimensions_noise,)\n\n # This block of code can be a little daunting, but essentially it automatically calculates the required starting\n # array size that will be correctly upscaled to our desired image size.\n #\n # We have 5 Upsample2D layers which each double the images width and height, so we can determine the starting\n # x size by taking (x / 2^upsample_count) So for our target image size, 256x192, we do the following:\n # x = (192 / 2^5), y = (256 / 2^5) [x and y are reversed within the model]\n # We also need a 3rd dimension which is chosen relatively arbitrarily, in this case it's 64.\n model = Sequential()\n model.add(\n Dense(\n self.starting_filters\n * (self.img_size[0] // (2 ** self.upsample_layers))\n * (self.img_size[1] // (2 ** self.upsample_layers)),\n activation=\"relu\",\n input_shape=noise_shape,\n )\n )\n model.add(\n Reshape(\n (\n (self.img_size[0] // (2 ** self.upsample_layers)),\n (self.img_size[1] // (2 ** self.upsample_layers)),\n self.starting_filters,\n )\n )\n )\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 6x8 -> 12x16\n model.add(Conv2D(1024, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 12x16 -> 24x32\n model.add(Conv2D(512, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 24x32 -> 48x64\n model.add(Conv2D(256, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 48x64 -> 96x128\n model.add(Conv2D(128, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 96x128 -> 192x256\n model.add(Conv2D(64, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(Conv2D(32, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n model.summary()\n\n noise = Input(shape=noise_shape)\n img = model(noise)\n\n return Model(noise, img)", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return 
out", "def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.depth_conv1 = conv2d(1, 32, kernel_size=3, stride=2)\n self.depth_conv2 = conv2d(32, 64, kernel_size=3, stride=2)\n self.depth_conv3 = conv2d(64, 64, kernel_size=4, stride=2)\n self.depth_conv4 = conv2d(64, 64, stride=2)\n self.depth_conv5 = conv2d(64, 128, stride=2)\n self.depth_conv6 = conv2d(128, self.z_dim, stride=2)\n\n self.depth_encoder = nn.Linear(4 * self.z_dim, 2 * self.z_dim)\n self.flatten = Flatten()\n\n if initailize_weights:\n init_weights(self.modules())", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def build_model(self):\n\n # If center crop is valid, output dimensions will be the image dimensions.\n\n if self.is_crop:\n image_dims = [self.output_height, self.output_width, self.c_dim]\n else:\n # Otherwise, input dimensions will not be changed.\n image_dims = [self.input_height, self.input_width, self.c_dim]\n self.image_dims = image_dims\n\n # GAN latent variable.\n self.z = tf.placeholder(tf.float32, [FLAGS.batch_size + FLAGS.cs_batch_size, self.z_dim], name='z')\n\n if not FLAGS.just_cs: # If we are not training only on measurements (non-compressed data is present).\n self.inputs = tf.placeholder(tf.float32, [\n FLAGS.batch_size + FLAGS.cs_batch_size] + image_dims, name='real_images')\n self.sample_inputs = tf.placeholder(tf.float32, [self.sample_num] + image_dims, name='sample_inputs')\n\n if self.cs_learning: # If not DCGAN.\n # Load the A (sampling) matrix.\n if self.superres_learning:\n self.A_val = get_A_superres()\n FLAGS.cs_num_measurements = self.A_val.shape[1]\n elif self.inpaint_learning:\n self.A_val = get_A_inpaint()\n FLAGS.cs_num_measurements = self.A_val.shape[1]\n else:\n # Use the same A matrix for all of the experiments.\n A_path = os.path.join(FLAGS.sampling_mat_dir,\n '{}_w{}_h{}_m{}_a{}.pckl'.format(FLAGS.dataset, FLAGS.output_width,\n FLAGS.output_height, FLAGS.cs_num_measurements,\n FLAGS.a_ind))\n if os.path.exists(A_path):\n with open(A_path, 'r') as f:\n self.A_val = cPickle.load(f)\n else:\n raise RuntimeError('[!] No A matrix was found at {}. 
Run the following command first:\\n'\n 'python main.py --cfg <config> --generate_A'.format(A_path))\n\n self.A = tf.constant(self.A_val, dtype=tf.float32)\n\n # Batch size for compressed measurements.\n cs_bsize = FLAGS.cs_batch_size\n if self.cs_m_and_orig:\n # In algorithm 2, we use the compressed original samples to\n # train the discriminator of the measurements too.\n cs_bsize = cs_bsize + FLAGS.batch_size\n\n if self.cs_m_and_orig:\n print('[#] Solving for both the measurements and original inputs.')\n # Placeholder for original samples.\n self.orig_inputs = tf.placeholder(tf.float32, [\n FLAGS.batch_size] + image_dims, name='real_images')\n elif FLAGS.just_cs:\n print('[#] Solving for just the measurements, not the original inputs.')\n\n # Placeholder for compressed measurements.\n self.xs_target = tf.placeholder(tf.float32, [cs_bsize] + self.image_dims, name='target_xs')\n # Compute y_batch on GPU.\n self.y_batch = tf.matmul(tf.reshape(self.xs_target, [cs_bsize, -1]), self.A) + \\\n FLAGS.cs_noise_std * tf.random_normal([cs_bsize, FLAGS.cs_num_measurements])\n\n if FLAGS.just_cs: # If we are in only compressed measurements mode, the inputs are y_batches.\n self.inputs = self.y_batch\n\n # Batch normalization (deals with poor initialization helps gradient flow).\n self.d_bn1 = batch_norm(name='d_bn1')\n self.d_bn2 = batch_norm(name='d_bn2')\n self.d_bn3 = batch_norm(name='d_bn3')\n self.g_bn0 = batch_norm(name='g_bn0')\n self.g_bn1 = batch_norm(name='g_bn1')\n self.g_bn2 = batch_norm(name='g_bn2')\n self.g_bn3 = batch_norm(name='g_bn3')\n\n # Generator network.\n self.G = self.generator(self.z, batch_size=FLAGS.cs_batch_size + FLAGS.batch_size)\n\n # Discriminator network outputs.\n self.D, self.D_logits = self.discriminator(self.inputs, batch_size=FLAGS.cs_batch_size + FLAGS.batch_size)\n\n def sigmoid_cross_entropy_with_logits(x, y):\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)\n\n # If both compressed measurements and original images are used.\n if self.cs_m_and_orig:\n measured_generator_output = tf.matmul(\n tf.reshape(self.G, [FLAGS.cs_batch_size + FLAGS.batch_size, FLAGS.n_inputs]), self.A)\n self.D_, self.D_logits_ = self.discriminator(measured_generator_output, reuse=True,\n batch_size=FLAGS.cs_batch_size + FLAGS.batch_size)\n # Create disciriminator for original inputs.\n self.D_orig, self.D_logits_orig = self.discriminator(self.orig_inputs, reuse=False,\n batch_size=FLAGS.batch_size, orig=True)\n # Give the generator to the original inputs discriminator.\n self.D_orig_, self.D_logits_orig_ = self.discriminator(self.G, reuse=True,\n batch_size=FLAGS.cs_batch_size + FLAGS.batch_size,\n orig=True)\n\n # Losses and summaries.\n self.d_loss_orig_real = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits_orig, tf.ones_like(self.D_orig)))\n\n self.d_loss_orig_fake = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits_orig_, tf.zeros_like(self.D_orig_)))\n\n self.d_loss_orig_sum = scalar_summary(\"d_loss_orig_sum\", self.d_loss_orig_real + self.d_loss_orig_fake)\n\n self.g_loss_orig = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits_orig_, tf.ones_like(self.D_orig_)))\n\n # If only compressed measurements are used.\n elif FLAGS.just_cs:\n # The measurements on the output of the generator.\n measured_generator_output = tf.matmul(\n tf.reshape(self.G, [FLAGS.cs_batch_size + FLAGS.batch_size, FLAGS.n_inputs]), self.A)\n\n self.D_, self.D_logits_ = self.discriminator(measured_generator_output, reuse=True,\n 
batch_size=FLAGS.cs_batch_size + FLAGS.batch_size)\n\n # If only original images are used.\n else:\n self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True,\n batch_size=FLAGS.cs_batch_size + FLAGS.batch_size)\n\n # Losses and summaries.\n self.d_loss_real = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D)))\n self.d_loss_fake = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_)))\n\n self.g_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_)))\n\n self.d_loss_real_sum = scalar_summary(\"d_loss_real\", self.d_loss_real)\n self.d_loss_fake_sum = scalar_summary(\"d_loss_fake\", self.d_loss_fake)\n\n self.d_loss = self.d_loss_real + self.d_loss_fake\n\n # If both compressed measurements and original images are used, add the two losses.\n if self.cs_m_and_orig:\n self.g_loss = self.g_loss + self.g_loss_orig\n self.d_loss = self.d_loss + self.d_loss_orig_real + self.d_loss_orig_fake\n\n self.g_loss_sum = scalar_summary(\"g_loss\", self.g_loss)\n self.d_loss_sum = scalar_summary(\"d_loss\", self.d_loss)\n\n t_vars = tf.global_variables()\n\n # Gather variables.\n self.d_vars = [var for var in t_vars if 'disc' in var.name]\n self.g_vars = [var for var in t_vars if 'generator' in var.name]\n self.other_vars = [var for var in tf.global_variables() if 'save_' in var.name]\n\n self.save_var_names = self.d_vars + self.g_vars + self.other_vars\n\n # Build the Compressed Sensing estimator.\n if self.cs_learning:\n self.cs_grad_estimator()\n\n # Keep all checkpoints.\n if self.keep_all:\n self.saver = tf.train.Saver(self.save_var_names, max_to_keep=None)\n else:\n self.saver = tf.train.Saver(self.save_var_names)", "def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()", "def __init__(self, in_dimension, layer_1d, layer_2d, layer_3d, categorical_dimension, latent_dimension):\n super(VAEEncoderGumbel, self).__init__()\n self.latent_dimension = latent_dimension\n self.categorical_dimension = categorical_dimension\n\n # Reduce dimension up to second last layer of Encoder\n self.encode_nn = nn.Sequential(\n nn.Linear(in_dimension, layer_1d),\n nn.ReLU(),\n nn.Linear(layer_1d, layer_2d),\n nn.ReLU(),\n nn.Linear(layer_2d, layer_3d),\n nn.ReLU(),\n nn.Linear(layer_3d, latent_dimension*categorical_dimension),\n nn.ReLU()\n )", "def build_models(config):\n inputs = Input(shape=(config['patch_height'], config['patch_width'], config['depth'], config['channel']),\n name='patchimg')\n\n kernelinitfun = keras.initializers.RandomNormal(mean=0.0, stddev=0.1, seed=None)\n activationfun = 'relu'\n # kernelinitfun = 'glorot_normal'\n\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv1_1')(inputs)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv1_2')(x)\n # x = Dropout(0.3)(x)\n x = BatchNormalization(name='bn1')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp1', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv2_1')(x)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv2_2')(x)\n # x = Dropout(0.2)(x)\n x = BatchNormalization(name='bn2')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp2', strides=(2, 
2, 1))(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv3_1')(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv3_2')(x)\n # x = Dropout(0.5)(x)\n x = BatchNormalization(name='bn3')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp3', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv4_1')(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv4_2')(x)\n # x = Dropout(0.5)(x)\n x = BatchNormalization(name='bn4')(x)\n x = Activation(activationfun)(x)\n\n x4 = Flatten(name='aux_fx')(x)\n\n source_classifier = Dropout(0.5)(x4)\n source_classifier = Dense(512, activation='softmax', name=\"mo1\")(source_classifier)\n source_classifier = Dropout(0.5)(source_classifier)\n source_classifier = Dense(128, activation='softmax', name=\"mo2\")(source_classifier)\n # source_classifier = Dropout(0.3)(source_classifier)\n source_classifier = Dense(1, name=\"mo\")(source_classifier)\n\n domain_classifier = Dense(32, activation='linear', name=\"do4\")(x4)\n domain_classifier = BatchNormalization(name=\"do5\")(domain_classifier)\n domain_classifier = Activation(\"elu\", name=\"do6\")(domain_classifier)\n domain_classifier = Dropout(0.5)(domain_classifier)\n\n domain_classifier = Dense(2, activation='softmax', name=\"do\")(domain_classifier)\n\n adamop = keras.optimizers.Adam(learning_rate=1e-5, beta_1=0.9, beta_2=0.999, amsgrad=False)\n comb_model = Model(inputs=inputs, outputs=[source_classifier, domain_classifier])\n comb_model.compile(optimizer=adamop,\n loss={'mo': 'mae', 'do': 'categorical_crossentropy'},\n loss_weights={'mo': 1, 'do': 2}, metrics=['accuracy'], )\n\n source_classification_model = Model(inputs=inputs, outputs=[source_classifier])\n source_classification_model.compile(optimizer=adamop,\n loss={'mo': 'mae'}, metrics=['accuracy'], )\n\n domain_classification_model = Model(inputs=inputs, outputs=[domain_classifier])\n domain_classification_model.compile(optimizer=adamop,\n loss={'do': 'categorical_crossentropy'}, metrics=['accuracy'])\n\n embeddings_model = Model(inputs=inputs, outputs=[x4])\n embeddings_model.compile(optimizer=adamop, loss='categorical_crossentropy', metrics=['accuracy'])\n\n return comb_model, source_classification_model, domain_classification_model, embeddings_model", "def build_transformation_network(n_styles, depthwise_separable_conv):\n\n image_input = Input((None, None, 3), name=\"image\")\n style_weights = Input((n_styles, ), name=\"style_weights\")\n\n net = conv_block(image_input,\n style_weights,\n filters=32,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=64,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = 
residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=64,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=32,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=3,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"sigmoid\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = Lambda(lambda t: t * 255.0, name=\"output\")(net)\n\n return Model([image_input, style_weights], net, name=\"transform_net\")", "def build(self, *args, **kwargs):\n self._layer_counter = 0\n r = self._build_impl(*args, **kwargs)\n \n # Call the init functions \n if self._build_counter == 0:\n for initlayer in self._layers_to_init:\n if initlayer['initfnkwargs']:\n initlayer['initfn'](initlayer['layer'], **initlayer['initfnkwargs'])\n else:\n initlayer['initfn'](initlayer['layer'])\n \n self._build_counter += 1\n return r", "def build(self):\n self.build_inputs()\n self.image_embeddings = self.build_image_embeddings(self.images)\n self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)\n self.build_model()\n self.setup_inception_initializer()\n self.setup_global_step()", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n\n blocks = [self.layer_11,\n self.layer_12,\n self.layer_13,\n self.layer_14,\n self.layer_15]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n 
merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def __init__(self):\r\n\r\n super(Net, self).__init__()\r\n\r\n\r\n pre= models.inception_v3(pretrained=True)\r\n # Freeze model weights\r\n for param in pre.parameters():\r\n param.requires_grad = False\r\n\r\n n_inputs = pre.fc.in_features\r\n n_inputsaux = pre.AuxLogits.fc.in_features\r\n\r\n pre.fc = nn.Sequential(\r\n nn.Linear(n_inputs, 2000),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.4),\r\n nn.BatchNorm1d(2000),\r\n )\r\n\r\n pre.AuxLogits.fc = nn.Linear(n_inputsaux, 6)\r\n\r\n self.firstlayers = pre\r\n\r\n self.fc1 = nn.Sequential(\r\n nn.Linear(2000, 1500),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.4),\r\n nn.BatchNorm1d(1500),\r\n nn.Linear(1500, 1500),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.4),\r\n nn.BatchNorm1d(1500),\r\n nn.Linear(1500, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 800),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(800),\r\n nn.Linear(800, 800),\r\n nn.ReLU(),\r\n )\r\n\r\n self.fc2 = nn.Sequential(\r\n nn.Linear(800, 600),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(600),\r\n nn.Linear(600, 400),\r\n nn.ReLU(),\r\n nn.Linear(400, 300),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(300),\r\n nn.Linear(300, 200),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(200),\r\n nn.Linear(200, 100),\r\n nn.ReLU(),\r\n nn.Linear(100, 50),\r\n nn.ReLU(),\r\n nn.Linear(50, 6),\r\n nn.Sigmoid()\r\n )\r\n\r\n # Spatial transformer localization-network\r\n self.localization = nn.Sequential(\r\n nn.Conv2d(3, 8, kernel_size=20),\r\n nn.ReLU(True),\r\n nn.Conv2d(8, 10, kernel_size=15, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=15, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=15, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=10, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5),\r\n nn.ReLU(True)\r\n\r\n )\r\n\r\n # Regressor for the 3 * 2 affine matrix\r\n self.fc_loc = nn.Sequential(\r\n nn.Linear(10 * 15 * 15, 32),\r\n nn.ReLU(True),\r\n nn.Linear(32, 3 * 2)\r\n )\r\n\r\n # Initialize the weights/bias with identity transformation\r\n self.fc_loc[2].weight.data.zero_()\r\n 
self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\r\n\r\n self.phase = 0", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]", "def create(self):\n \n \"\"\" A solo prepressing reduction network in the head \"\"\"\n print(\"pre_reduction\")\n with tf.name_scope('pre_reduction'):\n conv1 = NW.conv(self.X, 7, 7, 64, 2, 2, name='conv1')\n pool1 = NW.max_pool(conv1, 3, 3, 2, 2, name='pool1')\n norm1 = NW.lrn(pool1, 2, 2e-05, 0.75, name='norm1')\n reduction2 = NW.conv(norm1, 1, 1, 64, 1, 1, name='reduction2')\n conv2 = NW.conv(reduction2, 3, 3, 192, 1, 1,name='conv2')\n norm2 = NW.lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = NW.max_pool(norm2, 3, 3, 2, 2, name='pool2')\n \n \"\"\" 1st inception layer group \"\"\"\n print(\"icp1\")\n with tf.name_scope('icp1'):\n # branch 0\n icp1_out0 = NW.conv(pool2, 1, 1, 64, 1, 1, name='icp1_out0')\n # branch 1\n icp1_reduction1 = NW.conv(pool2, 1, 1, 96, 1, 1, name='icp1_reduction1')\n icp1_out1 = NW.conv(icp1_reduction1, 3, 3, 128, 1, 1, name='icp1_out1')\n # branch 2\n icp1_reduction2 = NW.conv(pool2, 1, 1, 16, 1, 1, name='icp1_reduction2')\n icp1_out2 = NW.conv(icp1_reduction2, 5, 5, 32, 1, 1, name='icp1_out2')\n # branch 3\n icp1_pool = NW.max_pool(pool2, 3, 3, 1, 1, name='icp1_pool')\n icp1_out3 = NW.conv(icp1_pool, 1, 1, 32, 1, 1, name='icp1_out3')\n # concat\n icp2_in = NW.concat([icp1_out0,\n icp1_out1,\n icp1_out2,\n icp1_out3], 3, 'icp2_in')\n\n \"\"\" 2nd inception layer group \"\"\"\n print(\"icp2\")\n with tf.name_scope('icp2'):\n # branch 0\n icp2_out0 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_out0')\n # branch 1\n icp2_reduction1 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_reduction1')\n icp2_out1 = NW.conv(icp2_reduction1, 3, 3, 192, 1, 1, name='icp2_out1')\n # branch 2\n icp2_reduction2 = NW.conv(icp2_in, 1, 1, 32, 1, 1, name='icp2_reduction2')\n icp2_out2 = NW.conv(icp2_reduction2, 5, 5, 96, 1, 1, name='icp2_out2')\n # branch 3\n icp2_pool = NW.max_pool(icp2_in, 3, 3, 1, 1, name='icp2_pool')\n icp2_out3 = NW.conv(icp2_pool, 1, 1, 64, 1, 1, name='icp2_out3')\n # concat\n icp2_out = NW.concat([icp2_out0,\n icp2_out1,\n icp2_out2,\n icp2_out3], 3, 'icp2_out')\n \n \"\"\" 3rd inception layer group \"\"\"\n print(\"icp3\")\n with tf.name_scope('icp3'):\n icp3_in = 
NW.max_pool(icp2_out, 3, 3, 2, 2, name='icp3_in')\n # branch 0\n icp3_out0 = NW.conv(icp3_in, 1, 1, 192, 1, 1, name='icp3_out0')\n # branch 1\n icp3_reduction1 = NW.conv(icp3_in, 1, 1, 96, 1, 1, name='icp3_reduction1')\n icp3_out1 = NW.conv(icp3_reduction1, 3, 3, 208, 1, 1, name='icp3_out1')\n # branch 2\n icp3_reduction2 = NW.conv(icp3_in, 1, 1, 16, 1, 1, name='icp3_reduction2')\n icp3_out2 = NW.conv(icp3_reduction2, 5, 5, 48, 1, 1, name='icp3_out2')\n # branch 3\n icp3_pool = NW.max_pool(icp3_in, 3, 3, 1, 1, name='icp3_pool')\n icp3_out3 = NW.conv(icp3_pool, 1, 1, 64, 1, 1, name='icp3_out3')\n # concat\n icp3_out = NW.concat([icp3_out0,\n icp3_out1,\n icp3_out2,\n icp3_out3], 3, 'icp3_out')\n \n \"\"\" 1st classify branch \"\"\"\n with tf.name_scope('cls1'):\n cls1_pool = NW.avg_pool(icp3_out, 5, 5, 3, 3, padding='VALID', name='cls1_pool')\n cls1_reduction_pose = NW.conv(cls1_pool, 1, 1, 128, 1, 1, name='cls1_reduction_pose')\n cls1_fc1_pose = NW.fc(cls1_reduction_pose, 1024, name='cls1_fc1_pose')\n cls1_fc_pose_xy = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_xy')\n cls1_fc_pose_ab = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_ab')\n self.layers[\"cls1_fc_pose_xy\"] = cls1_fc_pose_xy\n self.layers[\"cls1_fc_pose_ab\"] = cls1_fc_pose_ab\n \n \"\"\" 4st inception layer group \"\"\"\n print(\"icp4\")\n with tf.name_scope('icp4'):\n # branch 0\n icp4_out0 = NW.conv(icp3_out, 1, 1, 160, 1, 1, name='icp4_out0')\n # branch 1\n icp4_reduction1 = NW.conv(icp3_out, 1, 1, 112, 1, 1, name='icp4_reduction1')\n icp4_out1 = NW.conv(icp4_reduction1, 3, 3, 224, 1, 1, name='icp4_out1')\n # branch 2\n icp4_reduction2 = NW.conv(icp3_out, 1, 1, 24, 1, 1, name='icp4_reduction2')\n icp4_out2 = NW.conv(icp4_reduction2, 5, 5, 64, 1, 1, name='icp4_out2')\n # branch 3\n icp4_pool = NW.max_pool(icp3_out, 3, 3, 1, 1, name='icp4_pool')\n icp4_out3 = NW.conv(icp4_pool, 1, 1, 64, 1, 1, name='icp4_out3')\n # concat\n icp4_out = NW.concat([icp4_out0,\n icp4_out1,\n icp4_out2,\n icp4_out3],3, name='icp4_out')\n\n \"\"\" 5st inception layer group \"\"\"\n print(\"icp5\")\n with tf.name_scope('icp5'):\n # branch 0\n icp5_out0 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_out0')\n # branch 1\n icp5_reduction1 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_reduction1')\n icp5_out1 = NW.conv(icp5_reduction1, 3, 3, 256, 1, 1, name='icp5_out1')\n # branch 2\n icp5_reduction2 = NW.conv(icp4_out,1, 1, 24, 1, 1, name='icp5_reduction2')\n icp5_out2 = NW.conv(icp5_reduction2, 5, 5, 64, 1, 1, name='icp5_out2')\n # branch 3\n icp5_pool = NW.max_pool(icp4_out,3, 3, 1, 1, name='icp5_pool')\n icp5_out3 = NW.conv(icp5_pool, 1, 1, 64, 1, 1, name='icp5_out3')\n # concat\n icp5_out = NW.concat([icp5_out0, \n icp5_out1, \n icp5_out2, \n icp5_out3], 3, name='icp5_out')\n \n \"\"\" 6st inception layer group \"\"\"\n print(\"icp6\")\n with tf.name_scope('icp6'):\n # branch 0\n icp6_out0 = NW.conv(icp5_out, 1, 1, 112, 1, 1, name='icp6_out0')\n # branch 1\n icp6_reduction1 = NW.conv(icp5_out, 1, 1, 144, 1, 1, name='icp6_reduction1')\n icp6_out1 = NW.conv(icp6_reduction1, 3, 3, 288, 1, 1, name='icp6_out1')\n # branch 2\n icp6_reduction2 = NW.conv(icp5_out, 1, 1, 32, 1, 1, name='icp6_reduction2')\n icp6_out2 = NW.conv(icp6_reduction2, 5, 5, 64, 1, 1, name='icp6_out2')\n # branch 3\n icp6_pool = NW.max_pool(icp5_out,3, 3, 1, 1, name='icp6_pool')\n icp6_out3 = NW.conv(icp6_pool, 1, 1, 64, 1, 1, name='icp6_out3')\n # concat\n icp6_out = NW.concat([icp6_out0,\n icp6_out1,\n icp6_out2,\n icp6_out3], 3, name='icp6_out')\n\n \"\"\" 
2nd classify branch \"\"\"\n with tf.name_scope('cls2'):\n cls2_pool = NW.avg_pool(icp6_out, 5, 5, 3, 3, padding='VALID', name='cls2_pool')\n cls2_reduction_pose = NW.conv(cls2_pool, 1, 1, 128, 1, 1, name='cls2_reduction_pose')\n cls2_fc1 = NW.fc(cls2_reduction_pose, 1024, name='cls2_fc1')\n cls2_fc_pose_xy = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_xy')\n cls2_fc_pose_ab = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_ab')\n self.layers[\"cls2_fc_pose_xy\"] = cls2_fc_pose_xy\n self.layers[\"cls2_fc_pose_ab\"] = cls2_fc_pose_ab\n\n \"\"\" 7st inception layer group \"\"\"\n print(\"icp7\")\n with tf.name_scope('icp7'):\n # branch 0\n icp7_out0 = NW.conv(icp6_out, 1, 1, 256, 1, 1, name='icp7_out0')\n # branch 1\n icp7_reduction1 = NW.conv(icp6_out, 1, 1, 160, 1, 1, name='icp7_reduction1')\n icp7_out1 = NW.conv(icp7_reduction1, 3, 3, 320, 1, 1, name='icp7_out1')\n # branch 2\n icp7_reduction2 = NW.conv(icp6_out, 1, 1, 32, 1, 1, name='icp7_reduction2')\n icp7_out2 = NW.conv(icp7_reduction2, 5, 5, 128, 1, 1, name='icp7_out2')\n # branch 3\n icp7_pool = NW.max_pool(icp6_out, 3, 3, 1, 1, name='icp7_pool')\n icp7_out3 = NW.conv(icp7_pool, 1, 1, 128, 1, 1, name='icp7_out3')\n # concat\n icp7_out = NW.concat([icp7_out0,\n icp7_out1,\n icp7_out2,\n icp7_out3], 3, name='icp7_out')\n\n \"\"\" 8st inception layer group \"\"\"\n print(\"icp8\")\n with tf.name_scope('icp8'):\n icp8_in = NW.max_pool(icp7_out, 3, 3, 2, 2, name='icp8_in')\n # branch 0\n icp8_out0 = NW.conv(icp8_in, 1, 1, 256, 1, 1, name='icp8_out0')\n # branch 1\n icp8_reduction1 = NW.conv(icp8_in, 1, 1, 160, 1, 1, name='icp8_reduction1')\n icp8_out1 = NW.conv(icp8_reduction1, 3, 3, 320, 1, 1, name='icp8_out1')\n # branch 2\n icp8_reduction2 = NW.conv(icp8_in, 1, 1, 32, 1, 1, name='icp8_reduction2')\n icp8_out2 = NW.conv(icp8_reduction2, 5, 5, 128, 1, 1, name='icp8_out2')\n # branch 3\n icp8_pool = NW.max_pool(icp8_in, 3, 3, 1, 1, name='icp8_pool')\n icp8_out3 = NW.conv(icp8_pool, 1, 1, 128, 1, 1, name='icp8_out3')\n # concat\n icp8_out = NW.concat([icp8_out0,\n icp8_out1,\n icp8_out2,\n icp8_out3], 3, name='icp8_out')\n \n \"\"\" 9st inception layer group \"\"\"\n print(\"icp9\")\n with tf.name_scope('icp9'):\n # branch 0\n icp9_out0 = NW.conv(icp8_out, 1, 1, 384, 1, 1, name='icp9_out0')\n # branch 1\n icp9_reduction1 = NW.conv(icp8_out, 1, 1, 192, 1, 1, name='icp9_reduction1')\n icp9_out1 = NW.conv(icp9_reduction1, 3, 3, 384, 1, 1, name='icp9_out1')\n # branch 2\n icp9_reduction2 = NW.conv(icp8_out, 1, 1, 48, 1, 1, name='icp9_reduction2')\n icp9_out2 = NW.conv(icp9_reduction2, 5, 5, 128, 1, 1, name='icp9_out2')\n # branch 3\n icp9_pool = NW.max_pool(icp8_out, 3, 3, 1, 1, name='icp9_pool')\n icp9_out3 = NW.conv(icp9_pool, 1, 1, 128, 1, 1, name='icp9_out3')\n # concat\n icp9_out = NW.concat([icp9_out0,\n icp9_out1,\n icp9_out2,\n icp9_out3], 3, name='icp9_out')\n\n \"\"\" 3rd classify branch \"\"\"\n with tf.name_scope('cls3'):\n cls3_pool = NW.avg_pool(icp9_out, 7, 7, 1, 1, padding='VALID', name='cls3_pool')\n cls3_fc1_pose = NW.fc(cls3_pool, 2048, name='cls3_fc1_pose')\n cls3_fc_pose_xy = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_xy')\n cls3_fc_pose_ab = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_ab')\n self.layers[\"cls3_fc_pose_xy\"] = cls3_fc_pose_xy\n self.layers[\"cls3_fc_pose_ab\"] = cls3_fc_pose_ab", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n 
conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. 
does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def build_model(self, input_size=(9, 1), k_reg=keras.regularizers.l2(1e-8), a_reg=keras.regularizers.l1(1e-8)):\n data_input = keras.Input(shape=input_size)\n normed_data = keras.layers.BatchNormalization()(data_input)\n flat_normed_data = keras.layers.Flatten()(normed_data)\n\n out = keras.layers.Dense(1, activation=\"sigmoid\")(flat_normed_data)\n\n self.model = keras.Model(inputs=data_input, outputs=out)\n self.model.summary()", "def construct_model():\n import lbann\n\n # Layer graph\n 
input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def build_generator2D_(self, model_shape, filters=32, k_size=4, z_size=500, summary=False, model_file=None, name='gan_g_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n c_dims = model_shape[2]\n input_shape = (z_size,)\n if n_rows % 8 !=0:\n height = n_rows//8 + 1\n else:\n height = n_rows//8\n if n_cols % 8 !=0:\n width = n_cols//8 +1\n else:\n width = n_cols//8\n \n num_init_neurons = 8*filters \n reshape_size= (height, width, num_init_neurons)\n \n # 8*height, 4*height, 2*height, height = n_rows, n_rows//2, n_rows//4, n_rows//8\n rows_matching = np.equal([2*height, 4*height, 8*height], [n_rows//4, n_rows//2, n_rows])\n index_rows = np.where(np.logical_not(rows_matching))[0]\n if len(index_rows) > 0:\n index_rows = index_rows[0]\n # print(index_rows)\n # 8*width, 
4*width, 2*width, width = n_cols//1, n_cols//2, n_cols//4, n_cols//8\n cols_matching = np.equal([2*width, 4*width, 8*width], [n_cols//4, n_cols//2, n_cols])\n index_cols = np.where(np.logical_not(cols_matching))[0]\n if len (index_cols) > 0:\n index_cols = index_cols[0]\n # print(index_cols)\n\n input_layer = Input(shape=input_shape, name=name+'input')\n g = Dense(width * height * num_init_neurons, kernel_initializer=RandomNormal(stddev=0.02), name=name+'dense')(input_layer)\n g = Reshape(reshape_size, name=name+'reshape')(g)\n g = BatchNormalization(momentum=0.8, name=name+'bn_dense')(g, training=True)\n g = Activation(activation='relu', name=name+'relu')(g)\n\n g = self.Conv2DTranspose_Block(g, 4*filters, name=name+'1')\n if index_rows==0 or index_cols==0:\n g = BilinearUpsampling(output_size=(n_rows//4, n_cols//4), name=name+'bilinear')(g)\n g = self.Conv2DTranspose_Block(g, 2*filters, k_size=k_size, name=name+'2')\n if index_rows==1 or index_cols==1:\n g = BilinearUpsampling(output_size=(n_rows//2, n_cols//2), name=name+'bilinear')(g)\n g = self.Conv2DTranspose_Block(g, 1*filters, k_size=k_size, name=name+'3')\n if index_rows==2 or index_cols==2:\n g = BilinearUpsampling(output_size=(n_rows, n_cols), name=name+'bilinear')(g) \n g = self.Conv2DTranspose_Block(g, c_dims, strides=1, activation='tanh', k_size=k_size, name=name+'4', bn=False)\n\n model = Model(inputs=[input_layer], outputs=[g], name='Generator')\n if (summary):\n model.summary()\n return model", "def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = out_channels\n\n return nn.Sequential(*layers)", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def __init__(self, in_channels=3, n_classes=21):\n super(UpNet, self).__init__()\n\n self.layer_1 = UpNetLayer_ParticularEncoder_2(in_channels, 64, 2)\n self.layer_2 = UpNetLayer_Encoder(64, 128, 2)\n self.layer_3 = UpNetLayer_Encoder(128, 256, 3)\n self.layer_4 = UpNetLayer_Encoder(256, 512, 3)\n self.layer_6 = UpNetLayer_ParticularEncoder(512, 1024, 3)\n\n self.layer_inter = UpNetLayer_Dropout()\n\n self.layer_7 = UpNetLayer_Decoder_Particular(1024, 512, 3)\n self.layer_8 = UpNetLayer_Decoder(512, 256, 3)\n self.layer_9 = UpNetLayer_Decoder(256, 128, 3)\n self.layer_10 = UpNetLayer_Decoder(128, 64, 
2)\n self.layer_11 = UpNetLayer_Decoder_Particular_2(64, n_classes, 2)", "def __init__(\n self,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n yin_start,\n yin_scope,\n yin_shift_range,\n gin_channels=0,\n ):\n super().__init__()\n self.in_channels = yin_scope\n self.out_channels = yin_scope\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.dilation_rate = dilation_rate\n self.n_layers = n_layers\n self.gin_channels = gin_channels\n\n self.yin_start = yin_start\n self.yin_scope = yin_scope\n self.yin_shift_range = yin_shift_range\n\n self.pre = nn.Conv1d(self.in_channels, hidden_channels, 1)\n self.dec = modules.WN(\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=gin_channels,\n )\n self.proj = nn.Conv1d(hidden_channels, self.out_channels, 1)", "def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. 
This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def model_CENTSD_33conv(patch_size, batch_size, FAST_network=False, FAST_imgheight=None, FAST_imgwidth=None, nonlin_func='leaky'):\n\n if nonlin_func == 'leaky':\n nonlin = nonlinearities.LeakyRectify(leaky_param)\n elif nonlin_func == 'elu':\n nonlin = nonlinearities.elu\n else:\n print 'Error! 
Unsupported non-linearity function'\n return\n\n if FAST_network:\n l_in0 = layers.InputLayer(\n shape=(1, 1, FAST_imgheight, FAST_imgwidth),name='l_in0')\n else:\n l_in0 = layers.InputLayer(\n shape=(batch_size, in_channels, patch_size, patch_size),name='l_in0')\n\n layer_params = {'conv_num_filters':32,\n 'conv_filter_size':(3,3),\n 'conv_border_mode':border_mode,\n 'conv_nonlinearity':nonlin,\n 'batch_norm':True,\n 'maxpool':True,\n 'maxpool_ds':(2,2)}\n layer = layer_factory(in_layer=l_in0,layer_type='conv',**layer_params)\n\n layer_params = {'conv_num_filters':64,\n 'conv_filter_size':(3,3),\n 'conv_border_mode':border_mode,\n 'conv_nonlinearity':nonlin,\n 'batch_norm':True,\n 'maxpool':True,\n 'maxpool_ds':(2,2)}\n layer = layer_factory(in_layer=layer,layer_type='conv',**layer_params)\n\n layer_params = {'conv_num_filters':128,\n 'conv_filter_size':(3,3),\n 'conv_border_mode':border_mode,\n 'conv_nonlinearity':nonlin,\n 'batch_norm':True,\n 'maxpool':True,\n 'maxpool_ds':(2,2)}\n layer = layer_factory(in_layer=layer,layer_type='conv',**layer_params)\n\n layer_params = {'conv_num_filters':256,\n 'conv_filter_size':(3,3),\n 'conv_border_mode':border_mode,\n 'conv_nonlinearity':nonlin,\n 'batch_norm':True,\n 'maxpool':True,\n 'maxpool_ds':(2,2)}\n layer = layer_factory(in_layer=layer,layer_type='conv',**layer_params)\n\n layer_params = {'conv_num_filters':512,\n 'conv_filter_size':(2,2),\n 'conv_border_mode':border_mode,\n 'conv_nonlinearity':nonlin,\n 'batch_norm':True,\n 'maxpool':False,\n 'maxpool_ds':(2,2)}\n layer = layer_factory(in_layer=layer,layer_type='conv',**layer_params)\n\n return layer", "def SRCNNv2(input_shape, depth_multiplier=1, multi_output=False):\n inputs = Input(input_shape, name=\"inputs\")\n conv1 = Convolution2D(filters=64, kernel_size=9, padding=\"same\", name=\"conv1\", activation=\"relu\")(inputs)\n conv2 = Convolution2D(filters=64, kernel_size=7, padding=\"same\", name=\"conv2\", activation=\"relu\")(conv1)\n #conv3 = Convolution2D(filters=64, kernel_size=3, padding=\"same\", name=\"conv3\", activation=\"relu\")(conv2)\n\n mapping = Convolution2D(filters=32, kernel_size=5, padding=\"same\", name=\"mapping\", activation=\"relu\")(conv2)\n #mapping2 = Convolution2D(filters=16, kernel_size=7, padding=\"same\", name=\"mapping2\", activation=\"relu\")(mapping)\n \n \n if multi_output:\n out = Convolution2D(filters=2, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n else:\n out = Convolution2D(filters=1, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n return Model(inputs, out)", "def build_generator2D(self, model_shape, filters=32, k_size=4, z_size=500, summary=False, model_file=None, name='gan_g_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n input_shape = (z_size,)\n if n_rows % 8 !=0:\n height = n_rows//8 + 1\n else:\n height = n_rows//8\n if n_cols % 8 !=0:\n width = n_cols//8 +1\n else:\n width = n_cols//8\n \n num_init_neurons = 8*filters \n reshape_size= (height, width, num_init_neurons)\n \n # 8*height, 4*height, 2*height, height = n_rows, n_rows//2, n_rows//4, n_rows//8\n rows_matching = np.equal([2*height, 4*height, 8*height], [n_rows//4, n_rows//2, n_rows])\n index_rows = np.where(np.logical_not(rows_matching))[0]\n if len(index_rows) > 0:\n index_rows = index_rows[0]\n # print(index_rows)\n # 
8*width, 4*width, 2*width, width = n_cols//1, n_cols//2, n_cols//4, n_cols//8\n cols_matching = np.equal([2*width, 4*width, 8*width], [n_cols//4, n_cols//2, n_cols])\n index_cols = np.where(np.logical_not(cols_matching))[0]\n if len (index_cols) > 0:\n index_cols = index_cols[0]\n # print(index_cols)\n\n input_layer = Input(shape=input_shape, name=name+'input')\n g = Dense(width * height * num_init_neurons, kernel_initializer=RandomNormal(stddev=0.02), name=name+'dense')(input_layer)\n g = Reshape(reshape_size, name=name+'reshape')(g)\n g = BatchNormalization(momentum=0.8, name=name+'bn_dense')(g, training=True)\n g = Activation(activation='relu', name=name+'relu')(g)\n\n g = self.Conv2DTranspose_Block(g, 4*filters, name=name+'1')\n if index_rows==0 or index_cols==0:\n g = BilinearUpsampling(output_size=(n_rows//4, n_cols//4), name=name+'bilinear')(g)\n g = self.Conv2DTranspose_Block(g, 2*filters, k_size=k_size, name=name+'2')\n if index_rows==1 or index_cols==1:\n g = BilinearUpsampling(output_size=(n_rows//2, n_cols//2), name=name+'bilinear')(g)\n g = self.Conv2DTranspose_Block(g, 1*filters, k_size=k_size, name=name+'3')\n if index_rows==2 or index_cols==2:\n g = BilinearUpsampling(output_size=(n_rows, n_cols), name=name+'bilinear')(g) \n g = self.Conv2DTranspose_Block(g, 2, strides=1, activation='tanh', k_size=k_size, name=name+'4', bn=False)\n\n model = Model(inputs=[input_layer], outputs=[g], name='Generator')\n if (summary):\n model.summary()\n return model", "def compile(self):\n m, n = self.input_shape[1], self.input_shape[2]\n\n inp = Input(shape=self.input_shape, traces=True)\n self.add_layer(inp, \"DoG\")\n\n s1 = LIFNodes(shape=(18, m, n), traces=True)\n self.add_layer(s1, \"conv_1\")\n c1 = LIFNodes(shape=(18, m // 2, n // 2), traces=True)\n self.add_layer(c1, \"pool_1\")\n\n s2 = LIFNodes(shape=(24, m // 2, n // 2), traces=True)\n self.add_layer(s2, \"conv_2\")\n c2 = LIFNodes(shape=(24, m // 4, n // 4), traces=True)\n self.add_layer(c2, \"pool_2\")\n\n s3 = LIFNodes(shape=(32, m // 4, n // 4), traces=True)\n self.add_layer(s3, \"conv_3\")\n f = LIFNodes(shape=(32, 1), traces=True)\n self.add_layer(f, \"global_pool\")\n\n conv1 = Conv2dConnection(inp, s1, 5, padding=2, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv1, \"DoG\", \"conv_1\")\n pool1 = MaxPool2dConnection(s1, c1, 2, 2, decay=0.5)\n self.add_connection(pool1, \"conv_1\", \"pool_1\")\n\n conv2 = Conv2dConnection(c1, s2, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv2, \"pool_1\", \"conv_2\")\n pool2 = MaxPool2dConnection(s2, c2, 2, 2, decay=0.5)\n self.add_connection(pool2, \"conv_2\", \"pool_2\")\n\n conv3 = Conv2dConnection(c2, s3, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv3, \"pool_2\", \"conv_3\")\n global_pool = MaxPool2dConnection(s3, f, (m // 4, n // 4), decay=0.5)\n self.add_connection(global_pool, \"conv_3\", \"global_pool\")\n\n monitor = NetworkMonitor(self, layers=[\"DoG\", \"conv_1\", \"pool_1\",\n \"conv_2\", \"pool_2\",\n \"conv_3\", \"global_pool\"],\n connections=[(\"DoG\", \"conv_1\"),\n (\"pool_1\", \"conv_2\"),\n (\"pool_2\", \"conv_3\")],\n state_vars=[\"w\", \"s\"])\n self.add_monitor(monitor, \"network_monitor\")\n\n return self", "def __init__(self, num_channels, z_dim, conv_dim, image_size, num_gpu,\n num_extra_layers, use_BN):\n super(Generator, self).__init__()\n assert image_size % 16 == 0, \"image size must be a multiple of 
16!\"\n\n self.num_gpu = num_gpu\n self.layer = nn.Sequential()\n\n conv_depth = conv_dim // 2\n conv_size = 4\n\n while conv_size != image_size:\n conv_depth = conv_depth * 2\n conv_size *= 2\n\n # input convt layer\n # input is Z\n # output is [conv_depth x 4 x 4]\n # e.g. if image_size = 64, then output is [(conv_dim * 8) x 4 x 4]\n self.layer.add_module(\n \"init.{}-{}.convt\".format(z_dim, conv_depth),\n nn.ConvTranspose2d(z_dim, conv_depth, 4, 1, 0, bias=False))\n if use_BN:\n self.layer.add_module(\n \"init.{}.batchnorm\".format(conv_depth),\n nn.BatchNorm2d(conv_depth))\n self.layer.add_module(\n \"init.{}.relu\".format(conv_depth),\n nn.ReLU(True))\n\n # pyramid convt layers\n conv_size = 4\n while conv_size < image_size // 2:\n # output is [(conv_depth // 2) x (conv_size * 2) x (conv_size * 2)]\n self.layer.add_module(\n \"pyramid.{}-{}.convt\".format(conv_depth, conv_depth // 2),\n nn.ConvTranspose2d(conv_depth, conv_depth // 2,\n 4, 2, 1, bias=False))\n if use_BN:\n self.layer.add_module(\n \"pyramid.{}.batchnorm\".format(conv_depth // 2),\n nn.BatchNorm2d(conv_depth // 2))\n self.layer.add_module(\n \"pyramid.{}.relu\".format(conv_depth // 2),\n nn.ReLU(True))\n conv_depth //= 2\n conv_size *= 2\n\n # extra convt layers\n for idx in range(num_extra_layers):\n self.layer.add_module(\n \"extra-{}.{}-{}.conv\".format(idx, conv_depth, conv_depth),\n nn.Conv2d(conv_depth, conv_depth, 3, 1, 1, bias=False))\n if use_BN:\n self.layer.add_module(\n \"extra-{}.{}.batchnorm\".format(idx, conv_depth),\n nn.BatchNorm2d(conv_depth))\n self.layer.add_module(\n \"extra-{}.{}.relu\".format(idx, conv_depth),\n nn.ReLU(True))\n\n # output convt layer\n # output is [num_channels x conv_dim x conv_dim]\n self.layer.add_module(\n \"final.{}-{}.convt\".format(conv_depth, num_channels),\n nn.ConvTranspose2d(conv_depth, num_channels, 4, 2, 1, bias=False))\n self.layer.add_module(\n \"final.{}.tanh\".format(num_channels),\n nn.Tanh())", "def SRCNNex(input_shape, depth_multiplier=1, multi_output=False):\n \n inputs = Input(input_shape, name=\"inputs\")\n conv1 = Convolution2D(filters=64, kernel_size=9, padding=\"same\", name=\"conv1\", activation=\"relu\")(inputs)\n mapping = Convolution2D(filters=32, kernel_size=5, padding=\"same\", name=\"mapping\", activation=\"relu\")(conv1)\n \n if multi_output:\n out = Convolution2D(filters=2, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n else:\n out = Convolution2D(filters=1, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n return Model(inputs, out)", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def 
mgcNetArchNin(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = layers.Conv2D(filters=16, kernel_size=(5, 5), padding='valid', activation='relu')(img_shape)\n conv1 = layers.Conv2D(filters=16, kernel_size=(1, 1), activation='relu')(conv1)\n conv1 = layers.Conv2D(filters=16, kernel_size=(1, 1), activation='relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n\n # Layer 2\n #------------------------\n conv2 = layers.Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu')(conv1)\n conv2 = layers.Conv2D(filters=32, kernel_size=(1, 1), activation='relu')(conv2)\n conv2 = layers.Conv2D(filters=32, kernel_size=(1, 1), activation='relu')(conv2)\n conv2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)\n conv2 = layers.Dropout(0.4)(conv2)\n\n # Layer 3\n #------------------------\n conv3 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(conv2)\n conv3 = layers.Conv2D(filters=64, kernel_size=(1, 1), activation='relu')(conv3)\n conv3 = layers.Conv2D(filters=64, kernel_size=(1, 1), activation='relu')(conv3)\n conv3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)\n conv3 = layers.Dropout(0.4)(conv3)\n\n # Layer 4\n #------------------------\n #conv4 = layers.Conv2D(filters=128, kernel_size=(2, 2), padding='same', activation='relu')(conv3)\n #conv4 = layers.Conv2D(filters=128, kernel_size=(1, 1), activation='relu')(conv4)\n #conv4 = layers.Conv2D(filters=128, kernel_size=(1, 1), activation='relu')(conv4)\n #conv4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)\n #conv4 = layers.Dropout(0.4)(conv4)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=128, kernel_size=(2, 2), padding='same', activation='relu')(conv3) # skip layer 4\n output = layers.Conv2D(filters=64, kernel_size=(1, 1), activation='relu')(output)\n output = layers.Conv2D(filters=32, kernel_size=(1, 1))(output)\n output = layers.MaxPooling2D(pool_size=(2, 2))(output)\n output = layers.Dropout(0.4)(output)\n\n \n # FC Layer\n #------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(64, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'net_in_net')\n\n\n return model", "def 
construct_from_anatomy(self, anet, architecture):\n # construct conv layer for input -> LGNd\n self.area_channels['input'] = INPUT_SIZE[0]\n self.area_size['input'] = INPUT_SIZE[1]\n \n out_sigma = 1\n out_channels = np.floor(anet.find_layer('LGNd','').num/out_sigma/INPUT_SIZE[1]/INPUT_SIZE[2])\n architecture.set_num_channels('LGNd', '', out_channels)\n self.area_channels['LGNd'] = out_channels\n \n out_size = INPUT_SIZE[1] * out_sigma\n self.area_size['LGNd'] = out_size\n \n convlayer = ConvLayer('input', 'LGNd',\n ConvParam(in_channels=INPUT_SIZE[0], \n out_channels=out_channels,\n gsh=INPUT_GSH,\n gsw=INPUT_GSW, out_sigma=out_sigma),\n out_size)\n self.layers.append(convlayer)\n \n # construct conv layers for all other connections\n G, _ = anet.make_graph()\n Gtop = nx.topological_sort(G)\n root = next(Gtop) # get root of graph\n for i, e in enumerate(nx.edge_bfs(G, root)):\n \n in_layer_name = e[0].area+e[0].depth\n out_layer_name = e[1].area+e[1].depth\n print('constructing layer %s: %s to %s'%(i, in_layer_name, out_layer_name))\n \n in_conv_layer = self.find_conv_target_area(in_layer_name)\n in_size = in_conv_layer.out_size\n in_channels = in_conv_layer.params.out_channels\n \n out_anat_layer = anet.find_layer(e[1].area, e[1].depth)\n \n out_sigma = get_out_sigma(e[0].area, e[0].depth, e[1].area, e[1].depth)\n out_size = in_size * out_sigma\n self.area_size[e[1].area+e[1].depth] = out_size\n\n if SUBFIELDS:\n pixel_area = calculate_pixel_area_with_visual_field(architecture, e[1].area, e[1].depth)\n out_channels = np.floor(out_anat_layer.num / pixel_area)\n else:\n out_channels = np.floor(out_anat_layer.num/out_size**2)\n\n architecture.set_num_channels(e[1].area, e[1].depth, out_channels)\n self.area_channels[e[1].area+e[1].depth] = out_channels\n \n convlayer = ConvLayer(in_layer_name, out_layer_name, \n ConvParam(in_channels=in_channels, \n out_channels=out_channels,\n gsh=architecture.get_kernel_peak_probability(e[0].area, e[0].depth, e[1].area, e[1].depth),\n gsw=architecture.get_kernel_width_pixels(e[0].area, e[0].depth, e[1].area, e[1].depth), out_sigma=out_sigma),\n out_size)\n \n self.layers.append(convlayer)", "def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):\n inputs = Input(input_shape)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding='same',\n kernel_initializer=init_w)(inputs)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding=\"same\",\n kernel_initializer=init_w)(c1)\n\n # pool down to next layer\n pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)\n\n conv_depth *= 2\n\n # convolute down again\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool1)\n\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv2)\n \n # pool down again\n pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)\n\n conv_depth *= 2 \n\n # Convolution\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool2)\n\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv3)\n \n # pool down\n pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)\n\n conv_depth *= 2 \n # Convolution\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = 
\"same\",\n kernel_initializer=init_w)(pool3)\n\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv4)\n \n # pool down \n pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)\n\n conv_depth *=2 \n\n # Convolution\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool4)\n\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv5)\n\n drop = Dropout(dropout)(conv5)\n\n conv_depth /= 2\n conv_depth = int(conv_depth) \n # do upsampling\n up1 = UpSampling2D(size = (2,2))(drop)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up1)\n \n # add in skip info\n cat1 = concatenate([conv4,conv6],axis = 3)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat1)\n\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv6)\n\n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up2 = UpSampling2D(size = (2,2))(conv6)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up2)\n \n # add in skip info\n cat2 = concatenate([conv3,conv7],axis = 3)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat2)\n\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv7)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up3 = UpSampling2D(size = (2,2))(conv7)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size=(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up3)\n \n # add in skip info\n cat3 = concatenate([conv2,conv8],axis = 3)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat3)\n\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv8)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up4 = UpSampling2D(size = (2,2))(conv8)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up4)\n \n # add in skip info\n cat4 = concatenate([c1,conv9],axis = 3)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat4)\n\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv9)\n\n outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)\n\n return outputs,inputs", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # 
self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. - 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def __init__(self, in_channels=3, out_channels=3, n_layers=6):\n\n super().__init__()\n generator_list = nn.ModuleList()\n # c7s1-64\n generator_list.append(nn.ReflectionPad2d(3))\n generator_list.append(ConvBlock(\n in_channels, 64, kernel_size=7, stride=1, padding=0,\n leaky=False, instance_norm=True, bias=False\n ))\n # d128, d256 (down-sampling layers)\n generator_list.append(ConvBlock(\n 64, 128, kernel_size=3, stride=2, padding=1,\n leaky=False, instance_norm=True, bias=False\n ))\n generator_list.append(ConvBlock(\n 128, 256, kernel_size=3, stride=2, padding=1,\n leaky=False, instance_norm=True, bias=False\n ))\n # resblocks\n for n in range(n_layers):\n generator_list.append(ResnetBlock(256))\n # u128, u64 (up-sampleing layers)\n generator_list.append(ConvTransBlock(256, 128))\n generator_list.append(ConvTransBlock(128, 64))\n # c7s1-3\n generator_list.append(nn.ReflectionPad2d(3))\n generator_list.append(nn.Conv2d(64, out_channels,\n kernel_size=7, padding=0)\n )\n generator_list.append(nn.Tanh())\n # define generator model\n self.model = nn.Sequential(*generator_list)\n self._initialize_params()", "def build_u_net(input_size, filters, u_depth):\n input_layer = Input(shape=(input_size, input_size, 1), name=\"input_layer\")\n\n residual_connections = []\n for i in range(u_depth):\n if i == 0:\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(input_layer)\n else:\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n\n x = Dropout(0.1)(x)\n residual = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n residual_connections.append(residual)\n x = MaxPool2D(pool_size=(2, 2))(residual)\n filters *= 2\n\n padding = [184, 88, 40, 16, 4]\n for i 
in range(u_depth):\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n x = Dropout(0.1)(x)\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n filters = int(filters / 2)\n x = Conv2DTranspose(filters, (2, 2), strides=(2, 2))(x)\n x = concatenate([Cropping2D(padding.pop())(residual_connections.pop()), x])\n\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n x = Dropout(0.1)(x)\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n output_layer = Conv2D(1, (1, 1), 1, activation=sigmoid)(x)\n\n model = Model(inputs=input_layer, outputs=output_layer)\n\n return model", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n self.inputs=inputs\n # Instantiate encoder layers \n for i in range(len(self.filters)):\n if i==0:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), \n strides=(self.strides[i], self.strides[i]),padding='same')(inputs)\n Q = BatchNormalization()(Q)\n Q = Activation('relu')(Q)\n else:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), padding='same',\n strides=(self.strides[i], self.strides[i]))(Q) \n Q = BatchNormalization()(Q)\n Q = Activation('relu')(Q) \n \n Q_4 = Flatten()\n Q_6 = Dropout(self.dropout)\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Set up encoder\n flat = Q_4(Q)\n hidden= Q_6(flat)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(hidden)\n z_log_var = Q_z_log_var(hidden)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n self.encoding = encoding\n # Generator\n # Instantiate generator layers to be able to sample from latent\n # distribution later\n out_shape = (int(np.ceil(self.input_shape[0] / np.prod(self.strides) )), int(np.ceil(self.input_shape[1] / np.prod(self.strides))), self.filters[-1])\n \n #G_0 = Dense(self.hidden_dim)\n #G_00= BatchNormalization()\n #G_01= Activation('relu')\n G_d = Dropout(self.dropout)\n G_1 = Dense(np.prod(out_shape))\n G_10= BatchNormalization()\n G_11= Activation('relu')\n G_2 = Reshape(out_shape)\n G=[]\n for i in range(len(self.filters)):\n if i==0:\n G_ = Conv2DTranspose(self.filters[-1], (self.KernelDim[-1], self.KernelDim[-1]), \n strides=(self.strides[-1], self.strides[-1]),padding='same')\n G.append(G_)\n G_ = BatchNormalization()\n G.append(G_)\n G_ = Activation('relu')\n G.append(G_) \n else:\n G_ = Conv2DTranspose(self.filters[-i-1], (self.KernelDim[-i-1], self.KernelDim[-i-1]), padding='same',\n strides=(self.strides[-i-1], self.strides[-i-1]))\n G.append(G_)\n G_ = BatchNormalization()\n G.append(G_)\n G_ = Activation('relu')\n G.append(G_) \n \n G_5_= BilinearUpsampling(output_size=(self.input_shape[0], self.input_shape[1]))\n G_6 = Conv2D(self.input_shape[2], (2, 2), padding='same',\n strides=(1, 1), activation=self.act, name='generated')\n # Apply generator layers\n #x = G_0(encoding)\n #x = G_00(x)\n #x = G_01(x)\n x = G_d(encoding)\n x = G_1(x)\n x = G_10(x) \n x = G_11(x)\n x = G_2(x)\n \n for i in range(len(G)):\n x = G[i](x)\n \n x = G_5_(x)\n generated = G_6(x)\n self.model =Model(inputs, generated)\n # Set up generator\n inputs_G = Input(batch_shape=(None, self.latent_dim))\n #x = 
G_0(inputs_G)\n #x = G_00(x)\n #x = G_01(x)\n x = G_d(inputs_G) \n x = G_1(x)\n x = G_10(x) \n x = G_11(x) \n x = G_2(x)\n \n for i in range(len(G)):\n x = G[i](x)\n \n x = G_5_(x)\n generated_G = G_6(x)\n self.generator = Model(inputs_G, generated_G)\n\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n # Loss and optimizer do not matter here as we do not train these models\n self.generator.compile(optimizer=self.opt, loss='mse')\n self.model.summary()\n print(\"Completed model setup.\")", "def build_weight_model(self):\r\n model = nn.Sequential(\r\n nn.Linear(self.in_channels, self.out_channels),\r\n )\r\n init_sequential_weights(model)\r\n return model", "def build_weight_model(self):\r\n model = nn.Sequential(\r\n nn.Linear(self.in_channels, self.out_channels),\r\n )\r\n init_sequential_weights(model)\r\n return model", "def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], 
initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n 
model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet_1, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 1024, 3)\n self.layer_6 = SegnetLayer_Encoder(1024, 1024, 3)\n\n self.layer_7 = SegnetLayer_Decoder(1024, 1024, 3)\n self.layer_8 = SegnetLayer_Decoder(1024, 512, 3)\n self.layer_9 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_10 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_11 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_12 = SegnetLayer_Decoder(64, n_classes, 2)", "def create_cnn(num_half_rows, num_half_columns, num_channels):\n\n error_checking.assert_is_integer(num_half_rows)\n error_checking.assert_is_integer(num_half_columns)\n error_checking.assert_is_integer(num_channels)\n\n error_checking.assert_is_greater(num_half_rows, 0)\n error_checking.assert_is_greater(num_half_columns, 0)\n error_checking.assert_is_greater(num_channels, 0)\n\n regularizer_object = keras.regularizers.l1_l2(l1=L1_WEIGHT, l2=L2_WEIGHT)\n\n num_grid_rows = 2 * num_half_rows + 1\n num_grid_columns = 2 * num_half_columns + 1\n input_layer_object = keras.layers.Input(\n shape=(num_grid_rows, num_grid_columns, num_channels)\n )\n\n current_num_filters = None\n current_layer_object = None\n\n # Add convolutional layers.\n for _ in range(NUM_CONV_LAYER_SETS):\n for _ in range(NUM_CONV_LAYERS_PER_SET):\n\n if current_num_filters is None:\n current_num_filters = (\n num_channels * NUM_CHANNELS_TO_FIRST_NUM_FILTERS)\n this_input_layer_object = input_layer_object\n\n else:\n current_num_filters *= 2\n this_input_layer_object = current_layer_object\n\n current_layer_object = keras.layers.Conv2D(\n filters=current_num_filters,\n kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),\n strides=(1, 1), padding='valid', data_format='channels_last',\n dilation_rate=(1, 1), activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(this_input_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if CONV_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=CONV_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n current_layer_object = keras.layers.MaxPooling2D(\n pool_size=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n strides=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n padding='valid', 
data_format='channels_last'\n )(current_layer_object)\n\n these_dimensions = numpy.array(\n current_layer_object.get_shape().as_list()[1:], dtype=int)\n num_features = numpy.prod(these_dimensions)\n\n current_layer_object = keras.layers.Flatten()(current_layer_object)\n\n # Add intermediate dense layers.\n _, num_outputs_by_dense_layer = (\n architecture_utils.get_dense_layer_dimensions(\n num_input_units=num_features, num_classes=NUM_CLASSES,\n num_dense_layers=NUM_DENSE_LAYERS)\n )\n\n for k in range(NUM_DENSE_LAYERS - 1):\n current_layer_object = keras.layers.Dense(\n num_outputs_by_dense_layer[k], activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n # Add output layer (also dense).\n current_layer_object = keras.layers.Dense(\n NUM_CLASSES, activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.Activation(\n 'softmax'\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None and NUM_DENSE_LAYERS == 1:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n # Put the whole thing together and compile.\n cnn_model_object = keras.models.Model(\n inputs=input_layer_object, outputs=current_layer_object)\n cnn_model_object.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=LIST_OF_METRIC_FUNCTIONS)\n\n cnn_model_object.summary()\n return cnn_model_object", "def build_net(self, trainable=True, name=None):\n input_shape = self._env_spec.observation_space.shape\n assert len(input_shape) in [2, 3]\n if len(input_shape) == 2:\n input_shape = (1, ) + input_shape\n\n with tf.variable_scope(name):\n l_in = layers.InputLayer(shape=(None, self._obs_dim), name=\"obs\")\n l_hid = layers.reshape(\n l_in, ([0], ) + input_shape, name=\"reshape_input\")\n\n if self._batch_norm:\n l_hid = layers.batch_norm(l_hid)\n\n for idx, conv_filter, filter_size, stride, pad in zip(\n range(len(self._conv_filters)),\n self._conv_filters,\n self._conv_filter_sizes,\n self._conv_strides,\n self._conv_pads,\n ):\n l_hid = layers.Conv2DLayer(\n l_hid,\n num_filters=conv_filter,\n filter_size=filter_size,\n stride=(stride, stride),\n pad=pad,\n nonlinearity=self._hidden_nonlinearity,\n name=\"conv_hidden_%d\" % idx,\n weight_normalization=self._weight_normalization,\n trainable=trainable,\n )\n if self._pooling:\n l_hid = layers.Pool2DLayer(\n l_hid, pool_size=self._pool_size)\n if self._batch_norm:\n l_hid = layers.batch_norm(l_hid)\n\n l_hid = layers.flatten(l_hid, name=\"conv_flatten\")\n l_action = layers.InputLayer(\n shape=(None, self._action_dim), name=\"actions\")\n\n n_layers = len(self._hidden_sizes) + 1\n if n_layers > 1:\n action_merge_layer = \\\n (self._action_merge_layer % n_layers + n_layers) % n_layers\n else:\n action_merge_layer = 1\n\n for idx, size in enumerate(self._hidden_sizes):\n if self._batch_norm:\n l_hid = 
batch_norm(l_hid)\n\n if idx == action_merge_layer:\n l_hid = layers.ConcatLayer([l_hid, l_action])\n\n l_hid = layers.DenseLayer(\n l_hid,\n num_units=size,\n nonlinearity=self._hidden_nonlinearity,\n trainable=trainable,\n name=\"hidden_%d\" % (idx + 1))\n\n if action_merge_layer == n_layers:\n l_hid = layers.ConcatLayer([l_hid, l_action])\n\n l_output = layers.DenseLayer(\n l_hid,\n num_units=1,\n nonlinearity=self._output_nonlinearity,\n trainable=trainable,\n name=\"output\")\n\n output_var = layers.get_output(l_output)\n\n f_qval = tensor_utils.compile_function(\n [l_in.input_var, l_action.input_var], output_var)\n output_layer = l_output\n obs_layer = l_in\n action_layer = l_action\n\n return f_qval, output_layer, obs_layer, action_layer", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )", "def get_model(summary=False):\n\timage_input=Input(shape=(220,220,5),name='image_input')\n\tbranch1_conv1=Conv2D(64, kernel_size=(3, 3), 
border_mode='same', input_shape=(220,220,5), activation='relu')(image_input)\n\tbranch1_conv2=Conv2D(64, kernel_size=(1, 1), border_mode='same', activation='relu')(branch1_conv1)\t\n\tbranch1_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch1_conv1)\n\tbranch2_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch1_pool1)\n\tbranch2_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch2_conv1)\t\n\tbranch2_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch2_conv2)\n\tbranch3_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch2_pool1)\n\tbranch3_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch3_conv1)\t\n\tbranch3_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch3_conv2)\n\tbranch4_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch3_pool1)\n\tbranch4_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch4_conv1)\t\n\tbranch4_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch4_conv2)\n\tbranch5_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch4_pool1)\n\tbranch5_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch5_conv1)\t\n\tbranch5_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch5_conv2)\n\tbranch6_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch5_pool1)\n\tbranch6_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch6_conv1)\t\n\tbranch6_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch6_conv2)\n\tbranch1_flat=Flatten()(branch6_pool1)\n\tdrop=Dropout(.3)(branch1_flat)\n\t# FC layers group\n\tdense1=Dense(512, activation='relu', name='fc1')(drop)\n\tdrop1=Dropout(.3)(dense1)\n\tdense2=Dense(256, activation='relu', name='fc2')(drop1)\n\tdrop3=Dropout(.3)(dense2)\n\tout=Dense(2, activation='softmax', name='fc4')(drop3)\n\tmodel=Model(inputs=image_input,outputs=out)\n\treturn model", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNNFeatures, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n\n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def build_unet(input_layer = Input((128,128,3)), start_depth=64, activation='relu', initializer='he_normal'):\n\n # 128 -> 64\n conv1 = Conv2D_BN(input_layer, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n conv1 = Conv2D_BN(conv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n pool1 = MaxPooling2D((2, 2))(conv1)\n\n # 64 -> 32\n conv2 = Conv2D_BN(pool1, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n conv2 = Conv2D_BN(conv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n pool2 = MaxPooling2D((2, 2))(conv2)\n\n # 32 -> 16\n conv3 = Conv2D_BN(pool2, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n conv3 = Conv2D_BN(conv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n pool3 = MaxPooling2D((2, 2))(conv3)\n\n # 16 -> 8\n conv4 = Conv2D_BN(pool3, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n conv4 
= Conv2D_BN(conv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n pool4 = MaxPooling2D((2, 2))(conv4)\n\n # Middle\n convm=cbam_block(pool4)\n\n # 8 -> 16\n deconv4 = Conv2DTranspose(convm, start_depth * 8, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 16 -> 32\n deconv3 = Conv2DTranspose(uconv4, start_depth * 4, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 32 -> 64\n deconv2 = Conv2DTranspose(uconv3, start_depth * 2, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv2 = concatenate([deconv2, conv2])\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 64 -> 128\n deconv1 = Conv2DTranspose(uconv2, start_depth * 1, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n\n output_layer = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(uconv1)\n\n return output_layer", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n self.inputs=inputs\n # Instantiate encoder layers \n for i in range(len(self.filters)):\n if i==0:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), \n strides=(self.strides[i], self.strides[i]),padding='same',activation='relu')(inputs)\n else:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), padding='same',\n activation='relu',strides=(self.strides[i], self.strides[i]))(Q) \n \n Q_4 = Flatten()\n Q_5 = Dense(self.hidden_dim, activation='relu')\n Q_6 = Dropout(self.dropout)\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Set up encoder\n flat = Q_4(Q)\n dp = Q_5(flat)\n hidden= Q_6(dp)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(hidden)\n z_log_var = Q_z_log_var(hidden)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n self.encoding = encoding\n # Generator\n # Instantiate generator layers to be able to sample from latent\n # distribution later\n out_shape = (int(np.ceil(self.input_shape[0] / np.prod(self.strides) )), int(np.ceil(self.input_shape[1] / np.prod(self.strides))), self.filters[-1])\n \n G_0 = Dense(self.hidden_dim, activation='relu')\n G_d = Dropout(self.dropout)\n G_1 = Dense(np.prod(out_shape), activation='relu')\n G_2 = Reshape(out_shape)\n G=[]\n for i in range(len(self.filters)):\n if i==0:\n G_ = Conv2DTranspose(self.filters[-1], (self.KernelDim[-1], 
self.KernelDim[-1]), \n strides=(self.strides[-1], self.strides[-1]),padding='same',activation='relu') \n else:\n G_ = Conv2DTranspose(self.filters[-i-1], (self.KernelDim[-i-1], self.KernelDim[-i-1]), padding='same',\n activation='relu',strides=(self.strides[-i-1], self.strides[-i-1]))\n G.append(G_)\n \n G_5_= BilinearUpsampling(output_size=(self.input_shape[0], self.input_shape[1]))\n G_6 = Conv2D(self.input_shape[2], (2, 2), padding='same',\n strides=(1, 1), activation=self.act, name='generated')\n # Apply generator layers\n x = G_0(encoding)\n x = G_d(x)\n x = G_1(x)\n x = G_2(x)\n \n for i in range(len(G)):\n x = G[i](x)\n \n x = G_5_(x)\n generated = G_6(x)\n self.model =Model(inputs, generated)\n # Set up generator\n inputs_G = Input(batch_shape=(None, self.latent_dim))\n x = G_0(inputs_G)\n x = G_1(x)\n x = G_2(x)\n \n for i in range(len(self.filters)):\n x = G[i](x)\n \n x = G_5_(x)\n generated_G = G_6(x)\n self.generator = Model(inputs_G, generated_G)\n\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n # Loss and optimizer do not matter here as we do not train these models\n self.generator.compile(optimizer=self.opt, loss='mse')\n self.model.summary()\n print(\"Completed model setup.\")", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def _init_layers(self):\n cls_branch = []\n for _ in range(self.num_reg_fcs):\n cls_branch.append(Linear(self.embed_dims, self.embed_dims))\n cls_branch.append(nn.LayerNorm(self.embed_dims))\n cls_branch.append(nn.ReLU(inplace=True))\n cls_branch.append(Linear(self.embed_dims, self.cls_out_channels))\n fc_cls = nn.Sequential(*cls_branch)\n\n reg_branch = []\n for _ in range(self.num_reg_fcs):\n reg_branch.append(Linear(self.embed_dims, self.embed_dims))\n reg_branch.append(nn.ReLU())\n reg_branch.append(Linear(self.embed_dims, self.code_size))\n reg_branch = nn.Sequential(*reg_branch)\n\n past_traj_reg_branch = []\n for _ in range(self.num_reg_fcs):\n past_traj_reg_branch.append(\n Linear(self.embed_dims, self.embed_dims))\n past_traj_reg_branch.append(nn.ReLU())\n past_traj_reg_branch.append(\n Linear(self.embed_dims, (self.past_steps + self.fut_steps)*2))\n past_traj_reg_branch = nn.Sequential(*past_traj_reg_branch)\n\n def _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n # last reg_branch is used to generate proposal from\n # encode feature map when as_two_stage is True.\n num_pred = (self.transformer.decoder.num_layers + 1) if \\\n self.as_two_stage else self.transformer.decoder.num_layers\n\n if self.with_box_refine:\n self.cls_branches = _get_clones(fc_cls, num_pred)\n self.reg_branches = _get_clones(reg_branch, num_pred)\n self.past_traj_reg_branches = _get_clones(\n past_traj_reg_branch, num_pred)\n else:\n self.cls_branches = nn.ModuleList(\n [fc_cls for _ in range(num_pred)])\n self.reg_branches = nn.ModuleList(\n [reg_branch for _ in range(num_pred)])\n self.past_traj_reg_branches = nn.ModuleList(\n [past_traj_reg_branch for _ in range(num_pred)])\n if not self.as_two_stage:\n self.bev_embedding = nn.Embedding(\n self.bev_h * self.bev_w, self.embed_dims)", "def _build_model(self):\n\n # Build Encoder\n inputs = 
Input(shape=(self.n_features_,))\n # Input layer\n layer = Dense(self.n_features_, activation=self.hidden_activation)(\n inputs)\n # Hidden layers\n for neurons in self.encoder_neurons:\n layer = Dense(neurons, activation=self.hidden_activation,\n activity_regularizer=l2(self.l2_regularizer))(layer)\n layer = Dropout(self.dropout_rate)(layer)\n # Create mu and sigma of latent variables\n z_mean = Dense(self.latent_dim)(layer)\n z_log = Dense(self.latent_dim)(layer)\n # Use parametrisation sampling\n z = Lambda(self.sampling, output_shape=(self.latent_dim,))(\n [z_mean, z_log])\n # Instantiate encoder\n encoder = Model(inputs, [z_mean, z_log, z])\n if self.verbosity >= 1:\n encoder.summary()\n\n # Build Decoder\n latent_inputs = Input(shape=(self.latent_dim,))\n # Latent input layer\n layer = Dense(self.latent_dim, activation=self.hidden_activation)(\n latent_inputs)\n # Hidden layers\n for neurons in self.decoder_neurons:\n layer = Dense(neurons, activation=self.hidden_activation)(layer)\n layer = Dropout(self.dropout_rate)(layer)\n # Output layer\n outputs = Dense(self.n_features_, activation=self.output_activation)(\n layer)\n # Instatiate decoder\n decoder = Model(latent_inputs, outputs)\n if self.verbosity >= 1:\n decoder.summary()\n # Generate outputs\n outputs = decoder(encoder(inputs)[2])\n\n # Instantiate VAE\n vae = Model(inputs, outputs)\n vae.add_loss(self.vae_loss(inputs, outputs, z_mean, z_log))\n vae.compile(optimizer=self.optimizer)\n if self.verbosity >= 1:\n vae.summary()\n return vae", "def __init__(self):\n #conv1\n n = inp_width*inp_height\n #poczatkowe wagi sieci sa ustalane losowo z rozkladu normalnego. Umieszczane sa one na liscie matryc wag\n self.Weights = [np.random.randn(layers[0][1],inp_channels,layers[0][2],layers[0][2])/np.sqrt(n)]\n out_Size = inp_width - layers[0][2] + 1 #zmienna zawiera rozmiar wyjscia danej warstwy\n #inicjalizacja progow \n self.Biases = [initBias*np.ones( layers[0][1] )]\n #przypisanie parametrow warstwie poolingu\n self.poolParams = [(layers[1][1], layers[1][2])]\n out_Size = out_Size/2 \n #conv 2\n n = out_Size*out_Size*layers[0][1]\n self.Weights.append(np.random.randn(layers[2][1],layers[0][1],layers[2][2],layers[2][2])/np.sqrt(n))\n out_Size = out_Size - layers[2][2]+1\n self.Biases.append(initBias*np.ones(layers[2][1]))\n #pool 2\n self.poolParams.append((layers[3][1],layers[3][2]))\n out_Size = out_Size/2 \n #conv 3\n n = out_Size*out_Size*layers[2][1]\n self.Weights.append(np.random.randn(layers[4][1],layers[2][1],out_Size,out_Size)/np.sqrt(n))\n out_Size = 1\n self.Biases.append(initBias*np.ones(layers[4][1]))\n #fully connected 1\n n = layers[4][1]\n self.Weights.append(np.random.randn(layers[5][1],layers[4][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[5][1]))\n #fully connected 2\n n = layers[5][1]\n self.Weights.append(np.random.randn(layers[6][1],layers[5][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[6][1]))\n\n self.Weights = np.asarray(self.Weights)\n self.Biases = np.asarray(self.Biases)\n \n delta_W = []\n delta_B = []\n for i in range(5):\n delta_W.append(np.zeros(self.Weights[i].shape))\n delta_B.append(np.zeros(self.Biases[i].shape))\n self.delta_W = np.asarray(delta_W)\n self.delta_B = np.asarray(delta_B)", "def build_modules(self):\n self.backbone = Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n 
backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()", "def __call__(self, inputs, training):\n\n self.training = training\n input_shape = inputs.shape\n if self.data_format == 'channels_first':\n img_size = (input_shape[2], input_shape[3])\n else:\n img_size = (input_shape[1], input_shape[2])\n\n with self._model_variable_scope('ssd300_model'):\n if self.data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n net = super(Model, self).__call__(inputs, training)\n\n with self._model_variable_scope('ssd300_model'):\n\n net = self._atrous_convolution_2d(net, filters=1024,\n kernel_size=3,\n atrous_rate=6, name='fc6')\n\n net = self._conv2d(net, filters=1024, kernel_size=1,\n padding='same', name='fc7')\n\n net = self._conv2d(net, filters=256, kernel_size=1,\n padding='same', name='conv6_1')\n\n net = self._conv2d(net, filters=512, kernel_size=3,\n strides=2,\n padding='same', name='conv6_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv7_1')\n\n net = self._conv2d(fixed_padding(net, 3, self.data_format),\n filters=256, kernel_size=3,\n strides=2,\n padding='valid', name='conv7_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv8_1')\n\n net = self._conv2d(net, filters=256, kernel_size=3,\n strides=2,\n padding='same', name='conv8_2')\n\n if self.data_format == 'channels_first':\n net = tf.reduce_mean(net, [2, 3])\n else:\n net = tf.reduce_mean(net, [1, 2])\n self.layers['pool6'] = net\n\n # Prediction from conv4_3\n conv4_3_norm = self._normalize(net, 20, name='conv4_3_norm')\n num_priors = 3\n x = self._conv2d(conv4_3_norm, filters=num_priors * 4, kernel_size=3,\n padding='same', name='conv4_3_norm_mbox_loc')\n self.layers['conv4_3_norm_mbox_loc_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_loc_flat')\n\n x = self._conv2d(conv4_3_norm, filters=num_priors * self.num_classes,\n kernel_size=3, padding='same',\n name='conv4_3_norm_mbox_conf')\n self.layers['conv4_3_norm_mbox_conf_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_conf_flat')\n\n prior_box = PriorBox(img_size, min_size=30.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv4_3_norm_mbox_priorbox')\n net['conv4_3_norm_mbox_priorbox'] = prior_box(conv4_3_norm)\n\n return net", "def __init__(self):\n # def __init__(self, embed_size, input_channels):\n super(EncoderCNN, self).__init__()\n self.off_model = OffsetCNN()\n self.sig_model = 
SignificanceCNN()\n self.sigmoid = nn.Sigmoid()\n self.W = nn.Conv3d(1, 1, (5, 1, 1))", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n self.inputs=inputs\n # Instantiate encoder layers \n for i in range(len(self.filters)):\n if i==0:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), \n strides=(self.strides[i], self.strides[i]),padding='same')(inputs)\n Q = BatchNormalization()(Q)\n Q = Activation('relu')(Q)\n else:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), padding='same',\n strides=(self.strides[i], self.strides[i]))(Q) \n Q = BatchNormalization()(Q)\n Q = Activation('relu')(Q) \n \n Q_4 = Flatten()\n Q_5 = Dense(self.hidden_dim)\n Q_50= BatchNormalization()\n Q_51=Activation('relu')\n Q_6 = Dropout(self.dropout)\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Set up encoder\n flat = Q_4(Q)\n db = Q_5(flat)\n da = Q_50(db)\n dp = Q_51(da)\n hidden= Q_6(dp)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(hidden)\n z_log_var = Q_z_log_var(hidden)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n self.encoding = encoding\n # Generator\n # Instantiate generator layers to be able to sample from latent\n # distribution later\n out_shape = (int(np.ceil(self.input_shape[0] / np.prod(self.strides) )), int(np.ceil(self.input_shape[1] / np.prod(self.strides))), self.filters[-1])\n \n G_0 = Dense(self.hidden_dim)\n G_00= BatchNormalization()\n G_01= Activation('relu')\n G_d = Dropout(self.dropout)\n G_1 = Dense(np.prod(out_shape))\n G_10= BatchNormalization()\n G_11= Activation('relu')\n G_2 = Reshape(out_shape)\n G=[]\n for i in range(len(self.filters)):\n if i==0:\n G_ = Conv2DTranspose(self.filters[-1], (self.KernelDim[-1], self.KernelDim[-1]), \n strides=(self.strides[-1], self.strides[-1]),padding='same')\n G.append(G_)\n G_ = BatchNormalization()\n G.append(G_)\n G_ = Activation('relu')\n G.append(G_) \n else:\n G_ = Conv2DTranspose(self.filters[-i-1], (self.KernelDim[-i-1], self.KernelDim[-i-1]), padding='same',\n strides=(self.strides[-i-1], self.strides[-i-1]))\n G.append(G_)\n G_ = BatchNormalization()\n G.append(G_)\n G_ = Activation('relu')\n G.append(G_) \n \n G_5_= BilinearUpsampling(output_size=(self.input_shape[0], self.input_shape[1]))\n G_6 = Conv2D(self.input_shape[2], (2, 2), padding='same',\n strides=(1, 1), activation=self.act, name='generated')\n # Apply generator layers\n x = G_0(encoding)\n x = G_00(x)\n x = G_01(x)\n x = G_d(x)\n x = G_1(x)\n x = G_10(x) \n x = G_11(x)\n x = G_2(x)\n \n for i in range(len(G)):\n x = G[i](x)\n \n x = G_5_(x)\n generated = G_6(x)\n self.model =Model(inputs, generated)\n # Set up generator\n inputs_G = Input(batch_shape=(None, self.latent_dim))\n x = G_0(inputs_G)\n x = G_00(x)\n x = G_01(x)\n x = G_d(x) \n x = G_1(x)\n x = G_10(x) \n x = G_11(x) \n x = G_2(x)\n \n for i in range(len(G)):\n x = G[i](x)\n \n x = G_5_(x)\n generated_G = G_6(x)\n self.generator = Model(inputs_G, generated_G)\n\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n # Loss and optimizer do not matter here as we do not train these models\n self.generator.compile(optimizer=self.opt, loss='mse')\n self.model.summary()\n 
print(\"Completed model setup.\")", "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def __init__(self, img_size, latent_dim=10):\n super(EncoderBurgess, self).__init__()\n\n # Layer parameters\n hid_channels = 32\n kernel_size = 4\n hidden_dim = 256\n self.latent_dim = latent_dim\n self.img_size = img_size\n # Shape required to start transpose convs\n self.reshape = (hid_channels, kernel_size, kernel_size)\n n_chan = self.img_size[0]\n\n # Convolutional layers\n cnn_kwargs = dict(stride=2, padding=1)\n self.conv1 = nn.Conv2d(n_chan, hid_channels, kernel_size, **cnn_kwargs)\n self.conv2 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.conv3 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n\n # If input image is 64x64 do fourth convolution\n if self.img_size[1] == self.img_size[2] == 64:\n self.conv_64 = nn.Conv2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs\n )\n\n # Fully connected layers\n self.lin1 = nn.Linear(np.product(self.reshape), hidden_dim)\n self.lin2 = nn.Linear(hidden_dim, hidden_dim)\n\n # Fully connected layers for mean and variance\n self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)" ]
[ "0.6300217", "0.6081184", "0.6023422", "0.60000265", "0.5995302", "0.59686184", "0.59679914", "0.5925756", "0.59211046", "0.5914937", "0.59135234", "0.5860571", "0.5852755", "0.5852108", "0.581896", "0.5807225", "0.5793126", "0.57791585", "0.577166", "0.5771581", "0.57675666", "0.5765284", "0.57633984", "0.5760104", "0.575881", "0.5758575", "0.57572913", "0.57562757", "0.57526946", "0.5751958", "0.57449114", "0.5733674", "0.5726653", "0.57184625", "0.5718429", "0.57143205", "0.5701118", "0.5698859", "0.56942636", "0.5693443", "0.5691169", "0.5689448", "0.56889963", "0.56840307", "0.56791407", "0.56785107", "0.567577", "0.56756043", "0.5670468", "0.56582844", "0.5657219", "0.5653586", "0.56527805", "0.5648827", "0.5647372", "0.5646717", "0.56453073", "0.56364864", "0.56359947", "0.5628579", "0.5627726", "0.5623856", "0.5620005", "0.56175107", "0.5614111", "0.5609878", "0.5598723", "0.5596347", "0.55948144", "0.5585807", "0.55799955", "0.55775285", "0.5576935", "0.55752224", "0.557154", "0.55704165", "0.55704165", "0.5563614", "0.5560347", "0.5559854", "0.55564475", "0.55516356", "0.5536738", "0.55360687", "0.5535779", "0.5529769", "0.55287504", "0.5528348", "0.55236375", "0.55201125", "0.5519112", "0.55190873", "0.55159116", "0.5513667", "0.5511805", "0.54988277", "0.54910696", "0.5480186", "0.54780847", "0.547626" ]
0.66995215
0
Construct one block of the conv INN
def _block(args: ClusterArgs, input_dim: int) -> layers.Bijector: _chain: List[layers.Bijector] = [] if args.inn_idf: _chain += [ layers.IntegerDiscreteFlow(input_dim, hidden_channels=args.inn_coupling_channels) ] _chain += [layers.RandomPermutation(input_dim)] else: if args.inn_batch_norm: _chain += [layers.MovingBatchNorm2d(input_dim, bn_lag=args.inn_bn_lag)] if args.inn_glow: _chain += [layers.Invertible1x1Conv(input_dim, use_lr_decomp=True)] else: _chain += [layers.RandomPermutation(input_dim)] if args.inn_scaling == "none": _chain += [ layers.AdditiveCouplingLayer( input_dim, hidden_channels=args.inn_coupling_channels, num_blocks=args.inn_coupling_depth, pcnt_to_transform=0.25, ) ] elif args.inn_scaling == "sigmoid0.5": _chain += [ layers.AffineCouplingLayer( input_dim, num_blocks=args.inn_coupling_depth, hidden_channels=args.inn_coupling_channels, ) ] else: raise ValueError(f"Scaling {args.inn_scaling} is not supported") # if args.inn_jit: # block = jit.script(block) return layers.BijectorChain(_chain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_unet(input_layer = Input((128,128,3)), start_depth=64, activation='relu', initializer='he_normal'):\n\n # 128 -> 64\n conv1 = Conv2D_BN(input_layer, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n conv1 = Conv2D_BN(conv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n pool1 = MaxPooling2D((2, 2))(conv1)\n\n # 64 -> 32\n conv2 = Conv2D_BN(pool1, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n conv2 = Conv2D_BN(conv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n pool2 = MaxPooling2D((2, 2))(conv2)\n\n # 32 -> 16\n conv3 = Conv2D_BN(pool2, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n conv3 = Conv2D_BN(conv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n pool3 = MaxPooling2D((2, 2))(conv3)\n\n # 16 -> 8\n conv4 = Conv2D_BN(pool3, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n conv4 = Conv2D_BN(conv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n pool4 = MaxPooling2D((2, 2))(conv4)\n\n # Middle\n convm=cbam_block(pool4)\n\n # 8 -> 16\n deconv4 = Conv2DTranspose(convm, start_depth * 8, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 16 -> 32\n deconv3 = Conv2DTranspose(uconv4, start_depth * 4, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 32 -> 64\n deconv2 = Conv2DTranspose(uconv3, start_depth * 2, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv2 = concatenate([deconv2, conv2])\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 64 -> 128\n deconv1 = Conv2DTranspose(uconv2, start_depth * 1, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n\n output_layer = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(uconv1)\n\n return output_layer", "def conv_sequence(layer_in, nfilters, strides, block_fun,\n name_prefix='', pad='same', weights_init='variance_scaling',\n weight_decay=0., activation='relu', filter_size=3):\n net = {}\n curr = layer_in\n\n for i, (nf, s) in enumerate(zip(nfilters, strides)):\n name = '{}conv{}'.format(name_prefix, i+1)\n net[name] = block_fun(curr, nf, filter_size, name,\n padding=pad,\n strides=s,\n weights_init=weights_init,\n weight_decay=weight_decay,\n activation=activation)\n curr = net[name]\n\n return net, curr", "def layer_construction(self, in_channel, out_channel, stride, num_blocks):\n 
layer = [ResBlock(in_channel,out_channel,stride)]\n for i in range(0, num_blocks-1):\n layer.append(ResBlock(out_channel * 4, out_channel))\n\n return nn.Sequential(*layer)", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\r\n super(ResnetBlock, self).__init__()\r\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)", "def build_simple_block(self, incoming_layer, names,\n num_filters, filter_size, stride, pad,\n use_bias=False, nonlin=rectify):\n net = []\n net.append((\n names[0],\n ConvLayer(incoming_layer, num_filters, filter_size, pad, stride,\n flip_filters=False, nonlinearity=None) if use_bias\n else ConvLayer(incoming_layer, num_filters, filter_size, stride, pad, b=None,\n flip_filters=False, nonlinearity=None)\n ))\n \n net.append((\n names[1],\n BatchNormLayer(net[-1][1])\n ))\n if nonlin is not None:\n net.append((\n names[2],\n NonlinearityLayer(net[-1][1], nonlinearity=nonlin)\n ))\n \n return dict(net), net[-1][0]", "def __init__(self, indim, outdim, ksize=3, stride=1, activation=nn.ReLU):\n\n # Run initialization for super class\n super(ConvBlock, self).__init__()\n\n # Check ksize, stride 
requirements\n assert (ksize % 2) == 1\n assert stride == 1\n assert indim == outdim\n\n # Store proper activation function depending on configuration\n self.activ = activation\n\n # Compute padding according to `ksize`. Make sure\n # that this will not cause image width and height to change.\n padding = ksize // 2\n\n # We will follow the architecture in slide 76 of lecture 21, but with\n # our `_conv` function as our conv ``block''. We'll also use\n # nn.Sequential() and its `add_module' function. Note that the 64 and\n # 256 in that slide are just examples, and you should instead use indim\n # and outdim.\n #\n # Also note that we are creating these layers with support for\n # different `ksize`, `stride`, `padding`, unlike previous assignment.\n self.layers = nn.Sequential()\n self.layers.add_module(\"conv_1\", self._conv(indim, indim, 1, 1, 0))\n self.layers.add_module(\"conv_2\", self._conv(\n indim, indim, ksize, 1, padding))\n self.layers.add_module(\"conv_3\", self._conv(indim, outdim, 1, 1, 0))", "def conv_block(index,\n in_channels,\n out_channels=N_FILTERS,\n padding=0,\n pooling=True):\n if pooling:\n conv = nn.Sequential(\n OrderedDict([\n ('conv'+str(index), nn.Conv2d(in_channels, out_channels, \\\n K_SIZE, padding=padding)),\n ('bn'+str(index), nn.BatchNorm2d(out_channels, momentum=1, \\\n affine=True)),\n ('relu'+str(index), nn.ReLU(inplace=True)),\n ('pool'+str(index), nn.MaxPool2d(MP_SIZE))\n ]))\n else:\n conv = nn.Sequential(\n OrderedDict([\n ('conv'+str(index), nn.Conv2d(in_channels, out_channels, \\\n K_SIZE, padding=padding)),\n ('bn'+str(index), nn.BatchNorm2d(out_channels, momentum=1, \\\n affine=True)),\n ('relu'+str(index), nn.ReLU(inplace=True))\n ]))\n return conv", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\r\n conv_block = []\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\r\n if use_dropout:\r\n conv_block += [nn.Dropout(0.5)]\r\n\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\r\n\r\n return nn.Sequential(*conv_block)", "def __init__(self, nIn, nOut, type: str, bottleneck,\n bnWidth):\n super(ConvBN, self).__init__()\n layer = []\n nInner = nIn\n if bottleneck is True:\n nInner = min(nInner, bnWidth * nOut)\n layer.append(nn.Conv2d(\n nIn, nInner, kernel_size=1, stride=1, padding=0, bias=False))\n layer.append(nn.BatchNorm2d(nInner))\n layer.append(nn.ReLU(True))\n\n if type == 'normal':\n layer.append(nn.Conv2d(nInner, nOut, kernel_size=3,\n stride=1, padding=1, bias=False))\n elif type == 'down':\n layer.append(nn.Conv2d(nInner, nOut, kernel_size=3,\n stride=2, padding=1, bias=False))\n else:\n raise ValueError\n\n layer.append(nn.BatchNorm2d(nOut))\n layer.append(nn.ReLU(True))\n\n self.net = nn.Sequential(*layer)", "def __init__(self, nIn, nOut, type: str, bottleneck,\n bnWidth):\n super(ConvBN, 
self).__init__()\n layer = []\n nInner = nIn\n if bottleneck is True:\n nInner = min(nInner, bnWidth * nOut)\n layer.append(nn.Conv2d(\n nIn, nInner, kernel_size=1, stride=1, padding=0, bias=False))\n layer.append(nn.BatchNorm2d(nInner))\n layer.append(nn.ReLU(True))\n\n if type == 'normal':\n layer.append(nn.Conv2d(nInner, nOut, kernel_size=3,\n stride=1, padding=1, bias=False))\n elif type == 'down':\n layer.append(nn.Conv2d(nInner, nOut, kernel_size=3,\n stride=2, padding=1, bias=False))\n else:\n raise ValueError\n\n layer.append(nn.BatchNorm2d(nOut))\n layer.append(nn.ReLU(True))\n\n self.net = nn.Sequential(*layer)", "def _block(self, filters, inp):\r\n layer_1 = BatchNormalization()(inp)\r\n act_1 = Activation('relu')(layer_1)\r\n conv_1 = Conv2D(filters, (3, 3),\r\n padding='same',\r\n kernel_initializer=self.initializer)(act_1)\r\n layer_2 = BatchNormalization()(conv_1)\r\n act_2 = Activation('relu')(layer_2)\r\n conv_2 = Conv2D(filters, (3, 3),\r\n padding='same',\r\n kernel_initializer=self.initializer)(act_2)\r\n return (conv_2)", "def _building_block_v1(inputs, filters, training, projection_shortcut, strides,\n data_format):\n shortcut = inputs\n\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n shortcut = batch_norm(inputs=shortcut, training=training,\n data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs += shortcut\n inputs = tf.nn.relu(inputs)\n\n return inputs", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n if last:\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]\n else:\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, last)", "def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)", "def _contracting_block(self, in_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n 
nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=out_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n )\n return block", "def _contracting_block(self, in_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=out_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n )\n return block", "def _construct_block(self, block_info):\n layer_name = block_info[0]\n if layer_name=='Conv2d':\n in_channels, out_channels, kernel_size = block_info[1:]\n return nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size)\n elif layer_name=='ReLU':\n return nn.ReLU(inplace=True)\n elif layer_name=='MaxPool2d':\n kernel_size, stride = block_info[1:]\n return nn.MaxPool2d(kernel_size=kernel_size,\n stride=stride)\n elif layer_name=='BatchNorm2d':\n num_features = block_info[1]\n return nn.BatchNorm2d(num_features=num_features)\n elif layer_name=='Linear':\n in_features, out_features = block_info[1:]\n return nn.Linear(in_features=in_features,\n out_features=out_features)\n else:\n raise Exception(\"_construct_block cannot construct block\")", "def convbn_block(self, inp, conv, scale, offset, buffermean, buffervar, training, activation=tf.nn.relu, max_pool_pad='VALID'):\n stride, no_stride = [1, 2, 2, 1], [1, 1, 1, 1]\n inp = tf.nn.conv2d(inp, conv, no_stride, 'SAME')\n inp, buffermean, buffervar = self.batch_norm(inp, buffermean, buffervar, scale, offset, training)\n inp = activation(inp)\n inp = tf.nn.max_pool(inp, stride, stride, max_pool_pad)\n return inp, buffermean, buffervar", "def __init__(self,\n input_dim: int,\n in_channels: int,\n output_dim: Optional[int] = None,\n out_channels: Optional[int] = None,\n kernel_size: int = 3,\n dilation: int = 1,\n stride: int = 1,\n ):\n super().__init__()\n\n # Assert same shape\n self.input_dim = input_dim\n self.output_dim = input_dim if output_dim is None else output_dim\n\n self.in_channels = in_channels\n self.out_channels = out_channels if out_channels is None else out_channels\n\n # Determine the padding required for keeping the same sequence length\n assert dilation >= 1 and stride >= 1, \"Dilation and stride must be >= 1.\"\n self.dilation, self.stride = dilation, stride\n self.kernel_size = kernel_size\n\n padding = self.determine_padding(self.input_dim, self.output_dim)\n\n self.conv = nn.Conv1d(\n in_channels,\n out_channels,\n self.kernel_size,\n padding=padding)", "def __init__(self,\r\n in_channels_1, in_channels_2, out_channels,\r\n kernel_size_1, kernel_size_2,\r\n stride_1, up_stride_2,\r\n padding_1, up_padding_2, output_padding=0,\r\n activation_in='relu', activation_out='lrelu',\r\n norm_in='bn', norm_out='none'):\r\n\r\n super(RefinementBlock, self).__init__()\r\n\r\n self.conv_1 = conv_block(\r\n in_channels=in_channels_1,\r\n out_channels=out_channels,\r\n kernel_size=kernel_size_1,\r\n stride=stride_1,\r\n padding=padding_1,\r\n norm='none',\r\n activation=activation_in\r\n )\r\n\r\n self.upconv_2 = upconv_block(\r\n in_channels=in_channels_2,\r\n out_channels=out_channels,\r\n kernel_size=kernel_size_2,\r\n stride=up_stride_2,\r\n padding=up_padding_2,\r\n 
output_padding=output_padding,\r\n norm=norm_in,\r\n activation='none'\r\n )\r\n\r\n self.conv_3 = conv_block(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n kernel_size=3,\r\n stride=1,\r\n padding=1,\r\n norm='none',\r\n activation='none'\r\n )\r\n\r\n self.conv_4 = conv_block(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n kernel_size=3,\r\n stride=1,\r\n padding=1,\r\n norm='none',\r\n activation='none'\r\n )\r\n\r\n self.out_act = _activation(act_type=activation_out)\r\n self.out_norm = _norm(norm_type=norm_out, channels=out_channels)", "def basic_block(x, num_features, cfg, name):\n x = Conv1D(num_features, kernel_size=3, padding='same', use_bias=True,\n kernel_regularizer=l2(cfg.weight_decay), kernel_initializer=taejun_uniform(), name=f'{name}_conv')(x)\n x = BatchNormalization(name=f'{name}_norm')(x)\n x = Activation('relu', name=f'{name}_relu')(x)\n x = MaxPool1D(pool_size=3, name=f'{name}_pool')(x)\n return x", "def _make_layer(self, out_channels, num_blocks, stride):\n\n # we have num_block blocks per layer, the first block\n # could be 1 or 2, other blocks would always be 1\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n type(self).current_block += 1\n # use ParsevalBasicBlock for residual block that needs retraining\n # 9 is the total block of resnet18\n if type(self).current_block + self._k > 9:\n block = ParsevalBasicBlock\n else:\n block = BasicBlock\n layers.append(block(self.in_channels, out_channels, stride))\n self.in_channels = out_channels * block.expansion\n\n return nn.Sequential(*layers)", "def __init__(self,\r\n in_channels_r, out_channels_r, in_channels_u, out_channels,\r\n kernel_size_in, kernel_size_out,\r\n up_stride_in, stride_out,\r\n up_padding_in, padding_out, output_padding=0,\r\n activation_in='lrelu', activation_out='lrelu',\r\n norm_in='bn', norm_out='none'):\r\n super(RecovecyBlock, self).__init__()\r\n\r\n self.in_upconv = upconv_block(\r\n in_channels=in_channels_r,\r\n out_channels=out_channels_r,\r\n kernel_size=kernel_size_in,\r\n stride=up_stride_in,\r\n padding=up_padding_in,\r\n output_padding=output_padding,\r\n norm=norm_in,\r\n activation=activation_in\r\n )\r\n\r\n self.out_conv = conv_block(\r\n in_channels=out_channels_r + in_channels_u,\r\n out_channels=out_channels,\r\n kernel_size=kernel_size_out,\r\n stride=stride_out,\r\n padding=padding_out,\r\n norm=norm_out,\r\n activation=activation_out\r\n )", "def conv_block(x_in, nf, strides=1):\n ndims = len(x_in.get_shape()) - 2\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n\n Conv = getattr(KL, 'Conv%dD' % ndims)\n x_out = Conv(nf, kernel_size=3, padding='same',\n kernel_initializer='he_normal', strides=strides)(x_in)\n x_out = LeakyReLU(0.2)(x_out)\n return x_out", "def __init__(self, rng, input, n_in = 0, n_out = 0, \n halfWinSize = 0, activation = T.nnet.relu, mask = None):\n self.input = input\n self.n_in = n_in\n self.n_out = n_out\n\tself.halfWinSize = halfWinSize\n\n windowSize = 2*halfWinSize + 1\n self.filter_size = windowSize\n\n # reshape input to shape (batchSize, n_in, nRows=1, nCols=seqLen) \n in4conv2D = input.dimshuffle(0, 1, 'x', 2)\n\n # initialize the filter\n w_shp = (n_out, n_in, 1, windowSize)\n\tif activation == T.nnet.relu:\n W_values = np.asarray(\n rng.normal(scale = np.sqrt(2. / (n_in*windowSize + n_out)),\n size = w_shp), \n dtype = theano.config.floatX )\n\telse:\n W_values = np.asarray(\n rng.uniform(low = - np.sqrt(6. 
/ (n_in*windowSize + n_out)), \n high = np.sqrt(6. / (n_in*windowSize + n_out)), \n size = w_shp),\n dtype=theano.config.floatX\n )\n if activation == theano.tensor.nnet.sigmoid:\n \tW_values *= 4\n\n self.W = theano.shared(value=W_values, name='ResConv1d_W', borrow=True)\n\n b_shp = (n_out,)\n self.b = theano.shared(\n np.asarray(rng.uniform(low = -.0, high = .0, size = b_shp), \n dtype=input.dtype), \n name ='ResConv1d_b', \n borrow=True)\n\n # conv_out and conv_out_bias have shape (batch_size, n_out, 1, nCols)\n conv_out = T.nnet.conv2d(in4conv2D, self.W, \n filter_shape=w_shp, border_mode='half')\n if activation is not None:\n conv_out_bias = activation(conv_out + \n self.b.dimshuffle('x', 0, 'x', 'x'))\n else:\n conv_out_bias = (conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n\n\t## out2 has shape (batchSize, n_out, nCols)\n out2 = conv_out_bias.dimshuffle(0, 1, 3, 2)[:, :, :, 0]\n\n if mask is not None:\n ## since we did zero padding at left side of the input tensor\n ## we need to reset these positions to 0 again after convolution \n ## to avoid introducing noise\n ## mask has shape (batchSize, #positions_to_be_masked)\n\n ##take the subtensor of out2 that needs modification\n out2_sub = out2[:, :, :mask.shape[1] ]\n mask_new = mask.dimshuffle(0, 'x', 1)\n self.output = T.set_subtensor(out2_sub, T.mul(out2_sub, mask_new))\n else:\n self.output = out2\n\n\t##self.output has shape (batchSize, n_out, nCols)\n\n # parameters of the model\n self.params=[self.W, self.b]\n\n self.paramL1 = abs(self.W).sum() + abs(self.b).sum()\n self.paramL2 = (self.W**2).sum() + (self.b**2).sum()", "def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = out_channels\n\n return nn.Sequential(*layers)", "def fun_n_to_one_big(self, block_index, block_dim, nc1_size):\n ub_output_tmp = self.tik_instance.Tensor(\n \"float32\", (4, self.c_block_size), name=\"ub_output_tmp\",\n scope=tik.scope_ubuf)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (1, self.c_block_size), name=\"ub_output\",\n scope=tik.scope_ubuf)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (240*4, self.c_block_size), name=\"ub_input\",\n scope=tik.scope_ubuf)\n input_num = _ceil_div(self.in_size_h*self.in_size_w*16, 240*64)\n if input_num > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n nc1 = self.batch_size*self.c1_size\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 
(self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 2, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(0, nc1 - (block_dim - 1)*nc1_size)\\\n as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 8, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)", "def _make_layer(self, X, name, block, num_blocks, out_channels):\n\n for i in range(0, num_blocks):\n X = block(X, name = name + '_block{}'.format(i), out_channels=out_channels)\n return X", "def _create_conv(cls, onnx_node, inputs, opset_version):\n kernel = tuple(onnx_node.attrs[\"kernel_shape\"])\n padding = tuple(\n onnx_node.attrs[\"pads\"]) if \"pads\" in onnx_node.attrs else (0, 0)\n stride = tuple(onnx_node.getattr('strides', (1, 1)))\n # default the odd_padding is 0, once there are same pad mode, we modify it\n # for odd_padding, please refer the autegrade.py\n odd_padding = (0, 0, 0, 0)\n if \"auto_pad\" in onnx_node.attrs:\n auto_pad = utils.force_unicode(onnx_node.attrs['auto_pad'])\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n padding, odd_padding = utils.get_padding_shape(\n auto_pad, inputs[0].shape[2:], kernel, stride)\n\n # not support dilation\n dilation = onnx_node.getattr('dilations', 1)\n if dilation != 1 and list(dilation) != [1, 1]:\n raise ValueError(\"Not implemented yet for dilation\")\n group = onnx_node.getattr('group', 1)\n\n # only support 1d or 2d\n if len(kernel) > 2:\n raise ValueError(\"Only implemented for 1d or 2d\")\n\n bias = len(inputs) == 3\n x = inputs[0]\n x_shape = inputs[0].shape\n in_channels = x_shape[1]\n w_shape = inputs[1].shape\n out_channels = w_shape[0]\n assert w_shape[1] == in_channels // group\n\n if inputs[0].device.id() == -1:\n if group != 1:\n raise NotImplementedError\n else:\n handle = singa.ConvHandle(x.data, kernel, stride, padding,\n in_channels, out_channels, bias,\n group)\n else:\n handle = singa.CudnnConvHandle(x.data, kernel, stride, padding,\n 
in_channels, out_channels, bias,\n group)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(handle, odd_padding)", "def construct(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample:\n identity = self.conv_down_sample(identity)\n identity = self.bn_down_sample(identity)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out", "def conv_block(self, inp, conv, bias, activation=tf.nn.relu, max_pool_pad='VALID'):\n stride, no_stride = [1, 2, 2, 1], [1, 1, 1, 1]\n inp = tf.nn.conv2d(inp, conv, no_stride, 'SAME') + bias\n inp = activation(inp)\n inp = tf.nn.max_pool(inp, stride, stride, max_pool_pad)\n return inp", "def __init__(self, n):\n super().__init__()\n self.n = n\n self.block_names = ['block4', 'block7', 'block8', 'block9', 'block10', 'block11']\n block_sizes = [512, 1024, 512, 256, 256, 256]\n for name, size in zip(self.block_names, block_sizes):\n setattr(self, name, nn.Conv2d(size, self.n*cfg.ASPECT_RATIOS, kernel_size=3, padding=1))", "def construct(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv4(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv5(x)\n x = self.bn5(x)\n x = self.relu(x)\n x = self.conv6(x)\n x = self.bn6(x)\n x = self.relu(x)\n return x", "def conv_block(image, arg_params=None, name='flow', ext=''):\n\n conv_1d_layer = Conv1dLayer(ext=ext, params=arg_params)\n block1 = conv_1d_layer(image, 32, 9, name=name + '_block1')\n block2 = conv_1d_layer(block1, 32, 7, name=name + '_block2')\n block2_1 = conv_1d_layer(block2, 32, 3, 1, name=name + '_block2_1')\n\n block3 = conv_1d_layer(block2_1, 64, 5, name=name + '_block3')\n block3_1 = conv_1d_layer(block3, 64, 3, 1, name=name + '_block3_1')\n\n block4 = conv_1d_layer(block3_1, 128, 5, name=name + '_block4')\n block4_1 = conv_1d_layer(block4, 128, 3, 1, name=name + '_block4_1')\n\n block5 = conv_1d_layer(block4_1, 256, 5, name=name + '_block5')\n block5_1 = conv_1d_layer(block5, 256, 3, 1, name=name + '_block5_1')\n\n out = [('block5_1', block5_1)]\n for i in range(1, 6):\n exec('out.append((\\'block' + str(i) +'\\', block' + str(i) + '))')\n if i > 1:\n exec('out.append((\\'block' + str(i) +'_1\\', block' + str(i) + '_1))')\n\n return OrderedDict(out)", "def __init__(self, in_channels, out_channels):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=5, padding=1)", "def __init__(self, channel_in, channel_out, kernel_size, stride, padding, output_padding=0):\n super().__init__()\n self.block = nn.Sequential(\n nn.ConvTranspose2d(channel_in, channel_out, kernel_size, stride, padding, output_padding),\n nn.BatchNorm2d(channel_out)\n )\n self.act = nn.ReLU()", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet_1, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 1024, 3)\n self.layer_6 = SegnetLayer_Encoder(1024, 1024, 3)\n\n self.layer_7 = SegnetLayer_Decoder(1024, 1024, 3)\n self.layer_8 = SegnetLayer_Decoder(1024, 512, 3)\n 
self.layer_9 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_10 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_11 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_12 = SegnetLayer_Decoder(64, n_classes, 2)", "def construct(self, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out", "def define_encoder_block(layer_in, n_filters, batchnorm=True):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # add downsampling layer\n g = Conv2D(n_filters, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(layer_in)\n # conditionally add batch normalization\n if batchnorm:\n g = BatchNormalization()(g, training=True)\n # leaky relu activation\n g = LeakyReLU(alpha=0.2)(g)\n\n return g", "def fun_n_to_one_small(self, block_index, block_dim, nc1_size):\n nc1 = self.batch_size*self.c1_size\n in_size_w_num = _ceil_div(self.in_size_w, 4)\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0], self.grads_gm[(block_index*nc1_size +\n nc1_index) * self.in_size_h *\n self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3,\n 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w - (\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16],\n ub_input[0], self.in_size_w-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(\n 0, nc1 - (block_dim - 1)*nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n 
self.in_size_h*self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3, 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w-(\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16], ub_input[0],\n self.in_size_w-1, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)", "def __init__(self, in_size, out_size, kernel_size=3, stride=2, padding=1, output_padding=1):\n super().__init__()\n ConvTransBlockList = nn.ModuleList()\n ConvTransBlockList.append(nn.ConvTranspose2d(in_size, out_size,\n kernel_size=kernel_size, stride=stride,\n padding=padding, output_padding=output_padding,\n bias=False)\n )\n ConvTransBlockList.append(nn.InstanceNorm2d(out_size))\n ConvTransBlockList.append(nn.ReLU())\n self.model = nn.Sequential(*ConvTransBlockList)", "def __init__(self, conv_block_args, deconv_block_args, flat_channels,\n flat_kernel_size):\n super().__init__()\n\n # Perform a number of steps validating the input arguments\n self._validate_parameters(conv_block_args, deconv_block_args,\n flat_channels, flat_kernel_size)\n\n # Create lists of conv and deconv blocks from the configurations\n # passed as arguments to this function\n self.conv_blocks = nn.ModuleList([\n ConvBlock(**args)\n for args in conv_block_args\n ])\n\n self.deconv_blocks = nn.ModuleList([\n DeconvBlock(**args)\n for args in deconv_block_args\n ])\n\n # The input and output from the flat channels must be compatible\n # with the configurations for the conv and deconv blocks\n flat_in_channels = conv_block_args[-1]['out_channels']\n flat_out_channels = deconv_block_args[0]['in_channels']\n\n # Setup the flat layers\n self.flat = nn.Conv2d(flat_in_channels, flat_channels,\n flat_kernel_size)\n self.flat2 = nn.Conv2d(flat_channels, flat_channels, 1)\n self.unflatten = nn.ConvTranspose2d(flat_channels, flat_out_channels,\n flat_kernel_size)", "def conv_block(\r\n in_channels,\r\n out_channels,\r\n kernel_size,\r\n stride,\r\n dilation=1,\r\n):\r\n pad_mode = 'same'\r\n padding = 0\r\n\r\n dbl = nn.SequentialCell(\r\n [\r\n nn.Conv2d(\r\n in_channels,\r\n out_channels,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n dilation=dilation,\r\n 
pad_mode=pad_mode,\r\n ),\r\n nn.BatchNorm2d(out_channels, momentum=0.1),\r\n nn.ReLU(),\r\n ]\r\n )\r\n init_cov(dbl[0])\r\n init_bn(dbl[1])\r\n return dbl", "def build_net(self, inputs):\n with tf.variable_scope(self._scope, self._scope, [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.batch_norm],\n outputs_collections=end_points_collection):\n net = slim.conv2d(inputs, 32, 3, 1, scope='conv1')\n net = slim.conv2d(net, 32, 3, 1, scope='conv2')\n\n net = slim.conv2d(net, 64, 3, 1, scope='conv3')\n net = slim.conv2d(net, 64, 3, 1, scope='conv4')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool1')\n\n net = slim.conv2d(net, 128, 3, 1, scope='conv5')\n net = slim.conv2d(net, 128, 3, 1, scope='conv6')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool2')\n\n net = slim.conv2d(net, 256, 3, scope='conv7')\n net = slim.conv2d(net, 256, 3, scope='conv8')\n\n net = slim.max_pool2d(net, 2, [2, 1], scope='pool3')\n\n net = slim.conv2d(net, 512, 3, scope='conv9')\n net = slim.conv2d(net, 512, 3, scope='conv10')\n\n net = slim.max_pool2d(net, 2, [1, 1], scope='pool4')\n\n net = slim.conv2d(net, 512, 2, padding='VALID', scope='conv11')\n\n net = slim.dropout(net, keep_prob=0.5)\n\n self.end_points = utils.convert_collection_to_dict(end_points_collection)\n self.net = net", "def _conv_block( inputs, filters, kernel, strides, nl):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl)", "def _building_block_v2(inputs, filters, training, \n projection_shortcut, strides,\n data_format):\n shortcut = inputs\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n ENDING_POINTS.append(inputs)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=strides,\n data_format=data_format, time_stride=strides)\n\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut", "def __init__(self, *units):\n super().__init__()\n self.convs = torch.nn.ModuleList([\n torch.nn.Conv2d(in_, out, 3, 1, 1)\n for in_, out in zip(units[:-1], units[1:])\n ])", "def up_block(x, out_channels, name, training=True):\n with tf.variable_scope(name):\n bn0 = ops.BatchNorm(name='bn_0')\n bn1 = ops.BatchNorm(name='bn_1')\n x_0 = x\n x = tf.nn.relu(bn0(x))\n x = usample(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv1')\n x = tf.nn.relu(bn1(x))\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv2')\n\n x_0 = usample(x_0)\n x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, training, 'snconv3')\n\n return x_0 + x", "def conv_block(inputs, out_channels, name='conv', training=False, block_idx=0):\n with tf.variable_scope(name):\n conv = tf.keras.layers.Conv2D(\n filters=out_channels,\n kernel_size=3,\n padding='same')(inputs)\n conv = bn[block_idx](conv, training=training)\n conv = tf.nn.relu(conv)\n out = tf.contrib.layers.max_pool2d(conv, 2)\n return out", "def make_block(self, block_ix, 
downsample, ker_size, block_len):\n stride = int(downsample) + 1\n n_in_filters = self.filters[block_ix]\n n_filters = self.filters[block_ix+1]\n mult_fact = 1 if block_ix == 0 else 6\n\n block = [MBConv(n_in_filters, n_filters, ker_size, stride, mult_fact)]\n block += [MBConv(n_filters, n_filters, ker_size, 1, mult_fact) for _ in range(block_len-1)]\n return block", "def __init__(self, in_ch=2048, out_ch=256):\n super(ChannelCompress, self).__init__()\n num_bottleneck = 1000\n add_block = []\n add_block += [nn.Linear(in_ch, num_bottleneck)]\n add_block += [nn.BatchNorm1d(num_bottleneck)]\n add_block += [nn.ReLU()]\n\n add_block += [nn.Linear(num_bottleneck, 500)]\n add_block += [nn.BatchNorm1d(500)]\n add_block += [nn.ReLU()]\n add_block += [nn.Linear(500, out_ch)]\n\n # Extra BN layer, need to be removed\n #add_block += [nn.BatchNorm1d(out_ch)]\n\n add_block = nn.Sequential(*add_block)\n add_block.apply(weights_init_kaiming)\n self.model = add_block", "def __init__(self, c):\n super(DeepConv, self).__init__(self)\n\n self.block_1 = BaseBlock(c, 16)\n self.block_2 = BaseBlock(16, 32)\n self.block_3 = BaseBlock(32, 32)\n\n self._body = nn.Sequential(self.block_1, self.block_2, self.block_3)", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def identity_block(X, f, filters, stage, block):\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value.\n X_shortcut = X\n\n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv2D(filters = F2, kernel_size= (f,f),strides= (1,1), padding= 'same' , name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(filters = F3, kernel_size= (1,1),strides= (1,1), padding= 'valid' , name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = layers.add([X,X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def 
__init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r", "def build_transformation_network(n_styles, depthwise_separable_conv):\n\n image_input = Input((None, None, 3), name=\"image\")\n style_weights = Input((n_styles, ), name=\"style_weights\")\n\n net = conv_block(image_input,\n style_weights,\n filters=32,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=64,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=64,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=32,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=3,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"sigmoid\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = Lambda(lambda t: t * 255.0, name=\"output\")(net)\n\n return Model([image_input, style_weights], net, name=\"transform_net\")", "def _identity_block(self, input_tensor, kernel_size, filters, stage, block, dilation=1):\n filters1, filters2, filters3 = filters\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n if block > 'z':\n block = 
chr(ord(block) - ord('z') + ord('A') - 1)\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = layers.Conv2D(filters1, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n name=conv_name_base + '2b',\n dilation_rate=dilation)(x)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2c')(x)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n x = layers.add([x, input_tensor])\n x = layers.Activation('relu')(x)\n return x", "def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, 
name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net", "def _make_layer(self, block, layer_num, in_channel, out_channel):\r\n layers = []\r\n darkblk = block(in_channel, out_channel)\r\n layers.append(darkblk)\r\n\r\n for _ in range(1, layer_num):\r\n darkblk = block(out_channel, out_channel)\r\n layers.append(darkblk)\r\n\r\n return nn.SequentialCell(layers)", "def construct_from_anatomy(self, anet, architecture):\n # construct conv layer for input -> LGNd\n self.area_channels['input'] = INPUT_SIZE[0]\n self.area_size['input'] = INPUT_SIZE[1]\n \n out_sigma = 1\n out_channels = np.floor(anet.find_layer('LGNd','').num/out_sigma/INPUT_SIZE[1]/INPUT_SIZE[2])\n architecture.set_num_channels('LGNd', '', out_channels)\n self.area_channels['LGNd'] = out_channels\n \n out_size = INPUT_SIZE[1] * out_sigma\n self.area_size['LGNd'] = out_size\n \n convlayer = ConvLayer('input', 'LGNd',\n ConvParam(in_channels=INPUT_SIZE[0], \n out_channels=out_channels,\n gsh=INPUT_GSH,\n gsw=INPUT_GSW, out_sigma=out_sigma),\n out_size)\n self.layers.append(convlayer)\n \n # construct conv layers for all other connections\n G, _ = anet.make_graph()\n Gtop = nx.topological_sort(G)\n root = next(Gtop) # get root of graph\n for i, e in enumerate(nx.edge_bfs(G, root)):\n \n in_layer_name = e[0].area+e[0].depth\n out_layer_name = e[1].area+e[1].depth\n print('constructing layer %s: %s to %s'%(i, in_layer_name, out_layer_name))\n \n in_conv_layer = self.find_conv_target_area(in_layer_name)\n in_size = in_conv_layer.out_size\n in_channels = in_conv_layer.params.out_channels\n \n out_anat_layer = anet.find_layer(e[1].area, e[1].depth)\n \n out_sigma = get_out_sigma(e[0].area, e[0].depth, e[1].area, e[1].depth)\n out_size = in_size * out_sigma\n self.area_size[e[1].area+e[1].depth] = out_size\n\n if SUBFIELDS:\n pixel_area = calculate_pixel_area_with_visual_field(architecture, e[1].area, e[1].depth)\n out_channels = np.floor(out_anat_layer.num / pixel_area)\n else:\n out_channels = np.floor(out_anat_layer.num/out_size**2)\n\n architecture.set_num_channels(e[1].area, e[1].depth, out_channels)\n self.area_channels[e[1].area+e[1].depth] = out_channels\n \n convlayer = ConvLayer(in_layer_name, out_layer_name, \n ConvParam(in_channels=in_channels, \n out_channels=out_channels,\n gsh=architecture.get_kernel_peak_probability(e[0].area, e[0].depth, e[1].area, e[1].depth),\n gsw=architecture.get_kernel_width_pixels(e[0].area, e[0].depth, e[1].area, e[1].depth), out_sigma=out_sigma),\n out_size)\n \n self.layers.append(convlayer)", "def build(self, block_size):", "def construct(self, x):\n x = self.conv(x)\n h0 = self.up_conv1(x)\n l0 = self.up_conv2(h0)\n h1 = self.up_conv3(l0 - x)\n return h1 + h0", "def _final_block(self, in_channels, mid_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=mid_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=mid_channels, padding=1),\n 
nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels)\n )\n return block", "def _final_block(self, in_channels, mid_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=mid_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=mid_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels)\n )\n return block", "def BasicBlock(cin, cout, n):\n layers = [\n nn.Conv2d(cin, cout, 3, padding=1),\n nn.BatchNorm2d(cout),\n nn.ReLU()\n ]\n for _ in range(n - 1):\n layers.append(nn.Conv2d(cout, cout, 3, padding=1))\n layers.append(nn.BatchNorm2d(cout))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(2))\n return nn.Sequential(*layers)", "def _singlechannelTransformixArray(self, niiIn):\n\t\t# here we will get the extension of the image and will convert it to the nift-1\n\t\t# format if it is not already in that format. While users can supply their own\n\t\t# nifti formatted image to the pipeline, this ensures that other file formats\n\t\t# can be used, although, it creates additionally overhead\n\t\t# here we supply all preprocessing commands that were used to preprocess or morph\n\t\t# the array size of the input image through the hdiprep workflow. Transformix\n\t\t# must be run on images with the same size as the elastix registration\n\t\tif ((self.out_ext!=\".nii\") or (self.target_size!=None) or (self.pad!=None)):\n\t\t\t# get the shape of the image\n\t\t\tshp = len(niiIn.hdi.data.image_shape)\n\t\t\t# create new name for the temporary image\n\t\t\ttmp_nm = os.path.join(out_dir, next(tempfile._get_candidate_names())+\".nii\")\n\t\t\t# export nifti intermediate\n\t\t\tprint('Creating nifti-1 intermediate for registration')\n\t\t\t# check for padding\n\t\t\tif self.pad!=None:\n\t\t\t\t# pad the single-channel\n\t\t\t\tniiIn.hdi.data.image = np.pad(niiIn.hdi.data.image,[(self.pad[0], self.pad[0]), (self.pad[1], self.pad[1])],mode='constant')\n\t\t\t# check for image resizing\n\t\t\tif (self.target_size != None) and (self.crops==None):\n\t\t\t\t# transform the image\n\t\t\t\tniiIn.hdi.data.image = resize(niiIn.hdi.data.image,self.target_size)\n\n\t\t\t# Create nifti oject -- transpose axes because of the transformation!\n\t\t\tnii_im = nib.Nifti1Image(niiIn.hdi.data.image.T, affine=np.eye(4))\n\t\t\t#Save the nifti image\n\t\t\tnib.save(nii_im,str(tmp_nm))\n\t\t\t# remove the nifit memory\n\t\t\tnii_im = None\n\t\t\t# update the image name\n\t\t\tprint('Using nifti-1 intermediate for registration')\n\t\t\t# update the input image\n\t\t\tself.in_im = Path(tmp_nm)\n\t\t\t# update the intermediate flag\n\t\t\tself.intermediate = True\n\t\t\t#Remove loaded image to clear memory\n\t\t\tniiIn = None\n\n\t\t#Print update\n\t\tprint('Detected single channel input images...')\n\t\t#Update the fixed channels\n\t\tself.in_channels.append(self.in_im)\n\n\t\t#add transform -- check for list size\n\t\tif len(self.tps) > 1:\n\t\t\t#Run the composition function for transformix\n\t\t\tres_name = MultiTransformix(in_im = self.in_im, out_dir = self.out_dir, tps = self.tps)\n\n\t\t#Otherwise only use the first transform 
parameter\n\t\telse:\n\t\t\t#Updatethe command with the single channel path alone\n\t\t\tself.command = self.command + ' -in ' + str(self.in_im)\n\t\t\t#use the first transform parameter file\n\t\t\tself.command = self.command + ' -tp ' + str(self.tps[0])\n\t\t\t#Update the command with the output directory\n\t\t\tself.command = self.command + ' -out ' + str(self.out_dir)\n\t\t\t#Run single channel transformix without temporary directories\n\t\t\tRunTransformix(self.command)\n\t\t\t#Get a result name for the output of transformix (assumes nifti for now)\n\t\t\tres_name = Path(os.path.join(self.out_dir,\"result\"+self.in_im.suffix))\n\n\t\t#Create a new name\n\t\tnew_name = Path(os.path.join(self.out_dir,self.baseName+'_result'+self.out_ext))\n\n\t\t# check if the output format needs to be switched -- set by the user\n\t\tif (self.out_ext!=\".nii\") or (self.trim!=None):\n\t\t\t# use HDIreader for now to parse image and exporter to export\n\t\t\tniiIn = hdi_reader.HDIreader(\n\t\t\t path_to_data=self.in_im,\n\t\t\t path_to_markers=None,\n\t\t\t flatten=False,\n\t\t\t subsample=None,\n\t\t\t mask=None,\n\t\t\t save_mem=False\n\t\t\t)\n\t\t\t# check the trim\n\t\t\tif self.trim!=None:\n\t\t\t\t# trim the image borders\n\t\t\t\tniiIn.hdi.data.image = niiIn.hdi.data.image[self.trim:-self.trim,self.trim:-self.trim]\n\t\t\t# export new data\n\t\t\thdi_exporter.HDIexporter(niiIn.hdi.data.image,new_name)\n\t\telse:\n\t\t\t# simply rename the file that is already in the nifti format\n\t\t\tres_name.rename(new_name)", "def _make_layer(self, out_channels, num_blocks):\n layers = []\n for i in range(num_blocks):\n stride = 2 if i == 0 else 1\n layers.append(InvertedResidual(in_channels=self.in_channels, out_channels=out_channels, stride=stride, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp))\n self.in_channels = out_channels\n return nn.Sequential(*layers)", "def convert_convolution(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n\n pad_dims = pad_dims + pad_dims\n\n conv_node = onnx.helper.make_node(\n \"Conv\",\n inputs=input_nodes,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [conv_node]", "def build_network(self):\n net = self.ccf_data\n\n # Reshape [length] -> [length, 1].\n net = tf.expand_dims(net, -1)\n\n # create summary object\n summary = []\n\n for i in self.hparams.conv_block_filters:\n for _ in range(self.hparams.conv_layers_per_block):\n input_shape = net.shape.as_list()\n conv_op = tf.keras.layers.Conv1D(filters=i, kernel_size=self.hparams.kernel_size, padding='same',\n activation=tf.nn.relu)\n net = conv_op(net)\n summary.append(\"Conv1D-{}-{}. Input shape: {}. Output shape: {}\".format(self.hparams.kernel_size, i, input_shape,\n net.shape.as_list()))\n pool_size = 2\n strides = 2\n max_pool = tf.keras.layers.MaxPool1D(pool_size=pool_size, strides=strides)\n net = max_pool(net)\n summary.append(\"MaxPool1D-{}. Pool Size: {}. 
Strides: {}\".format(self.hparams.kernel_size, pool_size, strides))\n\n for i in self.hparams.final_conv_num_filters:\n conv_op = tf.keras.layers.Conv1D(filters=i, kernel_size=self.hparams.kernel_size, padding='same',\n activation=tf.nn.relu)\n net = conv_op(net)\n flatten = tf.keras.layers.Flatten()\n net = flatten(net)\n\n for i in self.hparams.dense_num_layers:\n dense = tf.keras.layers.Dense(i, activation=tf.nn.relu)\n net = dense(net)\n\n # output layer\n output = tf.keras.layers.Dense(1)\n net = tf.squeeze(output(net))\n\n self.summary = \"\\n\".join(summary)\n self.predicted_rv = net", "def __conv_block(self, x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):\n\t\teps = 1.1e-5\n\t\tconv_name_base = \"conv\" + str(stage) + \"_\" + str(branch)\n\t\trelu_name_base = \"relu\" + str(stage) + \"_\" + str(branch)\n\n\t\t# 1x1 Convolution (Bottleneck layer)\n\t\tinter_channel = nb_filter * 4 \n\t\tx = BatchNormalization(epsilon=eps, axis=self.concat_axis, name=conv_name_base+\"_x1_bn\")(x)\n\t\tx = Scale(axis=self.concat_axis, name=conv_name_base+\"_x1_scale\")(x)\n\t\tx = Activation(\"relu\", name=relu_name_base+\"_x1\")(x)\n\t\tx = Conv2D(inter_channel, (1, 1), name=conv_name_base+\"_x1\", use_bias=False)(x)\n\n\t\tif dropout_rate:\n\t\t\tx = Dropout(dropout_rate)(x)\n\n\t\t# 3x3 Convolution\n\t\tx = BatchNormalization(epsilon=eps, axis=self.concat_axis, name=conv_name_base+\"_x2_bn\")(x)\n\t\tx = Scale(axis=self.concat_axis, name=conv_name_base+\"_x2_scale\")(x)\n\t\tx = Activation(\"relu\", name=relu_name_base+\"_x2\")(x)\n\t\tx = ZeroPadding2D((1, 1), name=conv_name_base+\"_x2_zeropadding\")(x)\n\t\tx = Conv2D(nb_filter, (3, 3), name=conv_name_base+\"_x2\", use_bias=False)(x)\n\n\t\tif dropout_rate:\n\t\t\tx = Dropout(dropout_rate)(x)\n\n\t\treturn x", "def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, dropout):\n super().__init__()\n # Padding on the left side\n pad = torch.nn.ZeroPad2d((padding, 0, 0, 0))\n # Convolutional network (we didnt include weight norms)\n conv2d1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, kernel_size),\n stride=stride, dilation=dilation)\n elu = nn.ELU()\n dropout = nn.Dropout(dropout)\n conv2d2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(1, kernel_size),\n stride=stride, dilation=dilation)\n self.net = nn.Sequential(pad, conv2d1, elu, dropout, pad, conv2d2, elu, dropout)", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n\n blocks = [self.layer_11,\n 
self.layer_12,\n self.layer_13,\n self.layer_14,\n self.layer_15]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )", "def identity_building_block(input_tensor, kernel_size, filters, stage, block, training=None):\n \n filters1, filters2 = filters\n if tf.keras.backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = tf.keras.layers.Conv2D(filters1, kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2a')(input_tensor)\n x = tf.keras.layers.BatchNormalization(axis=bn_axis,\n name=bn_name_base + '2a',\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(\n x, training=training)\n x = tf.keras.layers.Activation('relu')(x)\n\n x = tf.keras.layers.Conv2D(filters2, kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2b')(x)\n x = tf.keras.layers.BatchNormalization(axis=bn_axis,\n name=bn_name_base + '2b',\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(\n x, training=training)\n\n x = tf.keras.layers.add([x, 
input_tensor])\n x = tf.keras.layers.Activation('relu')(x)\n return x", "def repeat_block(inp, out_filters, dropout=0.2):\n skip = inp\n\n c1 = ConvBnElu(inp, out_filters, dilation_rate=4)\n c1 = SpatialDropout2D(dropout)(c1)\n c2 = ConvBnElu(add([skip, c1]), out_filters, dilation_rate=3)\n c2 = SpatialDropout2D(dropout)(c2)\n c3 = ConvBnElu(c2, out_filters, dilation_rate=2)\n c3 = SpatialDropout2D(dropout)(c3)\n c4 = ConvBnElu(add([c2, c3]), out_filters, dilation_rate=1)\n\n return c4", "def __init__(self, channel_in, channel_out, kernel_size, stride, padding, residual=False):\n super().__init__()\n self.block = nn.Sequential(\n nn.Conv2d(channel_in, channel_out, kernel_size, stride, padding),\n nn.BatchNorm2d(channel_out)\n )\n self.act = nn.ReLU()\n self.residual = residual", "def _build(self, inputs):\n\n # calculate how many slots we need from the 3 dimensions of the incoming conv layer (filter w/h plus depth)\n dims = inputs.get_shape().as_list()\n new_dim = 1\n for d in dims[1:]: # leave first axis as is (batch)\n new_dim = new_dim * d # multiply 'em up\n return tf.reshape(inputs, [-1, new_dim]) # -1=keep this dimension as is (it could be anything as this is the number of samples) and flatten the others", "def build(self, input_shape: tf.Tensor):\n self.conv = tf.keras.layers.Conv2D(\n self.channels, (1, 1), input_shape=input_shape)\n self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)", "def conv_building_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), training=None):\n filters1, filters2 = filters\n if tf.keras.backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = tf.keras.layers.Conv2D(filters1, kernel_size, strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2a')(input_tensor)\n x = tf.keras.layers.BatchNormalization(axis=bn_axis,\n name=bn_name_base + '2a',\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(\n x, training=training)\n x = tf.keras.layers.Activation('relu')(x)\n\n x = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2b')(x)\n x = tf.keras.layers.BatchNormalization(axis=bn_axis,\n name=bn_name_base + '2b',\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(\n x, training=training)\n\n shortcut = tf.keras.layers.Conv2D(filters2, (1, 1), strides=strides,\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '1')(input_tensor)\n shortcut = tf.keras.layers.BatchNormalization(\n axis=bn_axis, name=bn_name_base + '1',\n momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)(\n shortcut, training=training)\n\n x = tf.keras.layers.add([x, shortcut])\n x = tf.keras.layers.Activation('relu')(x)\n return x", "def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for s in strides:\n layers.append(block(self.in_planes, planes, s))\n self.in_planes = planes * block.expansion\n\n return 
nn.SequentialCell(*layers)", "def identity_block(X, f, filters, stage, block):\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value. You'll need this later to add back to the main path. \n X_shortcut = X\n \n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n # Second component of main path\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = Add()([X,X_shortcut])\n X = Activation('relu')(X)\n \n return X", "def decoder_block(layer_in, skip_in, n_filters, dropout=True):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # add upsampling layer\n g = Conv2DTranspose(n_filters, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(layer_in)\n # add batch normalization\n g = BatchNormalization()(g, training=True)\n # conditionally add dropout\n if dropout:\n g = Dropout(0.5)(g, training=True)\n # merge with skip connection\n g = Concatenate()([g, skip_in])\n # relu activation\n g = Activation('relu')(g)\n\n return g", "def __init__(self, in_channels=3, in_channels1=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_11 = SegnetLayer_Encoder(in_channels1, 64, 2)\n self.layer_12 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_13 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_14 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_15 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_16 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_17 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_18 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_19 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_110 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_1110 = UNet_Decoder_Particular(n_classes * 2, n_classes)", "def conv_block(inputs, kernel_size, filters, stage, block,\n stride=(2, 2), training=True):\n nb_filter1, nb_filter2, nb_filter3 = filters\n\n scope_name = 'conv'+str(stage)+block+'_branch'\n scope_name = 'bn'+str(stage)+block+'_branch'\n\n x = slim.conv2d(inputs,nb_filter1,[1,1],stride=stride,padding='VALID',activation_fn=None,\n trainable=training,scope=scope_name+'2a')\n x = 
slim.batch_norm(x,scope=scope_name+'2a',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter2,[kernel_size,kernel_size],stride=[1,1],padding='SAME',\n activation_fn=None,trainable=training,scope=scope_name+'2b')\n x = slim.batch_norm(x,scope=scope_name+'2b',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter3,[1,1],stride=[1,1],padding='VALID',\n activation_fn=None,trainable=training,scope=scope_name+'2c')\n x = slim.batch_norm(x,scope=scope_name+'2c',is_training=training)\n\n\n\n shortcut = slim.conv2d(inputs,nb_filter3,[1,1],stride=stride,padding='VALID',activation_fn=None,\n trainable=training,scope=scope_name+'1')\n x = slim.batch_norm(shortcut,scope=scope_name+'1',is_training=training)\n\n \n x = tf.add(x,shortcut)\n x = tf.nn.relu(x,name='res'+str(stage)+block+\"_out\")\n return x", "def make_stage(num_blocks, input_channels, output_channels, stride, expand_ratio, norm, activation):\n blocks = []\n blocks.append(\n InvertedResBlock(input_channels, output_channels, stride=stride, expand_ratio=expand_ratio,\n norm=norm, activation=activation, use_shortcut=False)\n )\n for i in range(num_blocks - 1):\n blocks.append(\n InvertedResBlock(output_channels, output_channels, stride=1, expand_ratio=expand_ratio,\n norm=norm, activation=activation)\n )\n\n return blocks", "def __init__(self, in_channels=3, out_channels=3, n_layers=6):\n\n super().__init__()\n generator_list = nn.ModuleList()\n # c7s1-64\n generator_list.append(nn.ReflectionPad2d(3))\n generator_list.append(ConvBlock(\n in_channels, 64, kernel_size=7, stride=1, padding=0,\n leaky=False, instance_norm=True, bias=False\n ))\n # d128, d256 (down-sampling layers)\n generator_list.append(ConvBlock(\n 64, 128, kernel_size=3, stride=2, padding=1,\n leaky=False, instance_norm=True, bias=False\n ))\n generator_list.append(ConvBlock(\n 128, 256, kernel_size=3, stride=2, padding=1,\n leaky=False, instance_norm=True, bias=False\n ))\n # resblocks\n for n in range(n_layers):\n generator_list.append(ResnetBlock(256))\n # u128, u64 (up-sampleing layers)\n generator_list.append(ConvTransBlock(256, 128))\n generator_list.append(ConvTransBlock(128, 64))\n # c7s1-3\n generator_list.append(nn.ReflectionPad2d(3))\n generator_list.append(nn.Conv2d(64, out_channels,\n kernel_size=7, padding=0)\n )\n generator_list.append(nn.Tanh())\n # define generator model\n self.model = nn.Sequential(*generator_list)\n self._initialize_params()", "def identity_block(self,X,stage,block):\n conv_name_base='res'+str(stage)+block+'_branch'\n bn_name_base='bn'+str(stage)+block+'_branch'\n \n #retrieve the filters\n #F1,F2,F3=filters\n \n X_shortcut=X\n \n #first component of main path\n X=Conv3D(self.filter, (3,3,1),kernel_initializer=self.init,kernel_regularizer=self.regularizer,padding='same')(X)\n X=BatchNormalization(axis=3)(X)\n X=Activation('relu')(X)\n \n #second component of main path\n X=Conv3D(self.filter, (3,3,1),kernel_initializer=self.init,kernel_regularizer=self.regularizer,padding='same')(X)\n\n \n #final step, add the shortcut\n X=Add()([X,X_shortcut])\n X=Activation('relu')(X)\n \n \n return X", "def construct(self, x):\n x = self.conv(x)\n l0 = self.down_conv1(x)\n h0 = self.down_conv2(l0)\n l1 = self.down_conv3(h0 - x)\n return l1 + l0", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = 
SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)", "def construct(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n c1 = self.maxpool(x)\n\n c2 = self.layer1(c1)\n identity = c2\n if not self.weights_update:\n identity = F.stop_gradient(c2)\n c3 = self.layer2(identity)\n c4 = self.layer3(c3)\n c5 = self.layer4(c4)\n\n return identity, c3, c4, c5", "def conv_block(input, filters, phase=phase):\r\n\r\n conv_block = tf.layers.conv2d(\r\n inputs=input,\r\n filters=filters,\r\n kernel_size=3,\r\n padding=\"SAME\",\r\n activation=tf.nn.relu)\r\n\r\n #conv_block = tf.contrib.layers.batch_norm(\r\n # conv_block, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n #conv_block = tf.nn.leaky_relu(\r\n # features=conv_block,\r\n # alpha=0.2)\r\n #conv_block = tf.nn.relu(conv_block)\r\n\r\n return conv_block", "def ResBlock(input_tensor, filters):\n \n conv_1 = Conv2D(filters = filters, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal') \n conv_1a = conv_1(input_tensor) # Shared weights conv layer\n batch_1 = BatchNormalization()(conv_1a)\n relu_1 = Activation(\"relu\")(batch_1)\n drop_1 = Dropout(drop)(relu_1)\n conv_1b = conv_1(drop_1) # Shared weights conv layer\n batch_1 = BatchNormalization()(conv_1b)\n return batch_1", "def _conv_block_d(x, out_channel):\n x = tf.keras.layers.Conv2D(\n out_channel, kernel_size=3, strides=1, padding='same', use_bias=False)(\n x)\n x = tf.keras.layers.BatchNormalization(momentum=0.8)(x)\n x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)\n\n x = tf.keras.layers.Conv2D(\n out_channel, kernel_size=4, strides=2, padding='same', use_bias=False)(\n x)\n x = tf.keras.layers.BatchNormalization(momentum=0.8)(x)\n x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)\n return x", "def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):\n inputs = Input(input_shape)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding='same',\n kernel_initializer=init_w)(inputs)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding=\"same\",\n kernel_initializer=init_w)(c1)\n\n # pool down to next layer\n pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)\n\n conv_depth *= 2\n\n # convolute down again\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool1)\n\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv2)\n \n # pool down again\n pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)\n\n conv_depth *= 2 \n\n # Convolution\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool2)\n\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv3)\n \n # pool down\n pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)\n\n conv_depth *= 2 \n # Convolution\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool3)\n\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = 
(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv4)\n \n # pool down \n pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)\n\n conv_depth *=2 \n\n # Convolution\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool4)\n\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv5)\n\n drop = Dropout(dropout)(conv5)\n\n conv_depth /= 2\n conv_depth = int(conv_depth) \n # do upsampling\n up1 = UpSampling2D(size = (2,2))(drop)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up1)\n \n # add in skip info\n cat1 = concatenate([conv4,conv6],axis = 3)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat1)\n\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv6)\n\n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up2 = UpSampling2D(size = (2,2))(conv6)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up2)\n \n # add in skip info\n cat2 = concatenate([conv3,conv7],axis = 3)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat2)\n\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv7)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up3 = UpSampling2D(size = (2,2))(conv7)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size=(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up3)\n \n # add in skip info\n cat3 = concatenate([conv2,conv8],axis = 3)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat3)\n\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv8)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up4 = UpSampling2D(size = (2,2))(conv8)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up4)\n \n # add in skip info\n cat4 = concatenate([c1,conv9],axis = 3)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat4)\n\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv9)\n\n outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)\n\n return outputs,inputs", "def identity_block(inputs, kernel_size, filters, stage, block,\n training):\n nb_filter1, nb_filter2, nb_filter3 = filters\n\n scope_name = 'conv'+str(stage)+block+'_branch'\n scope_name = 'bn'+str(stage)+block+'_branch'\n\n x = slim.conv2d(inputs,nb_filter1,[1,1],stride=[1,1],padding='VALID',activation_fn=None,\n trainable=training,scope=scope_name+'2a')\n x = 
slim.batch_norm(x,scope=scope_name+'2a',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter2,[kernel_size,kernel_size],stride=[1,1],padding='SAME',\n activation_fn=None,trainable=training,scope=scope_name+'2b')\n x = slim.batch_norm(x,scope=scope_name+'2b',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter3,[1,1],stride=[1,1],padding='VALID',\n activation_fn=None,trainable=training,scope=scope_name+'2c')\n x = slim.batch_norm(x,scope=scope_name+'2c',is_training=training)\n\n \n x = tf.add(x,inputs)\n x = tf.nn.relu(x,name='res'+str(stage)+block+\"_out\")\n return x" ]
[ "0.594262", "0.5939619", "0.59235996", "0.59181947", "0.5897077", "0.5897077", "0.587706", "0.587706", "0.58607095", "0.5825037", "0.58241177", "0.58117056", "0.57984984", "0.57984984", "0.57732284", "0.5751117", "0.570624", "0.568427", "0.5672911", "0.5664635", "0.5664635", "0.56595457", "0.5653298", "0.5630465", "0.5630259", "0.56150496", "0.56100726", "0.5574252", "0.5537112", "0.5512556", "0.5511769", "0.5507139", "0.5506787", "0.5495442", "0.54949397", "0.5487569", "0.5486907", "0.5473573", "0.5466588", "0.5466212", "0.54621667", "0.5453761", "0.5447732", "0.5427721", "0.54268634", "0.54242176", "0.54229957", "0.5420608", "0.5418447", "0.5409364", "0.5408405", "0.5406218", "0.54010785", "0.5384983", "0.5364774", "0.5360394", "0.53593236", "0.53429085", "0.5338419", "0.5331182", "0.5330854", "0.53244865", "0.53193647", "0.5312547", "0.53039294", "0.5302793", "0.52918077", "0.52904344", "0.52904344", "0.5287505", "0.5285256", "0.52848905", "0.5284523", "0.52828246", "0.5280927", "0.52721965", "0.5271037", "0.5270415", "0.52665365", "0.5263786", "0.52624303", "0.5262287", "0.5259172", "0.52578914", "0.52572864", "0.525295", "0.5246296", "0.5246102", "0.52439755", "0.5231819", "0.522567", "0.5224193", "0.5224008", "0.52145725", "0.52129024", "0.5207966", "0.5207489", "0.52050436", "0.5204633", "0.5203309" ]
0.5344787
57
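Each record closes with a parallel list of negative scores, a single document score, and an integer rank (here 0.5344787 and 57; the next record shows 0.0 and -1). One reading that is consistent with these values is that the rank counts how many negatives scored strictly higher than the positive document, with -1 marking an unscored record. The sketch below illustrates that inferred interpretation; it is an assumption about how the field may have been computed, not documented behaviour of the dataset, and the helper name is hypothetical.

```python
# Hypothetical helper: infer the rank field from a document score and the
# list of negative scores. Consistent with the values shown above
# (0.5344787 -> 57), the rank is taken as the number of negatives that
# scored strictly higher, and -1 marks a record whose document score is 0.0.
# This is an inferred reading, not a confirmed definition.
from typing import List


def infer_document_rank(document_score: float, negative_scores: List[float]) -> int:
    if document_score == 0.0:          # unscored / missing positive document
        return -1
    return sum(1 for s in negative_scores if s > document_score)


# Toy check with made-up numbers in the same style as the fields above:
print(infer_document_rank(0.53, [0.59, 0.55, 0.51]))  # -> 2
print(infer_document_rank(0.0, [0.59, 0.55, 0.51]))   # -> -1
```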
This gets called when the source file was moved.
def updateFontPath(self, newFontPath): self.fontPath = newFontPath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def move_file(self, ctx):\n pass", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)", "def source_finished(self):\n # Don't raise NotImplementedError, IDE complains\n raise RuntimeError(\"source_finished called on a regular plugin\")", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def on_modified(self, event):\n super(myEventHandler,self).on_modified(event)\n if event.is_directory:\n try:\n source = event.src_path\n dest = event.src_dest\n pathtoonedir = self.onedir.getonedirrectory()\n source = source.replace(pathtoonedir ,\"\")\n dest = dest.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(source, dest)\n except Exception as e:\n print e\n exit(1)\n else:\n source = event.src_path\n try:\n #use os.path.split to get file name and path\n splitpath = split(source)\n file = splitpath[1]\n if file.startswith('.'):\n return\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! 
\" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n exit(1)", "def on_moved(self, event):\n\n # build the relative source and destination paths\n source_path = event.src_path.replace(self.root_path, \".\")\n destination_path = event.dest_path.replace(self.root_path, '.')\n is_directory = event.is_directory\n\n # propagate the moved event if server connection is established\n if self.protocol.connected:\n self.protocol.send_move_event(is_directory, source_path, destination_path)\n else:\n logging.info(\"Connection with server has not been established, changes will not be propagated.\")", "def on_created(self, event):\n print(\"Created\")\n time.sleep(5)\n self.moveFile(event.src_path)", "def on_file_changed(self, path):\n\t\tpass", "def act_move_file(self, file_source, file_target):\n try:\n if not os.path.isfile(file_source):\n return\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.move(file_source, file_target)\n #shutil.copy2(file_source, file_target)\n #os.remove(file_source)\n self.logger.debug('%s: Action: <move> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file move: %s -> %s', file_source, file_target)", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def mv(self, source: str, filename: str) -> None:\n\n self.cp(source, filename)\n self.rm(source)", "def pre_move_hook(self, from_module, to_module):\n raise NotImplementedError()", "def handle_file(self, source_path, dest_path):\n raise NotImplemented", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. 
%s\" %(e))", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def process(self, source, dest):\n\n if os.path.isfile(dest):\n print(\"File %s exists -> aborting\" % dest)\n exit(1)\n print(dest)\n \n fin = open(source)\n fout = open(dest, 'w')\n for l in fin.readlines():\n l = l.replace(\"AUTHOR\", self.author)\n l = l.replace(\"DESCRIPTION\", self.description)\n l = l.replace(\"NAMESPACE\", self.namespace)\n l = l.replace(\"MyComponent\", self.className)\n l = l.replace(\"INCDIR\", self.hDir)\n l = l.replace(\"CXXDIR\", self.cxxDir)\n l = l.replace(\"YEAR\", str(self.now.year))\n l = l.replace(\"DATE\", \"%d %s %d\" % (self.now.day, self.now.strftime(\"%b\"), self.now.year))\n fout.write(l)\n fout.close()\n fin.close()", "def handleMove(self):\n pass", "def moveFile(sourceFullPath,targetDir):\n\n thisFunc = inspect.currentframe().f_code.co_name\n try:\n shutil.move(sourceFullPath,targetDir)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def on_modified(self, event):\n \n if not event.is_directory: \n\n file_name = os.path.basename(event.src_path)\n \n if file_name not in self.ignore_files:\n parent = os.path.dirname(event.src_path)\n file_id = list(filter(lambda f: f[\"name\"] == file_name, self.filesystem[parent][\"files\"]))[0][\"id\"]\n self.gapy.update_file(file_id, path=parent)\n self.gapy.logger.info(\"The file {} was modified, the content was updated\".format(file_name, parent))\n print(f\"\\nThe file {file_name} was modified and synchronized\")", "def _tell_source(self) -> int:\n raise NotImplementedError() # pragma: no cover", "def renamed(self, source, dest):\r\n self.__close_and_reload(source, new_filename=dest)", "def process_IN_MOVED_TO(self, event):\n self.ProcessFile(event.name)", "def movedir(self):\n pass", "def movefile(destpath,filename,sourcepath):\n\n\tcommand = 'mv ' + filename + ' ' + destpath\n\t\n\ttry :\n\t\tst = commands.getstatusoutput(command)\n\texcept Exception:\n\t\traise", "def test_verify_changed_source_file(self):\n # This test was made to pass in fixing Bug #1354880\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])", "def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type # print now only for degug\n\n for i in self.ignore:\n if i in event.src_path or os.path.isdir(event.src_path):\n print \"Ignoring...\"\n return\n\n mod_file = event.src_path.split(self.source)[1]\n for r in self.rules:\n mod_file = mod_file.replace(r[0], r[1])\n\n print \"Writing:\", (self.destination + mod_file)\n \n input_file = utils.readFile(event.src_path)\n\n file_type = mod_file.split(\".\")[-1]\n reverted = utils.revert( input_file, \"(*\", \"*)\" ) if file_type == \"thy\" else utils.revert( input_file, \"/*\", \"*/\" )\n \n if len( reverted ) == 0 and len( input_file ) != 0:\n print \"Something might be wrong??\"\n else: utils.writeFile( self.destination + mod_file, reverted )", "def move_back(self) -> None:\n if self._file_was_moved:\n os.rename(self._new_path, self._file_path)\n pass", "def touch_moved(self, 
touch):\n\t\tpass", "def move_file(source, destination):\n shutil.move(source, destination)", "def refresh_source(self):\n pass", "def on_deleted(self, event):\n super(myEventHandler,self).on_deleted(event)\n #print \"Removed: \" + event.src_path\n if self.onedir.cookies is None or not self.onedir.autosyncstatus():\n return\n\n source = event.src_path\n if event.is_directory:\n try:\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = source.replace(pathtoonedir ,\"\")\n self.onedir.deldirectory(relpath)\n except Exception as e:\n print \"Error syncing directory\" + e\n exit(1)\n else:\n try:\n #use os.path.split to get file name and path\n splitpath = split(source)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n #print pathtoonedir\n #print \"truncated path:\"\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.deletefile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n exit(1)", "def record_move(self, original_filepath, new_filepath):\r\n self.record[new_filepath] = original_filepath\r\n logging.debug('Recorded move from {} to {}'.format(original_filepath, new_filepath))", "def process_IN_MOVED_FROM(self, event):", "def _rename_complete(self, src):\n if not src.path.endswith('_copied'):\n fname = op.basename(src.path)\n new_path = \"{0}_copied\".format(src.path)\n os.rename(src.path, new_path)\n # fix the path in the BIDSTree object also\n if isinstance(src, BIDSTree):\n src.path = new_path\n # also rename the branch in the filetree\n sid = self.master.file_treeview.sid_from_text(fname)\n self.master.file_treeview.item(sid[0],\n text=\"{0}_copied\".format(fname))\n # the hidden filepath value also needs to be updated\n new_vals = list(self.master.file_treeview.item(sid[0])['values'])\n new_vals[1] = new_path\n self.master.file_treeview.item(sid[0], values=new_vals)", "def move(self):\n pass", "def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)", "def _copy_file ( self, source, dest ):\n return", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def touched_files(self, parent):", "def move(self, dst, src): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def _rename_ondisk(self):\n if not self.has_moved or not self.renames_remaining:\n return\n\n try:\n os.rename(self.rename_phase_src, self.rename_phase_dst)\n except Exception:\n sys.stderr.write(\"Failed to renamed '%s' to '%s'\\n\" %\n (self.rename_phase_src,\n self.rename_phase_dst))\n raise\n\n self._rename_phase += 1", "def setSourceFile(filename):", "def 
do_source(self, line):\n\n if self.root_directory:\n self.source_file = self.root_directory + \"/\" + line\n self.do_check_file(self.source_file)\n else:\n self.source_file = line\n self.do_check_file(self.source_file)", "def __post_init__(self, *args, **kwargs) -> None:\n super().__post_init__(*args, **kwargs)\n\n self._path = Path(source_file).resolve()\n\n super().__init__(length, length)\n\n if not self._path.exists():\n raise ConfigFileError(\n f\"The source file {self.path} does not exist...\")", "def move_to_complete(metadata: Metadata):\n\n func = f\"{__name__}.move_to_complete\"\n\n metadata_updated = get_destination(metadata)\n moved = move(metadata[\"full_clipname\"], metadata_updated[\"destination\"])\n metadata_updated[\"destination\"] = moved\n\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The file was moved from: {metadata_updated['full_clipname']}\",\n )\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The file was moved to: {metadata_updated['destination']}\",\n )\n\n return metadata_updated", "def delete_original( self ):\n try:\n os.remove( self.PATH_TO_SOURCE_FILE )\n copy_check = utility_code.checkFileExistence( self.PATH_TO_SOURCE_FILE ) # should not exist\n if copy_check == 'exists':\n message = 'deletion of original file at ```%s``` failed, as determined by copy_check' % self.PATH_TO_SOURCE_FILE\n log.error( message )\n sys.exit( message )\n else:\n log.info( 'deletion successful of original file at ```%s```' % self.PATH_TO_SOURCE_FILE )\n except Exception, e:\n message = 'deletion of original file at ```%s``` failed; exception, `%s`' % ( self.PATH_TO_SOURCE_FILE, unicode(repr(e)) )\n log.error( message )\n sys.exit( message )\n return", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def move_from_temp_directory(self):", "def _onmove(self, event):", "def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type\n\n if os.path.isfile(\"/Users/filename.zip\") == True:\n os.remove(\"/Users/filename.zip\")\n print (\"existing file is removed \")\n shutil.make_archive(\"directory\", \"zip\", \"/Users/directory/\")\n print (\"delete existing zip file and created a new zip file\")\n else:\n print (\"There is no zip file at the moment\")\n shutil.make_archive(\"directory\",\"zip\", \"/Users/directory\")\n print (\" A new zip file is created now \")", "def process(self, event):\n # the file will be processed there\n print (event.src_path, event.event_type) # print now only for degug", "def reloadfile(self, ):\n self.loadfile()", "def move(self):\n raise NotImplementedError", "def copy_source(self, filename, new_filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n new_file_path = os.path.join(self.storage_path, new_filename)\n shutil.copyfile(file_path, new_file_path)", "def test_verify_changed_source_file_adjust_mtime(self):\n\n # Get the atime and mtime of the file\n file_info = os.stat('testfiles/various_file_types/executable')\n\n # Set the atime and mtime of the file to the time that we collected, as on some systems\n # the times from a stat call don't match what a utime will set.\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Set the atime and mtime for the file 
back to what it was prior to the edit\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])", "def replace(self):\n if self.success is False:\n raise TaskError('not ready')\n try:\n temp_src = '/tmp/' + str(random.randint(10000, 99999)) + '.mp3'\n os.move(self.source, temp_src)\n os.move(self.target, self.source)\n os.unlink(temp_src)\n except OSError as e:\n print(e)", "def change_block(self, move):\n fpath = os.path.split(os.path.abspath(self.source_path))[0] # current directory\n fname = os.path.split(os.path.abspath(self.source_path))[1] # current file\n pre, _ = fname.split('_B')\n flist = glob.glob(fpath + '/' + pre + '_B*.nwb')\n curr_ind = flist.index(self.source_path)\n if curr_ind + move == len(flist):\n new_file = flist[0]\n else:\n new_file = flist[curr_ind + move]\n self.open_another_file(filename=new_file)", "def move_to(self, path: str) -> None:\n self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path))\n os.rename(self._file_path, self._new_path)\n self._file_was_moved = True", "def on_modified(self, event):\n path = Path(event.src_path)\n if path.is_file() and path.suffix == '.json':\n self.load_configuration(path)\n self.hook(self.configuration)", "def addName(src):\n global dicttomove\n global finaldicttomove\n global listtomove\n global filesplitter\n listp = src.split(filesplitter)\n oldName = listp[len(listp)-1]\n\n #request a name change\n extlist = oldName.split('.')\n ext = extlist[len(extlist)-1]\n print(\"type 'x' to skip moving this file\")\n print(\"type 'xall' to skip all future files and move whatever you renamed so far\")\n print(\"type 'q' to quit and move nothing\")\n print(\"NO SPACES ALLOWED IN NAME PROVIDED\")\n finalName = input(\"curr name: \"+oldName+\" <typeName>.\"+ext+\" :\")\n if(finalName == 'x'):\n i = -1\n for p in range(0, len(listtomove)-1):\n if(listtomove[p] == src):\n i = p\n listtomove.pop(i)\n elif(finalName == 'q'):\n sys.exit(2)\n elif(finalName == \"xall\"):\n print(\"Skipping the rest of the source files ....\")\n return True\n else:\n finalName = finalName+'.'+ext\n i = -1\n for p in range(0, len(listtomove)-1):\n if(listtomove[p] == src):\n i = p\n finaldicttomove[src] = os.path.join(destfolder,finalName)\n return False", "def set_source_file(self, source_file):\n self.set_attribute(\"source_file\", source_file)", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def process(self):\n first_line = self.setup[\"first_line\"]\n last_line = self.setup[\"last_line\"]\n\n self.logger.info(\"Using lines %s - %s\", first_line, last_line)\n\n path_temp = \"{}_\".format(self.path)\n\n with open(self.path, \"r\") as src, open(path_temp, \"w\") as dest:\n lines = src.r..\n copy_lines = lines[first_line-1:last_line]\n dest.write(\"\".join(copy_lines))\n\n os.rename(path_temp, self.path)", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def update_source(self):\n if self.verbose:\n print(\"Updating source\")\n self.source.data = self.source_data\n if self.source.selected is not None:\n self.source.selected.indices = self.selection\n for c in 
self.callbacks[\"update_source\"]:\n c()\n self.pending_update = False\n if self.update_buffer is not None:\n self.context.doc.add_next_tick_callback(self.update_buffer)\n self.update_buffer = None", "def update(src):", "def edited_file_locations(self):", "def on_created(self, event):\n super(myEventHandler,self).on_created(event)\n #not syncing empty directories serverside atm\n if self.onedir.cookies is None or not self.onedir.autosyncstatus():\n return\n source = event.src_path\n if event.is_directory:\n try:\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = source.replace(pathtoonedir ,\"\")\n self.onedir.senddirectory(relpath)\n except Exception as e:\n print \"Error syncing directory\" + e\n exit(1)\n else:\n source = event.src_path\n try:\n #use os.path.split to get file name and path\n splitpath = split(source)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n exit(1)", "def target_test_file_source_content():\n return 'changed'", "def process(self, source_path: pathlib.Path) -> bool:", "def CopyFileTo(self, filename): # real signature unknown; restored from __doc__\n pass", "def startFile(self, newFileName):\n \n pass", "def file_move(session, dc_ref, src_file, dst_file):\n LOG.debug(\"Moving file from %(src)s to %(dst)s.\",\n {'src': src_file, 'dst': dst_file})\n vim = session._get_vim()\n move_task = session._call_method(\n session._get_vim(),\n \"MoveDatastoreFile_Task\",\n vim.get_service_content().fileManager,\n sourceName=src_file,\n sourceDatacenter=dc_ref,\n destinationName=dst_file,\n destinationDatacenter=dc_ref)\n session._wait_for_task(move_task)\n LOG.debug(\"File moved\")", "def rename(self, target):\r\n py.process.cmdexec(\"svn move --force %s %s\" %(str(self), str(target)))", "def post_move_class_propagation(token_stream, parse_tree, args):\n has_import = False\n has_exact_import = False\n\n file_to_check = open(file=args.file, mode='r')\n for line in file_to_check.readlines():\n text_line = line.replace('\\n', '').replace('\\r', '').strip()\n if (text_line.startswith('import') and text_line.endswith(source_package + '.' + class_identifier + ';')) \\\n or (text_line.startswith('import') and text_line.endswith(source_package + '.*;')):\n has_import = True\n break\n if (text_line.startswith('import') and text_line.endswith(target_package + '.' 
+ class_identifier + ';')) \\\n or (text_line.startswith('import') and text_line.endswith(target_package + '.*;')):\n has_exact_import = True\n break\n\n if not has_exact_import:\n print(f\"Start checking file \\\"{file_to_check.name}\\\" *** {file_counter}/100\")\n\n replace_dependent_object_listener = ReplaceDependentObjectsListener(\n common_token_stream=token_stream, source_package=source_package, target_package=target_package,\n class_identifier=class_identifier, filename=args.file, has_import=has_import\n )\n walker = ParseTreeWalker()\n walker.walk(t=parse_tree, listener=replace_dependent_object_listener)\n\n with open(args.file, mode='w', newline='') as f:\n f.write(replace_dependent_object_listener.token_stream_rewriter.getDefaultText().replace(\"\\r\", \"\"))\n\n print(f\"Finish checking file \\\"{file_to_check.name}\\\" *** {file_counter}/100\")", "def transcode(self, src: Path, dest: Path) -> None:\n pass", "def on_deleted(self, event):\n\t\tfileInfo = event.src_path.split('/')\n\n\t\tprint fileInfo[2] + ' deleted from ' + fileInfo[1]", "def move(self, newPath):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.move(newPath)\n\t\telse:\n\t\t\tsuper( textureFile, self ).move( newPath )", "def testMoveAndCopyFile(self):\n try:\n remoteLocator = self.__pathPdbxDictionaryFile\n fn = self.__fileU.getFileName(remoteLocator)\n # _, fn = os.path.split(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n # Test copy file\n dPath2 = os.path.join(self.__workPath, \"tdir\")\n ok = self.__fileU.mkdir(dPath2)\n self.assertTrue(ok)\n lPath2 = os.path.join(dPath2, fn)\n ok = self.__fileU.put(lPath, lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertTrue(ok)\n # Remove copied file (to test moving file next)\n ok = self.__fileU.remove(lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertFalse(ok)\n # Test move file\n ok = self.__fileU.replace(lPath, lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertFalse(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertTrue(ok)\n # Now clean up files and dirs\n ok = self.__fileU.remove(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.remove(dPath2)\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def parse_source_file(self, filepath):\n raise NotImplementedError('Not Implemented')", "def doEvent(self, source):\n pass", "def on_deleted(self, event):\n\n file_name = os.path.basename(event.src_path)\n self.gapy.logger.warn(\"The file {} was deleted from local system\".format(file_name))\n print(f\"\\n\\033[91m The file {file_name} was deleted from local system \\033[0m\")\n choice = input(\"\\nDo you want to remove the file from Google Drive? 
[y/n]: \")\n \n if re.match(\"[yY]\", choice):\n file_name = os.path.basename(event.src_path)\n parent = os.path.dirname(event.src_path)\n file_id = list(filter(lambda f: f[\"name\"] == file_name, self.filesystem[parent][\"files\"]))[0][\"id\"]\n self.gapy.delete_file(file_id)\n self.remove_from_filesystem(event.src_path)\n self.gapy.logger.warning(\"The file {} was removed from Google Drive\".format(file_name))\n print(f\"\\n\\033[91m The file {file_name} was also removed from Google Drive \\033[0m\")\n\n else:\n self.remove_from_filesystem(event.src_path)\n self.gapy.logger.warning(\"\\nThe file {} is not removed from Google Drive\".format(os.path.basename(event.src_path)))\n \n self.update_fs()", "def classify(source_name):\n maindir = os.path.dirname(__file__)\n subdir = os.path.join(maindir, source_name)\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n #for fits_file in glob.glob('*.fits')\n for fits_file in glob.glob('*.fits'):\n fits_content = fits.open(fits_file)\n try:\n if fits_content[0].header['targname'] == source_name:\n fits_content.close()\n new_name = os.path.join(subdir, fits_file)\n os.rename(fits_file, new_name)\n print 'moved file {0}'.format(fits_file)\n except KeyError:\n pass\n finally:\n fits_content.close()", "def on_dir_change(self, event):\r\n\r\n if self.dir_change_callback is not None:\r\n self.dir_change_callback(event)\r\n event.Skip()", "def move_callback(self, old_chain, old_key, new_chain, new_key):\n src = os.path.join(self._to_path(old_chain), old_key)\n dst = os.path.join(self._to_path(new_chain), new_key)\n shutil.move(src, dst)", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def copy_file_check(self):\n pass", "def move_to(self, file_name, to_dir, change_name_to=None):\n raise NotImplementedError", "def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1", "def move_file(self, from_path: str, to_path: str, force: bool = False) -> Dict:\n raise NotImplementedError", "def copy_from(self, file_name, from_dir):\n raise NotImplementedError", "def move(self, dest_fqpath):\n ret = move_file(self._host, self._fqpath, dest_fqpath)\n\n if ret:\n # TODO: change this to use a setter/getter for heavy lifting once\n # and can reset everything from one place\n self._previous_fqpath = self._fqpath\n self._fqpath = dest_fqpath\n\n return True\n\n return False", "def getSource():", "def _process_file_movement(src:str, dest:str, is_move=False)->bool:\n debug_str = \"move\" if (is_move) else \"copy\"\n \n objects = _list_objects(src) # list objects\n for obj in objects:\n if _is_dir(dest) or _is_dir(src):\n temp_dest = _append_object(dest, _get_dest_obj_name(src, obj))\n else:\n temp_dest = dest\n \n if _is_s3(src) and _is_s3(dest): #s3 to s3\n src_bucket, _ = _extract_bucket_key(src)\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to {temp_dest}\")\n status = _copy_s3_to_s3(src_bucket, obj, dest_bucket, dest_key)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(src): # s3 to local\n src_bucket, _ = _extract_bucket_key(src)\n _create_local_dir(temp_dest) # create dir if doesn't exist\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to 
{temp_dest}\")\n status = _copy_s3_to_local(src_bucket, obj, temp_dest)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(dest): # local to s3\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file {obj} to {temp_dest}\")\n status = _copy_local_to_s3(obj, dest_bucket, dest_key)\n if status and is_move:\n os.remove(obj) \n \n if not status:\n raise Error(f\"S3 {debug_str} failed.\")\n return True", "def process_IN_MOVE_SELF(self, event):" ]
[ "0.7210631", "0.6951233", "0.68050367", "0.67851174", "0.6536601", "0.646156", "0.6355364", "0.63365686", "0.6311357", "0.6226218", "0.6219465", "0.61488163", "0.6109679", "0.6086167", "0.60812443", "0.60367584", "0.60130316", "0.60042614", "0.597683", "0.5955222", "0.5948722", "0.593198", "0.58642554", "0.5855672", "0.5852726", "0.58465827", "0.58456445", "0.5821817", "0.58145124", "0.5795147", "0.5738841", "0.5728433", "0.5715005", "0.5706316", "0.56958145", "0.5695712", "0.56953275", "0.5644726", "0.5620284", "0.5602683", "0.55697554", "0.55528367", "0.5522663", "0.54781485", "0.54705215", "0.5445233", "0.54435223", "0.54250604", "0.54138386", "0.54138386", "0.54136276", "0.5413263", "0.5408739", "0.5407876", "0.5403765", "0.5389233", "0.53751075", "0.53604496", "0.53553635", "0.53536344", "0.5343889", "0.5341188", "0.53376824", "0.5331757", "0.53149444", "0.53106654", "0.5308208", "0.5294342", "0.52916914", "0.52907217", "0.52892363", "0.52823305", "0.5280924", "0.5251935", "0.52484083", "0.5240589", "0.5236889", "0.5229119", "0.522655", "0.5222699", "0.51999426", "0.5199164", "0.51883703", "0.51861674", "0.51848936", "0.5184526", "0.51838213", "0.51822275", "0.51763976", "0.51724124", "0.51529455", "0.51507425", "0.5144185", "0.51419544", "0.5134443", "0.51330537", "0.5124069", "0.51041675", "0.5103965", "0.5097866", "0.5097743" ]
0.0
-1
Subclasses may override this to return a list of external files that clients can observe for changes.
def getExternalFiles(self): return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_files(self):\r\n return self._filelist", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def listFiles(self):\n pass", "def get_files(self):\n return self.ebook_file.get_files()", "def files(self):\r\n return self._files", "def files(self):\n try:\n return glob.glob(self.path)\n except (AttributeError, TypeError):\n try:\n return glob.glob(self.alias)\n except (AttributeError, TypeError):\n return []", "def files(self):\n return self._files", "def files(self):\r\n return files.Files(self)", "def get_files(self) -> tp.Iterable[str]:\n return os.listdir(self.path)", "def get_filenames(self):\n return self.filenames", "def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e", "def files(self):\n return self._changeset.get('files', [])", "def external_archives(self):\n return self._external_archives", "def getFiles(self):\n return self.model.getFiles()", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def list_files(self):\n re_css = re.compile(r'\\.css$')\n re_js = re.compile(r'\\.js$')\n re_adminlte2 = re.compile(r'adminlte2')\n file_list = []\n print \"static path is %s\" % self.static_path\n for dirpath, _, files in os.walk(self.static_path):\n if not re_adminlte2.search(dirpath):\n for name in files:\n if re_css.search(name) or re_js.search(name):\n file_list.append(os.path.join(dirpath, name))\n return file_list", "def files(self):\n return self._files.items()", "def GetFileNames(self):\n return self.files", "def get(self):\n return self._files", "def get_files(self):\n return self._files.values()", "def files(self):\r\n url = '{0}/files'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def extract_files(self) -> list:\n pass", "def filenames(self):\n return self._filenames", "def get_config_files(self):\n flag, i = self.inotify\n\n if flag:\n kwargs = {}\n\n if PY3:\n kwargs['timeout_s'] = 0\n\n filenames = set()\n\n for event in i.event_gen(**kwargs):\n if event is None:\n break\n\n filenames.add(event[3])\n\n return list(filenames)\n\n else:\n return os.listdir(self.watch)", "def get_all_files(self):\n dp = FileSystemDataProvider.FileSystemDataProvider(self.folder)\n filenames = dp.getFileNames()\n htmlOut = \"available files:\"+\", \".join(filenames)\n return htmlOut", "def filenames(self):\n pass", "def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])", "def get_files(self):\n return self._get_brains(\"File\")", "def all_changed_files(self):\n return [path_to_file_type(os.path.join(self.path, p)) for p in self.changed_paths() if p]", "def get_included_files(self):\n return self._includedfiles", "def getGlobusFiles(self):\n\t\treturn self.transfer_client.operation_ls(self.transfer_client.endpoint_search(DATA_ENDPOINT_NAME)[0]['name'])", "def all_files(self) -> List[IdentifiedFile]:\n return [self.main_file, *self.labware_files, *self.data_files]", "def make_files(self):\n return []", "def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []", "def contents(self):\n entries = []\n walk = next(os.walk(self.path))\n entries.extend(LocalFolder(os.path.join(walk[0], f)) for f in walk[1])\n entries.extend(LocalFile(os.path.join(walk[0], f)) for f in walk[2])\n return entries", "def files(self):\n files = 
[self.submission]\n if self.kind == 'script':\n files.append(self.exec_script)\n if self.kind == 'function':\n files.append(self.function)\n return files", "def included_files(self) -> Iterable[str]:\n return self._incl_files", "def listen_files_list(self, directory):\r\n files = [f for f in os.listdir(directory) if\r\n f[-len(self.fileExt):] == self.fileExt]\r\n return files", "def filelist(self):\n msg = \"Collection of (str) file paths to mock\"\n raise NotImplementedError(msg)", "def get_files_paths(self):\n return self.__files_paths", "def files(self):\n if self._files is None:\n if helpers['isoinfo']: # TODO\n # It's safe to specify -R even for non-rockridge ISOs\n args = [\"-i\", self.path, \"-f\", \"-R\"]\n # At this time we don't support Joliet extensions\n output = helpers['isoinfo'].call(args)\n result = []\n for line in output.split(\"\\n\"):\n # discard non-file output lines\n if not line or line[0] != \"/\":\n continue\n # Non-Rock-Ridge filenames look like this in isoinfo:\n # /IOSXR_CONFIG.TXT;1\n # but the actual filename thus is:\n # /iosxr_config.txt\n if self.disk_subformat != \"rockridge\" and \";1\" in line:\n line = line.lower()[:-2]\n # Strip the leading '/'\n result.append(line[1:])\n self._files = result\n return self._files", "def list_files(self):\n ret = []\n for fname in self.files:\n ret.append('filename: %s\\t replica locations: %s' %\n (fname, ','.join(self.files[fname])))\n return ret", "def get_files(self):\n\n if str(self.files) == \"unset\": return []\n if len(self.files) > 0: return self.files[:int(Settings.get_upload_max())]\n if len(Settings.get_input_as_files()) > 0:\n self.files = Settings.get_input_as_files()\n return self.files\n # prompt skip\n if not Settings.is_prompt() and Settings.get_category() == None:\n self.files = \"unset\"\n return []\n files = []\n if len(self.files) == 0:\n files = File.select_file_upload_method()\n if str(files[0]) == \"unset\" or str(files) == \"unset\":\n self.files = \"unset\"\n files = []\n if Settings.is_prompt(): return []\n if files == None: files = []\n # get files from appropriate source's menu selection\n if Settings.get_source() == \"google\":\n googleFiles = Google_File.get_files()\n if len(files) == 0 and len(googleFiles) > 0:\n files = Google_File.select_files()\n elif len(files) == 0 and len(googleFiles) == 0:\n self.files = \"unset\"\n return []\n elif Settings.get_source() == \"remote\":\n remoteFiles = Remote.get_files()\n if len(remoteFiles) > 0:\n files = Remote.select_files()\n elif len(files) == 0 and len(remoteFiles) == 0:\n self.files = \"unset\"\n return []\n elif Settings.get_source() == \"local\":\n localFiles = File.get_files()\n if len(files) == 0 and len(localFiles) > 0:\n files = File.select_files()\n elif len(files) == 0 and len(localFiles) == 0:\n self.files = \"unset\"\n return []\n filed = []\n for file in files:\n # turn all folders into their files\n if isinstance(file, Folder) or isinstance(file, Google_Folder): filed.extend(file.get_files())\n else:\n # flag that the files include a performer\n if hasattr(file, \"performer\"):\n self.hasPerformers = True\n filed.append(file)\n self.files = filed[:int(Settings.get_upload_max())]\n return self.files", "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def list_files(self):\n if self.remote:\n return self.remote.list_files()\n\n M.mset('DUZ',self.DUZ)\n M.mset('U', 
\"^\")\n if self.isProgrammer:\n M.mset('DUZ(0)', \"@\")\n rv = []\n s0 = \"0\"\n while s0 != \"\":\n s0, name = M.mexec(\n '''set s0=$order(^DIC(s0)) Q:s0'=+s0 I $D(^DIC(s0,0))&$D(^DIC(s0,0,\"GL\"))&$$VFILE^DILFD(s0) S s1=$P(^DIC(s0,0),U,1)''',\n M.INOUT(s0), M.INOUT(\"\"))\n if name:\n rv.append((name, s0))\n return rv", "def files(self):\r\n files = []\r\n for path in self.paths:\r\n if os.path.isdir(path):\r\n files.extend(glob.glob(os.path.join(path, f'*{self.ext}')))\r\n else:\r\n files.extend(glob.glob(path))\r\n return list(set(self.get_pattern(fname) for fname in files))", "def filepaths(self):\n pass", "def files():\n return get_cached(\"files.json\")", "def files(self) -> List[str]:\n return [packet.name for packet in self.packets.file_description.values()]", "def get_object_list(self, url):\n path = self.base_path / url\n return [\n os.fspath((Path(dirpath) / filename).relative_to(path))\n for dirpath, _, files in os.walk(path)\n for filename in files\n if filename != path\n ]", "def contents(self):\n # list_folder on \"/\" isn't supported for some reason.\n path = \"\" if self.path == \"/\" else self.path\n result = execute(pdbox.dbx.files_list_folder, path)\n entries = [get_remote(None, meta=e) for e in result.entries]\n\n # TODO: Verify that this works.\n while result.has_more:\n # As long as there are more pages to look through,\n # add their contents to the list of entries.\n more = execute(pdbox.dbx.files_list_folder_continue, result.cursor)\n entries.extend(get_remote(None, meta=e) for e in more)\n\n return entries", "def get_file_list(self, file_type='.pkl'):\n # Note (Somil): Since we moved from a string to a list convention for data directories, we are adding\n # additional code here to make sure it is backwards compatible.\n if isinstance(self.p.data_creation.data_dir, str):\n self.p.data_creation.data_dir = [self.p.data_creation.data_dir]\n \n file_list = []\n for i in range(len(self.p.data_creation.data_dir)):\n file_list.extend([os.path.join(self.p.data_creation.data_dir[i], f)\n for f in os.listdir(self.p.data_creation.data_dir[i]) if f.endswith(file_type)])\n return file_list", "def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files", "def get_imports() -> list[FileImporters]:\n g.ledger.changed()\n return g.ledger.ingest.import_data()", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def files(self) -> \"FileMetadataList\":\n from cognite.client.data_classes import FileMetadataList\n\n return self._retrieve_related_resources(FileMetadataList, self._cognite_client.files)", "def files(self):\n self._printer('\\tFiles Walk')\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n 
if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def retrieve_all_files(self):\n result = utilities.rscandir(\n self.folder(), ignore_dirs=[\".git\"])\n\n return result", "def get_all_js_files(self, root):\n res = []\n\n for fname in os.listdir(root):\n mo = re.match(r'(\\w+)\\.js$', fname)\n if mo:\n res.append({\n 'name': mo.group(1),\n 'src': file_contents(os.path.join(root, mo.group()))\n })\n\n return res", "def list_filenames(self):\n l = []\n for path, dirs, files in os.walk(self.archive_path):\n for file in files:\n l.append(os.path.relpath(os.path.join(path,file),self.archive_path))\n l.sort()\n return l", "def files(self):\n from office365.sharepoint.files.file_collection import FileCollection\n return self.properties.get(\"Files\",\n FileCollection(self.context, ResourcePath(\"Files\", self.resource_path)))", "def getOpenFilenames(self):\n filenames = []\n for editor in self.editors:\n fn = editor.getFileName()\n if fn is not None and fn not in filenames and os.path.exists(fn):\n # only return names of existing files\n filenames.append(fn)\n \n return filenames", "def get_file_handlers(self):\n return []", "def list_files():\n files = []\n for filename in os.listdir(UPLOAD_DIRECTORY):\n path = os.path.join(UPLOAD_DIRECTORY, filename)\n if os.path.isfile(path):\n files.append(filename)\n return jsonify(files)", "def gridfs_files(self):\n return self[\"files\"]", "def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return lst", "def Sources():\n return _sources", "def GetFilesForTool(self):\n raise NotImplementedError()", "def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed", "def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]", "def remote_paths(self) -> list:\r\n results: list = []\r\n\r\n if self.imports_node is not None:\r\n results.extend([node.text for node in filter(is_import_node, self.imports_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n if self.folders_node is not None:\r\n results.extend([node.text for node in filter(is_folder_node, self.folders_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n return results", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.append(os.path.join(root, name))\n return data_list", "def get_file_list():\n wb = xw.Workbook.caller()\n path_input = xw.Range('Macro', 'FilePath').value\n l_file_path = glob.glob(path_input + '[!~]*.*')\n l_file_name = [l.split('/')[-1] for l in l_file_path]\n xw.Range('Macro', 'FileField').clear_contents()\n xw.Range('Macro', 'C_FilePath').options(transpose=True).value = l_file_path\n xw.Range('Macro', 'C_FileName').options(transpose=True).value = l_file_name\n xw.Sheet('Macro').activate()\n wb.macro('ShowMsg')(\"Choose DataType for all the listed files\")", "def 
all_files(self):\n return self.common_files + self.collapse_human_files + self.reseq_human_files + self.sirv_files", "def get_files_to_be_indexed(self):\n\t\tfiles = self.get_all_files()\n\t\tfiles_list = []\n\t\tfor name in files:\n\t\t\tif(name.split('.')[-1] in self.accepted_formats and os.stat(os.path.join(self.root, name)).st_size < 5000000):\n\t\t\t\tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def filenames(self):\n return self._files.keys()", "def filenames(self):\n return self._files.keys()", "def FindFile(self, fd, external=True):\n return_list = []\n for sub_store in self.GetChildrenByPriority(allow_external=external):\n found = sub_store.FindFile(fd)\n if found:\n if isinstance(found, list):\n return_list.extend(found)\n else:\n return_list.append(found)\n\n return return_list", "def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret", "def files(self):\n files = []\n if self.package_type == 'package':\n file_data = dict([(k, self[k]) \\\n for k in ['size', 'sha1', 'sha256', 'md5sum']])\n file_data['name'] = self['filename'].split('/')[-1]\n files.append(file_data)\n else:\n for d in self['files']:\n file_data = d.copy()\n # Get checksum data as well...\n for key in ['sha1', 'sha256']:\n for data in self['checksums-' + key]:\n if file_data['name'] == data['name']:\n file_data[key] = data[key]\n files.append(file_data)\n return files", "def get_filelist(import_path, extension):\n filelist = []\n for root, dirs, files in os.walk(import_path):\n filelist += glob.glob(os.path.join(root, '*.' + extension))\n return filelist", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def GetCheckedFiles(self):\n return self._model.get_checked_files()", "def get_files(self):\n m = []\n for post in self:\n m.append(post.FileName)\n return list(sorted(set(m), reverse=True))", "def __getFileList(self, path, filterRe):\n path = os.path.abspath(path)\n files = []\n for dirname, _, names in os.walk(path):\n files.extend([os.path.join(dirname, f)\n for f in names\n if re.match(filterRe, f)]\n )\n return files", "def list_appro_files(self, root_path, files):\n listed_files = [os.path.join(root_path, f) for f in os.listdir(root_path) \n if os.path.isfile(os.path.join(root_path, f))]\n if self.extensions:\n listed_files = [f for f in listed_files \n if f.split(\".\")[-1] in self.extensions]\n return listed_files", "def files_list(self):\n path = os.path.join(self.path_to_sentences)\n log.info(\"Listing files from directory: %s\", path)\n all_files = os.listdir(path)\n wav_files_only = [filename for filename in all_files if\n filename.lower().endswith('.wav')]\n return wav_files_only", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def collect_documents(self):\n documents = []\n ignored = []\n for path in self.paths:\n try:\n current_document = MAE_Document(path)\n except UnsupportedMIMETypeError as e:\n ignored.append(str(e))\n else:\n documents.append(current_document)\n if ignored:\n print \"Some files were ignored:\"\n for file in ignored:\n print \"\\t%s\" % file\n return documents", "def GetExpectationFilepaths(self) -> List[str]:\n raise NotImplementedError()", "def ls(self):\n files = 
self.drive.files().list().execute().get(\"files\", [])\n for f in files:\n print(f[\"name\"], f[\"mimeType\"])", "def _deleted_sources(self):\r\n # We compute the list lazily.\r\n if self._lazy_deleted_sources is None:\r\n with self.context.new_workunit('find-deleted-sources'):\r\n if os.path.exists(self._analysis_file):\r\n products = self._analysis_parser.parse_products_from_path(self._analysis_file)\r\n buildroot = get_buildroot()\r\n old_sources = products.keys() # Absolute paths.\r\n self._lazy_deleted_sources = [os.path.relpath(src, buildroot) for src in old_sources\r\n if not os.path.exists(src)]\r\n else:\r\n self._lazy_deleted_sources = []\r\n return self._lazy_deleted_sources", "def listoffiles(self, path):\n return os.listdir(path)", "def getcontent(self):\n filelist=[]\n if len(self.filelist) == 0:\n return \"empty directory\"\n else:\n for file in self.filelist:\n filelist.append(file)\n return filelist", "def retrieve_tracked_files(self):\n result = []\n\n for key in self.repo.index.entries.keys():\n\n result.append(os.path.join(self.repo.working_dir, key[0]))\n\n return result", "def public_files(self) -> Pattern:\n return self._parse_pattern(self.get(\"public_files\", None))", "def getLocalFiles(self):\r\n\r\n for dirpath, dirnames, filenames in os.walk(self.dlLocation):\r\n for name in filenames:\r\n currentPath = os.path.join(dirpath, name)\r\n currentPath = re.sub('^\\w:', '', currentPath)\r\n currentPath = re.sub(r\"\\\\\",'/', currentPath)\r\n self.localStore.append(currentPath)\r\n\r\n return self.localStore" ]
[ "0.76476604", "0.74927044", "0.7429381", "0.73926634", "0.7326719", "0.7309371", "0.7282806", "0.7274433", "0.722302", "0.7204074", "0.71247035", "0.7104432", "0.7064401", "0.7045729", "0.70131254", "0.7007956", "0.69954735", "0.6967799", "0.6939747", "0.69316167", "0.6928061", "0.6897135", "0.68945515", "0.68898344", "0.6883732", "0.68604004", "0.68532354", "0.6811624", "0.6776267", "0.6767703", "0.67261225", "0.67132074", "0.6680076", "0.6672662", "0.66510135", "0.663105", "0.66194373", "0.66178465", "0.661633", "0.6614445", "0.66084826", "0.66004187", "0.65853024", "0.6576041", "0.65751386", "0.65699065", "0.6565852", "0.6558728", "0.65573144", "0.6553814", "0.6551043", "0.65301913", "0.6521345", "0.65117776", "0.6453544", "0.6453544", "0.6453544", "0.64456224", "0.6438004", "0.64155334", "0.6412316", "0.6406541", "0.6398806", "0.6395699", "0.6394148", "0.6391158", "0.63835406", "0.63754284", "0.63723266", "0.6368055", "0.63663834", "0.6361962", "0.6355272", "0.6349447", "0.6348301", "0.6347026", "0.6343161", "0.6340527", "0.63357586", "0.63357586", "0.6323376", "0.6312922", "0.6309253", "0.63008744", "0.63007563", "0.6295925", "0.62899685", "0.6289733", "0.62884915", "0.6286052", "0.62857527", "0.6282551", "0.6278931", "0.6268762", "0.6264447", "0.6260605", "0.6248994", "0.62350434", "0.6234825", "0.622891" ]
0.9027203
0
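A minimal sketch, under assumed names, of how a subclass might satisfy the getExternalFiles() contract described in the query above; the DesignspaceSourceFont class, its attributes, and the example paths are illustrative assumptions, not part of this dataset entry.

# Hedged sketch: a hypothetical subclass reports its extra on-disk
# dependencies so a client can watch them for changes.
class DesignspaceSourceFont:
    def __init__(self, designspace_path, source_paths):
        self.designspace_path = designspace_path
        self.source_paths = list(source_paths)

    def getExternalFiles(self):
        # Every file this font is built from, besides the main source file.
        return [self.designspace_path] + self.source_paths

font = DesignspaceSourceFont("MyFamily.designspace", ["Light.ufo", "Bold.ufo"])
print(font.getExternalFiles())  # ['MyFamily.designspace', 'Light.ufo', 'Bold.ufo']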
`externalFilePath` is None or the path of an external file. If it is None, the main source file was changed on disk; otherwise the externalFilePath was changed. This method should return True if the font can update itself, in which case font.load() will be called. If it returns False, the font will be discarded and rebuilt from scratch.
def canReloadWithChange(self, externalFilePath): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_font(src_root, dst_root, file_path, is_hinted, save_unmodified):\n\n src_file = os.path.join(src_root, file_path)\n\n print('Font file: %s' % src_file)\n font = ttLib.TTFont(src_file)\n modified = False\n\n modified |= fix_revision(font)\n modified |= fix_fstype(font)\n modified |= fix_vendor_id(font)\n modified |= fix_name_table(font)\n modified |= fix_attachlist(font)\n modified |= fix_os2_unicoderange(font)\n # leave line gap for non-noto fonts alone, metrics are more constrained there\n if font_data.font_name(font).find('Noto') != -1:\n modified |= fix_linegap(font)\n\n tables_to_drop = TABLES_TO_DROP\n if not is_hinted:\n modified |= drop_hints(font)\n tables_to_drop += ['fpgm', 'prep', 'cvt']\n\n modified |= drop_tables(font, tables_to_drop)\n\n fixed_path = fix_path(file_path, is_hinted)\n if fixed_path != file_path:\n print('changed file_path from \"%s\" to \"%s\"' % (file_path, fixed_path))\n modified = True\n\n if not modified:\n print('No modification necessary')\n if modified or save_unmodified:\n # wait until we need it before we create the dest directory\n dst_file = os.path.join(dst_root, fixed_path)\n dst_dir = path.dirname(dst_file)\n if not path.isdir(dst_dir):\n os.makedirs(dst_dir)\n font.save(dst_file)\n print('Wrote %s' % dst_file)", "def file_should_be_processed(self, filepath):\n try:\n image.load(filepath)\n return True\n except:\n return False", "def setExternal(self):\n self.__external = True", "def checkFileDirty(self, fn):\n for editor in self.editors:\n if Utilities.samepath(fn, editor.getFileName()):\n break\n else:\n return True\n \n res = self.checkDirty(editor)\n return res", "def LoadFile(self, path):\n # Post notification that a file load is starting\n ed_msg.PostMessage(ed_msg.EDMSG_FILE_OPENING, path)\n self.file.SetPath(path)\n txt = self.file.Read()\n if txt is not None:\n if self.file.IsRawBytes() and not ebmlib.IsUnicode(txt):\n self.AddStyledText(txt)\n self.SetReadOnly(True) # Don't allow editing of raw bytes\n else:\n self.SetText(txt)\n else:\n self.file.SetPath('')\n return False\n\n if self.file.GetLastError() != 'None':\n # Return false if there was an encoding error and a fallback\n # was used. 
So the caller knows to check the error status\n return False\n else:\n return True", "def has_changed(self) -> bool:\n # TODO: Add in change logic here\n state = None\n if state != self._file_state:\n self._changed_flag = True\n self._file_state = state\n return self._changed_flag", "def _load_external(self, url):\n if url.startswith('//'):\n # then we have to rely on the base_url\n if self.base_url and 'https://' in self.base_url:\n url = 'https:' + url\n else:\n url = 'http:' + url\n\n if url.startswith('http://') or url.startswith('https://'):\n css_body = self._load_external_url(url)\n else:\n stylefile = url\n if not os.path.isabs(stylefile):\n stylefile = os.path.abspath(\n os.path.join(self.base_path or '', stylefile)\n )\n if os.path.exists(stylefile):\n with codecs.open(stylefile, encoding='utf-8') as f:\n css_body = f.read()\n elif self.base_url:\n url = urllib.parse.urljoin(self.base_url, url)\n return self._load_external(url)\n else:\n raise ValueError(\"Could not find external style: %s\" %\n stylefile)\n return css_body", "def HasFont(self):\r\n\r\n return self._font != wx.NullFont", "def askopenfont(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def has_source_file( self ):\n return self._source_file is not None", "def updateFontPath(self, newFontPath):\n self.fontPath = newFontPath", "def change_exteditor(self):\r\n path, valid = QInputDialog.getText(self, self.tr('External editor'),\r\n self.tr('External editor executable path:'),\r\n QLineEdit.Normal,\r\n CONF.get(self.ID, 'external_editor/path'))\r\n if valid:\r\n CONF.set(self.ID, 'external_editor/path', unicode(path))", "def in_file(self):\n return self.on_disk and not self.in_cached_file", "def _check_or_apply_style(file_path, style_config, apply):\n # Ref: https://gist.github.com/oskopek/496c0d96c79fb6a13692657b39d7c709\n with open(file_path, \"r\") as f:\n notebook = nbformat.read(f, as_version=nbformat.NO_CONVERT)\n nbformat.validate(notebook)\n\n changed = False\n for cell in notebook.cells:\n if cell[\"cell_type\"] != \"code\":\n continue\n src = cell[\"source\"]\n lines = src.split(\"\\n\")\n if len(lines) <= 0 or \"# noqa\" in lines[0]:\n continue\n # yapf will puts a `\\n` at the end of each cell, and if this is the\n # only change, cell_changed is still False.\n formatted_src, cell_changed = yapf.yapflib.yapf_api.FormatCode(\n src, style_config=style_config)\n if formatted_src.endswith(\"\\n\"):\n formatted_src = formatted_src[:-1]\n if cell_changed:\n cell[\"source\"] = formatted_src\n changed = True\n\n if apply:\n with open(file_path, \"w\") as f:\n nbformat.write(notebook, f, version=nbformat.NO_CONVERT)\n\n return not changed", "def _check_if_cff_file_needs_rewriting(self, content):\n logger.info(\"Checking if we can re-use injection config file...\")\n if os.path.isfile(self.config_file_name) is False:\n logger.info(\"...no config file {} found.\".format(self.config_file_name))\n return True\n else:\n logger.info(\n \"...OK: config file {} already exists.\".format(self.config_file_name)\n )\n\n with open(self.config_file_name, \"r\") as f:\n file_content = f.read()\n if file_content == content:\n logger.info(\n \"...OK: file contents match, no update of {} required.\".format(\n self.config_file_name\n )\n )\n return False\n else:\n 
logger.info(\n \"...file contents unmatched, updating {}.\".format(\n self.config_file_name\n )\n )\n return True", "def external_use(self, external_use):\n if self.local_vars_configuration.client_side_validation and external_use is None: # noqa: E501\n raise ValueError(\"Invalid value for `external_use`, must not be `None`\") # noqa: E501\n\n self._external_use = external_use", "def update(file: Path, validate_assets: bool, external_url: str) -> None:\n\n mutate(file, validate_assets, external_url, upsert=False)", "def load_file(self, update=True): # type: (bool) -> None\n if os.path.isfile(self._file):\n logging.info('Cyra is reading your config from %s' % self._file)\n\n with open(self._file, 'r') as f:\n toml_str = f.read()\n self.load_toml(toml_str)\n else:\n self._modified = True\n\n # Write file if non existent or modified\n if update:\n self.save_file()", "def check_loader(self, dt):\n if EVENTS['FILE_PATH'] and EVENTS['CAN_WRITE']:\n self.editor.load_file(EVENTS['FILE_PATH'])\n EVENTS['CAN_WRITE'] = False", "def _before_reference_check(self, maya_file, client_data=None):\n\n if self.is_artella_path():\n self.validate_environment_for_callback('BeforeReferenceCheck')\n\n raw_full_name = maya_file.rawFullName()\n if not dccplugin.DccPlugin().is_path_translated(\n raw_full_name) and dccplugin.DccPlugin().is_artella_path(raw_full_name):\n convert_path = dccplugin.DccPlugin().convert_path(raw_full_name)\n maya_file.setRawFullName(convert_path)\n\n return True", "def check_cached_data_okay_to_use(self, cl_mfd):\n\n need_new = \"Will create new SFT file(s).\"\n\n logger.info(\"Checking if we can re-use existing SFT data file(s)...\")\n for sftfile in self.sftfilenames:\n if os.path.isfile(sftfile) is False:\n logger.info(\n \"...no SFT file matching '{}' found. {}\".format(sftfile, need_new)\n )\n return False\n logger.info(\"...OK: file(s) found matching '{}'.\".format(sftfile))\n\n if os.path.isfile(self.config_file_name):\n if np.any(\n [\n os.path.getmtime(sftfile) < os.path.getmtime(self.config_file_name)\n for sftfile in self.sftfilenames\n ]\n ):\n logger.info(\n (\n \"...the config file '{}' has been modified since\"\n \" creation of the SFT file(s) '{}'. {}\"\n ).format(self.config_file_name, self.sftfilepath, need_new)\n )\n return False\n else:\n logger.info(\n \"...OK: The config file '{}' is older than the SFT file(s)\"\n \" '{}'.\".format(self.config_file_name, self.sftfilepath)\n )\n # NOTE: at this point we assume it's safe to re-use, since\n # _check_if_cff_file_needs_rewriting()\n # should have already been called before\n elif \"injectionSources\" in cl_mfd:\n raise RuntimeError(\n \"Commandline requires file '{}' but it is missing.\".format(\n self.config_file_name\n )\n )\n\n logger.info(\"...checking new commandline against existing SFT header(s)...\")\n # here we check one SFT header from each SFT file,\n # assuming that any concatenated file has been sanely constructed with\n # matching CLs\n for sftfile in self.sftfilenames:\n catalog = lalpulsar.SFTdataFind(sftfile, None)\n cl_old = utils.get_commandline_from_SFTDescriptor(catalog.data[0])\n if len(cl_old) == 0:\n logger.info(\n \"......could not obtain comparison commandline from first SFT\"\n \" header in old file '{}'. 
{}\".format(sftfile, need_new)\n )\n return False\n if not utils.match_commandlines(cl_old, cl_mfd):\n logger.info(\n \"......commandlines unmatched for first SFT in old\"\n \" file '{}':\".format(sftfile)\n )\n logger.info(cl_old)\n logger.info(cl_mfd)\n logger.info(need_new)\n return False\n logger.info(\"......OK: Commandline matched with old SFT header(s).\")\n logger.info(\n \"...all data consistency checks passed: Looks like existing\"\n \" SFT data matches current options, will re-use it!\"\n )\n return True", "def reload_tailed_file(self):\n try:\n self.file_ = open(self.tailed_file, \"r\")\n self.size = os.path.getsize(self.tailed_file)\n\n # Go to the head of file\n self.file_.seek(0, 1)\n\n return True\n except:\n return False", "def hasChanged(self):\n return ((self.mtime != getmtime(self.path)) or\n (self.size != os.path.getsize(self.path)) )", "def load_file(self, file_path):\n self.disabled = True\n if not EVENTS['IS_OBJ']:\n self.disabled = False\n with open(file_path, 'r') as file:\n data = file.read()\n file.close()\n self.text = data\n EVENTS['EDITOR_SAVED'] = True", "def can_use_tool_config_disk_file(self, repository, repo, file_path, changeset_revision):\n if not file_path or not os.path.exists(file_path):\n # The file no longer exists on disk, so it must have been deleted at some previous\n # point in the change log.\n return False\n if changeset_revision == repository.tip(self.app):\n return True\n file_name = basic_util.strip_path(file_path)\n latest_version_of_file = \\\n self.get_latest_tool_config_revision_from_repository_manifest(repo, file_name, changeset_revision)\n can_use_disk_file = filecmp.cmp(file_path, latest_version_of_file)\n try:\n os.unlink(latest_version_of_file)\n except Exception:\n pass\n return can_use_disk_file", "def test_verify_changed_source_file(self):\n # This test was made to pass in fixing Bug #1354880\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])", "def file_changed(self):\n if not self.lst_file_item:\n return\n state = self.txt_state.text()\n new_filename = self.txt_file.text()\n self.lst_file_item.setText(new_filename)\n self.symbols[state] = new_filename\n\n error, self.preview_file = self.check_image(new_filename)\n if not error:\n self.lbl_image.setText(\"\")\n self.preview = True\n else:\n self.lbl_image.setText(error)\n self.update()", "def is_valid_file(self, file_path):\n return True", "def check_line_edits_and_refresh_filestate(self):\r\n\t\t# line edit changes (other places where filestate is updated: browse button clicks, ok click)\r\n\t\tif self.source_img_entry.isModified():\r\n\t\t\tself.filestate.set_source_img_filename(self.source_img_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.existing_case and self.source_db_entry.isModified():\r\n\t\t\tself.filestate.set_source_db_filename(self.source_db_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.sink_dir_entry.isModified():\r\n\t\t\tself.filestate.set_sink_dir_name(self.sink_dir_entry.text().replace(\"\\\\\", \"/\"))", "def has_unsaved_changes(self):\n return self._file_content != self.buffer.text", "def external(self) -> bool:\n return getattr(self.args, 'external', False) or self.is_sandbox", "def load_file(self, file_path):\n if self.no_update:\n return False\n import 
urllib\n\n remote_base_url = self._make_git_raw_base_url()\n remote_url = \"%s%s\" % (remote_base_url, file_path)\n self.echo(\"Loading %s from github\" % (file_path))\n web_file = urllib.URLopener()\n web_file.retrieve(remote_url, file_path)\n web_file.close()\n return True", "def dirty(self):\n return self._orig_line is not None", "def SetActiveBigFontFile(self,path):\n\t\tself.acad.ActiveDocument.ActiveTextStyle.BigFontFile=path", "def _externalEditor(self,prev_content):\n f, filename = tempfile.mkstemp()\n fd = os.fdopen(f, 'wb')\n fd.write(prev_content.encode(\"utf-8\"))\n fd.close()\n\n # Looping on \"data\" until we get well-formatted XML\n valid_xml = False\n text = \"\"\n while not valid_xml:\n try :\n raw_input(\"Launching external text editor.\" +\n \"Press enter to continue\")\n os.system(self.editor + \" \" +filename)\n\n #Retrieve edited text\n with open(filename, 'rb') as f:\n doc = xml.dom.minidom.parse(f)\n\n # Parse our XHTML file\n text = doc.getElementsByTagName(\"body\")[0].toxml()\n text = text.replace(\"<body>\", \"\").replace(\"</body>\", \"\")\n\n #All is fine, break loop\n valid_xml = True\n\n except xml.parsers.expat.ExpatError as err:\n error(\"Not valid XML\\n{0}\\n\"+\\\n \"Please, try again\".format(err))\n except Exception as err :\n error(\"Unknown error while parsing XML\\n\"+\\\n \"error type = {0}\\n\"+\\\n \"error display = {1}\\n\"+\\\n \"Please, try again\".format(type(err), err))\n\n os.unlink(filename)\n return text", "def is_declaring_file(self, address, file_path):", "def validate_txtfile(path):\n bFile = True if mimetypes.guess_type(path)[0] == 'text/plain' else False\n return bFile", "def process_file(self, filepath, only_if_updated=True):\n raise NotImplementedError()", "def rebuild_from_changed_theme_data(self):\n super().rebuild_from_changed_theme_data()\n has_any_changed = False\n\n if self._check_misc_theme_data_changed(attribute_name='rect_width',\n default_value=170,\n casting_func=int):\n has_any_changed = True\n\n if has_any_changed:\n self.rebuild()", "def load_font(fontSize):\n f1='/usr/share/fonts/corefonts/arialbd.ttf' \n f2='/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'\n if os.path.isfile(f1): font=ImageFont.truetype(f1,fontSize)\n if os.path.isfile(f2): font=ImageFont.truetype(f2,fontSize)\n return font", "def isExternalType(self):\n return self.typ in self.EXTERNAL_TYPES", "def SetFont(self, font):\r\n \r\n if self._header_win:\r\n self._header_win.SetFont(font)\r\n self.CalculateAndSetHeaderHeight()\r\n self._header_win.Refresh()\r\n \r\n if self._main_win:\r\n return self._main_win.SetFont(font)\r\n else:\r\n return False", "def update_editor ( self ):\n super( ReadonlyFontEditor, self ).update_editor()\n set_font( self )", "def SetActiveFontFile(self,path):\n\t\tself.acad.ActiveDocument.ActiveTextStyle.fontFile=path", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )", "def is_modified_file(self, file_path: str) -> bool:\n _is_modified = bool(\n file_path in [file_path for file_path in self.changed_files]\n )\n if _is_modified:\n _change_type = self.changed_files[file_path]\n _is_modified = re.findall(r'[AMR].*', _change_type)\n return _is_modified", "def _lint(self, js):\n path = ka_root.join('javascript', 'd.js')\n self.set_file_contents(path, js)\n return i18n_lint.lint_js_files_are_translated([path])", "def update_editor ( self ):\n super( TextFontEditor, self ).update_editor()\n set_font( self )", "def is_third_party(self) -> bool:\n for third_party_import_string 
in self.third_party_import_strings:\n if self.source.startswith(third_party_import_string):\n return True\n\n return False", "def loadCharacter(self,file_path) -> bool:\n\t\tself.sheet = CharacterSheet()\n\t\tprint(f\"Loading an existing character from file.\\nSource file: {file_path}\")\n\t\tself.sheet.load(file_path)\n\t\tprint(f\"Successfully loaded character '{self.sheet.choice_names['Name']}' from file!\")\n\t\treturn True", "def dirty(self) -> bool:\n return len(self.detect_changed_files()) != 0", "def is_outdated(compiler_suite: str, grammar_source: str) -> bool:\n try:\n _, grammar, _, _ = load_compiler_suite(compiler_suite)\n return grammar_changed(grammar(), grammar_source)\n except ValueError:\n return True", "def load_font(self, filename: str) -> None:\n try:\n from fontTools import ttLib\n except:\n raise ImportError(\n 'You need to install library fonttools to add new fonts: '\n 'pip install fonttools'\n )\n self.filename = str(Path(filename))\n self.font = ttLib.TTFont(self.filename)\n\n # TODO: cmap needs to be modifiedfor this to work\n self.cmap = self.font['cmap'].getcmap(3,1).cmap\n self.glyph_set = self.font.getGlyphSet()\n\n self.font_descriptor = self._get_font_descriptor()", "def check_txt(txtpath):\r\n if not os.path.isfile(txtpath) :\r\n return False\r\n nowTime = time.time()\r\n ageTime = nowTime - 20\r\n fileTime = os.path.getmtime(txtpath)\r\n if fileTime > ageTime:\r\n return True\r\n return False", "def check(filepath: str) -> bool:\n\n logging.debug(filepath, extra=dict(status=\"checking\"))\n\n try:\n with open(filepath) as f:\n toml.load(f)\n except toml.TomlDecodeError as err:\n logging.error(filepath, extra=dict(status=err.msg))\n return False\n\n logging.info(filepath, extra=dict(status=\"ok\"))\n return True", "def has_changed(self):\n timestamp = os.stat(self.filename).st_mtime\n if timestamp > self.last_timestamp:\n self.last_timestamp = timestamp\n return True\n return False", "def preliminary_file_check(self):\n\n if self.has_error():\n return False\n\n if not self.filepath:\n self.add_error(\"A file was specified!\")\n return False\n\n if not isfile(self.filepath):\n self.add_error(\"The file was not found: %s\" % basename(self.filepath))\n return False\n\n if getsize(self.filepath) < 1:\n self.add_error(\"The file is empty (no bytes): %s\" % basename(self.filepath))\n return False\n\n if self.file_ext in ['xls', 'xlsx']:\n self.is_excel = True\n\n return True", "def refresh(self):\n hasChanged = self.hasChanged()\n if hasChanged: self.loadIni()\n if len(self.loadFiles) > 255:\n del self.loadFiles[255:]\n self.safeSave()\n return hasChanged", "def process_cell(path, cell):\n buttons_replaced = process_buttons(path, cell)\n load_magic_replaced = process_load_magic(path, cell)\n if buttons_replaced or load_magic_replaced:\n modified_cell = True\n else:\n modified_cell = False\n return modified_cell", "def process(self, source_path: pathlib.Path) -> bool:", "def _IsResourceFile(self, path):\n\n raise NotImplementedError", "def IsOk(*args, **kwargs):\n return _gdi_.Font_IsOk(*args, **kwargs)", "def _check_extension(self):\n if self.extension in Config.override_ext:\n expected_mimetype = Config.override_ext[self.extension]\n else:\n expected_mimetype, encoding = mimetypes.guess_type(self.src_path,\n strict=False)\n if expected_mimetype in Config.aliases:\n expected_mimetype = Config.aliases[expected_mimetype]\n is_known_extension = self.extension in mimetypes.types_map.keys()\n if is_known_extension and expected_mimetype != self.mimetype:\n # LOG: 
improve this string\n self.make_dangerous('expected_mimetype')", "def isApplied(self):\n return self.file in settings['mosh.resourceReplacer.applied']", "def __editorConfigChanged(self, editor):\n fn = editor.getFileName()\n line, pos = editor.getCursorPosition()\n enc = editor.getEncoding()\n lang = editor.getLanguage()\n eol = editor.getEolIndicator()\n zoom = editor.getZoom()\n self.__setSbFile(\n fn, line + 1, pos, encoding=enc, language=lang, eol=eol, zoom=zoom)\n self._checkActions(editor, False)", "def has_file(self) -> bool:\n return self._file is not None", "def parsed_in_original(self, filep: Optional[str]) -> bool:\n if not filep:\n return False # pragma: no cover\n return self._parsed_by_parser_paths(filep, self.existing_paths)", "def from_dir_changed(self):\n text = self.from_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set source_path = (?) where id is 1', text)\n all_files = self.get_all_files_from_path(text, extension='PDF')\n self.pdf_files = self.make_all_files_dictionary(all_files)\n\n if not self.pdf_files:\n return\n\n self.reset_widgets(all=True)\n self.draw_pdf_files()", "def has_resource_changed(self, resource):\n logger.debug(\"Checking for changes in %s\" % resource)\n self.load_template_if_needed()\n self.load_site_if_needed()\n\n target = File(self.site.config.deploy_root_path.child(\n resource.relative_deploy_path))\n if not target.exists or target.older_than(resource.source_file):\n logger.debug(\"Found changes in %s\" % resource)\n return True\n if resource.source_file.is_binary:\n logger.debug(\"No Changes found in %s\" % resource)\n return False\n if self.site.config.needs_refresh() or \\\n not target.has_changed_since(self.site.config.last_modified):\n logger.debug(\"Site configuration changed\")\n return True\n\n deps = self.get_dependencies(resource)\n if not deps or None in deps:\n logger.debug(\"No changes found in %s\" % resource)\n return False\n content = self.site.content.source_folder\n layout = Folder(self.site.sitepath).child_folder('layout')\n logger.debug(\"Checking for changes in dependents:%s\" % deps)\n for dep in deps:\n if not dep:\n return True\n source = File(content.child(dep))\n if not source.exists:\n source = File(layout.child(dep))\n if not source.exists:\n return True\n if target.older_than(source):\n return True\n logger.debug(\"No changes found in %s\" % resource)\n return False", "def test_metadata_contains_current_font(self):\n\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n\n is_canonical = False\n for font_metadata in fm.fonts:\n font = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)\n\n _weights = []\n for value, intvalue in weights.items():\n if intvalue == font.OS2_usWeightClass:\n _weights.append(value)\n\n for w in _weights:\n current_font = \"%s %s\" % (font.familyname, w)\n if font_metadata.full_name != current_font:\n is_canonical = True\n\n if not is_canonical:\n v = map(lambda x: font.familyname + ' ' + x, _weights)\n msg = 'Canonical name in font expected: [%s] but %s'\n self.fail(msg % (v, font_metadata.full_name))", "def isfile (self, path):\r\n pass", "def needs_reinit(self):\n current_cmake_generator = self.config.get(\"cmake_generator\")\n stored_cmake_generator = self._stored_config.get(\"cmake_generator\")\n return ((current_cmake_generator != stored_cmake_generator) or\n not self.has_stored_config_file())", "def checkDirty(self, editor, autosave=False):\n if editor.isModified():\n fn = editor.getFileName()\n # 
ignore the dirty status, if there is more than one open editor\n # for the same file\n if fn and self.getOpenEditorCount(fn) > 1:\n return True\n \n if fn is None:\n fn = editor.getNoName()\n autosave = False\n if autosave:\n res = editor.saveFile()\n else:\n res = E5MessageBox.okToClearData(\n self.ui,\n QCoreApplication.translate('ViewManager', \"File Modified\"),\n QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<p>The file <b>{0}</b> has unsaved changes.</p>\"\"\")\n .format(fn),\n editor.saveFile)\n if res:\n self.setEditorName(editor, editor.getFileName())\n return res\n \n return True", "def correct_font(source_name, unhinted_name, target_font_name, family_name):\n\n font = ttLib.TTFont(source_name)\n unhinted = ttLib.TTFont(unhinted_name)\n\n # apply web-specific fixes before shared, so that sub/family names are\n # correct for black weights and their bold bits will be set\n apply_web_specific_fixes(font, unhinted, family_name)\n temporary_touchups.apply_temporary_fixes(font)\n temporary_touchups.update_version_and_revision(font)\n font.save(target_font_name)", "def _processed_filepath(self, filepath):\n processed = False\n if filepath in self.processed_filepaths.values():\n processed = True\n\n return processed", "def _verify_original_file_path(self):\n if not isinstance(self._original_file_path, bytes):\n self._original_file_path = Path(self._original_file_path).expanduser()\n if not self._original_file_path.exists():\n raise FileNotFoundError(FILE_NOT_FOUND_ERROR.format(self._original_file_path))\n\n self._original_file_name = self._original_file_path.name", "def source_changed(source, cache):\n return os.path.getmtime(source)>os.path.getmtime(cache)", "def needs_refreshing(filepath):\n today = datetime.date.today()\n year = today.year - 2000 # Obviously does not work prior to 2000\n if today.month <= 6:\n current_season = str(year - 1) + str(year)\n else:\n current_season = str(year) + str(year + 1)\n return (current_season in filepath and\n last_modified_date(filepath) != today)", "def _verify_original_file_type(self, file_name):\n self._original_file_path = file_name\n\n available_instrument_types = INSTRUMENT_TO_TYPE_MAP[self._instrument]\n\n instrument_file_type = None\n\n # Check if file contains any of the necessary identifiers\n for available_type in available_instrument_types:\n for identifier in available_instrument_types[available_type]:\n if (search(identifier, self._original_file_path)) or (search(identifier, self._original_file_path)):\n instrument_file_type = available_type\n\n if not instrument_file_type:\n raise ValueError(INVALID_FILE_TYPE_ERROR.format(self._original_file_path, self._instrument))\n else:\n self._original_file_type = instrument_file_type", "def did_change(ls, params: DidChangeTextDocumentParams):\n _validate(ls, params)", "def should_use_onnx_loader(self, disable_custom_outputs=None):\n tmp_script = Script()\n inp_loader = \"check_needs_modify\"\n needs_modify = self._get_modify_onnx_loader(tmp_script, inp_loader, disable_custom_outputs) != inp_loader\n needs_shape_inference = (\n self.onnx_shape_inference_args is not None and self.onnx_shape_inference_args.do_shape_inference\n )\n needs_save = self.onnx_save_args is not None and self.onnx_save_args.path is not None\n # Currently, other loaders do not support external data, so we must fall back to the ONNX loader if it's present.\n return (\n not self.model_args.model_type.is_onnx()\n or needs_modify\n or self.load_external_data\n or needs_shape_inference\n or needs_save\n )", "def 
on_file_changed(self, path):\n\t\tpass", "def download_update(filepath: str = \"words.txt\") -> bool:\n fpath = Path(filepath)\n if fpath.exists():\n if fpath.is_dir():\n raise ValueError(\"Filepath is a folder, not a file\")\n return False\n\n data = requests.get(\n \"https://raw.githubusercontent.com/dwyl/english-words/master/words_alpha.txt\",\n stream=True,\n )\n data.raise_for_status()\n size = int(data.headers.get(\"content-length\", 0))\n with tqdm(\n total=size, unit=\"iB\", unit_scale=True, desc=\"Downloading words.txt\"\n ) as t:\n with fpath.open(mode=\"w+\") as f:\n for chunk in data.iter_content(BSIZE):\n t.update(len(chunk))\n f.write(chunk.decode(\"UTF-8\"))\n return True", "def proc_file(self, file_name: str, differential: bool) -> bool:\n self.file_name = file_name\n self.jsonf = jsonasobj.load(open(file_name))\n defn_type = 'differential' if differential else 'snapshot'\n if defn_type not in self.jsonf:\n log_warning(\"'%s' not found\" % defn_type, self.file_name)\n return False\n\n # If we are dealing with a constrained type:\n # 1) Record the fact that the base type is referenced from this file\n # 2) Create a map from the constrained elements back to the base\n constrained_type = {} # map from base type to constraint name\n if self.jsonf.get('type') == 'constraint':\n for element in self.jsonf.differential.element:\n if 'name' in element:\n assert '[x]' not in element.path, \"Renaming a parameterized type is too wierd\"\n path_name = PathElement.name_for(element, element.type[0])\n self.entries.setdefault(path_name,\n PathElement(self.file_name, self.namespaces)).add_file_ref(file_name)\n constrained_type[path_name] = element.name\n\n # Iterate over the element constraints\n for element in [e for e in self.jsonf[defn_type].element if 'name' in e]:\n if element.name in self.entries:\n log_warning('Name %s is referenced multiple times' % element.name, self.file_name)\n self.entries[element.name] = PathElement(self.file_name, self.namespaces)\n self.entries[element.name].defining_file(element.name, file_name)\n self.entries[element.name].constraints[element.path] = Properties(self.file_name)\n if 'path' in element:\n self.path_map[element.path] = element.name\n\n for element in [e for e in self.jsonf[defn_type].element if 'name' not in e]:\n if 'type' in element and len(element.type) > 1:\n log_warning(\"%s has multiple types and is not a choice or reference union\" % element.path)\n self.add_to_parent(element, constrained_type, file_name)\n return len(self.jsonf[defn_type].element) > 0", "def _is_prebuilt(self, cfg, patch_idx, prefix=\"PREFIX\"):\n ext = None\n dir = None\n\n if (cfg.load_models_dir is None):\n return False\n\n # Get the unique lookup file path\n fpath = self._get_unique_lookup_filepath(patch_idx, cfg.load_models_dir, prefix, NNModel._M_FILE_EXT)\n\n # Check the existence of the file\n if not os.path.isfile(fpath):\n raise Exception('Model file does not exist: {0}'.format(fpath))\n\n return True", "def is_new_file(self):\n return self.filename is None", "def modify_input_file(filepath, updated_file_list):\n lines = 0 # current input line number\n file_changed = False # the file has changed\n\n # find and change matching lines\n pattern = re.compile(\"[Cc]opyright\")\n with open(filepath, mode='r', encoding='utf-8', newline='') as file_in:\n for line in file_in:\n lines += 1\n if pattern.search(line) and __old_date in line:\n line = line.replace(__old_date, __new_date)\n file_changed = True\n updated_file_list.append(line)\n return file_changed", "def 
check(self, evidence, path_on_disk):\n return True", "def is_trained(self) -> bool:", "def compare_to_file(editor, filepath):\n with open(filepath) as f:\n gps_assert(editor.get_chars().strip(), f.read().strip())", "def load(board: Board) -> bool:\r\n\r\n file_name = filedialog.askopenfilename(\r\n initialdir = os.getcwd(), title = 'Select file', \r\n filetypes = (('Text files','*.txt'),('All files','*.*'))\r\n )\r\n\r\n if not file_name:\r\n return False\r\n\r\n try:\r\n f = open(file_name,'r')\r\n contents = f.read()\r\n f.close()\r\n except OSError:\r\n messagebox.showinfo(message = 'Could not read the file ' + file_name + '.')\r\n return False\r\n \r\n if not board.read_from_string(contents):\r\n messagebox.showinfo(message = 'You have chosen wrong or a damaged file.')\r\n return False\r\n\r\n return True", "def is_file(self):\n return self.type == \"file\"", "def confirm_file(self,fn):\n\t\t\n\t\tif self.autoreload: \n\t\t\tprint '[STATUS] autoreloading %s'%fn\n\t\t\treturn True\n\t\telse:\n\t\t\tcaller = sys._getframe().f_back.f_code.co_name\n\t\t\tprint \"[STATUS] function %s found %s\"%(caller,fn)\n\t\t\tans = raw_input('[QUESTION] is this file valid else quit (y/N)? ')\n\t\t\tif re.match('^(y|Y)',ans): return True\n\t\t\telse: raise Exception('\\n[ERROR] file was invalid and must be deleted manually:\\n%s'%fn)\n\t\t\t#---! may want to later allow a file deletion if the user says the file is invalid\t\t", "def fix_revision(font):\n version = font_data.font_version(font)\n match = re.match(r'Version (\\d{1,5})\\.(\\d{1,5})', version)\n major_version = match.group(1)\n minor_version = match.group(2)\n\n accuracy = len(minor_version)\n font_revision = font_data.printable_font_revision(font, accuracy)\n expected_font_revision = major_version+'.'+minor_version\n if font_revision != expected_font_revision:\n font['head'].fontRevision = float(expected_font_revision)\n print('Fixed fontRevision to %s' % expected_font_revision)\n return True\n\n return False", "def test_verify_changed_source_file_adjust_mtime(self):\n\n # Get the atime and mtime of the file\n file_info = os.stat('testfiles/various_file_types/executable')\n\n # Set the atime and mtime of the file to the time that we collected, as on some systems\n # the times from a stat call don't match what a utime will set.\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Set the atime and mtime for the file back to what it was prior to the edit\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])", "def shell_font_changed(self, font):\n self.set_font(font)", "def test_font_on_disk_family_equal_in_metadata_json(self):\n contents = self.read_metadata_contents()\n metadata = Metadata.get_family_metadata(contents)\n\n unmatched_fonts = []\n for font_metadata in metadata.fonts:\n try:\n font = Font.get_ttfont_from_metadata(self.operator.path,\n font_metadata)\n except IOError:\n continue\n if font.familyname != font_metadata.name:\n unmatched_fonts.append(font_metadata.filename)\n\n if unmatched_fonts:\n msg = 'Unmatched family name are in fonts: {}'\n self.fail(msg.format(', '.join(unmatched_fonts)))", "def 
pdb_file_valid(pdb_file_name, user_rand):\n dcd_file = \"media/files/\" + user_rand + '/' + \"scr_for_checks.dcd\"\n\n fix_not_needed = True\n try:\n scr_for_checks(pdb_file_name, user_rand)\n except Exception as e:\n # print(str(e))\n fix_not_needed = False\n finally:\n if os.path.exists(dcd_file):\n os.remove(dcd_file)\n\n if fix_not_needed:\n return True\n\n try:\n fix_pdb(pdb_file_name)\n scr_for_checks(pdb_file_name, user_rand)\n except Exception as e:\n print(str(e))\n return False\n\n return True", "def update_bool(file_path):\n with open(\n file_path, 'r'\n ) as the_result_file_from_spark_for_read_and_abbr_not_allowed_by_pylint:\n content = the_result_file_from_spark_for_read_and_abbr_not_allowed_by_pylint.read(\n )\n update = content.replace('true', 'True').replace('false', 'False')\n with open(\n file_path,\n 'w') as the_result_file_from_spark_for_write_and_abbr_not_allowed:\n the_result_file_from_spark_for_write_and_abbr_not_allowed.write(update)", "def should_run(self):\n # from IPython.html.tasks.py\n\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n\n # check if any .less files are newer than the generated targets\n for dirpath, dirnames, filenames in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n\n return False" ]
[ "0.5081905", "0.50366616", "0.50216615", "0.49829918", "0.49263966", "0.49156058", "0.4806569", "0.47662523", "0.47610694", "0.47367972", "0.47354025", "0.47352326", "0.47022414", "0.47002628", "0.46964857", "0.46519396", "0.46338332", "0.46322864", "0.46244994", "0.4612329", "0.4601962", "0.4596516", "0.45854932", "0.4584008", "0.45715988", "0.4561401", "0.4558571", "0.4543767", "0.45284027", "0.45258027", "0.44952604", "0.44921732", "0.44898742", "0.4489068", "0.44855377", "0.4477554", "0.44765952", "0.44753426", "0.4474694", "0.44664374", "0.44663656", "0.4459215", "0.44585997", "0.4448884", "0.44473004", "0.44392225", "0.4437213", "0.44242325", "0.44210625", "0.44197637", "0.43995807", "0.43937778", "0.43918347", "0.4383398", "0.43787178", "0.43743515", "0.4373282", "0.43709654", "0.4370767", "0.43631238", "0.4358545", "0.43570355", "0.43532404", "0.4351169", "0.43458232", "0.4343333", "0.4337983", "0.43361923", "0.4326091", "0.4316012", "0.43139413", "0.4293777", "0.42932934", "0.42664644", "0.42626852", "0.42611116", "0.42577696", "0.42472774", "0.42322168", "0.4218759", "0.42181656", "0.42119995", "0.420951", "0.42086133", "0.42074597", "0.42057633", "0.42039672", "0.42025885", "0.41966367", "0.41835308", "0.41828096", "0.41815436", "0.4180612", "0.41795617", "0.4178828", "0.41762388", "0.41746125", "0.41716748", "0.41689202", "0.41678154" ]
0.64111775
0
The signal handler for the 'triggered' signal.
def onTriggered(self, action):
    if action.isCheckable() and action.isChecked():
        if self.isExclusive():
            last = self._last_checked
            if last is not None and last is not action:
                last.setChecked(False)
            self._last_checked = action
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def _action_triggered(self, action, checked=False):\n self._emit_signal_for_action(self.action_triggered, action, checked)", "def on_action_11_triggered(self):\n # TODO: not implemented yet\n exit(0)", "def on_action_Quit_triggered(self):\n quit()", "def trigger(self, type, event):", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def signal(self):\n pass", "def on_actionQuit_triggered(self):\n\t\texit()", "def on_action_10_triggered(self):\n # TODO: not implemented yet\n self.showNormal()", "def signal_oi(self):\n pass", "def signal(self, args):\n pass", "def is_triggered(self) -> bool:\n raise NotImplementedError()", "def signal(self):\n self.mainloop().signal()", "def signal(sig, action): # real signature unknown; restored from __doc__\n pass", "def _on_event(self, event) -> None:\n self.signal.emit(event)", "def __call__(self, trigger, type, event):", "def _signal_handler(*args):\n self._user_exit = True", "def on_action_triggered(self):\n # TODO: not implemented yet\n print('打开')\n my_button_open = QMessageBox.about(self, '打开', '点击我打开某些文件')", "def on_action(self, connection, event):\r\n print('[{}] OnAction from {}' .format(event.type.upper(), event.source))", "def handler(signum, frame):\n m.signal()", "def sigint_handler(*args):\n Qt.QApplication.quit()", "def post(self, event, *args, **kwargs):\n self.inq.Signal((event, args, kwargs))", "def signal_handler(self, signal, frame):\r\n print 'You pressed Ctrl+C!'\r\n sys.exit(0)", "def analogueTriggerChangeHandler(val):\n print(\"Analogue Trigger Value Changed: {}\".format(val) )", "def trigger_signal(self, signal: str) -> None:\n logger.debug(\"Triggered Signal %s\", signal)\n for handler in self.signals[signal]:\n if not iscoroutinefunction(handler):\n handler(self, signal)", "def _emit_signal_for_action(self, signal, action, *args):\n index = self.get_index(action)\n if index and index.isValid():\n signal.emit(index, *args)", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def _signal_handler(*_: typing.Any) -> None:\n shutdown_event.set()", "def signal_handler(self, signum, frame):\n if signum == signal.SIGINT:\n self.terminate = True\n elif signum == signal.SIGALRM:\n self.button_handler(self.BUTTON_PIN)", "def on_action_triggered(self):\n # TODO: not implemented yet\n button=QMessageBox.about(self, '帮助','这只是个摆设23333')", "def on_action_4_triggered(self):\n # TODO: not implemented yet\n model = self.model2\n self.doExport(model)\n #raise NotImplementedError", "def on_action_QT_triggered(self):\n # TODO: not implemented yet\n print('关于qt')\n my_button_about_QT = QMessageBox.aboutQt(self, '关于QT')", "def connect_signals(self):\n self.logger.debug(\"Connect signals\")\n # connect special menu action signals\n self.actionNew.triggered.connect(self.new_project)\n self.actionOpen.triggered.connect(self.open_project)\n self.actionClose.triggered.connect(self._parent.close_project)\n self.actionQuit.triggered.connect(self.close)\n self.actionSave.triggered.connect(self._parent.save_project)\n self.actionSettings.triggered.connect(self.settingsCtr.ui.exec)\n self.actionShowLog.triggered.connect(self.showLogRequested.emit)\n self.actionSetRights.triggered.connect(self._parent.do_setrights)\n 
self.actionInstall.triggered.connect(self.quickinstall)\n self.actionUpload.triggered.connect(self.upload)\n\n self.actionSaveAs.triggered.connect(self.not_working)\n self.actionRecent.triggered.connect(self.not_working)\n self.actionScheduler.triggered.connect(self.not_working)\n\n self.actionUninstall.triggered.connect(self._parent.show_quickuninstall)\n\n self.actionDeploy.triggered.connect(self.not_working)\n\n self.actionBundleCreation.triggered.connect(self.not_working)\n self.actionDepotManager.triggered.connect(self.not_working)\n self.actionStartWinst.triggered.connect(self.not_working)\n self.actionScriptEditor.triggered.connect(self.not_working)\n self.actionHelp.triggered.connect(self.not_working)\n self.actionSearchForUpdates.triggered.connect(self.not_working)\n self.actionShowChangeLog.triggered.connect(self.not_working)\n self.actionAbout.triggered.connect(self.not_working)\n\n # buttons\n self.btnSave.clicked.connect(self._parent.save_project)\n self.btnChangelogEdit.clicked.connect(self._parent.open_changelog_editor)\n self.btnShowScrStruct.clicked.connect(self._parent.show_script_structure)\n\n self.btnScrSetup.clicked.connect(lambda: self.select_script_dialog(\"setup\"))\n self.btnScrUninstall.clicked.connect(lambda: self.select_script_dialog(\"uninstall\"))\n self.btnScrUpdate.clicked.connect(lambda: self.select_script_dialog(\"update\"))\n self.btnScrAlways.clicked.connect(lambda: self.select_script_dialog(\"always\"))\n self.btnScrOnce.clicked.connect(lambda: self.select_script_dialog(\"once\"))\n self.btnScrCustom.clicked.connect(lambda: self.select_script_dialog(\"custom\"))\n self.btnScrUserLogin.clicked.connect(lambda: self.select_script_dialog(\"userlogin\"))\n self.btnScrSetupDel.clicked.connect(lambda: self.select_script_dialog(\"setup\", False))\n self.btnScrUninstallDel.clicked.connect(lambda: self.select_script_dialog(\"uninstall\", False))\n self.btnScrUpdateDel.clicked.connect(lambda: self.select_script_dialog(\"update\", False))\n self.btnScrAlwaysDel.clicked.connect(lambda: self.select_script_dialog(\"always\", False))\n self.btnScrOnceDel.clicked.connect(lambda: self.select_script_dialog(\"once\", False))\n self.btnScrCustomDel.clicked.connect(lambda: self.select_script_dialog(\"custom\", False))\n self.btnScrUserLoginDel.clicked.connect(lambda: self.select_script_dialog(\"userlogin\", False))\n self.btnScrSetupEdit.clicked.connect(self.not_working)\n self.btnScrUninstallEdit.clicked.connect(self.not_working)\n self.btnScrUpdateEdit.clicked.connect(self.not_working)\n self.btnScrAlwaysEdit.clicked.connect(self.not_working)\n self.btnScrOnceEdit.clicked.connect(self.not_working)\n self.btnScrCustomEdit.clicked.connect(self.not_working)\n self.btnScrUserLoginEdit.clicked.connect(self.not_working)\n\n self.btnBuild.clicked.connect(self._parent.build_project)\n self.btnInstall.clicked.connect(self._parent.do_install)\n self.btnInstSetup.clicked.connect(self._parent.do_installsetup)\n self.btnUninstall.clicked.connect(self._parent.do_uninstall)\n self.btnDevFolder.clicked.connect(self.open_project_folder)\n\n self.btnDepAdd.clicked.connect(self._parent.add_dependency)\n self.btnDepModify.clicked.connect(self.submit_dependencies)\n self.btnDepDelete.clicked.connect(lambda a: self._parent.remove_dependency(self.tblDependencies.selectionModel().currentIndex().row()))\n\n self.btnPropAdd.clicked.connect(self._parent.add_property)\n self.btnPropModify.clicked.connect(self.submit_properties)\n self.btnPropDelete.clicked.connect(lambda a: 
self._parent.remove_property(self.tblProperties.selectionModel().currentIndex().row()))\n self.btnPropRead.clicked.connect(self._parent.get_properties_from_scripts)\n\n self.tblProperties.setModel(self._parent.model_properties)\n self.tblDependencies.setModel(self._parent.model_dependencies)\n self.tblDependencies.selectionModel().selectionChanged.connect(self.update_dependency_fields)\n self.tblProperties.selectionModel().selectionChanged.connect(self.update_property_fields)\n\n self._parent.modelDataUpdated.connect(self.reset_datamapper_and_display)\n self._parent.msgSend.connect(self.set_statbar_text)\n self._parent.processingStarted.connect(self.splash.show)\n self._parent.processingEnded.connect(self.splash.close)\n self._parent.processingEnded.connect(self.set_button_state)", "def signal_handler(self, signum):\n raise Exception(\"Caught signal {0}\".format(signum))", "def signal(self, emission, signal, source):\n logger.info(\"tele2.py:signal() emmision: %s, signal: %s, source: %s\", \n str(emission), str(signal), str(source))\n self.window.emit('back')", "def signal_handler(self, signal_number, frame):\n sys.exit(0)", "def _connect_signals(self):\n\n # File menu\n self.openConfigAction.triggered.connect(self.inputDataModule.on_open_config_clicked)\n self.saveConfigAction.triggered.connect(self.inputDataModule.on_save_config_clicked)\n self.openStatsAction.triggered.connect(self.on_open_stats_file_triggered)\n self.openSpectrogramsAction.triggered.connect(self.on_open_spectrograms_file)\n\n # View menu\n # self.showPlotScreen.triggered.connect(self.view_mod_stats_screening)\n\n # Process menu\n self.processScreeningAction.triggered.connect(self.process_screening)\n self.runIntegrationAction.triggered.connect(self.process_ts_integration)\n self.calcSeascatterAction.triggered.connect(self.calc_seascatter)\n self.calcTFAction.triggered.connect(self.calc_transfer_functions)\n self.calcFatigueAction.triggered.connect(self.calc_fatigue)\n\n # Plot settings menu\n # self.add2HIcon.triggered.connect(self.add_2h_icon)\n self.loggerPlotSettingsAction.triggered.connect(self.on_logger_plot_settings_triggered)\n self.spectPlotSettingsAction.triggered.connect(self.on_spect_plot_settings_triggered)\n\n # Export menu\n self.exportScatterDiagAction.triggered.connect(self.on_export_scatter_diagram_triggered)\n\n # Azure menu\n self.azureSettingsAction.triggered.connect(self.on_azure_account_settings_triggered)\n\n # Filter settings menu\n self.filterSettingsAction.triggered.connect(self.on_filter_settings_triggered)\n\n # Help menu\n self.helpAction.triggered.connect(self.show_help)\n self.aboutAction.triggered.connect(self.show_about)\n\n # Toolbar dashboard buttons\n self.projConfigButton.clicked.connect(self.view_proj_config_mod)\n self.rawDataButton.clicked.connect(self.view_mod_raw_data)\n self.dataQualityButton.clicked.connect(self.view_mod_data_quality)\n self.statsScreeningButton.clicked.connect(self.view_mod_stats_screening)\n self.spectralScreeningButton.clicked.connect(self.view_mod_spectral_screening)\n self.histogramsButton.clicked.connect(self.view_mod_histograms)\n self.seascatterButton.clicked.connect(self.view_mod_seascatter)\n self.transFuncsButton.clicked.connect(self.view_mod_transfer_functions)\n self.fatigueButton.clicked.connect(self.view_mod_fatigue)", "def _connectSignals(self) -> None:\n self._documentsButton.pressed.connect(lambda: self._openPath(path=Path.home().joinpath('Documents')))\n self._desktopButton.pressed.connect(lambda: 
self._openPath(path=Path.home().joinpath('Desktop')))\n\n # File tab actions\n self._openAction.triggered.connect(lambda: self._openPath(self._mainFileView.selectedIndexes()))\n self._newFolderAction.triggered.connect(lambda: self._addRowToModel(True))\n self._newFileAction.triggered.connect(lambda: self._addRowToModel(False))\n self._exitAction.triggered.connect(self.close)\n\n # Edit tab actions\n self._copyFileAction.triggered.connect(lambda: self._copyFile(self._mainFileView.selectedIndexes()))\n self._copyPathAction.triggered.connect(lambda: self._copyPath(self._mainFileView.selectedIndexes()))\n self._cutAction.triggered.connect(lambda: self._copyFile(self._mainFileView.selectedIndexes(), cut=True))\n self._pasteFileAction.triggered.connect(self._pasteFile)\n self._renameAction.triggered.connect(lambda: self._renameTrigger(self._mainFileView.selectedIndexes()))\n self._bulkRenameAction.triggered.connect(lambda: self._bulkRename(self._mainFileView.selectedIndexes()))\n self._deleteAction.triggered.connect(lambda: self._deleteItem(self._mainFileView.selectedIndexes()))\n\n # View tab actions\n self._refreshAction.triggered.connect(self._listDirectories)\n self._sortAction.triggered.connect(self._sortHandler)\n\n # Navigation buttons' actions\n self._goBackAction.triggered.connect(self._goBack)\n self._goForwardAction.triggered.connect(self._goForward)\n self._goUpAction.triggered.connect(self._goUp)\n\n # path opening from main file view, side file view and address bar\n self._mainFileView.doubleClicked.connect(lambda: self._openPath(self._mainFileView.selectedIndexes()))\n self._sideFileView.clicked.connect(lambda: self._openPath(self._sideFileView.selectedIndexes(), False))\n self._addressBar.returnPressed.connect(lambda: self._openPath(path=Path(self._addressBar.text())))\n\n # auto-adjustment of side file view width\n self._sideFileView.expanded.connect(lambda: self._sideFileView.resizeColumnToContents(0))\n\n # handling item edit\n self._mainFileView.itemDelegate().closeEditor.connect(self._editHandler)\n\n # drag and drop functionality\n self._mainFileView.itemDropped.connect(self._dropMove)\n\n # View tab filter\n self._filterField.textChanged.connect(self._listDirectories)\n self._filterField.returnPressed.connect(lambda: self._listDirectories(self._filterField.text()))", "def set_signal(self):\n eprint(\"Signal caught, ending log...\")\n self.log_sig = True", "def signal_handler(self, signal, frame):\n logger.info('CTRL+C pressed')\n self.trigger_stop()", "def clickEvent(self):\n self.emit(QtCore.SIGNAL('activated(QString &)'), self.text())", "def mousePressEvent(self, event: QMouseEvent):\n if event.button() == Qt.LeftButton:\n # trigger scene signal\n self.scene().node_clicked.emit(self.metadata)", "def onActionChosen(self, agent, action):\n\n pass", "def emit_signal(self, *args): \n\tif len(args) > 0:\n \tprint 'Emitting ' + args[0]\n \tout = 'Arguments: '\n \tfor i in range(len(args)-1):\n\t\t out += str(args[i+1])\n\t\tprint out\n \tself.emit(QtCore.SIGNAL(args[0]), *args)", "def action(self):\n self.action_thread = Thread(target=self._action_then_signal, daemon=True)\n self.action_thread.start()", "def on_action_2_triggered(self):\n # TODO: not implemented yet\n print('关闭')\n sys.exit(0)", "def connectSignals(self):\n QtCore.QObject.connect(self.ui.main_tabs, QtCore.SIGNAL(\"currentChanged(int)\"),\n self.onTabChanged)\n QtCore.QObject.connect(self.ui.main_edit, QtCore.SIGNAL(\"textChanged()\"),\n self.onTextChanged)\n\n QtCore.QObject.connect(self.ui.actionNew_File, 
QtCore.SIGNAL(\"triggered()\"),\n self.newFile)\n QtCore.QObject.connect(self.ui.actionOpen_File, QtCore.SIGNAL(\"triggered()\"),\n self.openFile)\n QtCore.QObject.connect(self.ui.actionSave, QtCore.SIGNAL(\"triggered()\"),\n self.saveFile)\n QtCore.QObject.connect(self.ui.actionPrint, QtCore.SIGNAL(\"triggered()\"),\n self.printFile)\n QtCore.QObject.connect(self.ui.actionExit, QtCore.SIGNAL(\"triggered()\"),\n lambda: self.quit('data/recent_files.txt'))\n\n QtCore.QObject.connect(self.ui.actionCopy, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.copy)\n QtCore.QObject.connect(self.ui.actionCut, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.cut)\n QtCore.QObject.connect(self.ui.actionPaste, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.paste)\n QtCore.QObject.connect(self.ui.actionRedo, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.redo)\n QtCore.QObject.connect(self.ui.actionUndo, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.undo)\n QtCore.QObject.connect(self.ui.actionPreferences, QtCore.SIGNAL(\"triggered()\"),\n self.createPrefWindow)\n\n QtCore.QObject.connect(self.ui.actionAbout, QtCore.SIGNAL(\"triggered()\"),\n self.createAboutWindow)\n QtCore.QObject.connect(self.ui.actionHelp, QtCore.SIGNAL(\"triggered()\"),\n self.createHelpWindow)", "def fire(self):\n self.space.actionflag.fire(self)", "def on_action_QT_triggered(self):\n # TODO: not implemented yet\n button=QMessageBox.aboutQt(self, '介绍Qt')", "def fire(self, sender, argument=None):\n for handler in self.__handlers:\n handler(sender, argument)", "def signal_handler(sig_num, frame):\n global exit_flag\n logger.warn('Signal Recieved: {}'.format(str(sig_num)))\n if sig_num:\n exit_flag = True", "def signal_handler(sig, frame):\r\n print('You pressed Control+C')\r\n led.off()\r\n sys.exit(0)", "def on_action_About_triggered(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def signal_handler(self, signum, frame):\n self._running = False", "def signal_handler(signal, frame):\n print()\n endProgram(0)", "def handler(signum, frame):\n print(\"Signal handler called with signal %i\" % signum)\n sys.exit(-1)", "def use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i included\" % frame_selected)\n item.setBackground(self.background_included)\n item.setForeground(QtGui.QColor(0, 0, 0))\n self.index_included[index_selected] = True\n self.frame_selector.setPhoto(self.frame_index)", "def is_triggered(self):\n return (\n self.is_triggered_fire()\n or self.is_triggered_gas()\n or self.is_triggered_police()\n )", "def signalReceived (self, signal): \n raise NotImplementedError(\"Lack of signalReceived method\")", "def signal_from_widget(self, event):\n self.keyPressEvent(event)", "def action_signal(context, action_id, value):\n return IMPL.action_signal(context, action_id, value)", "def is_triggered_fire(self):\n return self == ArmingState.ALARMING_FIRE_SMOKE", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def sigint_handler(*args):\n # if QMessageBox.question(None, '', \"Are you sure you want to quit?\",\n # QMessageBox.Yes | QMessageBox.No,\n # QMessageBox.No) == QMessageBox.Yes:\n QApplication.quit()", "def signal_handler(sig, frame):\n raise ExitException()", "def signal_handler(self, signal, frame):\n logger.error(\"Received Signal to Terminate\")\n 
self.set_lcd_brightness(self.DIM_SHUT)\n ui.runui = False\n # sys.exit(0)", "def dispatch(self, sender, event, *args, **kwargs):\n pass # pragma: no cover", "def handle_event(self, event):", "def sigint_handler(*args):\n sys.stderr.write('\\r')\n QtGui.QApplication.quit()", "def handle_event(self, event):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def caught_signal(\n self, signals: int, max_signals: int, executor: \"TaskGraphExecutor\"\n ) -> None:", "def catchall_signal_handler(*args, **kwargs): \n print(\"Caught signal (in catchall handler) \" + kwargs['dbus_interface'] + \".\" + kwargs['member'])\n for arg in args:\n print(\" \" + str(arg))", "def requeueHandler(self, signum, frame):\n args = self.args\n print('Signal received', signum, time.time(), flush=True)\n self.SIGNAL_RECEIVED = True\n\n if os.path.isfile(self.HALT_filename):\n print('Job is done, exiting', flush=True)\n exit(0)", "def on_action_5_triggered(self):\n # TODO: not implemented yet\n print('最小化')\n self.showMinimized()", "def trigger(self):\n self._kill = False\n self._trigger.set()", "def event_trigger(self, event):\n return False", "def event_trigger(self, event):\n return False", "def on_action_triggered(self):\n #self.model.clear()\n #self.model.setFilter(u\"代码 = '000002'\")\n #self.model.select()\n # TODO: not implemented yet\n #self.tableView_2.setFreezeNum(2)\n QMessageBox.warning(self,'warning', u\"权限不够\")\n #print str(self.model.data(self.model.index(1, 0)).toString().toUtf8())\n #raise NotImplementedError", "def _emitSheetChangedSignal(self):\r\n # self.sheetChangedSignal.emit(\"sheet\", self.sheetSelect.currentIndex())\r\n self.sheetChangedSignal.emit(self.sheetSelect.currentIndex())", "def signal_handler(signum, frame):\n main.CLOSE = True", "def handler(signum, frame):\n logging.warning(\"Got a {} signal. 
Doing nothing\".format(signum))", "def chooseAction(self):\n print \"nothing\"\n pass", "def _press(self, event):", "def checksignals(self):\n if self.space.check_signal_action is not None:\n self.space.check_signal_action.perform(self, None)", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def on_event(self, event):\r\n pass", "def signalSetup(self):\n self.ui.b_info.clicked.connect(self.showInfo)\n self.ui.b_save.clicked.connect(self.openSave)\n self.ui.b_vid.clicked.connect(self.openVideo)\n self.ui.b_run.clicked.connect(self.startRun)\n self.ui.b_colour.clicked.connect(self.pickColour)\n self.ui.b_ground_truth.clicked.connect(self.openGroundTruth)\n\n self.ui.t_fps.textChanged.connect(self.changeFps)\n self.ui.t_low.editingFinished.connect(self.changeLow)\n self.ui.t_high.editingFinished.connect(self.changeHigh)\n self.ui.c_error_plot.stateChanged.connect(self.checkFiles)\n self.ui.c_speed_plot.stateChanged.connect(self.checkFiles)\n self.ui.c_crash_plot.stateChanged.connect(self.checkFiles)\n self.ui.combo_superpixel.currentIndexChanged.connect(\n self.changeSuperPixelMethod\n )\n self.ui.c_optimize.stateChanged.connect(self.checkFiles)\n self.ui.c_draw.stateChanged.connect(self.checkFiles)\n self.ui.c_velocity.stateChanged.connect(self.checkFiles)\n self.ui.c_object_detection.stateChanged.connect(self.checkFiles)", "def handle_event(self, event, window):\n pass", "def fire_trigger(self, trigger):\n if not self.exists():\n return\n if trigger in self.events:\n for action in self.events[trigger]:\n action(requestor=self)", "def send(signal, *args, **kwargs):\n _dispatcher.send(signal=signal, *args, **kwargs)", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def on_event(self, event):", "def signal_handler(signum, frame):\n sys.exit(0)" ]
[ "0.79236376", "0.6559447", "0.64768285", "0.62798786", "0.61472255", "0.611165", "0.60122555", "0.59837323", "0.59460413", "0.5918585", "0.5875592", "0.576396", "0.5753168", "0.5750694", "0.5705757", "0.5685465", "0.56775683", "0.5605326", "0.55946964", "0.5547762", "0.55368906", "0.5524707", "0.55228597", "0.5481648", "0.5478292", "0.54713774", "0.54315525", "0.5430888", "0.54298824", "0.53814864", "0.53531003", "0.53463125", "0.5342966", "0.5334278", "0.5332969", "0.5330035", "0.53257895", "0.531677", "0.53070986", "0.52973115", "0.5280456", "0.52792126", "0.5245474", "0.52335227", "0.522653", "0.5226414", "0.5201463", "0.5201045", "0.5189964", "0.51877284", "0.51871204", "0.5184178", "0.5172781", "0.51685876", "0.516767", "0.516357", "0.5151142", "0.5140838", "0.51376444", "0.5132298", "0.5125821", "0.5121342", "0.5100679", "0.5100679", "0.5100679", "0.5100679", "0.51004964", "0.5091419", "0.5076626", "0.5075616", "0.50746953", "0.5061438", "0.5056801", "0.5052648", "0.50506556", "0.50388277", "0.5031087", "0.5017881", "0.50135314", "0.49988365", "0.49988365", "0.49962866", "0.49955714", "0.49952233", "0.4983241", "0.4973439", "0.49717352", "0.49696547", "0.4966731", "0.4966731", "0.4966731", "0.4966731", "0.4966731", "0.4961444", "0.4958726", "0.494373", "0.49425092", "0.4937269", "0.49291232", "0.4928075", "0.49276152" ]
0.0
-1
Set the exclusive state of the action group.
def setExclusive(self, exclusive):
    super(QCustomActionGroup, self).setExclusive(exclusive)
    if exclusive:
        last = self._last_checked
        if last is not None:
            last.setChecked(True)
            for action in self.actions():
                if action is not last:
                    action.setChecked(False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def action_lock(self):\n self.state = 'locked'", "def set_exclusive(self, exclusive):\n self.widget.setExclusive(exclusive)", "def set_exclusive_active(self, name):\n self.set_all_inactive()\n self.set_active(name)", "def disable(self):\n if self.active != DISABLED:\n self.uimanager.remove_ui(self.active)\n self.uimanager.remove_action_group(self.action_group)\n self.active = DISABLED", "def setLocked( self, state = True ):\n self._xLocked = state\n self._yLocked = state", "def _set_action_enabled(self, action, index):\n action.setEnabled(index.flags() & QtCore.Qt.ItemIsEnabled)", "def disable(self, index):\n self._action(index, StateVariable.enable, missingok=False, value=False)", "def set_exclusive_mouse(self, exclusive):\n super(Window, self).set_exclusive_mouse(exclusive)\n self.exclusive = exclusive", "def setXLocked( self, state = True ):\n self._xLocked = state", "def set_status(self, locked=None, exclusive=None):\n self.locked = locked\n self.exclusive = exclusive", "def _disabled(self, *args, **kwargs):\n raise TypeError(\"'%s' does not support mutable operations.\" %\n self.__class__)", "def _disabled(self, *args, **kwargs):\n raise TypeError(\"'%s' does not support mutable operations.\" %\n self.__class__.__name__)", "def setReadOnly(self, state: bool) -> None:\n ...", "def action_draft(self):\n self.state = 'draft'", "def action_draft(self):\n self.state = 'draft'", "def setDisableWithLayer( self, state ):\n self._disableWithLayer = state\n self.setDirty()", "def select_action(self, state):\n pass", "def set_state(self):\n self.able = not self.able\n self.save()", "def onTriggered(self, action):\n if action.isCheckable() and action.isChecked():\n if self.isExclusive():\n last = self._last_checked\n if last is not None and last is not action:\n last.setChecked(False)\n self._last_checked = action", "def lock_gate(self):\n self.fsm_gate.clear()", "def set_channel_group(self, channel_group):\n super().set_channel_group(channel_group)\n self.skip_flags = self.flagspace.all_flags() # everything but 0", "def action_space(self, state) -> set:\n return {0, 1} # Actions independent of state", "def select_action(self, state):", "def set_inactive(self, name):\n # log.debug(\"ObjectCollection.set_inactive()\")\n\n obj = self.get_by_name(name)\n item = obj.item\n group = self.group_items[obj.kind]\n\n group_index = self.index(group.row(), 0, QtCore.QModelIndex())\n item_index = self.index(item.row(), 0, group_index)\n\n self.view.selectionModel().select(item_index, QtCore.QItemSelectionModel.Deselect)", "def enable(self):\n self.uimanager.insert_action_group(self.action_group, 1)\n self.active = self.uimanager.add_ui_from_string(self.ui)\n self.uimanager.ensure_update()", "def __setstate__(self, state):\n state['_lock'] = Lock()\n self.__dict__.update(state)", "def set_state(self, state):\n if self.state == CHANNEL_MOVE_STATE_NONE:\n self.state = state", "def set_state( self ):", "def __setstate__(self, state):\n return None", "def set_state(self, state: int):", "def set_Off(self):\n if not(self._locked):\n self.__dict__['statusOn']=False\n self._undo_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "def set_immutable(self):\n self._mutable = False", "def set_state(self, state, input_index, **kwargs):\n if state == WAITING and input_index == 1:\n self.state = state", "def set_locked(self, *args):\n return _ida_hexrays.vdui_t_set_locked(self, *args)", "def set_inactive(self):\n self.active = False", "def unlock_gate(self):\n self.fsm_gate.set()", "def 
reset_state(self, group_snapshot, state):\n body = {'status': state} if state else {}\n return self._action('reset_status', group_snapshot, body)", "def disable(self):\n self.enabled = False\n self.__store(self)", "def set_active(cls, name=None):\r\n if name is None:\r\n cls.active = True\r\n cls.non_actives = {} # Clear not actives\r\n else:\r\n if name in cls.non_actives:\r\n del cls.non_actives[name]", "def toggle(self):\n self.open = not self.open", "def set_disable(self, btn, state):\n if self._disabled_buttons is None:\n self._disabled_buttons = {}\n self._disabled_buttons[btn] = state", "def set_no_longer_active(self):\n with self.redis_client.lock(\"active-lock\"):\n self.set_to_redis(\"active\", \"done\")", "def setOp(self, value):\n raise UnsupportedOperationException(\"Cannot change operator status of a block\")", "def _disable(self):\n self.enabled = False", "def test_ensure_state_untouched_if_not_necessary(self, setState):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('QE')\n setState.assert_not_called()", "def action_anular(self):\n self.write({'state': 'draft'})\n return True", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def disable_mute(self):\n self.mute = False", "def applyLock(self, pkmn):\n pkmn.actionLock = ActionLock(pkmn, \\\n pkmn.lastAction, self.turns-1)", "def async_update_group_state(self) -> None:\n self._state = None\n self._async_update_group_state()", "def leave_group(self):\n\t\tself.sendMessage(ID_CTRL + \"LEAVE\", True)\n\t\tself.joinstate = 0\n\t\tself.createstate = 0\n\t\tself.__key = None", "def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)", "def toggle_selected(self):\n\n self._selected = not self._selected", "def f_unlock(self):\n self._locked = False", "def is_exclusive(self):\n return self.exclusive", "def assignmentsCalced(self, message):\n self.assign_btn.Disable()", "def action_cancel(self):\n self.state = 'canceled'", "def disable(self):\r\n self.update(enabled=False)", "def standby(self):\n self._state = STATE_STANDBY", "def standby(self):\n self._state = STATE_STANDBY", "def _enable_disable_gui(self, state):\r\n self.mainWidget.standbyPushButton.setDisabled(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)", "def setAction(self, value):\n return self._set(action=value)", "def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group", "def disable(self):", "def ToggleLock(self, event):\n pass", "def setMask(self, other=None):\n if other is None:\n self.call('setMask', '')\n else:\n self.call('setMask', other.groupName)", "def cambiar_verde(self):\r\n self.morado.setDisabled(True)", "def update(self, state, action):\n\n if self._selected_room is not None:\n return", "def setLocked(self, value):\n for attr in self._filter():\n attr.setLocked(value)", "def set_state(self, 
state):\n return self.update(current_state=state)", "def pause(self):\n self.entry['state']=DGG.DISABLED\n self.ignoreAll()", "def toggle(self):\n if self._state in [STATE_OFF, STATE_IDLE, STATE_STANDBY]:\n self._state = STATE_ON\n else:\n self._state = STATE_OFF", "def mutually_exclusive(*funcs, **kwargs):\n # in argparse, mutually exclusive groups ignore the description\n return group(None, *funcs, mutually_exclusive=True, **kwargs)", "def toggle(self) -> None:\n ...", "def send_state(self, action):\r\n self._write_message(\r\n '{\"action\":' + str(int(action)) + ',\"resetGame\":false}')", "def disable(self):\n self.error_code = 'DISABLED'\n self.running = False", "def _switch_group_right(group_id, action, value, workspace, request_user):\n group = group_api.get_group_by_id(group_id)\n\n if action == workspace_constants.ACTION_READ:\n if value:\n workspace_api.add_group_read_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_read_access_to_workspace(workspace, group, request_user)\n elif action == workspace_constants.ACTION_WRITE:\n if value:\n workspace_api.add_group_write_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_write_access_to_workspace(workspace, group, request_user)", "def set_graceful_lock(self, state):\n self.graceful_state = state\n self.reset_update_lock()\n self.lock_update()", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def _act_task_checked(self, iden, b):\n if b:\n self.data.turn_on(iden)\n else:\n self.data.turn_off()", "async def async_turn_on_off(self, state: bool) -> None:\n await self._cluster_handler.write_attributes_safe(\n {self._zcl_attribute: not state if self.inverted else state}\n )\n self.async_write_ha_state()", "def set_state(self, state):\n self.state = state\n self.config(fill=self.state)", "def toggled(self, *args, **kwargs): # real signature unknown\n pass", "def disableEditing(self, disable):\n self.disabled = disable", "def disable():\n boutonPierre[\"state\"] = \"disabled\"\n boutonFeuille[\"state\"] = \"disabled\"\n boutonCiseaux[\"state\"] = \"disabled\"", "def __setstate__(self, state):\n\n self.set(DER = state)", "def begin_not_undoable_action(self):\n self.not_undoable_action = True", "def disable(self):\n self._enabled = False", "def reset_status(self, context, group, status):\n context.authorize(gp_action_policy.RESET_STATUS, target_obj=group)\n field = {'updated_at': timeutils.utcnow(),\n 'status': status}\n group.update(field)\n group.save()", "def set_item_lock_state(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/SetLockState/\"))" ]
[ "0.63360256", "0.6294087", "0.6230919", "0.5686868", "0.56087244", "0.5578127", "0.5569583", "0.5548554", "0.54780567", "0.5418087", "0.5412914", "0.54105514", "0.53850436", "0.535912", "0.535912", "0.5358296", "0.52563006", "0.52535677", "0.5228041", "0.5220662", "0.52197784", "0.5184978", "0.5182204", "0.517202", "0.5165645", "0.5162471", "0.51589423", "0.5156909", "0.51428294", "0.5142129", "0.51405424", "0.51325184", "0.5132454", "0.51267755", "0.50988644", "0.50977165", "0.50869244", "0.5085839", "0.5066113", "0.5058451", "0.5041453", "0.50206685", "0.5012619", "0.50045186", "0.50037146", "0.49959648", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.49897358", "0.4985535", "0.4984915", "0.49780616", "0.49723592", "0.49723202", "0.49419788", "0.49386817", "0.49345693", "0.491698", "0.4904296", "0.48910925", "0.4883309", "0.4883309", "0.48795614", "0.4878229", "0.48670954", "0.486184", "0.48590708", "0.48532882", "0.48482117", "0.48471624", "0.48458847", "0.48281735", "0.48217458", "0.4820207", "0.48191652", "0.48162216", "0.4808316", "0.48040482", "0.47968054", "0.47932756", "0.47881132", "0.4787447", "0.4787276", "0.47843072", "0.47708794", "0.477062", "0.47691274", "0.47683927", "0.47631273", "0.4760184", "0.47588488", "0.47508574" ]
0.77546895
0
Create the underlying action group widget.
def create_widget(self):
    self.widget = QCustomActionGroup(self.parent_widget())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_action(self, *args, **kwargs):\n action_group = kwargs.pop('action_group', None)\n act = QAction(*args, **kwargs)\n if action_group:\n act.setActionGroup(action_group)\n\n return act", "def init_layout(self):\n super(QtActionGroup, self).init_layout()\n widget = self.widget\n for action in self.actions():\n widget.addAction(action)", "def actionGroup(self, QDesignerFormWindowManagerInterface_ActionGroup): # real signature unknown; restored from __doc__\n pass", "def create_widget(self):\n pass", "def widget(self, request, group):", "def create_action(self, parent):\n return QtGui.QAction(parent)", "def create_actions(self):\n dgct = self.curve_toolbar.addAction\n\n agfe = dgct(QtGui.QIcon(\":/greys/greys/full_extent.svg\"),\n \"Full extent graph\"\n )\n self.action_graph_full_extent = agfe\n\n agr = dgct(QtGui.QIcon(\":/greys/greys/reset.svg\"),\n \"Reset graph parameters\"\n )\n self.action_graph_reset = agr\n\n spacer = QtGui.QWidget()\n spacer.setSizePolicy(QtGui.QSizePolicy.Expanding,\n QtGui.QSizePolicy.Expanding)\n self.curve_toolbar.addWidget(spacer)\n\n act_name = \"Instantaneous performance\"\n self.action_fps_display = QtGui.QAction(act_name, self)\n self.curve_toolbar.addAction(self.action_fps_display)\n\n # Remove the placeholder toolbar\n self.ui.toolBar_GraphControls.setVisible(False)", "def create_actions_section(self):\n section = self.sections['Actions:']\n\n AuthButton(section,\n command=partial(self.new_instance_and_window, Nobleman),\n text='Add new lord',\n state=self.sdb_file_exists()).pack(side=LEFT)\n\n TkButton(section, command=self.load_data, text='Reload data',\n state=self.sdb_file_exists()).pack(side=LEFT, padx=100)\n TkButton(section, command=self.map._build_world, text='Build world',\n state=self.sdb_file_exists()).pack(side=LEFT, padx=100)\n TkButton(section, command=self.save_lords, text='Save data',\n state=self.sdb_file_exists()).pack(side=LEFT, padx=100)\n\n AuthButton(section,\n command=partial(self.new_instance_and_window, Location),\n text='Add new location',\n state=self.sdb_file_exists()).pack(side=RIGHT)", "def createOptionsGroup(self):\n self.groupBox = QGroupBox(self.model.get_title())\n self.groupBox.setAlignment(4)\n\n self.load_button = QtGui.QPushButton()\n self.close_button = QtGui.QPushButton()\n\n self.l1 = QLabel(\"Channel: \" + str(self.model.get_channel()))\n self.spin_box = QSpinBox()\n self.spin_box.setMinimumHeight(22)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.l1)\n vbox.addWidget(self.spin_box)\n vbox.addWidget(self.load_button)\n vbox.addWidget(self.close_button)\n\n self.load_button.setText(\"Load...\")\n self.close_button.setText(\"Close\")\n\n #USE EEG DISPLAY CONTROLLER TO HAVE THE Models LOAD ITS DATA\n loader = DataController(self.model)\n self.load_button.clicked.connect(loader)\n loader.title.connect(self.set_title)\n\n #LET THE MODEL COMMUNICATE IT'S DEAD\n self.close_button.clicked.connect(self.delete)\n\n #Use spin box to switch through channels\n self.spin_box.valueChanged.connect(self.model.set_channel)\n self.spin_box.valueChanged.connect(self.set_channel)\n\n vbox.addStretch(1)\n self.groupBox.setLayout(vbox)\n\n return self.groupBox", "def create_widgets(self):", "def create_widgets( self ):", "def create_widget(self):\n self.widget = wxDockPane(self.parent_widget())", "def _create_buttons(self, layout, title, group):\n\n imports = grid = None\n row = column = 0\n\n for module_name in sorted(self._metadata.keys()):\n if self._metadata[module_name].group != group:\n continue\n\n if imports is 
None:\n imports = QGroupBox(title)\n grid = QGridLayout()\n\n b = ModuleButton(module_name)\n b.explicitly_required_changed.connect(self._module_toggled)\n\n self._buttons[module_name] = b\n grid.addWidget(b, row, column)\n\n column += 1\n if column == 5:\n row += 1\n column = 0\n\n if imports is not None:\n imports.setLayout(grid)\n layout.addWidget(imports)", "def put_in_groupbox(widget, title):\n box = QtGui.QGroupBox(title)\n layout = QtGui.QHBoxLayout(box)\n layout.addWidget(widget)\n return box", "def request_group_create():\n return Response(render_template('admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/create\"),\n mimetype='text/html')", "def new(cls, gtk_action, parent):\n # This code is similar to code in the loader, investigate\n # if we can use more code reusage\n name = gtk_action.get_name()\n label = gtk_action.get_property('label')\n short_label = gtk_action.get_property('short-label')\n is_important = gtk_action.get_property('is-important')\n tooltip = gtk_action.get_property('tooltip')\n stock_id = gtk_action.get_property('stock-id') or None\n gaction = cls(parent, name, label, short_label, is_important,\n tooltip, stock_id)\n\n # check if it has accelerator\n accel_entry = gtk.accel_map_lookup_entry('<Actions>/%s/%s' %\n (parent.name, name))\n if accel_entry:\n key, modifier = accel_entry\n if key != 0:\n gaction.accelerator = gtk.accelerator_name(key, modifier)\n\n # check if it has signal handler\n callback = gtk_action.get_data('handler')\n if callback:\n gaction.callback = callback\n\n return gaction", "def createGroupMenu(dash_instance):\n\tgroupLabel = html.Label(id='GroupLabel', \n\t\t\t\t\t\t\tstyle=dict(fontWeight='bold', width=50, marginLeft='100', marginRight='20', marginBottom='1px', display='block'), \n\t\t\t\t\t\t\tchildren='Groups: ') \n\tgroupOptions = getGroupOptions(allGroupInfo)\n\tgroupDropDown = dcc.Dropdown(id=\"GroupDropDown\", \n\t options=groupOptions,\n\t\t\t\t\t\t\t\t style=dict(width=350, marginRight='50', display='block'), \n\t\t\t\t\t\t\t\t placeholder='Select a group...', clearable=True, value='')\n\n\treturn groupLabel, groupDropDown", "def __create_button_groups(self):\n button_groups = dbc.ButtonGroup(\n [\n dbc.Button(\"Total\", active=True, id=\"rp_btn_total\"),\n dbc.Button(\"New\", id=\"rp_btn_new\"),\n ],\n size=\"md\",\n className=\"mr-1\",\n )\n return button_groups", "def createWidget(self, parent):\n raise NotImplementedError()", "def _create_group_form_box(self) -> None:\n self.form_group_box = QGroupBox(\"Creating symmetric key\")\n layout = QFormLayout()\n layout.addRow(QLabel(\"Algorithm:\"), self.algorithm_combobox)\n self.form_group_box.setLayout(layout)", "def build(self):\n with self.set_master(sticky=\"nsew\", row_weights=[1], column_weights=[0, 1], auto_columns=0):\n self.build_category_canvas()\n with self.set_master(sticky=\"nsew\", row_weights=[0, 1, 0], column_weights=[1, 1]):\n self.build_previous_range_button(row=0, column=0)\n self.build_hidden_fields_checkbutton(row=0, column=1)\n with self.set_master(sticky=\"nsew\", row=1, column=0, row_weights=[1], column_weights=[1]):\n self.build_entry_frame()\n with self.set_master(sticky=\"nsew\", row=1, column=1, row_weights=[1], column_weights=[1]):\n self.build_field_frame()\n self.build_next_range_button(row=2, column=0)", "def build_ui_widget(self):\n if self._tool_actions:\n multi_button_layout = QtWidgets.QHBoxLayout()\n multi_button_layout.setContentsMargins(0, 0, 0, 0)\n for name, func in 
self._tool_actions.items():\n btn = ui_utils.ContentResizeButton(name)\n btn.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)\n\n btn.clicked.connect(partial(self._run, func))\n multi_button_layout.addWidget(btn)\n\n multi_button_widget = QtWidgets.QWidget()\n multi_button_widget.setLayout(multi_button_layout)\n main_widget = multi_button_widget\n else:\n btn = ui_utils.ContentResizeButton(\"{}\".format(self.TOOL_NAME))\n btn.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)\n btn.clicked.connect(self._run)\n\n # set Icon on button\n if self.ICON:\n # if it's a string, assume it's a path to an icon image\n if isinstance(self.ICON, str):\n self.ICON = QtGui.QIcon(self.ICON)\n btn.setIcon(self.ICON)\n\n main_widget = btn\n\n return main_widget", "def on_action_widget_open(self, value):\n _log.debug('widget_open %s', value)\n obj: QtWidgets.QWidget = None\n floating = False\n unique_id = None\n args = []\n kwargs = {}\n if isinstance(value, dict):\n floating = bool(value.get('floating', False))\n spec = value['value']\n args = value.get('args', args)\n kwargs = value.get('kwargs', kwargs)\n else:\n spec = value\n if isinstance(spec, str):\n cls_unique_id = get_unique_id(spec)\n if ':' in spec:\n unique_id = spec\n cls_unique_id = unique_id.split(':')[0]\n spec = get_instance(cls_unique_id, default=None)\n if isinstance(spec, type):\n obj = spec(*args, **kwargs)\n else:\n obj = spec\n pubsub_singleton.register(obj, unique_id=unique_id, parent=self)\n unique_id = obj.unique_id\n obj.setObjectName(unique_id)\n obj.dock_widget = DockWidget(obj)\n obj.dock_widget.setObjectName(f'{unique_id}__dock')\n tab_widget = obj.dock_widget.tabWidget()\n tab_widget.setElideMode(QtCore.Qt.TextElideMode.ElideNone)\n self._dock_manager.addDockWidget(QtAds.TopDockWidgetArea, obj.dock_widget)\n pubsub_singleton.publish('registry/style/actions/!render', unique_id)\n if floating:\n dw = obj.dock_widget\n dw.setFloating()\n c = dw.floatingDockContainer()\n c.resize(800, 600)\n if getattr(obj, 'view_skip_undo', False):\n return None\n else:\n return [['registry/view/actions/!widget_close', unique_id],\n ['registry/view/actions/!widget_open', unique_id]]", "def createWidgets(self):\n raise NotImplementedError", "def __init__(\n self,\n group,\n controls):\n self.group = group\n self.controls = controls", "def _setupMetricsGroup(self):\r\n metricsGroupLayout = QVBoxLayout()\r\n self.metricListWidget = QListWidget() # metric names added dynamically from data when loaded\r\n self.metricListWidget.setSelectionMode(QAbstractItemView.MultiSelection) # able to select multiple metrics\r\n metricsGroupLayout.addWidget(self.metricListWidget)\r\n\r\n buttonLayout = QHBoxLayout()\r\n self.selectAllButton = QPushButton(\"Select All\")\r\n self.clearAllButton = QPushButton(\"Clear All\")\r\n self.selectAllButton.clicked.connect(self.selectAll)\r\n self.clearAllButton.clicked.connect(self.clearAll)\r\n buttonLayout.addWidget(self.selectAllButton, 50)\r\n buttonLayout.addWidget(self.clearAllButton, 50)\r\n metricsGroupLayout.addLayout(buttonLayout)\r\n\r\n return metricsGroupLayout", "def group(*args, show: bool = True, parent: str = \"\", before: str = \"\", width: int = 0, pos=[],\n horizontal: bool = False, horizontal_spacing: float = -1.0, id:str='', indent=-1):\n try:\n widget = internal_dpg.add_group(*args, show=show, parent=parent, before=before, width=width,\n horizontal=horizontal, horizontal_spacing=horizontal_spacing, id=id,\n indent=indent, pos=pos)\n 
internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def __create_containers_list_action(self):\n containers_list = QAction(QIcon(self.path + '/../resources/icons/16x16/list.png'), 'Show containers list.',\n self)\n containers_list.setObjectName('containers_list')\n containers_list.triggered.connect(self.on_containers_list_triggered)\n self.addAction(containers_list)", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def populate_menu(self):\n # TODO : Performance issue ?\n self.showGroupMenu.clear()\n self.addGroupDataMenu.clear()\n counter = 0\n for group_name in self.group_data.keys():\n counter +=1\n exec('self.groupAct' + str(counter) + ' = QAction(\"' + group_name+'\", self)')\n eval('self.groupAct' + str(counter) + '.triggered.connect(partial(self.load_group, group_name))')\n exec('self.groupAddAct' + str(counter) + ' = QAction(\"' + group_name+'\", self)')\n eval('self.groupAddAct' + str(counter) + '.triggered.connect(partial(self.add_group_data, group_name))')\n self.showGroupMenu.addAction(eval('self.groupAct' + str(counter)))\n self.addGroupDataMenu.addAction(eval('self.groupAddAct' + str(counter)))", "def create_widget(self):\n item = QNodeItem(self)\n self.widget = item", "def createActions(self, window):\n menu_bar = window.qwindow().menuBar()\n tools_menu = find_menu(menu_bar, \"tools\")\n experimental_menu = find_menu(tools_menu, \"experimental\")\n if experimental_menu is None:\n experimental_menu = create_menu(\"experimental\", i18n(\"Experimental\"), parent=tools_menu)\n tools_menu.addAction(experimental_menu.menuAction())\n\n # add action \"instance\"\n experimental_menu.addAction(self._activate_arc_welding_action)", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def createButtonsOnInterface(self, dlg):\n #reclassification dictionary made from the field setup file\n self.reclassificationDict = dlg.makeReclassificationDict()\n #button size defined by the user\n self.size = dlg.slider.value()\n #check if the button must be grouped by category\n withTabs = dlg.checkBox.isChecked()\n #actual button creation step\n self.createButtons(self.reclassificationDict, withTabs)", "def create(self, parent):\n self.widget = QtCore.QObject(parent)", "def _createGroupBox(self):\n\n boxLayout = QVBoxLayout()\n groupBox = QGroupBox('Columns')\n\n # Make buttons exclusive, note tht group is member,\n # since we need to avoid it being garbage collected\n self.buttonGroup = QButtonGroup()\n self.buttonGroup.setExclusive(True)\n\n # Loop through column names\n for idx, column in enumerate(self._model.session['columns']):\n\n # Create check box\n checkBox = FastDmColumnCheck(idx, self.checked, self.buttonGroup)\n checkBox.setText(column)\n\n # If column for current already set, set checked:\n if self._model.session[self._key]['name'] == column:\n checkBox.setChecked(True)\n self.checked['checked'] = self._model.session[self._key]['idx']\n\n # If column for other set, make this checkbox disabled\n if self._key == 'RESPONSE':\n if idx == self._model.session['TIME']['idx']:\n checkBox.setEnabled(False)\n else:\n if idx == self._model.session['RESPONSE']['idx']:\n checkBox.setEnabled(False)\n\n # If is set as condition, disable\n if self._isCondition(column):\n checkBox.setEnabled(False)\n checkBox.setText(column + ' (condition)')\n\n # Add checkbox to layout\n self.buttonGroup.addButton(checkBox, idx)\n boxLayout.addWidget(checkBox)\n\n 
groupBox.setLayout(boxLayout)\n return groupBox", "def createActions (self):\n self.closeTabAction = QtHelper.createAction(self, self.tr(\"Close\"), self.closeCurrentTab,\n tip = 'Closes the current document')\n self.closeAllTabAction = QtHelper.createAction(self, self.tr(\"Close All\"), self.closeAllTab, \n tip = 'Closes all document')\n\n self.newTestUnitAction = QtHelper.createAction(self, \"Test Unit\", self.newTestUnit,\n icon = QIcon(\":/%s.png\" % TestUnit.TYPE), \n tip = 'Creates a new test unit')\n self.newTestConfigAction = QtHelper.createAction(self, \"Test Config\", self.newTestConfig,\n icon = QIcon(\":/%s.png\" % TestConfig.TYPE), \n tip = 'Creates a new test config')\n self.newTestSuiteAction = QtHelper.createAction(self, \"Test Suite\", self.newTestSuite,\n icon = QIcon(\":/%s.png\" % TestSuite.TYPE), \n shortcut = \"Ctrl+N\", tip = 'Creates a new test suite')\n self.newTestPlanAction = QtHelper.createAction(self, \"Test Plan\", self.newTestPlan,\n icon = QIcon(\":/%s.png\" % TestPlan.TYPE), \n tip = 'Creates a new test plan')\n self.newTestGlobalAction = QtHelper.createAction(self, \"Test Global\", self.newTestGlobal,\n icon = QIcon(\":/%s.png\" % TestPlan.TYPE_GLOBAL), \n tip = 'Creates a new test global')\n self.newTestDataAction = QtHelper.createAction(self, \"Test Data\", self.newTestData,\n icon = QIcon(\":/%s.png\" % TestData.TYPE), \n tip = 'Creates a new test data')\n self.newAdapterAction = QtHelper.createAction(self, \"Adapter\", self.newTestAdapter,\n icon = QIcon(\":/file-adp2.png\"), tip = 'Creates a new adapter')\n self.newLibraryAction = QtHelper.createAction(self, \"Library\", self.newTestLibrary,\n icon = QIcon(\":/file-lib-adp.png\"), tip = 'Creates a new library')\n self.newTxtAction = QtHelper.createAction(self, \"Txt\", self.newTestTxt,\n icon = QIcon(\":/file-txt.png\"), tip = 'Creates a new txt')\n\n\n self.openAction = QtHelper.createAction(self, self.tr(\"Open\"), self.openDoc,\n icon = QIcon(\":/open-test.png\"), shortcut = \"Ctrl+O\", tip = 'Open')\n self.saveAction = QtHelper.createAction(self, self.tr(\"Save\"), self.saveTab, \n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/save' ),\n icon = QIcon(\":/save-test.png\"), \n tip = 'Saves the active document')\n self.saveAsAction = QtHelper.createAction(self, self.tr(\"Save As\"), self.saveTabAs,\n icon = QIcon(\":/filesave.png\"), tip = 'Saves the active document as ...')\n self.exportAsAction = QtHelper.createAction(self, self.tr(\"Export\"), self.exportTabAs,\n icon = None, tip = 'Export the active document as ...')\n\n self.saveAllAction = QtHelper.createAction(self, self.tr(\"Save all\"), self.saveAllTabs,\n icon = QIcon(\":/save_all.png\"), tip = 'Saves all documents')\n\n self.printAction = QtHelper.createAction(self, self.tr(\"Print\"), self.printDoc,\n icon = QIcon(\":/printer.png\"), tip = 'Print the current document', \n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/print' ) )\n\n self.undoAction = QtHelper.createAction(self, self.tr(\"Undo\"), callback = self.globalCallback,\n icon = QIcon(\":/undo.png\"), data='undo', \n shortcut = \"Ctrl+Z\", tip = 'Undoes the last action' )\n self.redoAction = QtHelper.createAction(self, self.tr(\"Redo\"), callback = self.globalCallback, \n icon = QIcon(\":/redo.png\"), data='redo', \n shortcut = \"Ctrl+Y\", tip = 'Redoes the previously undone action' )\n self.cutAction = QtHelper.createAction(self, self.tr(\"Cut\"), callback = self.globalCallback,\n shortcut = QKeySequence.Cut, data='cut', \n tip = 'Cuts the 
selection and puts it on the clipboard' )\n self.copyAction = QtHelper.createAction(self, self.tr(\"Copy\"), callback = self.globalCallback,\n shortcut = QKeySequence.Copy, data='copy', \n tip = 'Copies the selection and puts it on the clipboard' )\n self.copyAction.setShortcutContext(Qt.WidgetWithChildrenShortcut)\n self.pasteAction = QtHelper.createAction(self, self.tr(\"Paste\"), callback = self.globalCallback,\n data='paste', shortcut = QKeySequence.Paste, \n tip = 'Inserts clipboard contents' )\n self.pasteAction.setShortcutContext(Qt.WidgetWithChildrenShortcut)\n self.deleteAction = QtHelper.createAction( self, \"Delete Selection\", callback = self.globalCallback,\n data='removeSelectedText', tip = 'Deletes the selection' )\n self.commentAction = QtHelper.createAction(self, \"Comment\", callback = self.globalCallback,\n icon = QIcon(\":/comment.png\"), data='comment', \n tip = 'Insert comment sign at the begining of line' )\n self.uncommentAction = QtHelper.createAction(self, \"Uncomment\", callback = self.globalCallback,\n icon = QIcon(\":/uncomment.png\"), data='uncomment', \n tip = 'Remove comment sign at the begining of line' )\n self.selectAllAction = QtHelper.createAction(self, \"Select All\", self.globalCallback, \n QIcon(\":/select_all.png\"), data='selectAll', \n tip = 'Selects the entire document' )\n self.indentAction = QtHelper.createAction(self, \"Indent\", self.globalCallback, data='indent', \n shortcut = \"Tab\", tip = 'Indent current line or selection' )\n self.unindentAction = QtHelper.createAction(self, \"Unindent\", self.globalCallback, data='unindent', \n shortcut = \"Shift+Tab\", tip = 'Unindent current line or selection' )\n \n self.foldAllAction = QtHelper.createAction(self, \"Fold/Unfold all\", callback = self.globalCallback,\n icon = QIcon(\":/toggle-expand.png\"), \n data='foldAllLines', tip = 'Fold all lines' )\n self.codefoldingAction = QtHelper.createAction(self, \"Code Folding\", self.toggleCodeFolding, \n icon = QIcon(\":/folding.png\"), toggled = True)\n self.codefoldingAction.setChecked( self.codeFolding )\n self.whitespaceVisibilityAction = QtHelper.createAction(self, \"Show whitespace and tabulation\", \n self.toggleWhitespaceVisibility, toggled = True)\n self.whitespaceVisibilityAction.setChecked( self.whitespaceVisible )\n self.indentGuidesVisibilityAction = QtHelper.createAction(self, \"Show indentation guides\", \n self.toggleIndentGuidesVisibility, toggled = True)\n self.indentGuidesVisibilityAction.setChecked( self.indentationGuidesVisible )\n self.linesNumberingAction = QtHelper.createAction(self, \"Line Numbering\", self.toggleLineNumbering, \n toggled = True)\n self.linesNumberingAction.setChecked( self.linesNumbering )\n self.codeWrappingAction = QtHelper.createAction(self, \"Code Wrapping\", self.toggleCodeWrapping, \n icon = None, toggled = True)\n self.codeWrappingAction.setChecked( self.codeWrapping )\n \n \n self.runAction = QtHelper.createAction(self, \"Execute\", self.runDocument,\n tip = 'Executes the current test', icon=QIcon(\":/test-play.png\") )\n \n self.runNowAction = QtHelper.createAction(self, \"Immediately\", self.runDocument,\n tip = 'Executes the current test',\n shortcut=Settings.instance().readValue( key = 'KeyboardShorcuts/run' ) )\n self.runMinimizeAction = QtHelper.createAction(self, \"Immediately + Minimize\", self.runDocumentMinimize,\n tip = 'Executes the current test and minimize the application' )\n self.runReduceAction = QtHelper.createAction(self, \"Immediately + Reduce\", self.runDocumentReduce,\n tip = 
'Executes the current test and reduce the application' )\n self.runBackgroundAction = QtHelper.createAction(self, \"Background\", self.runDocumentInBackground,\n tip = 'Executes the current test in background')\n \n self.runDebugAction = QtHelper.createAction(self, \"&Debug\", self.runDocumentDebug,\n tip = 'Executes the current test with debug traces on server' )\n self.runWithoutNotifAction = QtHelper.createAction(self, \"&Without notifications\", self.runDocumentWithoutNotif,\n tip = 'Executes the current test without mail notifications' )\n self.runNoKeepTrAction = QtHelper.createAction(self, \"&Do not keep test result\", self.runDocumentNoKeepTr,\n tip = 'Do not keep test result on archive' )\n\n self.runSchedAction = QtHelper.createAction(self, self.tr(\"Schedule\"), self.schedRunDocument,\n icon = QIcon(\":/schedule.png\"), \n tip = self.tr('Scheduling a run of the current tab') )\n \n self.runSeveralAction = QtHelper.createAction(self, self.tr(\"Grouped\"), self.runSeveralTests,\n icon = QIcon(\":/test-play-several.png\"), tip = self.tr('Run several tests') )\n self.runSeveralAction.setEnabled(False)\n\n self.runStepByStepAction = QtHelper.createAction(self, \"Steps\", self.runDocumentStepByStep,\n tip = 'Execute the current test step by step', \n icon=QIcon(\":/run-state.png\"),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/steps' ) )\n self.runBreakpointAction = QtHelper.createAction(self, \"Break Point\", self.runDocumentBreakpoint,\n tip = 'Execute the current test with breakpoint', \n icon=QIcon(\":/breakpoint.png\"),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/breakpoint' ) )\n\n self.checkSyntaxAction = QtHelper.createAction(self, self.tr(\"&Syntax\"), self.checkSyntaxDocument,\n icon = QIcon(\":/check-syntax.png\"), \n tip = self.tr('Checking syntax of the current tab'),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/syntax' ) )\n self.checkDesignAction = QtHelper.createAction(self, self.tr(\"&Design\"), self.checkDesignDocument,\n icon = QIcon(\":/tds.png\"), \n tip = self.tr('Checking design of the current tab') )\n self.updateTestAction = QtHelper.createAction(self, self.tr(\"&Assistant\"), self.updateMacro,\n icon = QIcon(\":/recorder.png\") , \n tip = self.tr('Update the test with the automation assistant'),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/assistant' ) )\n \n menu1 = QMenu(self)\n menu1.addAction( self.checkSyntaxAction )\n menu1.addAction( self.checkDesignAction )\n self.checkAction = QtHelper.createAction(self, self.tr(\"Prepare\"), self.prepareDocument,\n tip = self.tr('Prepare the current test'), icon=QIcon(\":/check-syntax.png\") )\n self.checkAction.setMenu(menu1) \n\n menu3 = QMenu(self)\n menu3.addAction( self.runSchedAction )\n menu3.addAction( self.runSeveralAction )\n self.schedAction = QtHelper.createAction(self, self.tr(\"Schedule\"), self.schedRunDocument,\n tip = self.tr('Schedule a test'), icon=QIcon(\":/schedule.png\") )\n self.schedAction.setMenu(menu3) \n \n menu = QMenu(self)\n menu.addAction( self.runNowAction )\n menu.addAction( self.runBackgroundAction )\n menu.addSeparator()\n menu.addAction( self.runMinimizeAction )\n menu.addAction( self.runReduceAction )\n menu.addSeparator()\n menu.addAction( self.runWithoutNotifAction )\n menu.addAction( self.runNoKeepTrAction )\n menu.addSeparator()\n menu.addAction( self.runDebugAction )\n menu.addSeparator()\n menu.addAction( self.runStepByStepAction )\n menu.addAction( self.runBreakpointAction )\n\n 
self.runAction.setMenu(menu)\n\n self.findAction = QtHelper.createAction(self, self.tr(\"Search\"), self.searchText,\n icon = QIcon(\":/find.png\"), tip = self.tr('Search text'),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/search' ) )\n self.findAction.setChecked(True)\n \n self.setDefaultActionsValues()", "def new_group(request):\n return edit_group(request, None)", "def createWidget(self, QWidget): # real signature unknown; restored from __doc__\n pass", "def assemble_widget(self) -> widgets.Widget:\n graph_selection = self._create_layer_selection(layer_type=\"graphs\")\n map_selection = self._create_layer_selection(layer_type=\"maps\")\n view_buttons = self.create_visibility_buttons()\n\n widget = widgets.VBox(\n [\n widget_utils.create_html_header(\"Graph Selection\"),\n graph_selection,\n widget_utils.HRULE,\n widget_utils.create_html_header(\"Map Selection\"),\n map_selection,\n widget_utils.HRULE,\n widget_utils.create_html_header(\"View Selection\"),\n view_buttons,\n ]\n )\n\n return widget", "def build_view(frame, box, _view):\n\n\tif isinstance(_view, view.Switch):\n\t\tfor action in _view.get_actions():\n\t\t\tbutton = ActionButton(action, _view)\n\t\t\tbox.pack_start(button.make(frame), False, False, 0)", "def build(self):\n self.icon = 'data/icon.png'\n return CalcGridLayout()", "def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)", "def _render_results_group_operations(self):\n\n core.add_text(\n name='Group operations',\n color=self._control_text_color,\n parent=self._window_name)\n\n core.add_button(\n 'Keep newest file, delete all other duplicates', \n callback=self._delete_all_duplicate_click_hander,\n callback_data=self._duplicates_list,\n parent=self._window_name) \n\n core.add_text('', parent=self._window_name)\n\n core.add_separator(parent=self._window_name)", "def _action(self, action, consistencygroup, info=None, **kwargs):\n body = {action: info}\n self.run_hooks('modify_body_for_action', body, **kwargs)\n url = '/consistencygroups/%s/action' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def create_widget(self):\n parent = self.parent_widget()\n widget = QToolButton(parent)\n if not isinstance(parent, QToolBar):\n sp = widget.sizePolicy()\n sp.setHorizontalPolicy(QSizePolicy.Minimum)\n widget.setSizePolicy(sp)\n self.widget = widget", "def create_dockable(self, dockable_name, widget):\n pass", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def actions(self):\r\n return Actions(self)", "def define_actions(self):\n ListView.define_actions(self)\n\n self.all_action = Gtk.ActionGroup(name=self.title + \"/CitationAll\")\n self.edit_action = Gtk.ActionGroup(name=self.title + \"/CitationEdit\")\n\n self._add_action('FilterEdit', None, _('Citation Filter Editor'),\n callback=self.filter_editor,)\n self._add_action('QuickReport', None, _(\"Quick View\"), None, None, None)\n\n self._add_action_group(self.edit_action)\n self._add_action_group(self.all_action)", "def test_createGroup(self):\n tabGroup = widgets.TabGroup(u'group1', u'Group', tabs=[\n widgets.Tab(u'id4', u'Title 4', self.contentFactory)])\n tabs = 
self.tabs + [\n tabGroup,\n widgets.Tab(u'id5', u'Title 5', self.contentFactory)]\n tabView = widgets.TabView(tabs)\n self.assertEquals(\n tabView.getTabIDs(),\n [u'id1', u'id2', u'id3', u'id4', u'id5'])\n self.assertEquals(\n tabView._tabGroups,\n {u'group1': tabGroup})", "def __init_ui(self):\n self.__maximize_button.setFixedSize(31, 31)\n self.__maximize_button.setIcon(QIcon(SystemInfo.RESOURCES + 'images/buttons/maximize.svg'))\n\n self.__diagram_group.setStyleSheet(\"QGroupBox { border: 1px solid gray; background: white; }\")\n self.__diagram_layout.addWidget(self.__diagram_group)\n\n self.__button_layout = QHBoxLayout()\n self.__button_layout.addWidget(self.__start_button)\n self.__button_layout.addStretch()\n self.__button_layout.addWidget(self.__maximize_button)\n\n main_layout = QVBoxLayout()\n main_layout.addLayout(self.__button_layout, 1)\n main_layout.addLayout(self.__diagram_layout, 1)\n main_layout.addStretch(0)\n\n self.setLayout(main_layout)", "def projectMenuActions( self, action ):\n\tif ( action.text() == 'Create Project' ): \n\t self.CreateProjectWidget()", "def main(self, session: Session) -> None:\n buttons = []\n for group in groups_api.get_user_groups(session.user):\n if session.user in group.admins:\n buttons.append(self.ui.create_button_view(group.name,\n lambda s: self.show_small_menu(group,\n session)))\n self.ui.create_button_group_view(session, \"What group do you want to change?\",\n buttons).draw()", "def child_added(self, child):\n super(QtActionGroup, self).child_added(child)\n if isinstance(child, QtAction):\n self.widget.addAction(child.widget)\n parent = self.parent()\n if parent is not None:\n before = self.find_next_action(child)\n parent.widget.insertAction(before, child.widget)", "def draw(self):\n return group()", "def createOptionsGroup(self):\n #1. Create a widget (here: QGroupBox)\n self.groupBox = QGroupBox()\n self.groupBox.setAlignment(4)\n\n #2. Create a couple of elements\n self.load_button = QtGui.QPushButton()\n self.load_button.setText(\"Load...\")\n self.close_button = QtGui.QPushButton()\n self.open_dialog = QtGui.QPushButton()\n self.open_dialog.setText(\"Specify Motion ROI\")\n self.frame_forward = QtGui.QPushButton()\n self.frame_back = QtGui.QPushButton()\n self.play = QtGui.QPushButton()\n self.reverse_play = QtGui.QPushButton()\n self.stop_vid = QtGui.QPushButton()\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"./icons/frame_back.png\"))\n self.frame_back.setIcon(icon)\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"./icons/play.png\"))\n self.play.setIcon(icon)\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"./icons/stop.png\"))\n self.stop_vid.setIcon(icon)\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"./icons/frame_forward.png\"))\n self.frame_forward.setIcon(icon)\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"./icons/reverse_play.png\"))\n self.reverse_play.setIcon(icon)\n\n self.close_button.setText(\"Close\")\n self.l1 = QLabel(\"Video\")\n self.spin_box = QSpinBox()\n\n #3. Add them to a QVBoxLayout (Vertical)\n vbox = QVBoxLayout()\n vbox.addWidget(self.l1)\n vbox.addWidget(self.spin_box)\n vbox.addWidget(self.load_button)\n vbox.addWidget(self.close_button)\n vbox.addWidget(self.open_dialog)\n\n vbox.addWidget(self.stop_vid)#\n vbox.addWidget(self.play)#\n vbox.addWidget(self.reverse_play)#\n vbox.addWidget(self.frame_forward)#\n vbox.addWidget(self.frame_back)#\n vbox.addStretch(1)#Add empty QSpacerItem that pushes the buttons upwards\n\n #4. 
Add layout to widget\n self.groupBox.setLayout(vbox)\n\n #5. connect\n self.open_dialog.clicked.connect(self.choose_ROI)\n self.frame_forward.clicked.connect(self.video_model.frame_forward)\n self.frame_back.clicked.connect(self.video_model.frame_back)\n self.play.clicked.connect(self.video_model.start_play)\n self.stop_vid.clicked.connect(self.video_model.stop_play)\n self.spin_box.valueChanged.connect(self.change_video)\n\n return self.groupBox", "def create_widget(self):\n self.widget = ListView(self.get_context())", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def createOptionsGroup(self):\n #1. Create a widget (here: QGroupBox)\n self.groupBox = QGroupBox()\n self.groupBox.setAlignment(4)\n\n #2. Create a couple of elements\n self.save_to_database = QtGui.QPushButton()\n self.save_to_database.setText(\"Save to database\")\n\n self.line = QFrame()\n self.line.setFrameShape(QFrame.HLine)\n self.line.setFrameShadow(QFrame.Sunken)\n\n #Mother or child select\n\n mother_child = QtGui.QHBoxLayout()\n self.mother_btn = QRadioButton(\"Mother\")\n self.mother_btn.setChecked(True)\n self.mother = True\n mother_child.addWidget(self.mother_btn)\n self.mother_btn.toggled.connect(lambda:self.set_mother(self.mother_btn))\n\n\n self.child_btn = QRadioButton(\"Child\")\n self.child_btn.setChecked(False)\n self.child = False\n mother_child.addWidget(self.child_btn)\n self.child_btn.toggled.connect(lambda:self.set_child(self.child_btn))\n\n #Coordinates display\n self.coordinates1 = QLineEdit()\n self.coordinates1.setFixedWidth(40)\n self.coordinates2 = QLineEdit()\n self.coordinates2.setFixedWidth(40)\n self.coordinates3 = QLineEdit()\n self.coordinates3.setFixedWidth(40)\n self.coordinates4 = QLineEdit()\n self.coordinates4.setFixedWidth(40)\n\n self.title = \"Dyad: \"+str(self.video.get_dyad())+ \"\\t\\t Video: \" + str(self.video.get_camera())\n self.l1 = QLabel(self.title)\n self.l2 = QLabel(\"Comment (optional)\")\n self.l3 = QLabel(\"Coordinates\")\n self.l4 = QLabel(\"Current frame\")\n\n self.comment = QLineEdit()\n self.current_frame = QLineEdit()\n\n #3. Add them to a QVBoxLayout (Vertical)\n hbox = QtGui.QHBoxLayout()\n hbox.addWidget(self.coordinates1)\n hbox.addWidget(self.coordinates2)\n hbox.addWidget(self.coordinates3)\n hbox.addWidget(self.coordinates4)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.l1)\n vbox.addWidget(self.line)\n vbox.addWidget(self.l2)\n vbox.addWidget(self.comment)\n vbox.addLayout(mother_child)\n vbox.addWidget(self.l4)\n vbox.addWidget(self.current_frame)\n vbox.addWidget(self.l3)\n vbox.addLayout(hbox)\n vbox.addWidget(self.save_to_database)\n\n\n vbox.addStretch(1)#Add empty QSpacerItem that pushes the buttons upwards\n\n #4. 
Add layout to widget\n self.groupBox.setLayout(vbox)\n\n return self.groupBox", "def create_menu(self, menu_name, menu_actions):\r\n menu_action_group = QActionGroup(self)\r\n menu_action_group.setExclusive(True)\r\n menubar = self.menuBar()\r\n menu = menubar.addMenu(menu_name)\r\n for action in menu_actions:\r\n menu_action_group.addAction(action)\r\n menu.addAction(action)", "def action(self, action_id):\r\n return Action(self, action_id)", "def action(self, action_id):\r\n return Action(self, action_id)", "def populate_main_panel(self, action: str):\n self.main_panel.destroy()\n\n actions = dict(\n word_word=fill_word_word,\n idiom_word=fill_idiom_word,\n idiom_idiom=fill_idiom_idiom)\n\n self.main_panel = MainPanel(self.root, action, actions[action])\n self.side_panel.set_separator(action)\n self.current_action = action", "def _createGroupBox(self):\n\n boxLayout = QVBoxLayout()\n groupBox = QGroupBox('Conditions')\n\n # Only if there are more than two columns we cna have depends\n if len(self._model.session['columns']) > 2:\n # Loop through columns\n for idx, column in enumerate(self._model.session['columns']):\n\n # Create check box\n checkBox = FastDmParamCheck(column, self.depends)\n\n # If parameter already depending on column, set to true\n if column in self._model.parameters[self._key]['depends']:\n checkBox.setChecked(True)\n # If column set as RESPONSE or TIME, disable\n if idx == self._model.session['RESPONSE']['idx'] or \\\n idx == self._model.session['TIME']['idx']:\n checkBox.setEnabled(False)\n # Add label to checkbox\n checkBox.setText(column)\n boxLayout.addWidget(checkBox)\n\n groupBox.setLayout(boxLayout)\n return groupBox", "def add_argument_group(self, *args, **kwargs):\n title = kwargs.get('title', args[0])\n for group in self._action_groups:\n if group.title == title:\n return group\n group = MutableArgumentGroup(self, *args, **kwargs)\n self._action_groups.append(group)\n return group", "def create_widget(self):\n self.widget = UILabel()", "def create_command_group(\n self, name: str, *, aliases: Sequence[str] = (), help_text: str = None\n ) -> \"CommandGroup\":\n kwargs = {\"aliases\": aliases}\n if help_text:\n kwargs[\"help\"] = help_text\n group = CommandGroup(\n self._sub_parsers.add_parser(name, aliases=aliases, help=help_text),\n f\"{self._prefix}:{name}\" if self._prefix else name,\n self._handlers,\n )\n self._add_handler(group.dispatch_handler, name, aliases)\n\n return group", "def buildUI(self):\n\n if cmds.window(\"pyART_AddToCanvasWIN\", exists=True):\n cmds.deleteUI(\"pyART_AddToCanvasWIN\", wnd=True)\n\n # create the main window\n self.mainWin = QtWidgets.QMainWindow(self.pickerUI)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mainWin.setCentralWidget(self.mainWidget)\n\n # create the mainLayout\n self.layout = QtWidgets.QVBoxLayout(self.mainWidget)\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/animPicker.qss\")\n f = open(styleSheetFile, \"r\")\n self.style = f.read()\n f.close()\n\n self.mainWin.setStyleSheet(self.style)\n\n self.mainWin.setMinimumSize(QtCore.QSize(250, 400))\n self.mainWin.setMaximumSize(QtCore.QSize(250, 400))\n self.mainWin.resize(250, 400)\n\n # set qt object name\n self.mainWin.setObjectName(\"pyART_AddToCanvasWIN\")\n self.mainWin.setWindowTitle(\"Add Module To Canvas\")\n\n # label, listWidget, button\n label = QtWidgets.QLabel(\"Available Modules:\")\n label.setProperty(\"boldFont\", True)\n 
self.layout.addWidget(label)\n\n self.moduleList = QtWidgets.QListWidget()\n self.moduleList.setMaximumSize(230, 300)\n self.moduleList.setMinimumSize(230, 300)\n self.layout.addWidget(self.moduleList)\n\n # add modules to listWidget\n self.addModulesToList()\n\n # create add button\n button = QtWidgets.QPushButton(\"Add Selected To Canvas\")\n self.layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.clicked.connect(self.addSelectedToCanvas)\n\n # show ui\n self.mainWin.show()", "def __init__(self):\r\n\r\n gtk.Dialog.__init__(self, title=_(u\"RTK FMEA/FMECA Design Control and \"\r\n u\"Action Addition Assistant\"),\r\n parent=None,\r\n flags=(gtk.DIALOG_MODAL |\r\n gtk.DIALOG_DESTROY_WITH_PARENT),\r\n buttons=(gtk.STOCK_APPLY, gtk.RESPONSE_ACCEPT,\r\n gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT))\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.rdoControl = Widgets.make_option_button(None, _(u\"Add control\"))\r\n self.rdoAction = Widgets.make_option_button(self.rdoControl,\r\n _(u\"Add action\"))\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the dialog. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n self.vbox.pack_start(_fixed) # pylint: disable=E1101\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Place the widgets used to display general information. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _label = Widgets.make_label(_(u\"This is the RTK Design Control and \"\r\n u\"Action Addition Assistant. 
Enter the \"\r\n u\"information requested below and then \"\r\n u\"press 'Apply' to add a new design \"\r\n u\"control or action to the RTK Project \"\r\n u\"database.\"),\r\n width=600, height=-1, wrap=True)\r\n _fixed.put(_label, 5, 10)\r\n _y_pos = _label.size_request()[1] + 50\r\n\r\n # Set the tooltips.\r\n self.rdoControl.set_tooltip_text(_(u\"Select to add a design control \"\r\n u\"to the selected failure cause.\"))\r\n self.rdoAction.set_tooltip_text(_(u\"Select to add an Action to the \"\r\n u\"selected failure cause.\"))\r\n\r\n # Place the widgets.\r\n _fixed.put(self.rdoControl, 10, _y_pos)\r\n _fixed.put(self.rdoAction, 10, _y_pos + 35)\r\n\r\n _fixed.show_all()", "def _build(self):\n tab = self._tab\n tab.setModel(self._model)\n tab.horizontalHeader().setClickable(False)\n sig = SIG(\"sectionClicked(int)\")\n connect(tab.verticalHeader(), sig, self.remove_cond)\n if self._grp_colm==\"New_coln\":\n tab.setItemDelegate(TextDelegate(self))\n tab.setItemDelegateForColumn(1, AstGroupDelegate(self,self._grp_names1))\n\n else :\n tab.setItemDelegate(ValueDelegate(self))\n tab.setItemDelegateForColumn(0, AstGroupDelegate(self, self._grp_names))", "def create_menu(self, parent):\n menu = QtGui.QMenu(parent=parent)\n return menu.menuAction()", "def define_actions(self):\n NavigationView.define_actions(self)\n\n self.define_print_actions()\n self.ref_family = Gtk.ActionGroup(self.title + '/Selection')\n self.ref_family.add_actions([\n ('RefFamily', 'gramps-family', _('reference _Family'), None ,\n _(\"Select the family which is the reference for life ways\"),\n self.selectFamily),\n ])\n self._add_action_group(self.ref_family)", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def _setupDataGroup(self):\r\n\r\n dataGroupLayout = QVBoxLayout()\r\n\r\n self.sheetSelect = QComboBox()\r\n\r\n sliderLayout = QHBoxLayout()\r\n\r\n self.slider = QSlider(Qt.Horizontal)\r\n self.slider.setTickPosition(QSlider.TicksBelow)\r\n self.slider.setTickInterval(1)\r\n self.slider.setMinimum(1)\r\n self.slider.setMaximum(1)\r\n self.slider.valueChanged.connect(self._emitSliderSignal)\r\n\r\n self.sliderLabel = QLabel(\"\")\r\n\r\n sliderLayout.addWidget(self.slider, 9)\r\n sliderLayout.addWidget(self.sliderLabel, 1)\r\n\r\n dataGroupLayout.addWidget(QLabel(\"Select Sheet\"))\r\n dataGroupLayout.addWidget(self.sheetSelect)\r\n dataGroupLayout.addWidget(QLabel(\"Subset Failure Data\"))\r\n dataGroupLayout.addLayout(sliderLayout)\r\n\r\n return dataGroupLayout", "def add_tools(self, name, *args):\n # Take stretch out\n stretch = self._left.children()[-1]\n stretch.parent(None)\n \n # Add group of widgets\n panel = Panel(title=name, parent=self._left, flex=0)\n vbox = VBox(parent=panel)\n for widget in args:\n widget.parent(vbox)\n \n # Put stretch back in\n stretch.parent(self._left)", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def group(self, request, group_id):\n return OtterGroup(self.store, self.tenant_id,\n group_id, self.dispatcher).app.resource()", "def action(self, QDesignerFormWindowManagerInterface_Action): # real signature unknown; restored from __doc__\n pass", "def register_cli_group(name, instance):\n\n return get_component(CLIPackage.COMPONENT_NAME).register_cli_group(name, 
instance)", "def getWidget(self):", "def create_widget(self):\r\n self.root.ids.entriesBox.clear_widgets()\r\n num_place = len(self.place_list.list_places) # Determine the number of places in the list\r\n visited_place = 0\r\n for place in self.place_list.list_places: # Loop from first place to last place\r\n name = place.name\r\n # assert isinstance(place.country)\r\n country = place.country\r\n priority = place.priority\r\n visited = place.is_required\r\n display_text = self.generateDisplayText(name, country, priority,\r\n visited) # Display place's information on the widget\r\n if visited == \"n\":\r\n visited_place += 1\r\n button_color = self.getColor(visited)\r\n else:\r\n button_color = self.getColor(visited)\r\n\r\n temp_button = Button(text=display_text, id=place.name,\r\n background_color=button_color) # Mark the place visited\r\n temp_button.bind(on_release=self.press_entry) # Display message of the GUI status\r\n self.root.ids.entriesBox.add_widget(temp_button)\r\n self.message = \"To visit: {}. visited: {}\".format(num_place - visited_place,\r\n visited_place) # Display number of place visited or not visited\r", "def __call__(self, *args: FParams.args, **kwargs: FParams.kwargs) -> DAGNode:\n return self._create_task_group(TaskGroup, *args, **kwargs)", "def create_gui(self):\n\n selectors_widgets = list()\n\n for n in range(4):\n\n selectors_widgets.append(wd.Dropdown(\n options={'': 0, 'Orange': 1, 'Red': 2, 'Blue': 3, 'Green': 4},\n value=0,\n disabled=False,\n layout={'width': '148px'}\n ))\n\n self.confirm_button.on_click(\n self.create_combination_and_rate_function())\n self.user_interact.children = [self.selectors, self.confirm_button]\n\n self.selectors.children = selectors_widgets", "def create_widget(parent, control_name, control_value, trait,\n label_class=None, user_data=None):\n # Create the list widget: a frame\n frame = QtGui.QFrame(parent=parent)\n frame.setFrameShape(QtGui.QFrame.StyledPanel)\n\n # Create tools to interact with the list widget: expand or collapse -\n # add a list item - remove a list item\n tool_widget = QtGui.QWidget(parent)\n layout = QtGui.QHBoxLayout()\n layout.addStretch(1)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(2)\n tool_widget.setLayout(layout)\n # Create the tool buttons\n resize_button = QtGui.QToolButton()\n layout.addWidget(resize_button)\n # Set the tool icons\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\n _fromUtf8(\":/soma_widgets_icons/nav_down\")),\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\n resize_button.setIcon(icon)\n resize_button.setFixedSize(30, 22)\n frame.user_data = user_data\n\n editable_labels = False\n handler = getattr(trait, 'handler', trait)\n if handler.inner_traits():\n editable_labels = True\n frame.inner_trait = trait.handler.inner_traits()[0]\n\n add_button = QtGui.QToolButton()\n delete_button = QtGui.QToolButton()\n layout.addWidget(add_button)\n # Set the tool icons\n icon = QtGui.QIcon()\n icon.addPixmap(\n QtGui.QPixmap(_fromUtf8(\":/soma_widgets_icons/add\")),\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\n add_button.setIcon(icon)\n add_button.setFixedSize(30, 22)\n delete_button.setFixedSize(30, 22)\n # Add list item callback\n add_hook = partial(\n ControllerControlWidget.add_item, weak_proxy(parent),\n control_name, weak_proxy(frame))\n add_button.clicked.connect(add_hook)\n\n # Create the associated controller widget\n controller_widget = ControllerWidget(control_value, parent=frame,\n live=True,\n editable_labels=editable_labels,\n user_data=user_data)\n\n # Store some 
parameters in the list widget\n frame.trait = trait\n frame.controller = control_value\n frame.controller_widget = controller_widget\n frame.connected = False\n\n # Add the list controller widget to the list widget\n frame.setLayout(controller_widget.layout())\n\n # Set some callback on the controller control tools\n # Resize callback\n resize_hook = partial(\n ControllerControlWidget.expand_or_collapse, weak_proxy(frame),\n weak_proxy(resize_button))\n resize_button.clicked.connect(resize_hook)\n\n if getattr(trait, 'expanded') is False:\n ControllerControlWidget.set_expanded(frame, resize_button, False)\n\n # Create the label associated with the controller widget\n control_label = trait.label\n if control_label is None:\n control_label = control_name\n if label_class is None:\n label_class = QtGui.QLabel\n if control_label is not None:\n label = label_class(control_label, parent)\n else:\n label = None\n\n return (frame, (label, tool_widget))", "def create_component(collection):\n # pylint: disable=W0212\n widget = CustomComponentWidget(collection)\n widget.ui.show()\n if widget.ui.exec_() == QDialog.Accepted:\n link = widget._create_link()\n if link:\n widget._add_link_to_targets(link)", "def createAction(self):\n self.createProjectAction = QtGui.QAction(self.tr(\"&New Project\"), self)\n self.createProjectAction.setShortcut(QtGui.QKeySequence.New)\n self.createProjectAction.setStatusTip(self.tr(\"Create a new project\"))\n self.connect(self.createProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"newProject()\"))\n\n self.openProjectAction = QtGui.QAction(self.tr(\"&Open...\"), self)\n self.openProjectAction.setShortcut(QtGui.QKeySequence.Open)\n self.openProjectAction.setStatusTip(self.tr(\"Open an existing project\"))\n self.connect(self.openProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"openProject()\"))\n\n self.saveProjectAction = QtGui.QAction(self.tr(\"&Save\"), self)\n self.saveProjectAction.setShortcut(QtGui.QKeySequence.Save)\n self.saveProjectAction.setStatusTip(self.tr(\"Save the current project\"))\n self.connect(self.saveProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"save()\"))\n\n self.importVideoAction = QtGui.QAction(self.tr(\"&Import video...\"), self)\n self.importVideoAction.setStatusTip(self.tr(\"Import a video into your project\"))\n self.connect(self.importVideoAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"importVideo()\"))\n\n self.aboutAction = QtGui.QAction(self.tr(\"&About\"), self)\n self.aboutAction.setStatusTip(self.tr(\"Show the credits and authors\"))\n self.connect(self.aboutAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"showAbout()\"))", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_widget(self, parent, tree):\n widget = wx.Panel(parent)\n sizer = wxSingleWidgetSizer()\n widget.SetSizer(sizer)\n return widget", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def _action(self, action, group_snapshot, info=None, **kwargs):\n body = {action: info}\n self.run_hooks('modify_body_for_action', body, **kwargs)\n url = 
'/group_snapshots/%s/action' % base.getid(group_snapshot)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def add_group(self, *args, **kwds):\n title = kwds.pop('title', None)\n description = kwds.pop('description', None)\n if kwds:\n raise Exception('unknown keyword arguments: %s' % kwds)\n\n # set title, description if args[0] is string\n if isinstance(args[0], string_types):\n title = args[0]\n args = args[1:]\n if isinstance(args[0], string_types):\n description = args[0]\n args = args[1:]\n\n assert all(isinstance(arg, Command) for arg in args), 'all args should be instance of Command'\n self._arg_stack.append(('group', args, {'title': title, 'description': description}))\n return self", "def __initViewActions(self):\n self.viewActGrp = createActionGroup(self)\n self.viewFoldActGrp = createActionGroup(self)\n \n self.zoomInAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Zoom in'),\n UI.PixmapCache.getIcon(\"zoomIn.png\"),\n QCoreApplication.translate('ViewManager', 'Zoom &in'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Ctrl++\", \"View|Zoom in\")),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Zoom In\", \"View|Zoom in\")),\n self.viewActGrp, 'vm_view_zoom_in')\n self.zoomInAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Zoom in on the text'))\n self.zoomInAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Zoom in</b>\"\"\"\n \"\"\"<p>Zoom in on the text. This makes the text bigger.</p>\"\"\"\n ))\n self.zoomInAct.triggered.connect(self.__zoomIn)\n self.viewActions.append(self.zoomInAct)\n \n self.zoomOutAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Zoom out'),\n UI.PixmapCache.getIcon(\"zoomOut.png\"),\n QCoreApplication.translate('ViewManager', 'Zoom &out'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Ctrl+-\", \"View|Zoom out\")),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Zoom Out\", \"View|Zoom out\")),\n self.viewActGrp, 'vm_view_zoom_out')\n self.zoomOutAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Zoom out on the text'))\n self.zoomOutAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Zoom out</b>\"\"\"\n \"\"\"<p>Zoom out on the text. This makes the text smaller.</p>\"\"\"\n ))\n self.zoomOutAct.triggered.connect(self.__zoomOut)\n self.viewActions.append(self.zoomOutAct)\n \n self.zoomResetAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Zoom reset'),\n UI.PixmapCache.getIcon(\"zoomReset.png\"),\n QCoreApplication.translate('ViewManager', 'Zoom &reset'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Ctrl+0\", \"View|Zoom reset\")),\n 0,\n self.viewActGrp, 'vm_view_zoom_reset')\n self.zoomResetAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Reset the zoom of the text'))\n self.zoomResetAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Zoom reset</b>\"\"\"\n \"\"\"<p>Reset the zoom of the text. 
\"\"\"\n \"\"\"This sets the zoom factor to 100%.</p>\"\"\"\n ))\n self.zoomResetAct.triggered.connect(self.__zoomReset)\n self.viewActions.append(self.zoomResetAct)\n \n self.zoomToAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Zoom'),\n UI.PixmapCache.getIcon(\"zoomTo.png\"),\n QCoreApplication.translate('ViewManager', '&Zoom'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Ctrl+#\", \"View|Zoom\")),\n 0,\n self.viewActGrp, 'vm_view_zoom')\n self.zoomToAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Zoom the text'))\n self.zoomToAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Zoom</b>\"\"\"\n \"\"\"<p>Zoom the text. This opens a dialog where the\"\"\"\n \"\"\" desired size can be entered.</p>\"\"\"\n ))\n self.zoomToAct.triggered.connect(self.__zoom)\n self.viewActions.append(self.zoomToAct)\n \n self.toggleAllAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Toggle all folds'),\n QCoreApplication.translate('ViewManager', '&Toggle all folds'),\n 0, 0, self.viewFoldActGrp, 'vm_view_toggle_all_folds')\n self.toggleAllAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Toggle all folds'))\n self.toggleAllAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Toggle all folds</b>\"\"\"\n \"\"\"<p>Toggle all folds of the current editor.</p>\"\"\"\n ))\n self.toggleAllAct.triggered.connect(self.__toggleAll)\n self.viewActions.append(self.toggleAllAct)\n \n self.toggleAllChildrenAct = E5Action(\n QCoreApplication.translate(\n 'ViewManager', 'Toggle all folds (including children)'),\n QCoreApplication.translate(\n 'ViewManager', 'Toggle all &folds (including children)'),\n 0, 0, self.viewFoldActGrp, 'vm_view_toggle_all_folds_children')\n self.toggleAllChildrenAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Toggle all folds (including children)'))\n self.toggleAllChildrenAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Toggle all folds (including children)</b>\"\"\"\n \"\"\"<p>Toggle all folds of the current editor including\"\"\"\n \"\"\" all children.</p>\"\"\"\n ))\n self.toggleAllChildrenAct.triggered.connect(\n self.__toggleAllChildren)\n self.viewActions.append(self.toggleAllChildrenAct)\n \n self.toggleCurrentAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Toggle current fold'),\n QCoreApplication.translate('ViewManager', 'Toggle &current fold'),\n 0, 0, self.viewFoldActGrp, 'vm_view_toggle_current_fold')\n self.toggleCurrentAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Toggle current fold'))\n self.toggleCurrentAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Toggle current fold</b>\"\"\"\n \"\"\"<p>Toggle the folds of the current line of the current\"\"\"\n \"\"\" editor.</p>\"\"\"\n ))\n self.toggleCurrentAct.triggered.connect(self.__toggleCurrent)\n self.viewActions.append(self.toggleCurrentAct)\n \n self.clearAllFoldsAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Clear all folds'),\n QCoreApplication.translate('ViewManager', 'Clear &all folds'),\n 0, 0, self.viewFoldActGrp, 'vm_view_clear_all_folds')\n self.clearAllFoldsAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Clear all folds'))\n self.clearAllFoldsAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Clear all folds</b>\"\"\"\n \"\"\"<p>Clear all folds of the current editor, i.e. 
ensure that\"\"\"\n \"\"\" all lines are displayed unfolded.</p>\"\"\"\n ))\n self.clearAllFoldsAct.triggered.connect(self.__clearAllFolds)\n self.viewActions.append(self.clearAllFoldsAct)\n \n self.unhighlightAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Remove all highlights'),\n UI.PixmapCache.getIcon(\"unhighlight.png\"),\n QCoreApplication.translate('ViewManager', 'Remove all highlights'),\n 0, 0,\n self, 'vm_view_unhighlight')\n self.unhighlightAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Remove all highlights'))\n self.unhighlightAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Remove all highlights</b>\"\"\"\n \"\"\"<p>Remove the highlights of all editors.</p>\"\"\"\n ))\n self.unhighlightAct.triggered.connect(self.__unhighlight)\n self.viewActions.append(self.unhighlightAct)\n \n self.newDocumentViewAct = E5Action(\n QCoreApplication.translate('ViewManager', 'New Document View'),\n UI.PixmapCache.getIcon(\"documentNewView.png\"),\n QCoreApplication.translate('ViewManager', 'New &Document View'),\n 0, 0, self, 'vm_view_new_document_view')\n self.newDocumentViewAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Open a new view of the current document'))\n self.newDocumentViewAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>New Document View</b>\"\"\"\n \"\"\"<p>Opens a new view of the current document. Both views show\"\"\"\n \"\"\" the same document. However, the cursors may be positioned\"\"\"\n \"\"\" independently.</p>\"\"\"\n ))\n self.newDocumentViewAct.triggered.connect(self.__newDocumentView)\n self.viewActions.append(self.newDocumentViewAct)\n \n self.newDocumentSplitViewAct = E5Action(\n QCoreApplication.translate(\n 'ViewManager', 'New Document View (with new split)'),\n UI.PixmapCache.getIcon(\"splitVertical.png\"),\n QCoreApplication.translate(\n 'ViewManager', 'New Document View (with new split)'),\n 0, 0, self, 'vm_view_new_document_split_view')\n self.newDocumentSplitViewAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager',\n 'Open a new view of the current document in a new split'))\n self.newDocumentSplitViewAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>New Document View</b>\"\"\"\n \"\"\"<p>Opens a new view of the current document in a new split.\"\"\"\n \"\"\" Both views show the same document. 
However, the cursors may\"\"\"\n \"\"\" be positioned independently.</p>\"\"\"\n ))\n self.newDocumentSplitViewAct.triggered.connect(\n self.__newDocumentSplitView)\n self.viewActions.append(self.newDocumentSplitViewAct)\n \n self.splitViewAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Split view'),\n UI.PixmapCache.getIcon(\"splitVertical.png\"),\n QCoreApplication.translate('ViewManager', '&Split view'),\n 0, 0, self, 'vm_view_split_view')\n self.splitViewAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Add a split to the view'))\n self.splitViewAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Split view</b>\"\"\"\n \"\"\"<p>Add a split to the view.</p>\"\"\"\n ))\n self.splitViewAct.triggered.connect(self.__splitView)\n self.viewActions.append(self.splitViewAct)\n \n self.splitOrientationAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Arrange horizontally'),\n QCoreApplication.translate('ViewManager', 'Arrange &horizontally'),\n 0, 0, self, 'vm_view_arrange_horizontally', True)\n self.splitOrientationAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Arrange the splitted views horizontally'))\n self.splitOrientationAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Arrange horizontally</b>\"\"\"\n \"\"\"<p>Arrange the splitted views horizontally.</p>\"\"\"\n ))\n self.splitOrientationAct.setChecked(False)\n self.splitOrientationAct.toggled[bool].connect(self.__splitOrientation)\n self.viewActions.append(self.splitOrientationAct)\n \n self.splitRemoveAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Remove split'),\n UI.PixmapCache.getIcon(\"remsplitVertical.png\"),\n QCoreApplication.translate('ViewManager', '&Remove split'),\n 0, 0, self, 'vm_view_remove_split')\n self.splitRemoveAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Remove the current split'))\n self.splitRemoveAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Remove split</b>\"\"\"\n \"\"\"<p>Remove the current split.</p>\"\"\"\n ))\n self.splitRemoveAct.triggered.connect(self.removeSplit)\n self.viewActions.append(self.splitRemoveAct)\n \n self.nextSplitAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Next split'),\n QCoreApplication.translate('ViewManager', '&Next split'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Ctrl+Alt+N\", \"View|Next split\")),\n 0,\n self, 'vm_next_split')\n self.nextSplitAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Move to the next split'))\n self.nextSplitAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Next split</b>\"\"\"\n \"\"\"<p>Move to the next split.</p>\"\"\"\n ))\n self.nextSplitAct.triggered.connect(self.nextSplit)\n self.viewActions.append(self.nextSplitAct)\n \n self.prevSplitAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Previous split'),\n QCoreApplication.translate('ViewManager', '&Previous split'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Ctrl+Alt+P\", \"View|Previous split\")),\n 0, self, 'vm_previous_split')\n self.prevSplitAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Move to the previous split'))\n self.prevSplitAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Previous split</b>\"\"\"\n \"\"\"<p>Move to the previous split.</p>\"\"\"\n ))\n self.prevSplitAct.triggered.connect(self.prevSplit)\n self.viewActions.append(self.prevSplitAct)\n \n self.previewAct = E5Action(\n 
QCoreApplication.translate('ViewManager', 'Preview'),\n UI.PixmapCache.getIcon(\"previewer.png\"),\n QCoreApplication.translate('ViewManager', 'Preview'),\n 0, 0, self, 'vm_preview', True)\n self.previewAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Preview the current file in the web browser'))\n self.previewAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Preview</b>\"\"\"\n \"\"\"<p>This opens the web browser with a preview of\"\"\"\n \"\"\" the current file.</p>\"\"\"\n ))\n self.previewAct.setChecked(Preferences.getUI(\"ShowFilePreview\"))\n self.previewAct.toggled[bool].connect(self.__previewEditor)\n self.viewActions.append(self.previewAct)\n \n self.astViewerAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Python AST Viewer'),\n UI.PixmapCache.getIcon(\"astTree\"),\n QCoreApplication.translate('ViewManager', 'Python AST Viewer'),\n 0, 0, self, 'vm_python_ast_viewer', True)\n self.astViewerAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Show the AST for the current Python file'))\n self.astViewerAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Python AST Viewer</b>\"\"\"\n \"\"\"<p>This opens the a tree view of the AST of the current\"\"\"\n \"\"\" Python source file.</p>\"\"\"\n ))\n self.astViewerAct.setChecked(False)\n self.astViewerAct.toggled[bool].connect(self.__astViewer)\n self.viewActions.append(self.astViewerAct)\n \n self.viewActGrp.setEnabled(False)\n self.viewFoldActGrp.setEnabled(False)\n self.unhighlightAct.setEnabled(False)\n self.splitViewAct.setEnabled(False)\n self.splitOrientationAct.setEnabled(False)\n self.splitRemoveAct.setEnabled(False)\n self.nextSplitAct.setEnabled(False)\n self.prevSplitAct.setEnabled(False)\n self.previewAct.setEnabled(True)\n self.astViewerAct.setEnabled(False)\n self.newDocumentViewAct.setEnabled(False)\n self.newDocumentSplitViewAct.setEnabled(False)\n \n self.splitOrientationAct.setChecked(\n Preferences.getUI(\"SplitOrientationVertical\"))", "def create_widgets(self):\n # self.var_spherical = IntVar()\n # self.var_3d = IntVar()\n # self.var_spatial_audio = IntVar()\n # self.button_open[\"command\"] = self.action_open\n # self.button_inject[\"command\"] = self.action_inject\n pass", "def create_ui(self, parent):\n view = View(\n Item(name=\"text\",\n show_label=False,\n editor=ImageTraitEditor(\n image=ImageResource(self.obj.name,\n search_path=[self.obj.parent.absolute_path]) )),\n id=\"puddle.image_editor.image_editor\",\n kind=\"live\", resizable=True)\n\n ui = self.edit_traits(view=view, parent=parent, kind=\"subpanel\")\n\n return ui", "def make_button(box, label, action):\n\n\t\t\tbtn = Gtk.Button(label)\n\t\t\tbtn.set_border_width(4)\n\t\t\tbtn.set_size_request(100, 50)\n\t\t\tbtn.connect(\"clicked\", action)\n\t\t\tbox.pack_start(btn, True, True, 0)\n\t\t\tbtn.show()\n\t\t\treturn btn", "def _make_buttonbox(self, **kwargs): # pylint: disable=unused-argument\n _tooltips = [\n _(u\"Add a new Usage Profile entity at the same level \"\n u\"as the currently selected entity.\"),\n _(u\"Add a new Usage Profile entity one level below the \"\n u\"currently selected entity.\"),\n _(u\"Remove the curently selected entity from the Usage \"\n u\"Profile.\"),\n _(u\"Save the currently selected Usage Profile line to the open \"\n u\"RAMSTK Program database.\"),\n _(u\"Save the Usage Profile to the open RAMSTK Program \"\n u\"database.\"),\n _(u\"Create the Mission and Usage Profile report.\")\n ]\n _callbacks = [\n self._do_request_insert_sibling, 
self._do_request_insert_child,\n self._do_request_delete, self._do_request_update,\n self._do_request_update_all\n ]\n _icons = [\n 'insert_sibling', 'insert_child', 'remove', 'save', 'save-all',\n 'reports'\n ]\n\n _buttonbox = RAMSTKListView._make_buttonbox(\n self,\n icons=_icons,\n tooltips=_tooltips,\n callbacks=_callbacks,\n orientation='vertical',\n height=-1,\n width=-1)\n\n return _buttonbox", "def __init__(self, app, *_):\n super(DummyApplicationWindow, self).__init__(app, None)\n self.group_windows=[AbstractGroupWindow(app)]", "def create_widgets(self):\n #create first button\n self.button1 = Button(self, text = \"Start\")\n self.button1.bind\n self.button1.grid()", "def __init__(self, parent=None):\n super().__init__(parent)\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n self.setTitle(_('Choose export format subtype'))\n self.subtypeButtons = QtGui.QButtonGroup(self)\n self.subtypeButtons.buttonClicked[int].connect(self.setCurrentSubtype)" ]
[ "0.75482804", "0.6695908", "0.66494614", "0.62164736", "0.60830635", "0.6036351", "0.6004404", "0.58673584", "0.5848936", "0.5833886", "0.5833164", "0.5814237", "0.5802912", "0.5681113", "0.5641236", "0.55654967", "0.5556217", "0.55348945", "0.5505949", "0.5497972", "0.54459685", "0.54414785", "0.54403305", "0.54278845", "0.5423609", "0.5412961", "0.54078853", "0.54073185", "0.5393643", "0.5390085", "0.53798026", "0.5377124", "0.5341619", "0.5304116", "0.52952796", "0.52927166", "0.5265085", "0.52111644", "0.5189545", "0.5188235", "0.51867425", "0.51838875", "0.517563", "0.5173502", "0.5171191", "0.51673216", "0.5127718", "0.51221824", "0.5115778", "0.51023614", "0.5100771", "0.5096505", "0.5091452", "0.5084409", "0.5073858", "0.504417", "0.50396746", "0.5036756", "0.5033484", "0.5026294", "0.50154173", "0.5010396", "0.5010396", "0.50009984", "0.49948576", "0.4993943", "0.49921548", "0.4981287", "0.49752113", "0.49743703", "0.4966636", "0.49457672", "0.4944102", "0.49420154", "0.49373648", "0.49284416", "0.49241894", "0.49196485", "0.49057525", "0.4904144", "0.49007124", "0.4898766", "0.48985654", "0.48968738", "0.48968616", "0.48937577", "0.4885517", "0.48793778", "0.486426", "0.4852897", "0.48503125", "0.4849253", "0.4844063", "0.48407096", "0.48401707", "0.48380738", "0.48358074", "0.48353606", "0.4831446", "0.48262438" ]
0.84242666
0
Initialize the layout for the control.
def init_layout(self):
    super(QtActionGroup, self).init_layout()
    widget = self.widget
    for action in self.actions():
        widget.addAction(action)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_layout(self):\n pass", "def init_layout(self):\n\t\tself.pack_start(self.edit, expand=True)\n\t\tself.pack_start(self.button, expand=False)\n\t\tself.show_all()", "def __createLayout(self):\r\n self.__createCanvas()\r\n self.__createButton()\r\n self.__createInputFunction()\r\n self.__createLimits()\r\n self.__styleLayout()", "def init_layout(self):\n\n # create hbox layout (upper horizontal box).\n upper_hbox = QHBoxLayout()\n upper_hbox.setContentsMargins(0, 0, 0, 0)\n\n # set widgets to the hbox layout\n upper_hbox.addWidget(self.newFileBtn)\n upper_hbox.addWidget(self.openVideoBtn)\n upper_hbox.addWidget(self.openAnnotationBtn)\n upper_hbox.addWidget(self.saveBtn)\n upper_hbox.addWidget(self.HelpBtn)\n\n upper_hbox.addItem(self.spacerItem1)\n\n upper_hbox.addWidget(self.radioLabel)\n upper_hbox.addWidget(self.zoomRadio)\n upper_hbox.addWidget(self.wideRadio)\n\n upper_hbox.addItem(self.spacerItem2)\n upper_hbox.addWidget(self.resetBtn)\n upper_hbox.addWidget(self.errorLabel)\n\n\n # ---------------------------------------------------------------------------------------\n\n # create hbox layout (middle horizontal box).\n middle_hbox = QHBoxLayout()\n middle_hbox.setContentsMargins(0, 0, 0, 0)\n \n # set widgets to the hbox layout\n middle_hbox.addWidget(self.canvas)\n middle_hbox.addItem(self.spacerItem3)\n middle_hbox.addWidget(self.container)\n\n # ---------------------------------------------------------------------------------------\n\n # create hbox layout (lower horizontal box).\n lower_hbox = QHBoxLayout()\n lower_hbox.setContentsMargins(0, 0, 0, 0)\n\n # set widgets to the hbox layout\n lower_hbox.addWidget(self.playBtn)\n lower_hbox.addWidget(self.stopBtn)\n lower_hbox.addWidget(self.recordLabel)\n lower_hbox.addWidget(self.checkbox)\n lower_hbox.addWidget(self.speedComboLabel)\n lower_hbox.addWidget(self.speedCombo)\n lower_hbox.addWidget(self.slider)\n lower_hbox.addWidget(self.durationLabel)\n lower_hbox.addWidget(self.lengthLabel)\n\n\n # ---------------------------------------------------------------------------------------\n\n # create vbox layout (vertical box)\n vboxLayout = QVBoxLayout()\n vboxLayout.addLayout(upper_hbox)\n vboxLayout.addLayout(middle_hbox)\n vboxLayout.addLayout(lower_hbox)\n\n #self.setLayout(vboxLayout)\n self.setLayout(vboxLayout)", "def init_layout(self):\n\t\tbox1 = gtk.VBox()\n\t\tbox1.pack_start(self.labelName)\n\t\tbox1.pack_start(self.labelDirectory)\n\t\tif self.labelDefaults is not None:\n\t\t\tbox1.pack_start(self.labelDefaults)\n\n\t\tbox2 = gtk.HBox()\n\t\tbox2.pack_start(self.directorySelector.edit, expand=True)\n\t\tbox2.pack_start(self.directorySelector.button, expand=False)\n\n\t\tbox3 = gtk.VBox()\n\t\tbox3.pack_start(self.editName)\n\t\tbox3.pack_start(box2)\n\t\tif self.comboDefaults is not None:\n\t\t\tbox3.pack_start(self.comboDefaults)\n\n\t\tbox4 = gtk.HBox()\n\t\tbox4.pack_start(box1, expand=False)\n\t\tbox4.pack_start(box3)\n\n\t\tself.vbox.pack_start(box4, expand=False)\n\t\tself.vbox.pack_start(gtk.VBox())\n\n\t\tself.show_all()", "def initUI(self):\n # Set the main layout component.\n self.mainLayout = QVBoxLayout()\n\n if(self.__itemListSize > 0):\n self.mainLayout.setSpacing(0)\n self.mainLayout.setContentsMargins(0, 0, 0, 0)\n\n # Build the loop for QHBoxLayout creation.\n for y in range(self.__ySize):\n # Creating the horizontal layout for X.\n horizontalLayout = QHBoxLayout()\n horizontalLayout.setSpacing(0)\n horizontalLayout.setSizeConstraint(QLayout.SetMaximumSize)\n 
horizontalLayout.setContentsMargins(0, 0, 0, 0)\n\n for x in range(self.__xSize):\n itemCount = x + y * self.__xSize\n\n if(itemCount < self.__itemListSize):\n # Create the widget.\n item = self.__itemList[itemCount]\n \n else:\n # Add empty string if no item available to keep grid.\n item = QLabel(\"\")\n \n item.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n item.setMinimumWidth(self.__parentGeometry.width()/self.__xSize - 10)\n horizontalLayout.addWidget(item)\n \n self.mainLayout.addLayout(horizontalLayout)\n \n else:\n self.textDisplay = QLabel(self.__emptyLabel)\n self.mainLayout.addWidget(self.textDisplay)\n\n # Set main layout to the window.\n self.setLayout(self.mainLayout)", "def create_layout( self ):", "def _init_default_layout(self):\n self._main_v_layout_ = QVBoxLayout(self)\n self._main_v_layout_.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self._main_v_layout_)", "def do_layout(self):\n self.define_panel_structure()\n self.layout_selection()\n self.layout_data_list()\n self.layout_batch()\n self.layout_button()", "def initialize(self):\n self.setWindowTitle(\"Playlist Maker\")\n self.setGeometry(0,0, 800, 494)\n self.mbox = QVBoxLayout()\n self.hbox = QHBoxLayout()\n self.hbtnbox = QHBoxLayout()", "def layout(self):\n\n # Initialise all plots and widgets\n widgets = self.widgets(width=200)\n\n plot_width = 500\n sizing_mode = 'stretch_height'\n self.init_grid_plot()\n self.init_line_plot(width=plot_width, mode=sizing_mode)\n self.init_distribution_plot(width=plot_width, mode=sizing_mode)\n self.init_school_composition_plot(width=plot_width, mode=sizing_mode)\n self.init_neighbourhood_composition_plot(width=plot_width,\n mode=sizing_mode)\n self.init_distance_plot(width=plot_width, mode=sizing_mode)\n\n # Row with widgets\n if self.params['case'].lower() == 'lattice':\n width = 420\n split = int(len(widgets) / 2.) 
+ 1\n widget_row = row(\n [column(widgets[:split]),\n column(widgets[split:])],\n width=width)\n else:\n width = 210\n widget_row = column(widgets, width=width)\n\n desc = Div(text=open(join(dirname(__file__),\n \"description.html\")).read(),\n margin=0)\n # Column with all the controls and description\n first_col = column(widget_row, width=width, sizing_mode='fixed')\n\n # Column with the grid/map\n second_col = column([\n desc,\n row(self.buttons(), sizing_mode='stretch_width'),\n row(self.grid, sizing_mode='stretch_width')\n ],\n sizing_mode='stretch_width')\n\n # Column with the plots\n third_col = column([\n self.plot, self.distribution_plot, self.distance_plot,\n self.school_composition_plot, self.neighbourhood_composition_plot\n ])\n\n vis_layout = gridplot([[first_col, second_col, third_col]],\n toolbar_location=None)\n\n self.doc.add_root(vis_layout)\n self.doc.title = \"COMPASS\"", "def layout(self):\n pass", "def _setup_layout(self):\r\n\t\tbtn_toggle_server = Button(self, \\\r\n\t\t\ttext = \"啟動伺服器\", command = self._toggle_server, \\\r\n\t\t\tname = \"btn_toggle_server\")\r\n\t\tbtn_toggle_server.pack(side = LEFT)\r\n\r\n\t\tlabel_IP = Label(self, text = \"IP: \", anchor = W)\r\n\t\tlabel_IP.pack(side = LEFT)\r\n\t\tentry_IP = Entry(self, width = 15, name = \"entry_IP\")\r\n\t\tentry_IP.pack(side = LEFT)\r\n\t\tlabel_port = Label(self, text = \"Port: \", anchor = W)\r\n\t\tlabel_port.pack(side = LEFT)\r\n\t\tentry_port = Entry(self, width = 5, name = \"entry_port\")\r\n\t\tentry_port.pack(side = LEFT)\r\n\r\n\t\tlabel_connections = Label(self, text = \"連接數: -/-\", \\\r\n\t\t\tname = \"label_connections\")\r\n\t\tlabel_connections.pack(side = LEFT)", "def __init_ui(self):\n self.__maximize_button.setFixedSize(31, 31)\n self.__maximize_button.setIcon(QIcon(SystemInfo.RESOURCES + 'images/buttons/maximize.svg'))\n\n self.__diagram_group.setStyleSheet(\"QGroupBox { border: 1px solid gray; background: white; }\")\n self.__diagram_layout.addWidget(self.__diagram_group)\n\n self.__button_layout = QHBoxLayout()\n self.__button_layout.addWidget(self.__start_button)\n self.__button_layout.addStretch()\n self.__button_layout.addWidget(self.__maximize_button)\n\n main_layout = QVBoxLayout()\n main_layout.addLayout(self.__button_layout, 1)\n main_layout.addLayout(self.__diagram_layout, 1)\n main_layout.addStretch(0)\n\n self.setLayout(main_layout)", "def setup_layout(self):\n\n # check if we should animate plot\n anim = self.get_option(self.sctn,'animate')\n if anim != None:\n self.animate = anim.lower() in ['t','true','1']\n else:\n self.animate = False\n self.anim_range=[]\n t = self.get_option(self.sctn,'anim_start')\n if t!=None:\n self.anim_range.append(int(t))\n else:\n self.anim_range.append(0)\n t = self.get_option(self.sctn,'anim_end')\n if t!=None:\n self.anim_range.append(int(t))\n else:\n self.anim_range.append(5)\n \n self.times = self.get_option(self.sctn,'times')\n if self.times == \"None\":\n self.times = [None]\n else:\n self.times = self.times.split()\n \n if len(self.variables)>1:\n self.numdata = len(self.variables)\n else:\n self.numdata = len(self.times)\n try:\n self.numcol = int(self.get_option(self.sctn,'ncol'))\n except:\n self.numcol = self.numdata\n if len(self.variables)>1:\n self.numrow = len(self.times)\n else:\n self.numrow = 1", "def __init__(self):\n super(GraphVisualizerCase, self).__init__()\n\n self._v_layout = QVBoxLayout()\n self.setLayout(self._v_layout)\n\n top_label_layout = QHBoxLayout()\n self._v_layout.addLayout(top_label_layout)\n\n 
self._top_left_label = MathTextLabel()\n top_label_layout.addWidget(self._top_left_label)\n self._top_right_label = MathTextLabel()\n top_label_layout.addWidget(self._top_right_label)\n\n self._center_widget = QFrame()\n self._center_text_label = QLabel()\n self._center_text_label.setAlignment(Qt.AlignCenter)\n layout = QHBoxLayout()\n layout.addWidget(self._center_text_label)\n self._center_widget.setLayout(layout)\n\n self._v_layout.addWidget(self._center_widget)\n\n bot_label_layout = QHBoxLayout()\n self._v_layout.addLayout(bot_label_layout)\n\n self._bot_left_label = MathTextLabel()\n bot_label_layout.addWidget(self._bot_left_label)\n self._bot_right_label = MathTextLabel()\n bot_label_layout.addWidget(self._bot_right_label)", "def layout(self):\n # Create the layout\n boxLayout = QtGui.QGridLayout()\n\n # Add widgets to layout\n boxLayout.addWidget(self.magnitudeLabel,0,0)\n boxLayout.addWidget(self.magnitudeOption,0,1)\n boxLayout.addWidget(self.directionLabel,1,0)\n boxLayout.addWidget(self.directionOption,1,1)\n boxLayout.addWidget(self.horizontalLabel,2,0)\n boxLayout.addWidget(self.horizontalOption,2,1)\n boxLayout.addWidget(self.verticalLabel,3,0)\n boxLayout.addWidget(self.verticalOption,3,1)\n boxLayout.addWidget(self.closeButton,4,1)\n\n # Set layout to window\n self.setLayout(boxLayout)", "def init_layout(self):\n\t\t# layout splitter. directoryList + buttons | logWidget\n\t\tself.pack_start(self.splitter)\n\t\tframe = gtk.Frame()\n\t\tframe.set_shadow_type(gtk.SHADOW_IN)\n\t\tbox1 = gtk.VBox()\n\t\tframe.add(box1)\n\t\tself.splitter.pack1(frame, resize=True, shrink=False)\n\t\tbox2 = gtk.HBox()\n\t\tbox1.pack_start(self.directoryListScroll)\n\t\tbox1.pack_start(box2, expand=False)\n\t\tbox2.pack_start(self.buttonNew)\n\t\tbox2.pack_start(self.buttonEdit)\n\t\tbox2.pack_start(self.buttonUp)\n\t\tbox2.pack_start(self.buttonDown)\n\t\tbox2.pack_start(self.buttonRemove)\n\t\tself.logView.set_shadow_type(gtk.SHADOW_IN)\n\t\tself.splitter.pack2(self.logView, resize=True, shrink=False)\n\n\t\tself.pack_start(self.buttonImport, expand=False)\n\n\t\t# layout first column of settings widgets\n\t\tbox2 = gtk.HBox()\n\t\tself.pack_start(box2, expand=False)\n\t\tbox3 = gtk.VBox()\n\t\tbox2.pack_start(box3)\n\t\tbox3.pack_start(self.checkAutoStartImport)\n\t\tbox3.pack_start(self.checkAppendMessages)\n\n\t\tbox2.pack_start(gtk.VSeparator())\n\n\t\t# layout secund column of settings widgets\n\t\t#TODO: layout is a bit messy here with labels + spinboxes\n\t\t# have not found a way to properly align labes / boxes yet\n\t\tbox3 = gtk.VBox()\n\t\tbox2.pack_start(box3)\n\t\tbox4 = gtk.HBox()\n\t\tbox3.pack_start(box4)\n\t\tbox4.pack_start(self.labelImportTimeout, expand=False)\n\t\tbox4.pack_start(self.spinImportTimeout)\n\t\tbox4 = gtk.HBox()\n\t\tbox3.pack_start(box4)\n\t\tbox4.pack_start(self.labelMaxLogLines, expand=False)\n\t\tbox4.pack_start(self.spinMaxLogLines)\n\n\t\tself.show_all()", "def __styleLayout(self):\r\n style = \"font-family:Times; font: bold; color:\" + self.primaryColor + \"; font-size: 15px\"\r\n # set the layout\r\n layout = QVBoxLayout()\r\n\r\n # canvas \r\n layout.addWidget(self.toolbar)\r\n layout.addWidget(self.canvas)\r\n self.toolbar.setStyleSheet(\"background-color:\" + self.primaryColor + \";\")\r\n\r\n # lower limits of X\r\n layout.addWidget(self.lowerXLabel)\r\n layout.addWidget(self.lowerXField)\r\n self.lowerXLabel.setStyleSheet(style)\r\n self.lowerXField.setStyleSheet(\"background-color:\" + self.primaryColor + \";\")\r\n\r\n # upper limits of X\r\n 
layout.addWidget(self.upperXLabel)\r\n layout.addWidget(self.upperXField)\r\n self.upperXLabel.setStyleSheet(style)\r\n self.upperXField.setStyleSheet(\"background-color:\" + self.primaryColor + \";\")\r\n\r\n # the input function\r\n layout.addWidget(self.InputFunctionLabel)\r\n layout.addWidget(self.InputFunctionField)\r\n self.InputFunctionLabel.setStyleSheet(style)\r\n self.InputFunctionField.setStyleSheet(\"background-color:\" + self.primaryColor + \";\")\r\n\r\n # the plot button\r\n layout.addWidget(self.button)\r\n self.button.setStyleSheet(style)\r\n\r\n self.setLayout(layout)", "def init_layout(self):\n super(WxTraitsItem, self).init_layout()\n self.refresh_traits_widget(notify=False)", "def initUI(self):\n # Setting the main layout as Vertical.\n self.mainLayout = QHBoxLayout()\n\n # Create title.\n self.title = QLabel(self.__name + \" : \")\n\n # Add description as tooltip.\n self.title.setToolTip(self.__description)\n\n # Add title to main layout.\n self.mainLayout.addWidget(self.title)\n\n # Create ComboBox.\n self.dropDown = QComboBox()\n\n # Add datas to drop down.\n self.dropDown.addItems(self.__datas)\n\n # Set default index to dropdown.\n self.dropDown.setCurrentIndex(self.__currentValue)\n\n # Connect dropdown with update method.\n self.dropDown.currentIndexChanged.connect(self.changeCurrentValue)\n\n # Add ComboBox to main layout.\n self.mainLayout.addWidget(self.dropDown)\n\n # Add the main layout to the window.\n self.setLayout(self.mainLayout)", "def layout(self):\n \n # set size to child\n if self._view is not None:\n self._view.frame = (0, 0, self.width, self.height)", "def _do_layout(self):\n return", "def init_ui(self):\n self.panel_sizer = wx.BoxSizer(wx.VERTICAL)\n self.figure_bmp = wx.StaticBitmap(self, wx.ID_ANY,\n bitmap=self.controller.empty_bitmap(self.bitmap_width,\n self.bitmap_height),\n pos=wx.DefaultPosition, size=wx.DefaultSize)\n self.panel_sizer.Add(self.figure_bmp, ui_defaults.ctrl_pct, wx.CENTER,\n ui_defaults.widget_margin)\n self.SetSizerAndFit(self.panel_sizer)", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def _initialize(self):\n self._frame = ttk.Frame(master=self._root)\n self._ingredients_frame = ttk.Frame(master=self._frame)\n\n self._create_header()\n self._show_ingredient_list()\n self._create_footer()\n\n self._ingredients_frame.grid(row=1, column=1, columnspan=2)\n self._frame.grid_columnconfigure(1, weight=1, minsize=250)", "def createLayout(self):\n hbox = QtWidgets.QHBoxLayout()\n hbox.addStretch(1)\n okBtn = QtWidgets.QPushButton(\"OK\")\n okBtn.clicked.connect(self.validate)\n cancelBtn = QtWidgets.QPushButton(\"Cancel\")\n cancelBtn.clicked.connect(self.reject)\n hbox.addWidget(okBtn)\n hbox.addWidget(cancelBtn)\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.lab)\n vbox.addWidget(self.text)\n vbox.addLayout(hbox)\n self.setLayout(vbox)", "def createLayout(self):\n hbox = QtWidgets.QHBoxLayout()\n hbox.addStretch(1)\n okBtn = QtWidgets.QPushButton(\"OK\")\n okBtn.clicked.connect(self.validate)\n cancelBtn = QtWidgets.QPushButton(\"Cancel\")\n cancelBtn.clicked.connect(self.reject)\n hbox.addWidget(okBtn)\n hbox.addWidget(cancelBtn)\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.lab)\n vbox.addWidget(self.text)\n vbox.addLayout(hbox)\n self.setLayout(vbox)", "def _create_layout(self):\n # Level Size\n self._level_size_lay = PySide2.QtWidgets.QHBoxLayout()\n 
self._level_size_lay.addWidget(self._level_size_lbl)\n self._level_size_lay.addSpacing(10)\n self._level_size_lay.addWidget(self._level_size_x_lbl)\n self._level_size_lay.addWidget(self._level_size_x_spinbox)\n self._level_size_lay.addSpacing(5)\n self._level_size_lay.addWidget(self._level_size_y_lbl)\n self._level_size_lay.addWidget(self._level_size_y_spinbox)\n self._level_size_lay.addSpacing(5)\n self._level_size_lay.addWidget(self._level_size_z_lbl)\n self._level_size_lay.addWidget(self._level_size_z_spinbox)\n self._level_size_lay.addStretch()\n\n # Minimum Length\n self._minimum_length_lay = PySide2.QtWidgets.QHBoxLayout()\n self._minimum_length_lay.addWidget(self._minimum_length_checkbox)\n self._minimum_length_lay.addSpacing(5)\n self._minimum_length_lay.addWidget(self._minimum_length_spinbox)\n self._minimum_length_lay.addStretch()\n\n # Minimum Length\n self._maximum_length_lay = PySide2.QtWidgets.QHBoxLayout()\n self._maximum_length_lay.addWidget(self._maximum_length_checkbox)\n self._maximum_length_lay.addSpacing(5)\n self._maximum_length_lay.addWidget(self._maximum_length_spinbox)\n self._maximum_length_lay.addStretch()\n\n # Seed\n self._seed_lay = PySide2.QtWidgets.QHBoxLayout()\n self._seed_lay.addWidget(self._seed_checkbox)\n self._seed_lay.addSpacing(5)\n self._seed_lay.addWidget(self._seed_le)\n\n # Generator Settings Group Box\n self._generator_lay = PySide2.QtWidgets.QVBoxLayout()\n self._generator_lay.addLayout(self._level_size_lay)\n self._generator_lay.addLayout(self._minimum_length_lay)\n self._generator_lay.addLayout(self._maximum_length_lay)\n self._generator_lay.addLayout(self._seed_lay)\n self._generator_group_box.setLayout(self._generator_lay)\n\n # Block Size\n self._block_size_lay = PySide2.QtWidgets.QHBoxLayout()\n self._block_size_lay.addWidget(self._block_size_lbl)\n self._block_size_lay.addSpacing(10)\n self._block_size_lay.addWidget(self._block_size_x_lbl)\n self._block_size_lay.addWidget(self._block_size_x_spinbox)\n self._block_size_lay.addSpacing(5)\n self._block_size_lay.addWidget(self._block_size_y_lbl)\n self._block_size_lay.addWidget(self._block_size_y_spinbox)\n self._block_size_lay.addSpacing(5)\n self._block_size_lay.addWidget(self._block_size_z_lbl)\n self._block_size_lay.addWidget(self._block_size_z_spinbox)\n self._block_size_lay.addStretch()\n\n # Group Name\n self._group_name_lay = PySide2.QtWidgets.QHBoxLayout()\n self._group_name_lay.addWidget(self._group_name_lbl)\n self._group_name_lay.addSpacing(10)\n self._group_name_lay.addWidget(self._group_name_le)\n\n # Maya Scene Group Box\n self._scene_lay = PySide2.QtWidgets.QVBoxLayout()\n self._scene_lay.addLayout(self._block_size_lay)\n self._scene_lay.addLayout(self._group_name_lay)\n self._scene_group_box.setLayout(self._scene_lay)\n\n # Object Blocks\n for blk_type in VALID_BLOCK_TYPES:\n # Path\n self._object_blocks[blk_type][\"pth_lay\"] = PySide2.QtWidgets.QHBoxLayout()\n self._object_blocks[blk_type][\"pth_lay\"].addWidget(self._object_blocks[blk_type][\"pth_lbl\"])\n self._object_blocks[blk_type][\"pth_lay\"].addSpacing(10)\n self._object_blocks[blk_type][\"pth_lay\"].addWidget(self._object_blocks[blk_type][\"pth_le\"])\n \n # Weight\n self._object_blocks[blk_type][\"weight_lay\"] = PySide2.QtWidgets.QHBoxLayout()\n self._object_blocks[blk_type][\"weight_lay\"].addWidget(self._object_blocks[blk_type][\"weight_lbl\"])\n self._object_blocks[blk_type][\"weight_lay\"].addSpacing(10)\n 
self._object_blocks[blk_type][\"weight_lay\"].addWidget(self._object_blocks[blk_type][\"weight_spinbox\"])\n self._object_blocks[blk_type][\"weight_lay\"].addStretch()\n\n # Object Block Group\n self._object_blocks[blk_type][\"group_lay\"] = PySide2.QtWidgets.QVBoxLayout()\n self._object_blocks[blk_type][\"group_lay\"].addSpacing(15)\n self._object_blocks[blk_type][\"group_lay\"].addLayout(self._object_blocks[blk_type][\"pth_lay\"])\n self._object_blocks[blk_type][\"group_lay\"].addLayout(self._object_blocks[blk_type][\"weight_lay\"])\n self._object_blocks[blk_type][\"group\"].setLayout(self._object_blocks[blk_type][\"group_lay\"])\n\n # Object Block Group Box\n self._block_lay = PySide2.QtWidgets.QVBoxLayout()\n for blk_type in VALID_BLOCK_TYPES:\n self._block_lay.addWidget(self._object_blocks[blk_type][\"group\"])\n self._block_group_box.setLayout(self._block_lay)\n\n # Buttons\n self._button_lay = PySide2.QtWidgets.QHBoxLayout()\n self._button_lay.addWidget(self._cancel_btn)\n self._block_size_lay.addSpacing(5)\n self._button_lay.addWidget(self._generate_btn)\n\n # Main\n self._main_lay = PySide2.QtWidgets.QVBoxLayout()\n self._main_lay.addWidget(self._generator_group_box)\n self._main_lay.addWidget(self._scene_group_box)\n self._main_lay.addWidget(self._block_group_box)\n self._main_lay.addLayout(self._button_lay)\n\n # Set the layout\n self.setLayout(self._main_lay)", "def _ui_layout(self):\n layout = QtWidgets.QVBoxLayout()\n #layout.setContentsMargins(0,0,0,0)\n\n # layout child widgets\n layout.addWidget(self._label_description)\n layout.addWidget(self._table)\n layout.addWidget(self._checkbox_remember)\n layout.addWidget(self._checkbox_ignore_missing)\n\n # scale widget dimensions based on DPI\n height = get_dpi_scale() * 250\n width = get_dpi_scale() * 400\n self.setMinimumHeight(height)\n self.setMinimumWidth(width)\n\n # apply the widget layout\n self.setLayout(layout)", "def _generate_layout(self):\n\n pass", "def _setup_layout(self):\n\n total_layout = QtWidgets.QVBoxLayout()\n total_layout.addStretch()\n total_layout.addWidget(self.game_over_logo)\n total_layout.addStretch()\n\n self.setLayout(total_layout)", "def setLayout():\n global layout\n\n if p.GetString(\"Layout\") == \"Grid\":\n layoutFlow.setEnabled(False)\n layoutGrid.setEnabled(True)\n layoutStretch.setEnabled(True)\n layout = layoutGrid\n else:\n layoutGrid.setEnabled(False)\n layoutStretch.setEnabled(False)\n layoutFlow.setEnabled(True)\n layout = layoutFlow", "def __init__(self):\n super(GraphVisualizer, self).__init__()\n\n self._layout = QGridLayout()\n self.setLayout(self._layout)\n\n self._next_column = 0\n\n self._columns = []", "def init_widget(self):\n self._build_config()\n self._raw_toolbar.initToolbar(self.config)", "def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)", "def initUI(self) -> None:\n ratio = 70\n width_to_set = (ratio * self.get_current_window_info()[0]) / 100.0\n height_to_set = (ratio * self.get_current_window_info()[1]) / 100.0\n self.setGeometry(200, 100, width_to_set, height_to_set)\n self.createTable()\n # Add box layout, add table to box layout and add box layout to widget\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.tableWidget)\n self.setLayout(self.layout)\n self.setWindowTitle('View files')\n self.show()", "def init_widget(self):", "def init_layout(self):\n super(WxDockPane, self).init_layout()\n 
self.widget.SetDockWidget(self.dock_widget())", "def __do_layout(self):\n sizer_1 = wx.FlexGridSizer(1, 1, 1, 1)\n sizer_1.Add(self.chat_log, 0, 0, 0)\n sizer_1.Add(self.text_send, 0, wx.ALL, 1)\n self.SetSizer(sizer_1)\n self.Layout()", "def createLayout(self):\n self.fig = Figure()\n self.canvas = FigureCanvas(self.fig)\n self.canvas.setParent(self)\n self.canvas.setFocus()\n self.mpl_toolbar = NavigationToolbar(self.canvas, self)\n self.axes = self.fig.add_subplot(111)\n # Log window\n self.log = QtWidgets.QTextEdit(self)\n self.log.setCurrentFont(self.parent.txteditfont)\n self.log.setFixedHeight(200)\n self.log.setReadOnly(True)\n # Parameters\n plab = QtWidgets.QLabel(\"Peak parameters (type, pos, amp, FWHM, asym, Lfrac)\")\n self.ptext = QtWidgets.QTextEdit(self)\n self.ptext.setCurrentFont(self.parent.txteditfont)\n # Buttons\n self.exeBtn = QtWidgets.QPushButton(\"Compute\")\n self.okBtn = QtWidgets.QPushButton(\"OK\")\n self.cancelBtn = QtWidgets.QPushButton(\"Cancel\")\n\n # set the layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.canvas)\n vbox.addWidget(self.mpl_toolbar)\n vbox.addWidget(self.log)\n vbox.addWidget(plab)\n vbox.addWidget(self.ptext)\n hbox = QtWidgets.QHBoxLayout()\n hbox.addWidget(self.exeBtn)\n hbox.addWidget(self.okBtn)\n hbox.addWidget(self.cancelBtn)\n vbox.addLayout(hbox)\n self.setLayout(vbox)", "def initUI(self):\n\n grid = QGridLayout()\n grid.addWidget(self.from_currency_label, 0, 0, Qt.AlignRight)\n grid.addWidget(self.from_currency, 0, 1)\n grid.addWidget(self.to_currency_label, 0, 2, Qt.AlignRight)\n grid.addWidget(self.to_currency, 0, 3)\n grid.addWidget(self.from_amount_label, 1, 0)\n grid.addWidget(self.from_amount, 1, 1)\n grid.addWidget(self.to_amount_label, 1, 2)\n grid.addWidget(self.to_amount, 1, 3)\n\n grid.addWidget(self.from_calendar, 2, 0, 1, 2)\n grid.addWidget(self.to_calendar, 2, 2, 1, 2)\n\n grid.addWidget(self.rates_plot, 3, 0, 1, 4)\n grid.addWidget(self.graph_hint, 4, 0, 1, 4)\n\n self.rates_plot.showGrid(x=True, y=True)\n self.rates_plot.setLabel('left', 'Rate')\n self.rates_plot.setLabel('bottom', 'Days')\n self.legend = self.rates_plot.addLegend()\n\n self.setLayout(grid)\n self.setWindowTitle('Currency Converter - Assignment 1 - Arnaud Bourget - 2981151')\n\n self.from_currency.currentIndexChanged.connect(self.updateUI)\n self.to_currency.currentIndexChanged.connect(self.updateUI)\n self.from_amount.valueChanged.connect(self.fromAmountHandler)\n self.from_calendar.selectionChanged.connect(self.fromCalendarHandler)\n self.to_calendar.selectionChanged.connect(self.toCalendarHandler)\n\n self.show()", "def initialize(self):\n\n super(RectTab,self).initialize()\n # special tkinter variables that will be changed with the scales\n self.width = tk.IntVar()\n self.height = tk.IntVar()\n\n # make width scale\n self.widthScale = tk.Scale(self, from_=1, to=5, orient=tk.HORIZONTAL,\n label='Width', resolution=1, variable=self.width,\n command=self.updateSize)\n self.widthScale.grid(column=2, row=6, columnspan=1, sticky='W' + 'E')\n self.widthScale.set(2)\n\n # make height scale\n self.heightScale = tk.Scale(self, from_=1, to=5, orient=tk.HORIZONTAL,\n label='Height', resolution=1, variable=self.height,\n command=self.updateSize)\n self.heightScale.grid(column=2, row=7, columnspan=1, sticky='W' + 'E')\n self.heightScale.set(2)", "def _init_widgets(self):\n comps = self.ui.component_list\n comps.addItems(sorted(self._labels.keys()))\n data = self.ui.data_list\n data.addItems(sorted(self._data.keys()))", "def use(self, 
layout):\n self._wid.setLayout(layout)\n return layout", "def __init_UI(self):\r\n\r\n ## Setting up the vertical bar\r\n # self.bar = self.verticalScrollBar()\r\n\r\n # Create the inner widget of the scroll area\r\n self.inner_widget = QWidget(self)\r\n self.setWidget(self.inner_widget)\r\n\r\n # Create a vertical layout inside the previous widget\r\n self.__layout = QVBoxLayout(self)\r\n self.inner_widget.setLayout(self.__layout)\r\n\r\n # More settings\r\n self.setWidgetResizable(True)", "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def __init__(self, handlerClass):\n\t\tsuper(Layout, self).__init__()\n\t\tself.set_property('orientation', Gtk.Orientation.VERTICAL)\n\t\tself.handler = handlerClass\n\t\tself.init_layout()\n\t\tself.show_all()", "def create_layout( self ):\n # highlight all of our widgets so we can debug layouts.\n # XXX: debugging support.\n self.setStyleSheet( \"border: 1px solid black\" )\n\n # vertical layout of the photo preview and everything else.\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins( 0, 0, 0, 0 )\n main_layout.setSpacing( 0 )\n\n # horizontal layout of everything else\n horizontal_layout = QHBoxLayout()\n horizontal_layout.setContentsMargins( 0, 0, 0, 0 )\n horizontal_layout.setSpacing( 0 )\n\n # vertical layout for the selection and the selection type.\n selection_layout = QVBoxLayout()\n selection_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_layout.setSpacing( 0 )\n\n # selection type label/combo box.\n selection_type_layout = QHBoxLayout()\n selection_type_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_type_layout.setSpacing( 0 )\n selection_type_layout.addWidget( self.selectionBoxLabel )\n selection_type_layout.addWidget( self.selectionBox )\n\n # art record creation/deletion buttons.\n record_modification_layout = QHBoxLayout()\n record_modification_layout.setContentsMargins( 0, 0, 0, 0 )\n record_modification_layout.setSpacing( 0 )\n record_modification_layout.addWidget( self.newRecordButton )\n record_modification_layout.addWidget( self.deleteRecordButton )\n\n selection_layout.addWidget( self.selectionView )\n selection_layout.addLayout( selection_type_layout )\n selection_layout.addLayout( record_modification_layout )\n selection_layout.setStretchFactor( self.selectionView, 1 )\n\n # selected art record information and photo record editing widgets.\n info_and_edit_layout = QGridLayout()\n info_and_edit_layout.setContentsMargins( 0, 0, 0, 0 )\n info_and_edit_layout.setVerticalSpacing( 0 )\n info_and_edit_layout.setHorizontalSpacing( 2 )\n\n # XXX: the layout of these labels is *awful*. 
need to fix this.\n art_header_label = QLabel( \"<b>Art Record:</b>\" )\n info_and_edit_layout.addWidget( art_header_label,\n 0, 0, 1, 4 )\n\n info_and_edit_layout.setRowStretch( 0, 1 )\n\n type_label = QLabel( \"Type:\" )\n info_and_edit_layout.addWidget( type_label,\n 1, 0 )\n info_and_edit_layout.addWidget( self.artTypeLabel,\n 1, 1 )\n\n size_label = QLabel( \"Size:\" )\n info_and_edit_layout.addWidget( size_label,\n 2, 0 )\n info_and_edit_layout.addWidget( self.artSizeLabel,\n 2, 1 )\n\n quality_label = QLabel( \"Quality:\" )\n info_and_edit_layout.addWidget( quality_label,\n 3, 0 )\n info_and_edit_layout.addWidget( self.artQualityLabel,\n 3, 1 )\n\n date_label = QLabel( \"Date:\" )\n info_and_edit_layout.addWidget( date_label,\n 4, 0 )\n info_and_edit_layout.addWidget( self.artDateLabel,\n 4, 1 )\n\n info_and_edit_layout.setColumnStretch( 1, 1 )\n\n artists_label = QLabel( \"Artists:\" )\n info_and_edit_layout.addWidget( artists_label,\n 1, 2 )\n info_and_edit_layout.addWidget( self.artArtistsLabel,\n 1, 3 )\n\n associates_label = QLabel( \"Associates:\" )\n info_and_edit_layout.addWidget( associates_label,\n 2, 2 )\n info_and_edit_layout.addWidget( self.artAssociatesLabel,\n 2, 3 )\n\n vandals_label = QLabel( \"Vandals:\" )\n info_and_edit_layout.addWidget( vandals_label,\n 3, 2 )\n info_and_edit_layout.addWidget( self.artVandalsLabel,\n 3, 3 )\n\n tags_label = QLabel( \"Tags:\" )\n info_and_edit_layout.addWidget( tags_label,\n 4, 2 )\n info_and_edit_layout.addWidget( self.artTagsLabel,\n 4, 3 )\n\n info_and_edit_layout.setColumnStretch( 3, 1 )\n\n photo_header_label = QLabel( \"<b>Photo Record:</b>\" )\n info_and_edit_layout.addWidget( photo_header_label,\n 5, 0, 1, 4 )\n\n info_and_edit_layout.setRowStretch( 5, 1 )\n\n info_and_edit_layout.addWidget( self.photoProcessingStateComboLabel,\n 6, 0 )\n info_and_edit_layout.addWidget( self.photoProcessingStateComboBox,\n 6, 1 )\n\n info_and_edit_layout.addWidget( self.photoTagsLabel,\n 7, 0 )\n info_and_edit_layout.addWidget( self.photoTagsLineEdit,\n 7, 1,\n 1, 3 )\n\n horizontal_layout.addLayout( selection_layout )\n horizontal_layout.addLayout( info_and_edit_layout )\n horizontal_layout.setStretchFactor( info_and_edit_layout, 1 )\n\n main_layout.addWidget( self.previewArea )\n main_layout.addLayout( horizontal_layout )\n main_layout.setStretchFactor( self.previewArea, 1 )\n\n self.centralWidget = QWidget()\n self.centralWidget.setLayout( main_layout )\n\n self.setCentralWidget( self.centralWidget )", "def init():\n return _libsbml.LayoutExtension_init()", "def _layout_widgets(self):\n self.main_layout = QtGui.QVBoxLayout()\n self.main_layout.addWidget(self.window_message)\n\n self.hbox_layout_buttons = QtGui.QHBoxLayout()\n self.main_layout.addLayout(self.hbox_layout_buttons)\n\n self.hbox_layout_buttons.addStretch()\n self.hbox_layout_buttons.addWidget(self.turn_visibility_on)\n self.hbox_layout_buttons.addWidget(self.turn_visibility_off)\n\n self.setLayout(self.main_layout)", "def LayoutComponents(self):\n sizer = wx.BoxSizer(wx.VERTICAL)\n\n # Add header\n header = self.FindWindow(\"header\")\n if header is not None:\n sizer.Add(header, 0, wx.EXPAND, 0)\n sizer.Add(self.HorizLine(self), 0, wx.ALL | wx.EXPAND, 0)\n\n # Add content\n content = self.FindWindow(\"content\")\n if content is not None:\n sizer.Add(content, 1, wx.EXPAND, 0)\n else:\n sizer.AddSpacer(1)\n\n # Add action buttons\n actions = self.FindWindow(\"actions\")\n if actions is not None:\n sizer.Add(self.HorizLine(self), 0, wx.ALL | wx.EXPAND, 0)\n # proportion is 0 
to ask the sizer to never hide the buttons\n sizer.Add(actions, 0, wx.EXPAND, 0)\n\n # Since Layout doesn't happen until there is a size event, you will\n # sometimes have to force the issue by calling Layout yourself. For\n # example, if a frame is given its size when it is created, and then\n # you add child windows to it, and then a sizer, and finally Show it,\n # then it may not receive another size event (depending on platform)\n # in order to do the initial layout. Simply calling self.Layout from\n # the end of the frame's __init__ method will usually resolve this.\n self.SetSizer(sizer)\n self.Layout()", "def get_init_ui(self, container):\n w = self.get_frame(container)\n self.cols_configure(w)\n w.grid(row=0, column=0, sticky=tk.N+tk.W+tk.S+tk.E)\n\n return w", "def layout(self):\n\t\t\n\t\tself.mainSizer = wx.BoxSizer(wx.VERTICAL)\n\t\tbtnSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\timg = wx.Image(self.photoMaxSize,self.photoMaxSize)\n\t\tself.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY, \n\t\t\t\t\t\t\t\t\t\t wx.Bitmap(img))\n\t\tself.mainSizer.Add(self.imageCtrl, 0, wx.ALL|wx.CENTER, 5)\n\t\tself.imageLabel = wx.StaticText(self, label=\"\")\n\t\tself.mainSizer.Add(self.imageLabel, 0, wx.ALL|wx.CENTER, 5)\n\t\t\n\t\tbtnData = [(\"Previous\", btnSizer, self.onPrevious),\n\t\t\t\t (\"Slide Show\", btnSizer, self.onSlideShow),\n\t\t\t\t (\"Next\", btnSizer, self.onNext)]\n\t\tfor data in btnData:\n\t\t\tlabel, sizer, handler = data\n\t\t\tself.btnBuilder(label, sizer, handler)\n\t\t\t\n\t\tself.mainSizer.Add(btnSizer, 0, wx.CENTER)\n\t\tself.SetSizer(self.mainSizer)", "def _init_ui(self):\n\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel('Kies een normtraject:')\n\n hlayout.addWidget(label)\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self.section_ids = sorted([''] + io.geometry.import_section_ids(self.datadir))\n self.section_combobox.addItems(self.section_ids)\n\n hlayout.addWidget(self.section_combobox)\n\n self.add_button = QtWidgets.QPushButton('Toevoegen', clicked=self._add_flooddefence)\n\n hlayout.addWidget(self.add_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)", "def __init__(self):\n # Root window\n self.root = tk.Tk()\n self.root.title(\"Crossword\")\n # Padding frame\n self.frame = tk.Frame(self.root)\n self.frame.pack(fill=\"both\", padx=PAD, pady=PAD)\n # Initialize widget groups\n self.header = HeaderView(self)\n self.puzzle = PuzzleView(self)\n self.clues = CluesView(self)\n # Show widgets\n self.header.show()\n self.puzzle.show()\n self.clues.show()", "def create_layout( self ):\n\n # XXX: debugging layout\n self.setStyleSheet( \"border: 1px solid black\" )\n\n selection_layout = QVBoxLayout()\n selection_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_layout.setSpacing( 0 )\n selection_layout.addWidget( self.selectionView )\n\n selection_type_layout = QHBoxLayout()\n selection_type_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_type_layout.setSpacing( 0 )\n selection_type_layout.addWidget( self.selectionBoxLabel 
)\n selection_type_layout.addWidget( self.selectionBox )\n selection_type_layout.setStretchFactor( self.selectionBox, 1 )\n\n selection_layout.addLayout( selection_type_layout )\n selection_layout.setStretchFactor( self.selectionView, 1 )\n\n info_layout = QVBoxLayout()\n info_layout.setContentsMargins( 0, 0, 0, 0 )\n info_layout.setSpacing( 0 )\n\n stats_layout = QGridLayout()\n stats_layout.setContentsMargins( 0, 0, 0, 0 )\n stats_layout.setVerticalSpacing( 1 )\n stats_layout.setHorizontalSpacing( 10 )\n\n stats_layout.addWidget( QLabel( \"State:\" ),\n 0, 0 )\n stats_layout.addWidget( self.infoStateLabel,\n 0, 1 )\n\n stats_layout.addWidget( QLabel( \"Art Records:\" ),\n 1, 0 )\n stats_layout.addWidget( self.infoSummaryLabel,\n 1, 1 )\n\n stats_layout.addWidget( QLabel( \"Location:\" ),\n 2, 0 )\n stats_layout.addWidget( self.infoLocationLabel,\n 2, 1 )\n\n stats_layout.addWidget( QLabel( \"Taken:\" ),\n 3, 0 )\n stats_layout.addWidget( self.infoTakenLabel,\n 3, 1 )\n\n stats_layout.addWidget( QLabel( \"Tags:\" ),\n 4, 0 )\n stats_layout.addWidget( self.infoTagsLabel,\n 4, 1 )\n\n stats_layout.setColumnStretch( 1, 1 )\n\n info_layout.addWidget( self.previewArea )\n info_layout.addLayout( stats_layout )\n info_layout.setStretchFactor( self.previewArea, 1 )\n\n self.centralWidget = QWidget()\n self.centralWidget.setLayout( info_layout )\n\n self.selection_dock.widget().setLayout( selection_layout )\n\n self.addDockWidget( Qt.LeftDockWidgetArea, self.selection_dock )\n\n self.setCentralWidget( self.centralWidget )", "def initCentralUic(self):\n self.initFileTableWidget()\n self.initViewerStack()\n self.splitter.setSizes([150, 850])", "def _init_ui(self):\n hlayout = QtWidgets.QHBoxLayout()\n\n hlayout.addWidget(QtWidgets.QLabel('Kies een normtraject:'))\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self._update_combobox()\n\n hlayout.addWidget(self.section_combobox)\n\n self.remove_button = QtWidgets.QPushButton('Verwijderen', clicked=self._del_flooddefence)\n hlayout.addWidget(self.remove_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)", "def __init__(self):\n super(GraphVisualizerState, self).__init__()\n\n self.done = False\n\n layout = QGridLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(layout)\n\n self._bg_container = QLabel()\n self._bg_container.resize(self._max_height, self._max_height)\n layout.addWidget(self._bg_container, 1, 0, 1, 4)\n\n self._bg = QPixmap(self._max_height, self._max_height)\n self._bg_container.setPixmap(self._bg)\n\n self._S1_pixmap = QPixmap()\n self._S2_pixmap = QPixmap()\n self._event_pixmap = QPixmap()\n\n self._distance = 1\n\n self._top_labels = []\n self._bot_labels = []\n for i in range(4):\n self._top_labels.append(MathTextLabel(self))\n layout.addWidget(\n self._top_labels[i], 0, i, alignment=Qt.AlignCenter)\n\n self._bot_labels.append(MathTextLabel(self))\n layout.addWidget(\n self._bot_labels[i], 2, i, alignment=Qt.AlignCenter)", "def __init__(self):\n\n super(tpSplitterLayout, self).__init__()\n\n 
self.setContentsMargins(40, 2, 40, 2)\n\n splitter = tpSplitter(shadow=False, color=(60, 60, 60))\n splitter.setFixedHeight(2)\n\n self.addWidget(splitter)", "def build(self):\n self.main_layout = MainLayout()\n self.main_layout.settings_panel.load_settings()\n return self.main_layout", "def initUi(self):\n self.pop = PopOrderWidget(self)\n\n layout = QVBoxLayout()\n\n area = QScrollArea()\n area.setWidgetResizable(True)\n area.setWidget(self.pop)\n\n btnOk = QPushButton(\"OK\")\n btnOk.clicked.connect(self.aceptar)\n btnCancel = QPushButton(\"Cancelar\")\n btnCancel.clicked.connect(self.reject)\n\n btnLayout = QHBoxLayout()\n btnLayout.addWidget(btnOk)\n btnLayout.addWidget(btnCancel)\n\n layout.addWidget(area)\n layout.addLayout(btnLayout)\n\n self.setLayout(layout)", "def _initialize(self):\n\n cancel_button = ttk.Button(\n master=self._frame,\n text=\"Cancel\",\n command=self._hide_confirmation_window\n )\n\n delete_button = ttk.Button(\n master=self._frame,\n text=\"Delete\",\n command=self._handle_delete\n )\n\n self._initilize_message()\n\n cancel_button.grid(row=1, column=0, padx=5, pady=5)\n delete_button.grid(row=1, column=1, padx=5, pady=5)", "def init_layout(self):\n super(AndroidListView, self).init_layout()\n d = self.declaration\n w = self.widget\n\n # Prepare adapter\n adapter = self.adapter = BridgedListAdapter()\n\n # I'm sure this will make someone upset haha\n adapter.setListView(w, adapter.getId())\n adapter.onRecycleView.connect(self.on_recycle_view)\n adapter.onVisibleCountChanged.connect(self.on_visible_count_changed)\n adapter.onScrollStateChanged.connect(self.on_scroll_state_changed)\n\n if d.items:\n self.set_items(d.items)\n\n w.setAdapter(adapter)\n if d.selected >= 0:\n self.set_selected(d.selected)", "def __init__(self, parent: QWidget):\n super().__init__(parent)\n DiagramFieldView.__diagram_field = self\n\n self.__list: List[DiagramView] = []\n self.__dialog: Dialog = None\n self.__diagram_layout: QVBoxLayout = QVBoxLayout()\n self.__button_layout: QHBoxLayout = QHBoxLayout()\n self.__start_button: StartButtonView = StartButtonView()\n self.__maximize_button: QPushButton = QPushButton()\n\n self.__diagram_group: QtWidgets.QGroupBox = QtWidgets.QGroupBox(self)\n self.__group_layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self.__diagram_group)\n self.__stretch_widget: QtWidgets.QWidget = QtWidgets.QWidget(self)\n self.__diagram_count: int = 0\n\n self.__start_button.start_signal.connect(self.__clear_diagrams)\n self.__maximize_button.clicked.connect(self.__maximize_on_click)\n ManagerModel.set_diagram_notifier(self)\n self.__init_ui()", "def _setup_ui(self):\n from functools import partial\n\n self.setStyleSheet(\n \"\"\"\n QLabel[labelField=\"true\"] {\n font-weight: bold;\n }\n \"\"\"\n )\n\n # The main layout\n self.main_layout = QtWidgets.QVBoxLayout(self)\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n\n # the form layout\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setLabelAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter\n )\n\n # store roles\n label_role = QtWidgets.QFormLayout.LabelRole\n field_role = QtWidgets.QFormLayout.FieldRole\n\n self.main_layout.addLayout(self.form_layout)\n\n i = -1\n\n # Reviewer\n i += 1\n reviewer_name_label = QtWidgets.QLabel(self)\n reviewer_name_label.setText(\"Reviewer\")\n self.form_layout.setWidget(i, label_role, reviewer_name_label)\n\n self.reviewer_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, 
self.reviewer_name_widget)\n\n # Task Name field\n i += 1\n task_name_label = QtWidgets.QLabel(self)\n task_name_label.setText(\"Task\")\n self.form_layout.setWidget(i, label_role, task_name_label)\n\n self.task_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.task_name_widget)\n\n # # Version Info field\n # from anima.ui.widgets.version import VersionDetailsWidget\n # self.latest_version_widget = VersionDetailsWidget(parent=self)\n # self.main_layout.insertWidget(0, self.latest_version_widget)\n\n # Review Type Field\n i += 1\n review_type_label = QtWidgets.QLabel(self)\n review_type_label.setText(\"Review Type\")\n self.form_layout.setWidget(i, label_role, review_type_label)\n\n self.review_type_widget = ReviewTypeWidget(self)\n self.review_type_widget.currentIndexChanged.connect(\n partial(self.review_type_changed_callback)\n )\n\n self.form_layout.setWidget(i, field_role, self.review_type_widget)\n\n # Timing Field\n i += 1\n effort_label = QtWidgets.QLabel(self)\n effort_label.setText(\"Timing\")\n self.form_layout.setWidget(i, label_role, effort_label)\n\n effort_layout = QtWidgets.QHBoxLayout()\n self.form_layout.setLayout(i, field_role, effort_layout)\n\n from anima.ui.widgets.timing import ScheduleTimingWidget\n from anima import defaults\n\n self.timing_widget = ScheduleTimingWidget(\n self, timing_resolution=defaults.timing_resolution\n )\n self.timing_widget.setEnabled(False)\n # set the default to 1 hour\n self.timing_widget.set_schedule_info(timing=1, unit=\"h\")\n effort_layout.addWidget(self.timing_widget)\n\n # Description Field\n i += 1\n description_label = QtWidgets.QLabel(self)\n description_label.setText(\"Description\")\n self.form_layout.setWidget(i, label_role, description_label)\n\n self.description_widget = QtWidgets.QTextEdit(self)\n self.form_layout.setWidget(i, field_role, self.description_widget)", "def create_layout( self ):\n\n # highlight all of our widgets so we can debug layouts.\n # XXX: debugging support.\n self.setStyleSheet( \"border: 1px solid black\" )\n\n editing_layout = QGridLayout()\n editing_layout.addWidget( QLabel( \"Art Record ID:\" ),\n 0, 0 )\n editing_layout.addWidget( QLabel( \"{:d}\".format( self.record[\"id\"] ) ),\n 0, 1 )\n\n # 1st\n editing_layout.addWidget( self.artTypeComboLabel,\n 1, 0 )\n editing_layout.addWidget( self.artTypeComboBox,\n 1, 1 )\n\n # 2nd\n editing_layout.addWidget( self.artSizeComboLabel,\n 2, 0 )\n editing_layout.addWidget( self.artSizeComboBox,\n 2, 1 )\n\n # 3rd\n editing_layout.addWidget( self.artQualityComboLabel,\n 3, 0 )\n editing_layout.addWidget( self.artQualityComboBox,\n 3, 1 )\n\n # 4th\n editing_layout.addWidget( self.artProcessingStateComboLabel,\n 4, 0 )\n editing_layout.addWidget( self.artProcessingStateComboBox,\n 4, 1 )\n\n # 5th\n editing_layout.addWidget( self.artArtistsListLabel,\n 0, 3 )\n editing_layout.addWidget( self.artArtistsListView,\n 1, 3,\n 4, 1 )\n\n # 6th\n editing_layout.addWidget( self.artAssociatesListLabel,\n 0, 5 )\n editing_layout.addWidget( self.artAssociatesListView,\n 1, 5,\n 4, 1 )\n\n # 7th\n editing_layout.addWidget( self.artVandalsListLabel,\n 0, 7 )\n editing_layout.addWidget( self.artVandalsListView,\n 1, 7,\n 4, 1 )\n\n # 8th\n editing_layout.addWidget( self.artTagsLabel,\n 6, 0 )\n editing_layout.addWidget( self.artTagsLineEdit,\n 6, 1,\n 1, 7 )\n\n # 9th\n editing_layout.addWidget( self.artDateLabel,\n 5, 0 )\n editing_layout.addWidget( self.artDateLineEdit,\n 5, 1 )\n\n # vertical layout of the photo preview and everything 
else.\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins( 0, 0, 0, 0 )\n main_layout.setSpacing( 0 )\n\n main_layout.addWidget( self.previewArea )\n main_layout.addLayout( editing_layout )\n main_layout.setStretchFactor( self.previewArea, 1 )\n\n self.centralWidget = QGroupBox()\n self.centralWidget.setLayout( main_layout )\n\n self.setCentralWidget( self.centralWidget )", "def drawLayout(self):\r\n self.drawBorder()\r\n self.drawAxes()\r\n self.drawLabels()", "def initGUI(self):\n\n\t\t# Set main frame's location \n\t\tself.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\t# Set path entry frame and its location\n\t\tself.entryFrame = Frame(self, relief = RAISED, borderwidth = 1)\n\t\tself.entryFrame.pack(fill = BOTH, expand = False)\n\t\t# Make label\n\t\tif self.message:\n\t\t\tmessageLabel = Label(self.entryFrame, text = self.message, font=(\"Bradley\", 10))\n\t\t\tmessageLabel.pack(anchor=W, padx=0, pady=0)\n\n\t\t# Set path entry and its location\n\t\tself.filePathEntry = Entry(self.entryFrame, bd = 4, width = 50)\n\t\tself.filePathEntry.pack(side = LEFT, padx=2, pady=1)", "def _setupUi(self):\n self.setupUi(self)\n self.twTree.setStyleSheet(\"background-color: rgb(200, 200, 200)\")", "def __init__(self):\n super(GraphVisualizerPoint, self).__init__()\n self._layout = QGridLayout()\n self.setLayout(self._layout)\n\n self._point_draw = GraphVisualizerPointDraw()\n self._layout.addWidget(self._point_draw, 0, 0)\n\n self._label = MathTextLabel()\n self._layout.addWidget(self._label, 0, 1)", "def _setupUi(self, widget):\n \n widget._setup_vertical_layout()\n widget._setup_horizontal_layout()\n widget._setup_vertical_layout()\n for field in self._fields:\n if field=='channel_idx':\n widget._exit_layout()\n widget._setup_vertical_layout()\n choices = None\n if hasattr(self, field + 's'):\n choices = self.__getattribute__(field + 's')\n widget._setup_gui_element(field, choices)\n widget._exit_layout()\n widget._exit_layout()\n self._setup_fetch_buttons(widget)", "def setup_ui(self):\n self.setLayout(self.main_layout)\n\n self.pv_layout.addWidget(self.pv_protocol_cmb)\n self.pv_layout.addWidget(self.pv_name_line_edt)\n self.pv_layout.addWidget(self.pv_connect_push_btn)\n QTimer.singleShot(0, self.pv_name_line_edt.setFocus)\n\n self.curve_settings_tab.setLayout(self.curves_tab_layout)\n self.chart_settings_tab.setLayout(self.chart_settings_layout)\n self.setup_chart_settings_layout()\n\n self.tab_panel.addTab(self.curve_settings_tab, \"Curves\")\n self.tab_panel.addTab(self.chart_settings_tab, \"Chart\")\n self.tab_panel.hide()\n\n self.crosshair_settings_layout.addWidget(self.enable_crosshair_chk)\n self.crosshair_settings_layout.addWidget(self.cross_hair_coord_lbl)\n\n self.chart_control_layout.addWidget(self.auto_scale_btn)\n self.chart_control_layout.addWidget(self.view_all_btn)\n self.chart_control_layout.addWidget(self.reset_chart_btn)\n self.chart_control_layout.addWidget(self.pause_chart_btn)\n self.chart_control_layout.addLayout(self.crosshair_settings_layout)\n self.chart_control_layout.addWidget(self.import_data_btn)\n self.chart_control_layout.addWidget(self.export_data_btn)\n\n self.chart_control_layout.setStretch(4, 15)\n self.chart_control_layout.insertSpacing(5, 350)\n\n self.chart_layout.addWidget(self.chart)\n self.chart_layout.addLayout(self.chart_control_layout)\n\n self.chart_panel.setLayout(self.chart_layout)\n\n self.splitter.addWidget(self.chart_panel)\n self.splitter.addWidget(self.tab_panel)\n self.splitter.setStretchFactor(0, 0)\n 
self.splitter.setStretchFactor(1, 1)\n\n self.charting_layout.addWidget(self.splitter)\n\n self.body_layout.addLayout(self.pv_layout)\n self.body_layout.addLayout(self.charting_layout)\n self.body_layout.addLayout(self.chart_control_layout)\n self.main_layout.addLayout(self.body_layout)\n\n self.enable_chart_control_buttons(False)", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def build(self):\n self.title = 'Processamento Digital de Imagens'\n self.main_layout = MainLayout()\n return self.main_layout", "def finalize_layout(self):\n self.mbox.addLayout(self.hbtnbox)\n self.mbox.addLayout(self.hbox)\n self.setLayout(self.mbox)", "def init_layout(self):\n super(QtToolButton, self).init_layout()\n for child in self.children():\n if isinstance(child, QtMenu):\n self.widget.setMenu(child.widget)\n break", "def _initUI(self):\n\n self.setWindowTitle(\"HB Havens: onzekerheden\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n hlayout = QtWidgets.QHBoxLayout()\n\n vlayout = QtWidgets.QVBoxLayout()\n\n # Radio buttons\n #----------------------------------------------------------------\n self.button1 = QtWidgets.QRadioButton('Onzekerheden uit steunpunt overnemen')\n self.button2 = QtWidgets.QRadioButton('Onzekerheden uit havenmodel overnemen')\n self.button3 = QtWidgets.QRadioButton('Combinatie van bovenstaande gebruiken')\n\n vlayout.addWidget(self.button1)\n vlayout.addWidget(self.button2)\n vlayout.addWidget(self.button3)\n vlayout.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding))\n\n hlayout.addLayout(vlayout)\n\n vlayout = QtWidgets.QVBoxLayout()\n # Model uncertainties support location\n #----------------------------------------------------------------\n label = QtWidgets.QLabel()\n label.setText('Modelonzekerheden in steunpunt:')\n vlayout.addWidget(label)\n\n self.supportloc_unc_table = widgets.DataFrameWidget(self.supportloc_unc)\n self.supportloc_unc_table.fixed_fit_to_content(90)\n vlayout.addWidget(self.supportloc_unc_table)\n\n label = QtWidgets.QLabel()\n label.setText('Modelonzekerheden in havenmodel (zelf invullen):')\n vlayout.addWidget(label)\n\n self.harbor_unc_table = widgets.DataFrameWidget(self.harbor_unc, editing_enabled=True)\n self.harbor_unc_table.fixed_fit_to_content(90)\n vlayout.addWidget(self.harbor_unc_table)\n\n label = QtWidgets.QLabel()\n label.setText('Gecombineerde modelonzekerheid (berekenen):')\n vlayout.addWidget(label)\n\n calc_button = QtWidgets.QPushButton('Berekenen')\n calc_button.clicked.connect(self._calc_combined_uncertainty)\n vlayout.addWidget(calc_button)\n\n self.combined_unc_table = widgets.DataFrameWidget(self.combined_unc)\n self.combined_unc_table.fixed_fit_to_content(90)\n vlayout.addWidget(self.combined_unc_table)\n\n for table in [self.supportloc_unc_table, self.harbor_unc_table, self.combined_unc_table]:\n table.setShowGrid(True)\n table.setAlternatingRowColors(False)\n\n hlayout.addLayout(vlayout)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n vlayout.addWidget(line)\n\n\n # Buttons\n #----------------------------------------------------------------\n hbox = QtWidgets.QHBoxLayout()\n hbox.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum))\n # Add 
ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n hbox.addWidget(self.closebutton)\n # Add ok/close\n self.savebutton = QtWidgets.QPushButton('Opslaan')\n self.savebutton.clicked.connect(self._save)\n hbox.addWidget(self.savebutton)\n\n vlayout.addLayout(hbox)\n\n # Add layout to widget\n self.setLayout(vlayout)\n self.layout().setSpacing(10)", "def _initUI(self):\n\n vlayout = QtWidgets.QVBoxLayout()\n\n # Description\n #----------------------------------------------------------------\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel()\n label.setText('Locatie:')\n label.setFixedWidth(100)\n hlayout.addWidget(label)\n\n label = QtWidgets.QLabel()\n label.setText(self.name)\n hlayout.addWidget(label)\n hlayout.setSpacing(10)\n\n vlayout.addLayout(hlayout)\n\n # Exportnaam\n #----------------------------------------------------------------\n self.exportname = ParameterInputLine(label='Exportnaam:', labelwidth=100)\n self.exportname.LineEdit.setMinimumWidth(200)\n vlayout.addLayout(self.exportname.layout)\n\n # Exportdatabase\n #----------------------------------------------------------------\n self.exportpath = ExtendedLineEdit(label='SQLite-database:', labelwidth=100, browsebutton=True)\n self.exportpath.BrowseButton.clicked.connect(self._get_path_database)\n vlayout.addLayout(self.exportpath.layout)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n # Buttons\n #----------------------------------------------------------------\n hbox = QtWidgets.QHBoxLayout()\n hbox.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum))\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n hbox.addWidget(self.closebutton)\n # Add ok/close\n self.savebutton = QtWidgets.QPushButton('Opslaan')\n self.savebutton.clicked.connect(self._save)\n hbox.addWidget(self.savebutton)\n\n vlayout.addLayout(hbox)\n\n # Add layout to widget\n self.setLayout(vlayout)", "def _setupTab1(self):\r\n horizontalLayout = QHBoxLayout() # main layout\r\n\r\n self.sideMenu = SideMenu1()\r\n horizontalLayout.addLayout(self.sideMenu, 15)\r\n self.plotAndTable = PlotAndTable(\"Plot\", \"Table\")\r\n horizontalLayout.addWidget(self.plotAndTable, 85)\r\n\r\n self.setLayout(horizontalLayout)", "def init_file_explorer(self):\r\n # initiate both sub layouts\r\n self.init_explorer_buttons()\r\n self.init_file_tree()\r\n\r\n # add them to the file explorer layout\r\n self.layout_file_explorer = QtWidgets.QGridLayout()\r\n self.layout_file_explorer.addLayout(self.layout_explorer_btn,0,0)\r\n self.layout_file_explorer.addLayout(self.layout_explorer_tree,1,0)", "def setupUi(self):\n self.setLayout(self.mainLayout)\n self.table.setModel(self.model)\n header = self.table.header()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\n self.model.appendRow(self.jobRow)\n self.mainLayout.addWidget(self.table)\n self.buttonLayout.addWidget(self.downButton)\n self.buttonLayout.addWidget(self.deleteLayerButton)\n self.buttonLayout.addWidget(self.addLayerButton)\n self.buttonLayout.addWidget(self.upButton)\n self.mainLayout.addLayout(self.buttonLayout)\n self.table.expandAll()\n self.initLayers()", "def setup_ui(self):\n\t\t\n\t\t# CREATION DU LAYOUT\n\t\tself.layout = QtWidgets.QHBoxLayout(self) #le layout prend la fenetre principal en argument donc 
notre self\n\t\t\n\t\t# CREATION DES WIDGETS\n\t\tself.cbb_devisesFrom = QtWidgets.QComboBox() #combobox (liste deroulante) pour choisir la devise From\n\t\tself.spn_montant = QtWidgets.QSpinBox() #spinbox (zone affichage) du montant a convertir\n\t\tself.cbb_devisesTo = QtWidgets.QComboBox() #cbb pour choisir la devise To\n\t\tself.spn_montantConverti = QtWidgets.QSpinBox() #spn du montant converti\n\t\tself.btn_inverser = QtWidgets.QPushButton(\"Inverser devises\") #bouton pour inverser les devises\n\t\t\n\t\t# AJOUT AU LAYOUT\n\t\tself.layout.addWidget(self.cbb_devisesFrom)\n\t\tself.layout.addWidget(self.spn_montant)\n\t\tself.layout.addWidget(self.cbb_devisesTo)\n\t\tself.layout.addWidget(self.spn_montantConverti)\n\t\tself.layout.addWidget(self.btn_inverser)", "def init_frame(self):\n self._exit_button.grid(row=0, column=2, sticky=tk.W)\n self._clear_button.grid(row=0, column=0, sticky=tk.E)\n # self._copy_button.grid(row=0, column=1, sticky=(tk.W, tk.W))\n return None", "def layoutDefault(self): # real signature unknown; restored from __doc__\n pass", "def __init__( self, window_size=QSize( DEFAULT_H_SIZE, DEFAULT_V_SIZE ) ):\n super().__init__()\n\n self.centralWidget = None\n self.window_size = window_size\n\n self.create_models()\n self.create_widgets()\n self.create_layout()\n self.create_menus()\n self.set_state()", "def init_gui(self):\n # Choose a layout.\n main_vb = QtGui.QVBoxLayout(self)\n\n # Add a list or tree view.\n self.list_view = QtGui.QListWidget()\n\n # Add the buttons.\n load_btn = QtGui.QPushButton('Load Selected')\n cancel_btn = QtGui.QPushButton('Cancel')\n load_btn.clicked.connect(self.update_list_view)\n cancel_btn.clicked.connect(self.close)\n\n # Connect the list/tree view with a method appropriate for user interaction.\n self.list_view.currentItemChanged['QListWidgetItem*', 'QListWidgetItem*'].connect(self.set_current_name)\n self.list_view.itemChanged['QListWidgetItem*'].connect(self.change_name)\n\n # Add the widgets to the layout.\n btn_hb = QtGui.QHBoxLayout()\n btn_hb.addWidget(load_btn)\n btn_hb.addWidget(cancel_btn)\n main_vb.addWidget(self.list_view)\n main_vb.addLayout(btn_hb)\n\n # Show the GUI.\n self.setGeometry(300, 300, 450, 300)\n self.setWindowTitle('Hello World')\n img_icon = 'C:/Users/caj150430/code/so_much_win.png'\n self.setWindowIcon(QtGui.QIcon(img_icon))\n self.show()", "def initializeUI(self):\n self.setGeometry(100, 100, 300, 200)\n self.setWindowTitle('Event Handling Example')\n\n self.show()", "def init_grid(self):\n self.headlabel.collection = self.books\n self.headlabel.set_label_text()\n self.warnlabel.set_label_text('Welcome to the Reading Tracker 2.0!')\n self.building_grid(None, 'Author')", "def __init__(self):\n\n self.boxes = [\n urwid.AttrMap(urwid.Text('QMap'), 'menu header'),\n urwid.Divider('=')\n ]\n\n self.widget = urwid.Padding(None, align='center')\n self.update()\n\n BaseTimedWidgetWrap.__init__(self, self.widget)", "def _initLayout(self):\n\t\tpanel = wx.Panel(self)\n\n\t\t# Create a font object\n\t\tfont = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)\n\t\tfont.SetPointSize(9)\n\n\t\t# Vertical sizer will contain multiple horizontal sizers as rows\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\n\n\t\t# First Row: The text we need to categorize\n\t\thbox1 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tst1 = wx.StaticText(panel, label='Text')\n\t\tst1.SetFont(font)\n\t\thbox1.Add(st1, flag=wx.RIGHT, border=8)\n\t\ttc = wx.TextCtrl(panel)\n\t\tself._textControl = tc\n\t\thbox1.Add(tc, proportion=1)\n\t\tvbox.Add(hbox1, 
flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)\n\t\t\n\t\t# The existing class assignment\n\t\thboxExisting = wx.BoxSizer(wx.HORIZONTAL)\n\t\tlabel = wx.StaticText(panel, label='Current')\n\t\tlabel.SetFont(font)\n\t\thboxExisting.Add(label, flag=wx.RIGHT, border=8)\n\t\t\n\t\tlabel = wx.StaticText(panel, label=\"(unassigned)\")\n\t\tself._existingClass = \"(unassigned)\"\n\t\tself._existingClassLabel = label\n\t\tlabel.SetFont(font)\n\t\thboxExisting.Add(label, flag=wx.RIGHT, border=8)\n\t\tvbox.Add(hboxExisting, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)\n\t\t\n\t\t# Button to keep the current class assignment\n\t\tbutton = wx.Button(panel, label=\"KEEP\", name=\"*KEEP\")\n\t\thboxExisting.Add(button, flag=wx.RIGHT)\n\n\t\t# Button to skip this record, i.e., move to next record without writing this one out\n\t\tbutton = wx.Button(panel, label=\"DELETE\", name=\"*KILL\")\n\t\thboxExisting.Add(button, flag=wx.RIGHT)\n\n\t\tvbox.Add((-1, 10))\n\n\t\t# Buttons for classes that can be assigned to the text\n\t\thbox2 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tst2 = wx.StaticText(panel, label='Reassign to...')\n\t\tst2.SetFont(font)\n\t\thbox2.Add(st2)\n\t\tvbox.Add(hbox2, flag=wx.LEFT | wx.TOP, border=10)\n\n\t\tvbox.Add((-1, 10))\n\n\t\t# Grid of buttons, one for each class label\n\t\thbox3 = wx.GridSizer(8,5,50)\n\t\n\t\tfor label in sorted(labels.LABELS):\n\t\t\tbutton = MyButton(panel, label=label, size=(70, 30), name=label)\n\t\t\thbox3.Add(button)\n\n\t\tvbox.Add(hbox3, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, \n\t\t\tborder=10)\n\t\t\n\t\tpanel.SetSizer(vbox)", "def _init_ui(self):\r\n\t\t\r\n\t\tself.input_frame = Input(self)\r\n\t\tself.input_frame.pack()\r\n\t\t\r\n\t\tbutton_ok = Button(self, text = \"Ping\", command = self._go)\r\n\t\tbutton_ok.pack()\r\n\t\t\r\n\t\tself.result_frame = Result(self)\r\n\t\tself.result_frame.pack()", "def LayoutExtension_init():\n return _libsbml.LayoutExtension_init()", "def _init_widgets(self):\n # Container frame\n self.container = Frame(self)\n # Workspace block\n self.main_container = Frame(self.container)\n\n self.text = Label(self.main_container)\n self.text.config(text=\"PyEventLogViewer is a timeline-based tool used to simplify the way\\n\"\n \"a user can view and explore Windows EVTX files. 
To begin using this\\n\"\n \"software you must do the following:\\n\\n\"\n \"\\t1) File → New → 'Create a new project'\\n\"\n \"\\t2) Tools → Import Log File → 'Open a specified EVTX file'\\n\"\n \"\\t3) Explore the presented timeline.\\n\"\n \"\\t4) Double-click a specific record to view the XML data for that record.\\n\"\n \"\\t5) File → Export → 'Generate a CSV or HTML file for timeline presentation.'\\n\\n\"\n \"At this point, only System and Security EVTX files are parsable with this software.\")\n\n self.show_var = BooleanVar()\n self.show_check = Checkbutton(self.main_container, text=\"Don't Show on Startup\", variable=self.show_var)\n\n # Action block\n self.button_ok = Button(self.main_container, text='Ok', underline=0, command=self.callback_close)\n self.bind('<Return>', self.callback_close)\n self.bind('<Escape>', self.callback_close)\n\n # Focus on window - required for binds to work.\n self.focus_set()", "def _initialize_widgets(self):\n self.outer_board = [[Frame(self.root, bd = self.FRAME_BORDER_WIDTH, \n relief = self.FRAME_RELIEF) \n for _ in range(self.BOARD_DIM)] \n for _ in range(self.BOARD_DIM)]\n self.inner_boards = [[self._generate_inner_board(r, c) \n for c in range(self.BOARD_DIM)]\n for r in range(self.BOARD_DIM)]", "def create_widgets(self):\r\n self.create_containers()\r\n self.setup_containers()\r\n self.create_panel_widgets()\r\n self.setup_scrollbar()", "def _initUI(self) -> None:\n self._createActions()\n self._addActionsToMoveButtons()\n self._createToolBar()\n self._createStatusBar()\n self._createMainContextMenu()", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def init_ui(self):\n raise NotImplementedError" ]
[ "0.84849787", "0.8151154", "0.75447416", "0.7424792", "0.7365498", "0.72760487", "0.7237962", "0.71668446", "0.7161154", "0.71545607", "0.7138541", "0.71254814", "0.70788753", "0.70205253", "0.69035393", "0.69017667", "0.6865221", "0.6782825", "0.6781948", "0.67713374", "0.67560714", "0.67381054", "0.6734642", "0.66995645", "0.6697201", "0.66955227", "0.6689905", "0.6689905", "0.6686576", "0.6674349", "0.6662588", "0.6623356", "0.66054887", "0.6574922", "0.65731966", "0.6550777", "0.65352464", "0.6528146", "0.65256214", "0.6522346", "0.65206534", "0.649673", "0.6491084", "0.6489018", "0.6480665", "0.6467469", "0.64557743", "0.64522195", "0.6393242", "0.63872313", "0.6368257", "0.6363189", "0.63543135", "0.63398224", "0.6328026", "0.6316872", "0.6312217", "0.6304187", "0.62871367", "0.62862027", "0.628053", "0.6279505", "0.6263515", "0.62591213", "0.62535095", "0.62481326", "0.62434876", "0.62369597", "0.6229295", "0.62148905", "0.62096655", "0.6198233", "0.6194032", "0.61938536", "0.6160492", "0.6156947", "0.6155593", "0.61367357", "0.6123482", "0.61171645", "0.6108659", "0.6103645", "0.609685", "0.60914713", "0.60837275", "0.6069968", "0.6039145", "0.6032074", "0.6029263", "0.6023467", "0.6023285", "0.6021554", "0.6014682", "0.6014193", "0.6013842", "0.6012308", "0.6010271", "0.6009568", "0.600835", "0.60027575" ]
0.6108049
81
Locate the QAction object which logically follows the child. If the given child is last in the list of children, then the parent object will be invoked to find the QAction which follows this action group.
def find_next_action(self, child):
    found = False
    for dchild in self.children():
        if found and isinstance(dchild, QtAction):
            return dchild.widget
        else:
            found = child is dchild
    parent = self.parent()
    if parent is not None:
        return parent.find_next_action(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def child_added(self, child):\n super(QtActionGroup, self).child_added(child)\n if isinstance(child, QtAction):\n self.widget.addAction(child.widget)\n parent = self.parent()\n if parent is not None:\n before = self.find_next_action(child)\n parent.widget.insertAction(before, child.widget)", "def navigate_to_subpath(self, child):\n raise NotImplementedError()", "def connectToChild(self):\n return _libsbml.Transition_connectToChild(self)", "def move_child_after(self, descendant, after, **kwargs):\n return self.sequence_manager.move_child_after(\n descendant, self, after, **kwargs)", "def locateChild(ctx, segments):", "def parent(self, child:QtCore.QModelIndex) -> QtCore.QModelIndex:", "def connectToChild(self):\n return _libsbml.Group_connectToChild(self)", "def child_removed(self, child):\n super(QtActionGroup, self).child_removed(child)\n if isinstance(child, QtAction) and child.widget is not None:\n self.widget.removeAction(child.widget)\n parent = self.parent()\n if parent is not None:\n parent.widget.removeAction(child.widget)", "def navigate_to_subpath(self, child):\n if not isinstance(child, six.string_types):\n raise TypeError(u'Expected child to be a string, was: {}'.format(child))\n if self.field:\n raise AssertionError(u'Currently at a field, cannot go to child: {}'.format(self))\n return Location(self.query_path + (child,))", "def connectToChild(self):\n return _libsbml.FbcOr_connectToChild(self)", "def findAction(self, actionId): #$NON-NLS-1$\r", "def connectToChild(self):\n return _libsbml.Objective_connectToChild(self)", "def indexOfChild(self, child):\n self.__initChild()\n return self.__child.index(child)", "def select(self):\n current_node = self\n while current_node.is_expanded:\n best_action = current_node.best_action()\n current_node = current_node.get_child(best_action)\n return current_node", "def addchild(self, child, index=None, move=True):\n owners = child.owners()\n if len(owners) > 0 and not move:\n return # keep the original owner\n\n if self.guid in owners:\n return # already add\n\n # add to this group (avoid 'child' being remove from project when no one refer to it)\n pbxhelper.pbxobj_add_pbxlist_value(self, u'pbx_children', child, \\\n self.is_valid_child, index=index)\n\n for owner in owners.values():\n child.remove_referrer(owner) # remove from other groups", "def navigate_to_subpath(self, child):\n if not isinstance(child, six.string_types):\n raise TypeError(u'Expected child to be a string, was: {}'.format(child))\n if self.field:\n raise AssertionError(u'Currently at a field, cannot go to child: {}'.format(self))\n\n edge_direction, edge_name = get_edge_direction_and_name(child)\n new_fold_path = self.fold_path + _create_fold_path_component(edge_direction, edge_name)\n return FoldScopeLocation(self.base_location, new_fold_path)", "def get_child(self, action):\n if action not in self.children:\n #self.game.set_state(self.state)\n #obs, reward, done, _ = self.game.step(action)\n next_state, next_player = self.game.getNextState(self.state,self.current_player,action, previous_move = self.action)\n reward = 0\n game_ended = self.game.getGameEnded(next_state, -1)\n done = (game_ended != 0)\n if self.done == True:\n done = True\n if game_ended == 1 or game_ended == -1:\n reward = game_ended\n\n obs = next_state[:9]\n self.children[action] = Node(\n state=next_state,\n action=action,\n parent=self,\n reward=reward,\n done=done,\n obs=obs,\n mcts=self.mcts,\n player= next_player)\n return self.children[action]", "def connectToChild(self):\n return 
_libsbml.FbcAnd_connectToChild(self)", "def connectToChild(self):\n return _libsbml.MultiSpeciesPlugin_connectToChild(self)", "def connectToChild(self):\n return _libsbml.GroupsModelPlugin_connectToChild(self)", "def child_added(self, child):\n super(QtToolButton, self).child_added(child)\n if isinstance(child, QtMenu):\n self.widget.setMenu(child.widget)", "def insert_child_after(self, descendant, after, **kwargs):\n return self.sequence_manager.insert_child_after(\n descendant, self, after, **kwargs)", "def find_parent(self):\n pass", "def menu_find_next(self, event=None):\n self.parentPanel.onFind(event)", "def getChild(self, child, request):\n request.postpath.insert(0, request.prepath.pop())\n return self.leaf", "def _find_parent(child: ModuleInfo, potentialParents: List[ModuleInfo])\\\n -> ModuleInfo:\n\n return next(\n (p for p in potentialParents if child.get_name() in p.get_children()),\n None\n )", "def connectToChild(self):\n return _libsbml.SpeciesFeature_connectToChild(self)", "def connectToChild(self):\n return _libsbml.QualModelPlugin_connectToChild(self)", "def _parentDirectoryActionTriggeredSlot(self):\r\n\r\n self._controller.model.activeIndex = self._controller.model.activeIndex.parent()", "def on_FollowSuperNode_clicked(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n print(\"Select following Other SuperNode. Best Wish\")\n self.select_actor = \"FollowSuperNode\"", "def get_index(self, child):\n for _index, item in enumerate(self.children):\n if item == child:\n return _index\n\n return -1", "def find_next_action(self, obs, agents, i):\n return None", "def connectToParent(self, *args):\n return _libsbml.FbcReactionPlugin_connectToParent(self, *args)", "def locateChild(req, segments):", "def _open_child(self, parent_g, child, opened):\n child_g = parent_g + 1\n existing = opened.get(child)\n if existing is None or existing.g > child_g:\n opened[child] = Node(child_g, child)", "def move_child_before(self, descendant, before, **kwargs):\n return self.sequence_manager.move_child_before(\n descendant, self, before, **kwargs)", "def connectToChild(self):\n return _libsbml.FbcModelPlugin_connectToChild(self)", "def connectToChild(self):\n return _libsbml.CompSBasePlugin_connectToChild(self)", "def find(self):\n if self.get_parent() == self:\n return self\n else:\n self.__parent = self.__parent.find()\n return self.__parent.find()", "def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)", "def get_parent_index(self, child):\n return (child-1)//2", "def do_clickchild(self, str_arg):\n # printLog(self.threadName + \"[running 'clickchild %s']\" % str_arg)\n # arg validation\n arg = validateString(str_arg)\n if iDevice.dump_view:\n self.__dumpview()\n try:\n # to avoid ' ' two spaces case\n # suppose string like: id/button1 (5,2,3,3,3) (0,50)\n # i = arg.index(' ')\n # ids = arg[0:i]\n # arg = arg[i + 1:].strip()\n # seqs = arg[1:-1].split(',')\n arg_list = arg.split(' ')\n\n if len(arg_list) == 2:\n printLog(self.threadName + 'do_clickChild: using default offset.')\n node_id, seqs = arg_list\n self.__clickChildView(node_id, seqs[1:-1].split(','))\n elif len(arg_list) == 3:\n # node_id, seqs, offset = arg_list\n # self.__clickChildView(node_id, seqs[1:-1].split(','), self.__getPointXY(offset.strip()))\n raise ValueError(\"using AVC will NO LONGER require including the offset parameter.\")\n else:\n raise ValueError('missing argument.')\n except ValueError:\n printLog(self.threadName + 'do_clickChild: 
click failed', logging.ERROR)\n traceback.print_exc()\n self.resultFlag = False\n time.sleep(1)\n # finally:\n # printLog(self.threadName + \"[status=%s]\" % self.resultFlag)", "def _get_parent(self, child_ix):\n if child_ix == 0:\n return None\n t = 1 if child_ix & 1 else 2\n return (child_ix - t) / 2", "def expand_child(self, move):\n is_leaf, child_board = self.simulate_move(move)\n child = MCTSNode(board=child_board, score=random.random(), parent=self, is_leaf=is_leaf) #CNN.predict(simulated_board) instead of 0\n self.children.append(child)\n return child", "def set_parent(self, child, parent):\n parents = cmds.listConnections(\"%s.parent\" % child, plugs=True, source=True)\n if parents:\n # there is only one parent at a time\n cmds.disconnectAttr(\"%s.parent\" % child, \"%s\" % parents[0])\n if parent:\n cmds.connectAttr(\"%s.parent\" % child, \"%s.children\" % parent, force=True, nextAvailable=True)", "def get_action(self, index):\n if not index.isValid():\n return self.menuAction()\n parents = self._get_parent_indizes(index)\n menu = self\n for i in reversed(parents):\n action = menu.actions()[i.row()]\n menu = action.menu()\n try:\n return menu.actions()[index.row()]\n except IndexError:\n return None", "def getChild(self, *args):\n return _libsbml.ASTBasePlugin_getChild(self, *args)", "def after(self, p):\n self._validate(p)\n # if there exists p's right child, successor is left most position\n # in p's right subtree\n if self.right(p) is not None:\n walk = self.right(p)\n while self.left(walk)is not None:\n walk = left(walk)\n return walk\n # successor is the parent of the \"right turn\" position \n # when going upward\n else:\n walk = p\n above = self.parent(walk)\n while above is not None and walk==self.right(above):\n walk = above\n above = self.parent(walk)\n return above", "def connectToChild(self):\n return _libsbml.MultiSpeciesType_connectToChild(self)", "def calculate_best_action(self):\n if isinstance(self.action, Action.DoNothingAction):\n return\n if len(self.children):\n bestChild = self.get_best_child()\n idx = self.children.index(bestChild)\n self.bestAction = self.availableActions[idx]", "def remove_child(self, child: \"Node\") -> \"Node\":\n act = None\n for action, node in self.children.items():\n if node == child:\n child._parent = None\n act = action\n if act:\n del self.children[act]\n else:\n raise ValueError(\"The node does not have the given child node\")\n return self", "def getIndex(self, child):\n \n if child in self._children:\n return self._children.index(child)\n else:\n return -1", "def find_parent_of(self, *args):\n return _ida_hexrays.citem_t_find_parent_of(self, *args)", "def addChild(self, child):\n #assert child not in self.children\n #if child not in self.children:\n child.parents.append(self)\n self.children.append(child)", "def insertChild(self, *args):\n return _libsbml.ASTBasePlugin_insertChild(self, *args)", "def fm_get_child(self, idx):\n return self._relation_lst[self.CHILD][idx]", "def get_child_index(self, child: \"AbstractNode\") -> int:\n return self._children.index(child)", "def mutate(self, child):\n return child", "def connectToParent(self, *args):\n return _libsbml.MultiASTPlugin_connectToParent(self, *args)", "def find_child(self, data):\n for c in self.traverse():\n if c == self: continue\n if c.data == data: return c\n return None", "def menu_go_to_line(self, event=None):\n if self.app.children:\n self.app.childActive.go_to_line()", "def add_child(self, action: Action) -> \"Node\":\n child = 
Node(self._state.execute_action(action))\n if action in self._untried_edges:\n self._untried_edges.remove(action)\n self.children[action] = child\n child._parent = self\n return child", "def add_relatives(self, child2parent, idx2word):\n for child, parent in child2parent.items():\n if parent not in (0, -1):\n parent_word = idx2word[parent]\n parent_word.add_child(child)\n child.parent = parent_word", "def select(self):\n current = self.root\n actions = []\n while not current.is_leaf:\n # print(\"Selection... considering between:\")\n # print([\"{} ({:.2f})\".format(n, n.ucb) for n in current.visited_children])\n # print(\"Node's stats say that best child is {} ({})\".format(current.best_child, current.best_child.ucb))\n current = current.best_child\n actions.append(current.previous_action)\n # print(\"Selected\", current)\n # print()\n\n for a in actions:\n self.sim.play_action(a)\n # print(\"--------------------------------------\")\n return current", "def get_child_by(self, selector):\r\n for child in self.get_children():\r\n if selector(child):\r\n return child\r\n return None", "def test_menuaction_register_methods_with_parent(self) -> None:\n with self.assertWarns(RemovedInReviewBoard70Warning,\n self.deprecation_message):\n foo_action = FooAction()\n bar_action = BarAction('action-1', [foo_action])\n toplevel_action = TopLevelMenuAction()\n\n toplevel_action.register()\n\n bar_action.register(toplevel_action)\n\n self.assertEqual(actions_registry.get('action_id', 'foo-action'),\n foo_action)\n self.assertEqual(actions_registry.get('action_id', 'bar-action-1'),\n bar_action)\n self.assertEqual(toplevel_action.child_actions, [bar_action])\n self.assertEqual(bar_action.parent_action, toplevel_action)\n self.assertEqual(foo_action.parent_action, bar_action)\n self.assertEqual(bar_action.child_actions, [foo_action])\n\n bar_action.unregister()\n\n self.assertIsNone(actions_registry.get('action_id', 'foo-action'))\n self.assertIsNone(actions_registry.get('action_id', 'bar-action-1'))\n self.assertIsNone(foo_action.parent_action)\n self.assertEqual(bar_action.child_actions, [])\n self.assertEqual(toplevel_action.child_actions, [])\n self.assertIsNone(bar_action.parent_action)", "def connectToParent(self, *args):\n return _libsbml.ASTBasePlugin_connectToParent(self, *args)", "def get_parent(child: State, states: [State]) -> State:\n parents = [state for state in states if is_state_a_child(state)]\n if not parents:\n return None\n parents.sort(key = lambda st: st.x, reverse=True)\n return parents[0]", "def connectToChild(self):\n return _libsbml.CompModelPlugin_connectToChild(self)", "def _do_request_insert_child(self, __button, **kwargs): # pylint: disable=unused-argument\n return self._do_request_insert(sibling=False)", "def connectToChild(self):\n return _libsbml.CompSBMLDocumentPlugin_connectToChild(self)", "def test_action_register_methods_with_parent(self) -> None:\n with self.assertWarns(RemovedInReviewBoard70Warning,\n self.deprecation_message):\n bar_action = BarAction('action-1')\n foo_action = FooAction()\n\n bar_action.register()\n foo_action.register(bar_action)\n\n self.assertEqual(actions_registry.get('action_id', 'foo-action'),\n foo_action)\n self.assertEqual(foo_action.parent_action, bar_action)\n self.assertEqual(bar_action.child_actions, [foo_action])\n\n foo_action.unregister()\n\n self.assertIsNone(actions_registry.get('action_id', 'foo-action'))\n self.assertIsNone(foo_action.parent_action)\n self.assertEqual(bar_action.child_actions, [])", "def find(self,i):\r\n if 
self.parent[i]<0:\r\n return i\r\n return self.find(self.parent[i])", "def add_child(self, child, probe_id=None):\n node = None\n matching_nodes = [x for x in self.children if x.name == child.name] # see if the added node has already in its children list\n # print(\"[*] add children with the name {}.. matching_nodes: {}\".format(child.name, matching_nodes))\n if len(matching_nodes) > 0:\n node = matching_nodes[0]\n if probe_id is not None:\n node.probes = probe_id\n # print(\"\\t[*] current node: {}\".format(node.name))\n if node is None:\n if probe_id is not None:\n child.probes = probe_id\n self.children.append(child)\n node = child\n # print(\"\\t[*] node {} is appended to {} child list\".format(node.name, self.name))\n return node", "def add_child(self, child, **kwargs):\n if child.is_root:\n return\n if 'after' in kwargs:\n if kwargs['after'] is not None:\n try:\n self.children.insert(\n self.children.index(kwargs['after']), child)\n return\n except ValueError:\n self.children.append(child)\n else:\n self.children.insert(0, child)\n self.children.append(child)\n if child.parent is not self and child.parent is not None:\n child.parent.remove_child(child)\n # pylint: disable=protected-access\n child.__parent = self", "def get_current_child(xmodule):\r\n if not hasattr(xmodule, 'position'):\r\n return None\r\n\r\n if xmodule.position is None:\r\n pos = 0\r\n else:\r\n # position is 1-indexed.\r\n pos = xmodule.position - 1\r\n\r\n children = xmodule.get_display_items()\r\n if 0 <= pos < len(children):\r\n child = children[pos]\r\n elif len(children) > 0:\r\n # Something is wrong. Default to first child\r\n child = children[0]\r\n else:\r\n child = None\r\n return child", "def _parent(child):\n parent = Pos(n=child.n - 1, x=child.x // 2, y=child.y // 2)\n left = child.x % 2\n top = child.y % 2\n return (parent, left, top)", "def create_action(self, parent):\n return QtGui.QAction(parent)", "def navigate_to_fold(self, folded_child):\n if not isinstance(folded_child, six.string_types):\n raise TypeError(u'Expected folded_child to be a string, was: {}'.format(folded_child))\n if self.field:\n raise AssertionError(u'Currently at a field, cannot go to folded child: '\n u'{}'.format(self))\n\n edge_direction, edge_name = get_edge_direction_and_name(folded_child)\n\n fold_path = _create_fold_path_component(edge_direction, edge_name)\n return FoldScopeLocation(self, fold_path)", "def getSuccAfterExpand(self, action):\n if self.children == {}: \n raise Exception(\"Called 'TreeNode.getSuccAfterExpand' either on a end state of the game or without calling expand before\")\n if not action in self.children: return None\n return self.children[action]", "def __find(self, x, parent: 'Node'):\n found = None\n if parent.value == x:\n return parent\n\n for child in parent.children:\n if child.value == x:\n return child\n new_found = self.__find(x, parent=child)\n if new_found:\n found = new_found\n\n return found", "def connectToChild(self):\n return _libsbml.SubListOfSpeciesFeatures_connectToChild(self)", "def insert_child(self, descendant, position, **kwargs):\n return self.sequence_manager.insert_child(\n descendant, self, position, **kwargs)", "def SwitchNode(self, move, state):\n # if node has children\n for i in self.child:\n if i.Move == move:\n return i\n # if node has no children\n return self.AddChild(move, state)", "def insert_output(self, action):\n parents = self.get_direct_outputs()\n action.add_input(self)\n for parent in parents:\n parent.remove_input(self)\n parent.add_input(action)\n return 
True", "def return_parent(self):\n # Return parent if completed\n if self.completed:\n return self.father\n return -1", "def action(self):\n next_action = self.strategy.get_next_move(self)\n return next_action", "def connectToParent(self, *args):\n return _libsbml.MultiSpeciesReferencePlugin_connectToParent(self, *args)", "def get_child(self, uid: str):\n if not self.has_child(uid):\n raise RuntimeError(\"Widget '{}' doesn't contain child '{}'.\".format(self.uid, uid))\n\n for w in self._children:\n if w.uid == uid:\n return w", "def get_index(self, action, column=0):\n if action == self.menuAction():\n return QtCore.QModelIndex()\n # find all parents to get their index\n parents = self._get_parents(action)\n index = QtCore.QModelIndex()\n # Move through indexes down the chain\n for a in reversed(parents):\n parent = a.parent()\n # if parent of action is its own menu, get parent of that menu.\n # We want to know which row the action is in. For that we need\n # The real parent menu.\n if parent is a.menu():\n parent = parent.parent()\n row = parent.actions().index(a)\n index = self._model.index(row, 0, index)\n parent = action.parent()\n if parent is None:\n return index\n if parent is action.menu():\n parent = parent.parent()\n row = parent.actions().index(action)\n index = self._model.index(row, column, index)\n return index", "def add_child(self, child):\n self.childs.append(child)", "def deterministical_decide(self):\n children = self.root_node.my_children\n best = children[0]\n for c in children:\n if c.visit_time > best.visit_time:\n best = c\n return best.parent_action", "def addChild( self, child ):\n\n self.childs.append( child )", "def connectToParent(self, *args):\n return _libsbml.GroupsModelPlugin_connectToParent(self, *args)", "def connectToParent(self, *args):\n return _libsbml.MultiSpeciesPlugin_connectToParent(self, *args)", "def getBestAction(self, currNode):\n bestScore = None\n bestAction = None\n for child in currNode.children:\n childScore = child.cumulativeScore\n if (bestAction is None or childScore > bestScore):\n bestScore = childScore\n bestAction = child.lastAction\n return bestAction", "def add_child(self, child):\r\n self.children.append(child)", "def add_parent(self, child, parent):\r\n setp = self._parents.setdefault(child, set())\r\n setp.add(parent)", "def next(self):\n if not self.has_parent():\n return None\n if self.parent.get_original_child(self.component_type) is self:\n return None\n siblings = \\\n self.parent.get_spoofed_children_of_type(self.component_type)\n next_index = siblings.index(self) + 1\n if len(siblings) > next_index:\n return siblings[next_index]\n return self.parent.get_original_child(self.component_type)", "def find_button_parent(root, elm):\n if lxml:\n parent = elm.getparent()\n if parent is not None:\n if parent.attrib.get('class') in widgets_buttons:\n return parent\n return find_button_parent(root, parent)\n else:\n def find_parent(cur, elm):\n for o in cur:\n if o == elm:\n if cur.attrib.get('class') in widgets_buttons:\n # we are the button, immediately above the target\n return cur\n else:\n # we aren't the button, but target is over there\n return True\n parent = find_parent(o, elm)\n if parent == True:\n # It is over there, but didn't find a button yet\n if cur.attrib.get('class') in widgets_buttons:\n # we are the button\n return cur\n else:\n return True\n if parent is not None:\n # we have the button parent over there\n return parent\n return None\n parent = find_parent(root, elm)\n if parent == True:\n parent = None\n 
return parent" ]
[ "0.65133506", "0.5764956", "0.56135213", "0.5588276", "0.556616", "0.5460327", "0.54077643", "0.53608245", "0.53120625", "0.52631944", "0.52541935", "0.5245333", "0.5144854", "0.513944", "0.51289624", "0.50804204", "0.5078679", "0.5062347", "0.5059294", "0.50372154", "0.503003", "0.5018946", "0.49946773", "0.49874905", "0.49712682", "0.49635014", "0.492581", "0.4907992", "0.48900583", "0.4857744", "0.4855918", "0.4848094", "0.48427027", "0.48398927", "0.48301795", "0.48141098", "0.4813416", "0.47765353", "0.47756213", "0.47726962", "0.47689226", "0.47577417", "0.47567272", "0.47488567", "0.47461694", "0.4744943", "0.4720907", "0.47154635", "0.47041026", "0.46952453", "0.46886712", "0.46832848", "0.46713814", "0.46654725", "0.46472326", "0.46421906", "0.46351218", "0.46329257", "0.4620247", "0.4611595", "0.46074724", "0.46067178", "0.4597052", "0.45904353", "0.45564806", "0.4554071", "0.45510292", "0.45340183", "0.4528861", "0.45228082", "0.4518804", "0.451285", "0.45114487", "0.45055804", "0.45031315", "0.4497684", "0.44849727", "0.44812348", "0.44737518", "0.44707894", "0.44627547", "0.44539067", "0.44505078", "0.4439325", "0.44346884", "0.4431333", "0.44283292", "0.44262347", "0.44111058", "0.4410808", "0.44098666", "0.44054398", "0.4400301", "0.43973586", "0.43958935", "0.43955845", "0.43951258", "0.43905282", "0.43901676", "0.4388187" ]
0.7250408
0
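The record above pairs the find_next_action docstring with its implementation; the helper exists to supply the `before` argument for QWidget.insertAction so that a newly added action lands at the correct position among its siblings. A minimal sketch of that Qt mechanism, assuming PyQt5 (the menu, group, and action names are illustrative, not taken from the dataset):

import sys
from PyQt5.QtWidgets import QAction, QActionGroup, QApplication, QMenu

app = QApplication(sys.argv)
menu = QMenu("Options")
group = QActionGroup(menu)          # management container only, never shown itself

first = QAction("First", menu)
last = QAction("Last", menu)
menu.addAction(first)
menu.addAction(last)

# Insert a grouped action before an existing sibling, mirroring how the
# handler uses the "next" action as the insertion anchor.
middle = QAction("Middle", menu)
group.addAction(middle)
menu.insertAction(last, middle)     # QWidget.insertAction(before, action)

print([a.text() for a in menu.actions()])   # ['First', 'Middle', 'Last']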
Handle the child added event for a QtActionGroup. This handler will also add the widget to the parent widget, since a QActionGroup only serves as a management container.
def child_added(self, child):
    super(QtActionGroup, self).child_added(child)
    if isinstance(child, QtAction):
        self.widget.addAction(child.widget)
        parent = self.parent()
        if parent is not None:
            before = self.find_next_action(child)
            parent.widget.insertAction(before, child.widget)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_widget(self):\n self.widget = QCustomActionGroup(self.parent_widget())", "def child_added(self, child):\n super(QtToolButton, self).child_added(child)\n if isinstance(child, QtMenu):\n self.widget.setMenu(child.widget)", "def child_removed(self, child):\n super(QtActionGroup, self).child_removed(child)\n if isinstance(child, QtAction) and child.widget is not None:\n self.widget.removeAction(child.widget)\n parent = self.parent()\n if parent is not None:\n parent.widget.removeAction(child.widget)", "def init_layout(self):\n super(QtActionGroup, self).init_layout()\n widget = self.widget\n for action in self.actions():\n widget.addAction(action)", "def actionGroup(self, QDesignerFormWindowManagerInterface_ActionGroup): # real signature unknown; restored from __doc__\n pass", "def child_added(self, child):\n super(WxDockPane, self).child_added(child)\n if isinstance(child, WxContainer):\n self.widget.SetDockWidget(self.dock_widget())", "def _itemAdded(self, item):\n group = self.item()\n if group is None:\n return\n\n row = group.getItems().index(item)\n self.addRow(nodeFromItem(item), row + self._CHILDREN_ROW_OFFSET)", "def _add_child(self, widget):\n # May be overloaded in layout widgets\n self.node.appendChild(widget.node)", "def _do_request_insert_child(self, __button, **kwargs): # pylint: disable=unused-argument\n return self._do_request_insert(sibling=False)", "def add_group(self):\n items = self.group_list.selectedItems()\n for item in items:\n self.parent.add_group_data(item.text())", "def __on_group_created(self, logger, *args):", "def connectToChild(self):\n return _libsbml.GroupsModelPlugin_connectToChild(self)", "def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)", "def add_child(self, child: UIComponent):\n child.parent = self\n child.set_chronometer(self._chronometer)\n self.children.append(child)\n if self.props.resize_mode == ResizeMode.AUTO:\n self._reset('add_child')", "def add_child(self, child_node):\n # Assignment group doesn't have short_name\n if not hasattr(child_node.node, 'short_name'):\n # Makes sure the candidates are shown if a student \n # is part of more than one AssignmentGroup\n if len(self.children) != 0:\n child_node.display_group = True\n # Contains only one, set display_group to True for that element as well.\n if len(self.children) == 1:\n list(self.children.values())[0].display_group = True\n self.children[child_node] = child_node\n else:\n if child_node.get_name() not in self.children:\n self.children[child_node.get_name()] = child_node\n else:\n self.children[child_node.get_name()].merge(child_node)", "def child_removed(self, child):\n super(WxDockPane, self).child_removed(child)\n if isinstance(child, WxContainer):\n self.widget.SetDockWidget(self.dock_widget())", "def populate_menu(self):\n # TODO : Performance issue ?\n self.showGroupMenu.clear()\n self.addGroupDataMenu.clear()\n counter = 0\n for group_name in self.group_data.keys():\n counter +=1\n exec('self.groupAct' + str(counter) + ' = QAction(\"' + group_name+'\", self)')\n eval('self.groupAct' + str(counter) + '.triggered.connect(partial(self.load_group, group_name))')\n exec('self.groupAddAct' + str(counter) + ' = QAction(\"' + group_name+'\", self)')\n eval('self.groupAddAct' + str(counter) + '.triggered.connect(partial(self.add_group_data, group_name))')\n self.showGroupMenu.addAction(eval('self.groupAct' + str(counter)))\n self.addGroupDataMenu.addAction(eval('self.groupAddAct' + str(counter)))", "def 
_add_to_cli(self, parser, group=None):\n dest = self.dest\n if group is not None:\n dest = group.name + '_' + dest\n\n subparsers = parser.add_subparsers(dest=dest,\n title=self.title,\n description=self.description,\n help=self.help)\n # NOTE(jd) Set explicitly to True for Python 3\n # See http://bugs.python.org/issue9253 for context\n subparsers.required = True\n\n if self.handler is not None:\n self.handler(subparsers)", "def addchild(self, child, index=None, move=True):\n owners = child.owners()\n if len(owners) > 0 and not move:\n return # keep the original owner\n\n if self.guid in owners:\n return # already add\n\n # add to this group (avoid 'child' being remove from project when no one refer to it)\n pbxhelper.pbxobj_add_pbxlist_value(self, u'pbx_children', child, \\\n self.is_valid_child, index=index)\n\n for owner in owners.values():\n child.remove_referrer(owner) # remove from other groups", "def append_child(self, child):\n # Child UID must not be the same as parent's UID\n if self.uid == child.uid:\n raise RuntimeError(\"Cannot add child widget '{}' because it has same UID as its parent\".format(child.uid))\n\n # Each widget is responsible to control only its direct children, not all descendants\n if self.has_child(child.uid):\n raise RuntimeError(\"Widget '{}' already contains descendant '{}'\".format(self.uid, child.uid))\n\n child.parent = self\n\n if not child.weight:\n self._last_children_weight += 100\n child.weight = self._last_children_weight\n elif child.weight > self._last_children_weight:\n self._last_children_weight = ceil(child.weight / 100) * 100\n\n # Obviously, child must be placed in the same form's area as its parent\n child.form_area = self.form_area\n\n self._children.append(child)\n self._children_uids.append(child.uid)\n self._children.sort(key=lambda x: x.weight)\n\n return child", "def add_child(self, child):\r\n self.children.append(child)", "def add_widget(self, widget):\n widget.bind(on_touch_down=self.button_touch_down)\n return super(MainGrid, self).add_widget(widget)", "async def async_added_to_hass(self) -> None:\n self._group.set_callback(self.schedule_update_ha_state)\n self.hass.data[DOMAIN][self._entry_id].groups.append(self)", "def child_removed(self, child):\n super(QtToolButton, self).child_removed(child)\n if isinstance(child, QtMenu):\n if child.widget is self.widget.menu():\n self.widget.setMenu(None)", "def on_containers_list_triggered(self):\n parent_widget = self.parentWidget()\n containers_list = parent_widget.findChildren(ContainersList, 'containers_list_widget')\n\n if len(containers_list) > 0:\n containers_list[0].setWindowState(Qt.WindowNoState)\n containers_list[0].setFocus()\n\n return\n\n containers_list = ContainersList()\n containers_list.setObjectName('containers_list_widget')\n containers_list.create_ui()\n parent_widget.mdi.addSubWindow(containers_list)\n containers_list.show()", "def child_added(self, child):\n super(AbstractItemView, self).child_added(child)\n self.get_member(\"_items\").reset(self)", "def add_child(self, child):\r\n self.children.append(child)", "def group_data_callback(self, action: EventType, group_id: str) -> None:\n self.process_item(group_id, {})", "def add_action_button_to_overflow(self):\n\n if len(self.ids.right_actions.children) > 1:\n button_to_be_added = self.ids.right_actions.children[1]\n self._hidden_items.append(button_to_be_added)\n self.ids.right_actions.remove_widget(button_to_be_added)\n\n self._overflow_menu_items.append(\n {\n \"viewclass\": \"OverFlowMenuItem\",\n \"icon\": 
button_to_be_added.icon,\n \"text\": button_to_be_added.overflow_text,\n \"height\": dp(48),\n \"on_press\": lambda *x: button_to_be_added.on_release(*x),\n }\n )\n self.overflow_cls.items = self._overflow_menu_items\n self.overflow_cls.caller = self.ids.right_actions.children[0]", "def addChild( self, child ):\n\n self.childs.append( child )", "def add_child(self, child, label):\n self.children[label] = child\n child.parents.append(self)", "def add_child(self, child):\n self.childs.append(child)", "def group(*args, show: bool = True, parent: str = \"\", before: str = \"\", width: int = 0, pos=[],\n horizontal: bool = False, horizontal_spacing: float = -1.0, id:str='', indent=-1):\n try:\n widget = internal_dpg.add_group(*args, show=show, parent=parent, before=before, width=width,\n horizontal=horizontal, horizontal_spacing=horizontal_spacing, id=id,\n indent=indent, pos=pos)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def add_child(self, child):\n self.children.append(child)", "def add_child(self, child):\n self.children.append(child)", "def OnChildFocus(self, event):\r\n\r\n # when a child pane has it's focus set, we should change the \r\n # pane's active state to reflect this. (this is only true if \r\n # active panes are allowed by the owner)\r\n\r\n window = event.GetWindow()\r\n if isinstance(window, wx.Dialog):\r\n # Ignore EVT_CHILD_FOCUS events originating from dialogs not\r\n # managed by AUI\r\n rootManager = None\r\n elif isinstance(window.GetParent(), AuiFloatingFrame):\r\n rootManager = GetManager(window)\r\n else:\r\n rootManager = self\r\n \r\n if rootManager:\r\n rootManager.ActivatePane(window)\r\n \r\n event.Skip()", "def connectToChild(self):\n return _libsbml.Group_connectToChild(self)", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def add_widget(self, widget):\n widget.bind(on_touch_down=self.button_touch_down,\n on_touch_up=self.button_touch_up)\n return super(SelectableLayout, self).add_widget(widget)", "def addChild(self, child):\n #assert child not in self.children\n #if child not in self.children:\n child.parents.append(self)\n self.children.append(child)", "def add_argument_group(self, *args, **kwargs):\n title = kwargs.get('title', args[0])\n for group in self._action_groups:\n if group.title == title:\n return group\n group = MutableArgumentGroup(self, *args, **kwargs)\n self._action_groups.append(group)\n return group", "def createOptionsGroup(self):\n #1. Create a widget (here: QGroupBox)\n self.groupBox = QGroupBox()\n self.groupBox.setAlignment(4)\n\n #2. 
Create a couple of elements\n self.save_to_database = QtGui.QPushButton()\n self.save_to_database.setText(\"Save to database\")\n\n self.line = QFrame()\n self.line.setFrameShape(QFrame.HLine)\n self.line.setFrameShadow(QFrame.Sunken)\n\n #Mother or child select\n\n mother_child = QtGui.QHBoxLayout()\n self.mother_btn = QRadioButton(\"Mother\")\n self.mother_btn.setChecked(True)\n self.mother = True\n mother_child.addWidget(self.mother_btn)\n self.mother_btn.toggled.connect(lambda:self.set_mother(self.mother_btn))\n\n\n self.child_btn = QRadioButton(\"Child\")\n self.child_btn.setChecked(False)\n self.child = False\n mother_child.addWidget(self.child_btn)\n self.child_btn.toggled.connect(lambda:self.set_child(self.child_btn))\n\n #Coordinates display\n self.coordinates1 = QLineEdit()\n self.coordinates1.setFixedWidth(40)\n self.coordinates2 = QLineEdit()\n self.coordinates2.setFixedWidth(40)\n self.coordinates3 = QLineEdit()\n self.coordinates3.setFixedWidth(40)\n self.coordinates4 = QLineEdit()\n self.coordinates4.setFixedWidth(40)\n\n self.title = \"Dyad: \"+str(self.video.get_dyad())+ \"\\t\\t Video: \" + str(self.video.get_camera())\n self.l1 = QLabel(self.title)\n self.l2 = QLabel(\"Comment (optional)\")\n self.l3 = QLabel(\"Coordinates\")\n self.l4 = QLabel(\"Current frame\")\n\n self.comment = QLineEdit()\n self.current_frame = QLineEdit()\n\n #3. Add them to a QVBoxLayout (Vertical)\n hbox = QtGui.QHBoxLayout()\n hbox.addWidget(self.coordinates1)\n hbox.addWidget(self.coordinates2)\n hbox.addWidget(self.coordinates3)\n hbox.addWidget(self.coordinates4)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.l1)\n vbox.addWidget(self.line)\n vbox.addWidget(self.l2)\n vbox.addWidget(self.comment)\n vbox.addLayout(mother_child)\n vbox.addWidget(self.l4)\n vbox.addWidget(self.current_frame)\n vbox.addWidget(self.l3)\n vbox.addLayout(hbox)\n vbox.addWidget(self.save_to_database)\n\n\n vbox.addStretch(1)#Add empty QSpacerItem that pushes the buttons upwards\n\n #4. 
Add layout to widget\n self.groupBox.setLayout(vbox)\n\n return self.groupBox", "def signal_handler(self,sig,data):\n self.resize_child_window()", "def menu_insert_separator(self, event=None):\n if self.app.children:\n self.app.childActive.insert_separator()", "def add_dockwidget(self, child, title):\n dockwidget, location = child.create_dockwidget(title)\n self.addDockWidget(location, dockwidget)\n return dockwidget", "def put_in_groupbox(widget, title):\n box = QtGui.QGroupBox(title)\n layout = QtGui.QHBoxLayout(box)\n layout.addWidget(widget)\n return box", "def handleActionAdd(self):\n self.fDialog.show()", "def insertChild(self, *args):\n return _libsbml.ASTBasePlugin_insertChild(self, *args)", "def append_child(self, child):\n\t\tself._children.append(child)", "def new_child(self, parent, *args, **kwargs):\n child = self.new_element(*args, **kwargs)\n parent.append(child)\n return child", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def add(self, child):\r\n# child = Node()\r\n# child._id = Kinetic.Global.id_counter\r\n# Kinetic.Global.id_counter += 1\r\n child.index = len(self.children)\r\n child.parent = self\r\n self.children.append(child)\r\n stage = child.get_stage()\r\n\r\n if not stage:\r\n Kinetic.Global._add_temp_node(child)\r\n else:\r\n stage._add_id(child)\r\n stage._add_name(child)\r\n\r\n go = Kinetic.Global\r\n go._pull_nodes(stage)\r\n\r\n if hasattr(self, '_add'):\r\n self._add(child)\r\n\r\n return '%s.add(%s);' %(self.name, child.name)", "def on_the_groups_page_click_add(driver):\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()", "def addGroup(self, *args):\n return _libsbml.GroupsModelPlugin_addGroup(self, *args)", "def on_button_group(self, row_index: int):\n\n def _callback(attr, old, new):\n # Note: bokeh.core.PropertyList can not be deep copied\n # it RuntimeErrors, cast as list instead\n active = list(new)\n self.notify(on_button_group(row_index, active))\n\n return _callback", "def add_child(self, child):\n name = child.name\n self._children[name] = child\n self._name_dict[name.split('-')[0]] += 1", "def init_layout(self):\n super(QtToolButton, self).init_layout()\n for child in self.children():\n if isinstance(child, QtMenu):\n self.widget.setMenu(child.widget)\n break", "def addChild(self, *args):\n return _libsbml.ASTBasePlugin_addChild(self, *args)", "def create_action(self, parent):\n return QtGui.QAction(parent)", "def add_child(self, child, **kwargs):\n if child.is_root:\n return\n if 'after' in kwargs:\n if kwargs['after'] is not None:\n try:\n self.children.insert(\n self.children.index(kwargs['after']), child)\n return\n except ValueError:\n self.children.append(child)\n else:\n self.children.insert(0, child)\n self.children.append(child)\n if child.parent is not self and child.parent is not None:\n child.parent.remove_child(child)\n # pylint: disable=protected-access\n child.__parent = self", "def add_child(self, child_account):\r\n self._children.append(child_account)", "def log_group_updated_added_event(sender, **kwargs):\n logger = logging.getLogger(__name__)\n\n group = kwargs['instance']\n if kwargs['created']:\n logger.info(\"Group added: %s. Group leader: %s (ID: %d)\",\n group.title,\n group.leader,\n group.id)\n else:\n logger.info(\"Group updated: %s. 
Group leader: %s (ID: %d)\",\n group.title,\n group.leader,\n group.id)", "def add_child(self, ldraw_model):\n self.children.append(ldraw_model)", "def child(*args, show: bool = True, parent: str = \"\", before: str = \"\", width: int = 0, pos=[],\n height: int = 0, border: bool = True, autosize_x: bool = False, autosize_y: bool = False,\n no_scrollbar: bool = False, horizontal_scrollbar: bool = False, menubar: bool = False, id:str='', \n indent=-1):\n try: \n widget = internal_dpg.add_child(*args, show=show, parent=parent, before=before, width=width,\n height=height, border=border, autosize_x=autosize_x, autosize_y=autosize_y,\n no_scrollbar=no_scrollbar, horizontal_scrollbar=horizontal_scrollbar,\n menubar=menubar, id=id, indent=indent, pos=pos)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def add_group(self,form,prefix,name,items,**extra):\n w = InputGroup(prefix+name,**extra)\n form.addWidget(w)\n if w.isCheckable:\n self.fields.append(w)\n\n if self.autoprefix:\n prefix += name+'/'\n self.add_items(items,w.form,prefix=prefix)", "def on_the_add_group_side_box_input_the_group_name(driver):\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys('qetest')", "def handle_groupchat_message(self, msg):\n self.xmpp.event('groupchat_message', msg)\n self.xmpp.event(\"muc::%s::message\" % msg['from'].bare, msg)", "def add_child(self, action: Action) -> \"Node\":\n child = Node(self._state.execute_action(action))\n if action in self._untried_edges:\n self._untried_edges.remove(action)\n self.children[action] = child\n child._parent = self\n return child", "def addChildObject(self, child):\n \n currChild = self.getChild(child.getName())\n if currChild:\n index = self.getIndex(currChild)\n if index != -1:\n self._children[index] = child\n child.setParent(self)\n # Unset the existing child's parent\n currChild.setParent(None)\n del currChild\n \n self.__setChildDict(child)\n else:\n child.setParent(self) \n self._children.append(child)\n self.__setChildDict(child)", "def addMenu():\n toolsMenu = mb.findChild(QtGui.QMenu, \"&Tools\")\n if toolsMenu:\n toolsMenu.addAction(action)", "def _parentDirectoryActionTriggeredSlot(self):\r\n\r\n self._controller.model.activeIndex = self._controller.model.activeIndex.parent()", "def add_child(self, name: str, command: Command) -> None:\n self._children[name] = command", "def appendChild(self, child):\n self.__initChild()\n self.__child.append(child)", "def create_action(self, *args, **kwargs):\n action_group = kwargs.pop('action_group', None)\n act = QAction(*args, **kwargs)\n if action_group:\n act.setActionGroup(action_group)\n\n return act", "def connectToParent(self, *args):\n return _libsbml.GroupsModelPlugin_connectToParent(self, *args)", "def add_child_handler(self, pid, callback, *args):\n h = self._loop.trio_as_future(self._waitpid, pid, callback, *args)\n self._callbacks[pid] = h", "def exec_(self):\n super().exec_()\n return self.clicked_button", "def _add_child(self, registry: 'Registry') -> None:\n\n assert isinstance(registry, Registry)\n assert registry.scope is not None\n assert registry.scope not in self.children, \\\n f'scope {registry.scope} exists in {self.name} registry'\n self.children[registry.scope] = registry", "def double_click_event(self, 
clicked_item):\n item = self.group_list.item(clicked_item.row())\n self.parent.load_group(item.text())", "def handleEvent(self, event):\n for child in self.children:\n child.handleEvent(event)", "def add_to_group(self, group):\n\n if not self.in_group(group):\n self.secondary_groups.append(group)\n return self", "def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)", "def addOptionToGroup(self, groupName, *args, **kwargs):\n group = self._optionGroupDict.get(groupName)\n group.add_argument(*args, **kwargs)", "def add_child(self, blueprint: 'Blueprint'):\n self._children.append(blueprint)\n blueprint._parent = self\n return blueprint", "def _onaddchannel(self):\n\n self._fileinfolayout.insertWidget(\n self._fileinfolayout.count() - 1,\n ChannelInfoWidget(self._channels)\n )", "def add_group(self, *args, **kwds):\n title = kwds.pop('title', None)\n description = kwds.pop('description', None)\n if kwds:\n raise Exception('unknown keyword arguments: %s' % kwds)\n\n # set title, description if args[0] is string\n if isinstance(args[0], string_types):\n title = args[0]\n args = args[1:]\n if isinstance(args[0], string_types):\n description = args[0]\n args = args[1:]\n\n assert all(isinstance(arg, Command) for arg in args), 'all args should be instance of Command'\n self._arg_stack.append(('group', args, {'title': title, 'description': description}))\n return self", "def append_group(self, _groupby):\n # forces a `__getitem__`, which in turn calls `__missing__`\n # the first time we try to insert a value\n def do_append(key, group):\n self[key].send(group)\n appender = yield from starmap(do_append, _groupby)", "def on_category(self):\n super(ToolSettings, self).on_category()\n selItems = self.tw_category.selectedItems() or []\n #--- Build Tree ---#\n if selItems:\n if hasattr(selItems[0], 'itemWidget'):\n if selItems[0].itemWidget is not None:\n if not selItems[0].itemWidget.__edited__:\n selItems[0].itemWidget._initWidget()\n selItems[0].itemWidget.buildTree()", "def add_child(self, child_id):\n self._children.append(child_id)", "def on_category(self):\n super(ProjectSettings, self).on_category()\n selItems = self.tw_category.selectedItems() or []\n #--- Build Tree ---#\n if selItems:\n if hasattr(selItems[0], 'itemWidget'):\n if selItems[0].itemWidget is not None:\n if not selItems[0].itemWidget.__edited__:\n selItems[0].itemWidget._initWidget()\n selItems[0].itemWidget.buildTree()", "def handle_reply_to_group(self, api, command):\n try:\n content, orig_msg, continue_session, helper_metadata = (\n self._get_cmd_params(api, command, [\n 'content', 'in_reply_to', 'continue_session',\n 'helper_metadata']))\n except InvalidOutboundCommand, err:\n return self._mkfaild(command, reason=unicode(err))\n\n d = self.app_worker.reply_to_group(\n orig_msg, content, continue_session=continue_session,\n helper_metadata=helper_metadata)\n return d.addCallbacks(*self._reply_callbacks(command))", "def __expandAllSetup(self):\n btn = QtWidgets.QPushButton()\n self.__toolbar.addWidget(btn)\n btn.setIcon(QtGui.QIcon(\":down.png\"))\n btn.setFocusPolicy(QtCore.Qt.NoFocus)\n btn.setToolTip(\"Expand all groups\")\n btn.clicked.connect(self.__monitorCue.expandAll) # pylint: disable=no-member", "def add_child(self, child):\n\n self._children.add(child)", "def handle_event(self, event):\n self.give_sub_event.handle_event(event)", "def widget(self, request, group):", "def tool_bar_append_widget(self, widget):\n self._bar_layout.addWidget(widget)", "def newAction(self, label, icon, 
slot):\n action = self.addAction(icon, label)\n self.parent.connect(action, QtCore.SIGNAL(\"triggered(bool)\"), slot);\n return action", "def add_child(self, cd, wt: float):\n self.child.append([cd, wt])", "def insert_output(self, action):\n parents = self.get_direct_outputs()\n action.add_input(self)\n for parent in parents:\n parent.remove_input(self)\n parent.add_input(action)\n return True", "def add(self, widget, emit_signal=True):\n if widget in self._selection:\n return\n\n self._selection.insert(0, widget)\n if emit_signal:\n self.selection_changed()\n\n refresh_selected_nodes(widget)" ]
[ "0.67494166", "0.6715713", "0.6624586", "0.612766", "0.58904576", "0.5739347", "0.56276655", "0.5609154", "0.5497478", "0.5419861", "0.53794026", "0.5364947", "0.5319612", "0.5314476", "0.53119785", "0.5293082", "0.52860713", "0.5270443", "0.5258827", "0.5233708", "0.52084774", "0.5202621", "0.5196853", "0.5187449", "0.5178258", "0.5171143", "0.5165584", "0.51306176", "0.51176906", "0.51098806", "0.50890326", "0.50322", "0.5018769", "0.50175124", "0.50175124", "0.49960646", "0.49828932", "0.49743846", "0.49717566", "0.49678347", "0.49568984", "0.49474028", "0.49391988", "0.4928826", "0.49259797", "0.49104336", "0.48976716", "0.48946655", "0.48899502", "0.48775506", "0.48773006", "0.48704398", "0.48622814", "0.4831232", "0.48203346", "0.48187932", "0.4808974", "0.4808491", "0.4784813", "0.47787008", "0.4770673", "0.47566998", "0.47520187", "0.47510728", "0.47453097", "0.47438687", "0.47405002", "0.47386795", "0.4736745", "0.47354728", "0.47323304", "0.47202995", "0.47196168", "0.47142226", "0.47058097", "0.47016948", "0.4699843", "0.46946025", "0.46940732", "0.4692344", "0.46855187", "0.4679236", "0.46651945", "0.46647513", "0.4653467", "0.46290877", "0.46270603", "0.4625388", "0.46148157", "0.46124506", "0.46030256", "0.46013674", "0.4594979", "0.45907465", "0.45904922", "0.45846388", "0.45817733", "0.45794532", "0.45716307", "0.45556778" ]
0.8052653
0
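The child_added entry above (and the child_removed entry that follows) rely on an action being registered with both the QActionGroup and the parent widget, and on removal detaching it from both. A short sketch of that symmetry, again assuming PyQt5 with illustrative names:

import sys
from PyQt5.QtWidgets import QAction, QActionGroup, QApplication, QMenu

app = QApplication(sys.argv)
menu = QMenu("Options")
group = QActionGroup(menu)

act = QAction("Toggle", menu)
group.addAction(act)        # group handles exclusivity/enabled state
menu.addAction(act)         # parent widget owns the visible entry

# Tear-down mirrors child_removed: detach from the group and the parent.
group.removeAction(act)
menu.removeAction(act)
print(len(menu.actions()))  # 0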
Handle the child removed event for a QtActionGroup. This handler will also remove the widget from the parent widget, since a QActionGroup only serves as a management container.
def child_removed(self, child):
    super(QtActionGroup, self).child_removed(child)
    if isinstance(child, QtAction) and child.widget is not None:
        self.widget.removeAction(child.widget)
        parent = self.parent()
        if parent is not None:
            parent.widget.removeAction(child.widget)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def child_removed(self, child):\n super(QtToolButton, self).child_removed(child)\n if isinstance(child, QtMenu):\n if child.widget is self.widget.menu():\n self.widget.setMenu(None)", "def child_removed(self, child):\n super(AbstractItemView, self).child_removed(child)\n self.get_member(\"_items\").reset(self)", "def child_removed(self, child):\n super(WxDockPane, self).child_removed(child)\n if isinstance(child, WxContainer):\n self.widget.SetDockWidget(self.dock_widget())", "def _remove_child(self, widget):\n self.node.removeChild(widget.node)", "def slotDelete(self):\n item = self.groupListBox.item((self.groupListBox.currentItem()))\n group = item.text().ascii()\n Group.Sequencer().slotRemoveGlobalGroup(group)", "def child_added(self, child):\n super(QtActionGroup, self).child_added(child)\n if isinstance(child, QtAction):\n self.widget.addAction(child.widget)\n parent = self.parent()\n if parent is not None:\n before = self.find_next_action(child)\n parent.widget.insertAction(before, child.widget)", "def __on_group_deleted(self, logger, *args):", "def remove_child_handler(self, pid):\n h = self._callbacks.pop(pid, None)\n if h is None:\n return False\n h.cancel()\n return True", "def after_delete(self, record):\n debug = logging.getLogger(__name__).debug\n debug('deleted group %r (%r)', record['name'], record['group_id'])\n audit('delete group', record['name'])", "def cleanup_callback(self, obj, event):\n self.parent.rend.RemoveActor(self.actor)\n #self.vtk_interactor.RemoveObservers('LeftButtonPressEvent')\n self.parent.vtk_interactor.RemoveObserver(self.cleanup_observer)\n cleanup_observer = None", "def on_delete_event(self, widget, event):\n\n self.hide()\n return True", "def removeChild(self, *args):\n return _libsbml.ASTBasePlugin_removeChild(self, *args)", "def feed_entry_post_delete(sender, instance, **kwargs):\n del kwargs\n if isinstance(instance, FeedEntry) and (sender == FeedEntry):\n signals.post_delete.disconnect(feed_entry_post_delete, sender=FeedEntry)\n try:\n if instance.command_group and instance.command_group.id is not None:\n instance.command_group.delete()\n except CommandGroupEntry.DoesNotExist:\n pass\n\n signals.post_delete.connect(feed_entry_post_delete, sender=FeedEntry)", "async def async_will_remove_from_hass(self) -> None:\n self._group.set_callback(None)\n self.hass.data[DOMAIN][self._entry_id].groups.remove(self)", "def log_group_deleted_event(sender, **kwargs):\n logger = logging.getLogger(__name__)\n\n group = kwargs['instance']\n logger.info(\"Group deleted: %s. 
Group leader: %s (ID: %d)\",\n group.title,\n group.leader,\n group.id)", "def delete_level(self, event=None):\n self.parent.delete_filter(self)", "def remove_child(self, child):\n if hasattr(child, \"_protected\"):\n raise TypeError(\"You cannot remove channels defined at class level.\")\n if hasattr(child, \"_collection\"):\n collection = getattr(self, child._collection)\n del collection[child.id]\n delattr(self, child._name)", "def remove_child(self, child):\n\n self.children.remove(child)", "def on_closebutton_handle_clicked(self, _widget):\n self._terminate.set()\n self.top_widget.destroy()\n self.top_widget = None", "def del_child(self, child):\n\n try:\n self.children.remove(child)\n except ValueError:\n pass\n else:\n self.rebuild_children_dict()", "def process_IN_DELETE_SELF(self, event):", "def DeleteChildren(self, tree):\r\n\r\n for child in self._children:\r\n if tree:\r\n tree.SendDeleteEvent(child)\r\n\r\n child.DeleteChildren(tree)\r\n \r\n if child == tree._selectItem:\r\n tree._selectItem = None\r\n\r\n # We have to destroy the associated window\r\n for wnd in child._wnd:\r\n if wnd:\r\n wnd.Hide()\r\n wnd.Destroy()\r\n \r\n child._wnd = []\r\n\r\n if child in tree._itemWithWindow:\r\n tree._itemWithWindow.remove(child)\r\n \r\n del child\r\n \r\n self._children = []", "def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()", "def on_delete(action, item, view):\n actors.remove(action.actor)\n self.remove(item)", "def cleanup(self):\n for child in self.children():\n child.deleteLater()", "def delete_event(self,widget=None):\n self.on_device_dialog_cancel_clicked()\n return True", "def actionGroup(self, QDesignerFormWindowManagerInterface_ActionGroup): # real signature unknown; restored from __doc__\n pass", "def on_groups_deleted(event):\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_objects:\n group = change[\"old\"]\n bucket_id = event.payload[\"bucket_id\"]\n group_uri = utils.instance_uri(event.request, \"group\", bucket_id=bucket_id, id=group[\"id\"])\n\n permission_backend.remove_principal(group_uri)", "def childDidExit(self, processProtocol, reason):\n for child in self.children:\n if child.protocol == processProtocol:\n self.log.info(\n u\"Child process ({child.transport.pid}) exited: \"\n u\"{reason}\",\n child=child, reason=reason,\n )\n self.children.remove(child)\n break\n else:\n self.log.error(\n u\"No child for for process protocol\",\n processProtocol=processProtocol\n )\n\n try:\n self.dispatcher.removeSocket(processProtocol.inheritedSocket)\n except ValueError:\n self.log.error(\n u\"No socket found for process protocol\",\n processProtocol=processProtocol\n )", "def remove(self, child):\r\n if child and child.index is not None and id(self.children[child.index]) == id(child):\r\n stage = self.get_stage()\r\n if stage:\r\n stage._remove_id(child.get_id())\r\n stage._remove_name(child.get_name())\r\n\r\n Kinetic.Global._remove_temp_node(child)\r\n del self.children[child.index]\r\n self._set_children_indices()\r\n\r\n while child.children and len(child.children) > 0:\r\n child.remove(child.children)\r\n\r\n if hasattr(child, '_remove'):\r\n child._remove()\r\n return '%s.remove(%s);' %(self.name, child.name)", "def delete(self):\n\n # TODO find a way to remove this when sub-classing in HCRoot\n self.parent.del_child(self)", "def create_widget(self):\n self.widget = QCustomActionGroup(self.parent_widget())", "def remove_sizegroup(self, sizegroup):\n self.sizegroups.remove(sizegroup)\n 
self.emit('remove-sizegroup', sizegroup)", "def handleRemoveButtonClicked(self):\n with Tracer(traceLogger):\n # Figure out which dataset to remove\n rowsToDelete = set()\n selectedRanges = self.fileInfoTableWidget.selectedRanges()\n for rng in selectedRanges:\n for row in range(rng.topRow(), rng.bottomRow()+1):\n rowsToDelete.add(row)\n\n # Remove files in reverse order so we don't have to switch indexes as we go\n for row in sorted(rowsToDelete, reverse=True):\n # Remove from the GUI\n self.fileInfoTableWidget.removeRow(row)\n # Remove from the operator input\n finalSize = len(self.topLevelOperator.Dataset) - 1\n self.topLevelOperator.Dataset.removeSlot(row, finalSize)\n\n # The gui and the operator should be in sync\n assert self.fileInfoTableWidget.rowCount() == len(self.topLevelOperator.Dataset)", "def remove_child(self, child):\n if self.is_root:\n return\n self.children.remove(child)\n # pylint: disable=protected-access\n child.__parent = None", "def DeleteChildren(self, tree):\r\n\r\n for child in self._children:\r\n if tree:\r\n tree.SendDeleteEvent(child)\r\n\r\n child.DeleteChildren(tree)\r\n \r\n if child == tree._select_me:\r\n tree._select_me = None\r\n\r\n # We have to destroy the associated window\r\n wnd = child.GetWindow()\r\n if wnd:\r\n wnd.Destroy()\r\n child._wnd = None\r\n\r\n if child in tree._itemWithWindow:\r\n tree._itemWithWindow.remove(child)\r\n \r\n del child\r\n \r\n self._children = []", "def DebugMenuProviderMixin_on_destroy(self):\n ## Remove Debug Menu:\n curr_window = self.root_window\n curr_menubar = self.root_menu_bar\n curr_actions_dict = self.DebugMenuProviderMixin_actionsDict\n\n curr_menubar.removeAction(curr_actions_dict[self.top_level_menu_name])\n curr_window.ui.actionMenuDebug = None\n \n self.activeMenuReference.active_drivers_menu = None\n self.activeMenuReference.active_drivables_menu = None\n self.activeMenuReference.active_connections_menu = None\n \n # curr_window.ui.menus.global_window_menus.debug.actions_dict = {} # Empty the dict of actions\n self.DebugMenuProviderMixin_actionsDict = {}", "def remove_child(self, child: \"Node\") -> \"Node\":\n act = None\n for action, node in self.children.items():\n if node == child:\n child._parent = None\n act = action\n if act:\n del self.children[act]\n else:\n raise ValueError(\"The node does not have the given child node\")\n return self", "def Remove(self, event):\n pass", "def _itemRemoved(self, item):\n group = self.item()\n if group is None:\n return\n\n # Find item\n for row in self.children():\n if isinstance(row, Item3DRow) and row.item() is item:\n self.removeRow(row)\n break # Got it\n else:\n raise RuntimeError(\"Model does not correspond to scene content\")", "def remove_child(self, child):\n\n self._children.remove(child)", "def _delChild(self, child):\n try:\n self._getSubNsList().remove(child)\n except KeyError:\n # This shouldn't happen, but what do we do if it does?\n pass\n self._testKeySubNsDel()", "def child_added(self, child):\n super(QtToolButton, self).child_added(child)\n if isinstance(child, QtMenu):\n self.widget.setMenu(child.widget)", "def ChildrenClosing(self, item):\r\n\r\n if self._textCtrl != None and item != self._textCtrl.item() and self.IsDescendantOf(item, self._textCtrl.item()):\r\n self._textCtrl.StopEditing()\r\n \r\n if item != self._key_current and self.IsDescendantOf(item, self._key_current):\r\n self._key_current = None\r\n \r\n if self.IsDescendantOf(item, self._select_me):\r\n self._select_me = item\r\n \r\n if item != self._current and 
self.IsDescendantOf(item, self._current):\r\n self._current.SetHilight(False)\r\n self._current = None\r\n self._select_me = item", "def remove(self, widget, emit_signal=True):\n if widget not in self._selection:\n return\n\n refresh_selected_nodes(widget)\n\n self._selection.remove(widget)\n if emit_signal:\n self.selection_changed()", "def handle_remove(self):\r\n self.del_common()", "def removeChild(self, childRegion):\n self._children.remove(childRegion)\n self._zincRegion.removeChild(childRegion._zincRegion)\n childRegion._parent = None\n childRegion.freeContents()\n if childRegion._ancestorModelSourceCreated:\n self._reload()\n else:\n self._informRegionChange(True)", "def on_action_widget_close(self, value):\n _log.debug('widget_close %s', value)\n skip_undo = getattr(get_instance(value), 'view_skip_undo', False)\n # todo save settings and dock geometry for undo\n unique_id = self._widget_suspend(value, delete=True)\n if skip_undo:\n return None\n else:\n return [['registry/view/actions/!widget_open', unique_id],\n ['registry/view/actions/!widget_close', unique_id]]", "def removeChild(self, child):\n child.parents.remove(self)\n self.children.remove(child)", "def child_added(self, child):\n super(AbstractItemView, self).child_added(child)\n self.get_member(\"_items\").reset(self)", "def OnClose(self, event):\r\n\r\n if self._owner_mgr:\r\n self._owner_mgr.OnFloatingPaneClosed(self._pane_window, event)\r\n\r\n if not event.GetVeto():\r\n self._mgr.DetachPane(self._pane_window)\r\n\r\n if isinstance(self._pane_window, auibar.AuiToolBar):\r\n self._pane_window.SetAuiManager(self._owner_mgr)\r\n\r\n # if we do not do this, then we can crash...\r\n if self._owner_mgr and self._owner_mgr._action_window == self:\r\n self._owner_mgr._action_window = None\r\n\r\n self.Destroy()", "def DeleteChildren(self, item):\r\n\r\n self._dirty = True # do this first so stuff below doesn't cause flicker\r\n\r\n self.ChildrenClosing(item)\r\n item.DeleteChildren(self)", "def removeGroup(self, *args):\n return _libsbml.GroupsModelPlugin_removeGroup(self, *args)", "def ChildrenClosing(self, item):\r\n\r\n if self._textCtrl != None and item != self._textCtrl.item() and self.IsDescendantOf(item, self._textCtrl.item()):\r\n self._textCtrl.StopEditing()\r\n\r\n if self.IsDescendantOf(item, self._selectItem):\r\n self._selectItem = item\r\n \r\n if item != self._current and self.IsDescendantOf(item, self._current):\r\n self._current.SetHilight(False)\r\n self._current = None", "def remove_child(self, child):\r\n try:\r\n self._children.remove(child)\r\n except ValueError:\r\n #Don't care if it's not in the list\r\n pass", "def remove_group_bucket():\n pass", "def remove_child(self, child: \"AbstractNode\") -> None:\n self._children.remove(child)", "def removeChild(self, *args):\n return _libsbml.ASTNode_removeChild(self, *args)", "def _item_removed(self, item):\n item.unobserve('linkable_vars', self.root._update_linkable_vars)\n with item.suppress_notifications():\n del item.root\n del item.parent\n item.index = 0\n if isinstance(item, Sequence):\n item.unobserve('_last_index', self._item_last_index_updated)", "def _on_delete_plot_panel(self, event):\n # name = event.name\n caption = event.caption\n if self.cb_plotpanel is not None:\n pos = self.cb_plotpanel.FindString(str(caption))\n if pos != wx.NOT_FOUND:\n self.cb_plotpanel.Delete(pos)\n self.enable_append()", "def removeFromParentAndDelete(self):\n return _libsbml.Trigger_removeFromParentAndDelete(self)", "def remove(self):\n self.hide()\n 
self.deleteLater()", "def delete_child(self, val):\n del self._children[val]\n return val", "def on_deleteButton_clicked(self):\n itm = self.protocolHandlersList.selectedItems()[0]\n self.__manager.removeProtocolHandler(itm.text(0))\n \n self.protocolHandlersList.takeTopLevelItem(\n self.protocolHandlersList.indexOfTopLevelItem(itm))\n del itm", "def onRemove(self):\n # Ensure taht we can work\n plt = Plot.getPlot()\n if not plt:\n self.updateUI()\n return\n # Get again all the subwidgets (to avoid PySide Pitfalls)\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n form.axId = self.widget(QtGui.QSpinBox, \"axesIndex\")\n\n # Don't remove first axes\n if not form.axId.value():\n msg = QtGui.QApplication.translate(\n \"plot_console\",\n \"Axes 0 can not be deleted\",\n None,\n QtGui.QApplication.UnicodeUTF8)\n App.Console.PrintError(msg + \"\\n\")\n return\n # Remove axes\n ax = plt.axes\n ax.set_axis_off()\n plt.axesList.pop(form.axId.value())\n # Ensure that active axes is correct\n index = min(form.axId.value(), len(plt.axesList) - 1)\n form.axId.setValue(index)\n plt.update()", "def remove(self):\n\n\t\t\t\tself.parent.thing.remove_sheet(self.thing)\n\t\t\t\tdel self.parent[self.label]", "def post_security_group_delete(self, resource_id, resource_dict):\n pass", "def signal_handler(self,sig,data):\n self.resize_child_window()", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def OnPaneDocked(self, event):\r\n\r\n event.Skip()\r\n self.RemoveAutoNBCaption(event.GetPane())", "def on_remove_clicked(self, control):\n\n # Recogemos el valor del ComboBox\n dniBaja = self.cbDniRemove.get_active_text()\n # Lo enviamos al metodo de la Clase MetodosBD\n MetodosBD.MetodosBD.borrar_vendedor(self, dniBaja)\n # Metodo que refresca los Combobox\n vendedores.actualizar_cmbDni(self)\n # Medoto que limpia las cajas de texto\n vendedores.on_limpiar(self, control)", "def remove_child_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchyDesignSession.remove_child_bin\n self._get_provider_session('catalog_hierarchy_design_session').remove_child_catalog(*args, **kwargs)", "def delete_child(self, model):\n assert isinstance(model, self.model_class) # it's a homogeneous collection\n m_id = str(model.get_id())\n assert m_id != None # needs a real id or cid\n assert m_id in self._models\n model._mark_deleted()\n del self._models[m_id]", "def after_remove(self, cls, *args, **kwargs):\n pass", "def remove_child(self, child: int = 0) -> None:\n logging.info(f\"Remove a node from the child node list. 
{self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.removeChild(elm.childNodes[{child}]);\"\"\"\n self._execute_javascript(js)", "def remove_button(self):\n self.scene.remove_child(self.toggle_button_el)", "def remove_child(self, child_id):\r\n self.children = [ c for c in self.children if c.id!= child_id ]", "def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.participants_group.delete(using=using)\n except ObjectDoesNotExist:\n pass", "def deleteModule(self):\n\n # delete the joint mover\n movers = self.returnJointMovers\n\n for moverGrp in movers:\n for mover in moverGrp:\n cmds.lockNode(mover, lock=False)\n\n cmds.delete(self.name + \"_mover_grp\")\n\n # remove the entry from the outliner\n index = self.rigUiInst.treeWidget.indexOfTopLevelItem(self.outlinerWidgets[self.name + \"_treeModule\"])\n self.rigUiInst.treeWidget.takeTopLevelItem(index)\n\n # remove the groupbox\n self.groupBox.setParent(None)\n\n # deal with mirror module\n networkNode = self.returnNetworkNode\n mirrorModule = cmds.getAttr(networkNode + \".mirrorModule\")\n if mirrorModule != None:\n if mirrorModule != \"None\":\n modules = utils.returnRigModules()\n for mod in modules:\n modName = cmds.getAttr(mod + \".moduleName\")\n if modName == mirrorModule:\n\n # set the mirrored version\n cmds.setAttr(mod + \".mirrorModule\", lock=False)\n cmds.setAttr(mod + \".mirrorModule\", \"None\", type=\"string\", lock=True)\n\n # get instance of mirror module's class\n modType = cmds.getAttr(mod + \".moduleType\")\n modName = cmds.getAttr(mod + \".moduleName\")\n module = __import__(\"RigModules.\" + modType, {}, {}, [modType])\n\n # get the class name from that module file (returns Modules.ART_Root.ART_Root for example)\n moduleClass = getattr(module, module.className)\n\n # find the instance of that module and call on the skeletonSettings_UI function\n moduleInst = moduleClass(self.rigUiInst, modName)\n\n # find the current groupBox for this module\n for i in range(self.rigUiInst.moduleSettingsLayout.count()):\n if type(self.rigUiInst.moduleSettingsLayout.itemAt(i).widget()) == QtWidgets.QGroupBox:\n if self.rigUiInst.moduleSettingsLayout.itemAt(i).widget().title() == modName:\n self.rigUiInst.moduleSettingsLayout.itemAt(i).widget().setParent(None)\n\n # relaunch the skeleton settings UI with new info\n moduleInst.skeletonSettings_UI(modName)\n\n # check for any attached modules\n attachedModules = self.checkForDependencies()\n elementList = []\n if len(attachedModules) > 0:\n\n for each in attachedModules:\n elementList.append([each[2], \" -> parent changed from: \", each[1], \" to: \", \"root\\n\"])\n cmds.parent(each[2] + \"_mover_grp\", \"root_mover\")\n cmds.setAttr(each[0] + \".parentModuleBone\", lock=False)\n cmds.setAttr(each[0] + \".parentModuleBone\", \"root\", type=\"string\", lock=True)\n each[3].currentParent.setText(\"root\")\n mover = \"root_mover\"\n\n # create the connection geo between the two\n childMover = utils.findOffsetMoverFromName(each[2])\n riggingUtils.createBoneConnection(mover, childMover, each[2])\n each[3].applyModuleChanges(each[3])\n cmds.select(clear=True)\n\n # remove the network node\n cmds.delete(networkNode)\n\n # delete scriptJob\n cmds.scriptJob(kill=self.scriptJob, force=True)\n self.updateBoneCount()\n self.rigUiInst.moduleInstances.remove(self)\n\n # warn user about changes\n if len(attachedModules) > 0:\n 
winParent = interfaceUtils.getMainWindow()\n win = interfaceUtils.DialogMessage(\"Attention!\",\n \"The following modules have had their parent changed\\\n due to the change in this module's structure:\",\n elementList, 5, winParent)\n win.show()", "def removeChild(self, edge):\n del self.child_edges[edge.getId()]", "def OnRemoveAutomation(self, event, automation):\n\n self.app.RemoveAutomation(automation)\n for child in self.GetChildren():\n child.Destroy()\n\n self.Draw()", "def pre_security_group_delete(self, resource_id):\n pass", "def RemoveChild(self, child, notify=1):\n self._children.remove(child)\n if notify:\n child.RemoveParent(self, notify=0)", "def test_delete_child(self):\r\n # Create 2 children of main course.\r\n resp_1 = self.create_xblock(display_name='child 1', category='chapter')\r\n resp_2 = self.create_xblock(display_name='child 2', category='chapter')\r\n chapter1_usage_key = self.response_usage_key(resp_1)\r\n chapter2_usage_key = self.response_usage_key(resp_2)\r\n\r\n course = self.get_item_from_modulestore(self.usage_key)\r\n self.assertIn(chapter1_usage_key, course.children)\r\n self.assertIn(chapter2_usage_key, course.children)\r\n\r\n # Remove one child from the course.\r\n resp = self.client.ajax_post(\r\n self.course_update_url,\r\n data={'children': [unicode(chapter2_usage_key)]}\r\n )\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # Verify that the child is removed.\r\n course = self.get_item_from_modulestore(self.usage_key)\r\n self.assertNotIn(chapter1_usage_key, course.children)\r\n self.assertIn(chapter2_usage_key, course.children)", "def _remove_child_element(self, index):\n del self._child_elements[index]", "def remove_row(self):\n if len(self.columns[\"rows\"].children) > 0:\n self.selects.pop()\n self.button_groups.pop()\n self.buttons[\"edit\"].pop()\n self.columns[\"rows\"].children.pop()", "def process_IN_DELETE(self, event):", "def remove(self, widget):\n self.widgets.remove(widget)\n widget.destroy()", "def remove_child(self, descendant):\n self.children.through.objects.get(\n parent=self, child=descendant).delete()", "def unregister_child(self, child, signal_store=None, greedy=False):\n # Mirror own signals by default\n if signal_store is None:\n signal_store = self\n\n for _, callback in signal_store.signal_callbacks:\n for signal, *_ in Signal.get_signals(callback):\n signal.remove_parent(child, self)\n\n # Unregister child's signals as well as parents\n if greedy:\n self.unregister_child(child, child)", "def on_leave(self):\r\n for widgets in self.timecompoundlist:\r\n for w in widgets:\r\n self.ids.inlayout.remove_widget(w)\r\n self.ids.inlayout.remove_widget(self.add_button)\r\n self.ids.inlayout.remove_widget(self.del_button)", "def onRemove(self):\n pass", "def onRemove(self):\n pass", "def remove_component(self, child):\n if (child.component_type in self._children and\n child is self._children[child.component_type]):\n child.parent = None\n del self._children[child.component_type]\n if (child.component_type in self._spoofed_children and\n child in self._spoofed_children[child.component_type]):\n self._spoofed_children[child.component_type].remove(child)\n child.parent = None\n self._remove_child_from_tag_table(child)\n return child", "def _on_remove_row(self, elements_to_remove: List[QObject]):\n\n for x in elements_to_remove:\n x.setParent(None)\n x.deleteLater()\n\n if not self.allow_repeats:\n self._remove_repeats()", "def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n 
return self", "def delete_parent(sender, instance, **kwargs):\n ItemRelation.objects.filter(child_id=instance.item_id).delete()", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def XPDestroyWidget(inWidget, inDestroyChildren):\n pass", "def hide_action_bar(self):\n self.remove_widget(self.bottom_action_bar)" ]
[ "0.66755563", "0.6194727", "0.6103499", "0.59941256", "0.5964826", "0.59547776", "0.56917274", "0.5554345", "0.53270316", "0.5319716", "0.5318732", "0.53073436", "0.5299997", "0.52597994", "0.5187974", "0.51409185", "0.51390886", "0.5129079", "0.51237017", "0.5121978", "0.5089273", "0.5067556", "0.5055692", "0.5048465", "0.49946117", "0.49767175", "0.49679986", "0.49668556", "0.494119", "0.49375105", "0.49318773", "0.49207562", "0.49150807", "0.48899123", "0.48822305", "0.48767644", "0.4870438", "0.48678553", "0.48544258", "0.48517525", "0.48468593", "0.4846171", "0.48406035", "0.48372382", "0.48333153", "0.4812966", "0.4794791", "0.4786849", "0.47802112", "0.4779855", "0.47746396", "0.47677055", "0.475801", "0.47532982", "0.47519", "0.47420108", "0.4739004", "0.47225153", "0.4722335", "0.47180945", "0.47170544", "0.47122404", "0.470984", "0.47083372", "0.470509", "0.47018093", "0.4683476", "0.46668953", "0.4665162", "0.46575606", "0.4635551", "0.46318352", "0.46294734", "0.46264902", "0.46204552", "0.46204126", "0.46202222", "0.46143118", "0.46021307", "0.45970702", "0.45931825", "0.4591097", "0.45906913", "0.45846963", "0.45788854", "0.4575691", "0.4574287", "0.4574069", "0.45724905", "0.45679757", "0.45636418", "0.45521116", "0.45521116", "0.45483962", "0.4544712", "0.45354247", "0.45257688", "0.4523889", "0.45183855", "0.4518232" ]
0.8089256
0
Get the QAction children for this action group. Returns
def actions(self):
    isinst = isinstance
    return [c.widget for c in self.children() if isinst(c, QtAction)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getChildren(self):\n \n return self._children", "def children(self):\n \n return self._children", "def children(self):\n return self._children", "def children(self):\n return self._children", "def get_children(self):\n return self._children", "def get_children(self):\r\n return self._children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\r\n return self.children", "def GetChildren(self):\r\n\r\n return self._children", "def actions(self):\n return self._action_list", "def getChildren(self):\n return self.child_edges", "def actions(self):\n\n return self._actions.getSlice(0)", "def actions(self):\n\n return self._actions.getSlice(0)", "def get_childs(self):\n\t\treturn self.__childs", "def get_children(self):\n return self.items", "def actions(self):\n return self._actions", "def get_children(self):\n return self._routes.values()", "def children(self):\n return list(self._children)", "def children(self) -> List[str]:\n return self._children", "def get_all_children(self):\n return tuple(self.children)", "def getchildren(self):\n return self.root.getchildren()", "def getChildren(self):\n return []", "def getActions(self):\n actions = self.actions[:]\n return actions", "def get_children(self):\n return [node for node in self._children.values()]", "def actions(self):\r\n return self.puzzle.actions", "def get_list_of_actions(self):\n return self.actions", "def get_actions(self):\n actions = []\n for section in self._sections:\n for (sec, action) in self._actions:\n if sec == section:\n actions.append(action)\n\n actions.append(MENU_SEPARATOR)\n return actions", "def child_views(self):\n return self.children", "def get_children(self):\n\n return self._children.copy()", "def get_child_descriptors(self):\r\n return self.descriptor.get_children()", "def get_children(self):\n return self.children", "def get_children(self):\n\n pass", "def children(self):\n return tuple(getattr(self, i) for i in self._traversable)", "def descendants(self):\n for child in self.children:\n yield child\n if isinstance(child, LoggedAction):\n for descendant in child.descendants():\n yield descendant", "def actions(self):\r\n return actions.Actions(self)", "def get_children(self):\n return []", "def children(self):\n return self.hashring_watch.get_children()", "def getChildren():", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def children(self) -> List[Plugin]:\n raise NotImplementedError()", "def getActions():\n return getPlugins(IRenamingAction, plugins)", "def fm_all_children(self):\n return self._relation_lst[self.CHILD].copy()", "def get_actions(self):\n return self.agent.get_actions()", "def get_actions(self):\n return []", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def children(self) -> dict:\n if self._children is None:\n self._children = OrderedDict()\n\n return self._children", "def children(self):\n return [self.cut]", "def children(self):\n if self._children is None:\n return set()\n else:\n return self._children", "def get_plugin_actions(self):\n return []", "def get_plugin_actions(self):\n return []", "def _get_items(self):\n isinst = isinstance\n allowed = (Action, 
ActionGroup, Menu)\n items = (child for child in self.children if isinst(child, allowed))\n return tuple(items)", "def get_children(self):\n raise NotImplementedError()", "def _generate_children(self) -> list:\n if self.debug: print(f\"StateNode._generate_children()\")\n return [self.transition(x) for x in self.actions()]", "def children_ids(self):\n return self._children_ids", "def actions(self) -> Sequence[_A_out]:\n return self._actions", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def children(self) -> list[set[\"HierarchicalCategory\"]]:\n return self.categorization.children(self)", "def actions(self):\n actions = []\n\n for name, item in self._definition.get('actions', {}).items():\n name = self._get_name('action', name)\n actions.append(Action(name, item, self._resource_defs))\n\n return actions", "def children(self) -> List[Region]:\n return self._children", "def all_actions(self):\n actions = self.actions.stream[:]\n for eq in self.equipment:\n actions.extend(eq.actions)\n return actions", "def get_available_actions(self):\n return self.actions", "def actions(self):\r\n return Actions(self)", "def get_children(self, item, level):\n return item.children", "def partition_actions(self):\n\n return self._partition_actions", "def make_children(self):\n\t\tchildren = []\n\t\tfor action in self.observations:\n\t\t\tchildren.append(\"empty\")\n\n\t\treturn children", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def children(self):\n return {x[1] for x in self.outgoing}", "def get_node_children(self, node):\n return node.children", "def children(self) -> Iterable[Heirarchical]:\n return []", "def getChildren(self):\n return self.directories.values()", "def actions(self):\n from moztrap.view.lists.actions import actions\n return actions", "def get_children(self):\r\n\r\n if not self.has_children:\r\n return []\r\n\r\n if getattr(self, '_child_instances', None) is None:\r\n self._child_instances = [] # pylint: disable=attribute-defined-outside-init\r\n for child_loc in self.children:\r\n try:\r\n child = self.runtime.get_block(child_loc)\r\n child.runtime.export_fs = self.runtime.export_fs\r\n except ItemNotFoundError:\r\n log.exception(u'Unable to load item {loc}, skipping'.format(loc=child_loc))\r\n continue\r\n self._child_instances.append(child)\r\n\r\n return self._child_instances", "def children(self): # noqa: ANN201", "def children_data(self) -> List[CatalogDataCategoryTreeInterface]:\n return self._children_data", "def actions(self) -> List['outputs.PreventionJobTriggerInspectJobAction']:\n return pulumi.get(self, \"actions\")", "def get_child_descriptors(self):\r\n if self.child_descriptor is None:\r\n return []\r\n\r\n return [self.child_descriptor]", "def get_children_elements(self):\n\n pass", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def get_all_inputs(self):\n def get_inputs_recurently(action, actions):\n for child in action.get_direct_inputs():\n if child not in actions:\n actions += [child]\n if child.get_direct_inputs():\n get_inputs_recurently(child, actions)\n\n actions = []\n get_inputs_recurently(self, actions)\n return actions", "def 
getAllRobotActions(self):\n return self.robot.actions", "def children(node):\n\n return snd(node)", "def children(self, i):\n if i < 0:\n raise IndexError()\n return self._children[i]", "def get_header_context_menu_actions(self):\n return self._horizontal_header().actions()", "def get_children_queryset(self):\n pass", "def getAllChildren(self):\n \n l = []\n for child in self._children:\n l.append(child)\n l.extend(child.getAllChildren())\n \n return l", "def buttons(self):\n return self._buttons", "def buttons(self):\n return self._buttons", "def buttons(self):\n return self._buttons", "def get_child_resource_nodes(self):\n raise errors.Unimplemented()", "def get_array_of_children(self):\n children = [self.posXposYposZ,self.posXposYnegZ,self.posXnegYposZ,self.posXposYnegZ,self.negXposYposZ,self.negXposYnegZ,self.negXnegYposZ,self.negXnegYnegZ ] \n return children", "def current_container_children(self):\n # noinspection PyProtectedMember\n return self.current_container._all_children", "def get_children(self):\r\n\r\n # FIXME: Expose iteration from CIndex, PR6125.\r\n def visitor(child, parent, children):\r\n # FIXME: Document this assertion in API.\r\n # FIXME: There should just be an isNull method.\r\n assert child != conf.lib.clang_getNullCursor()\r\n\r\n # Create reference to TU so it isn't GC'd before Cursor.\r\n child._tu = self._tu\r\n children.append(child)\r\n return 1 # continue\r\n children = []\r\n conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),\r\n children)\r\n return iter(children)", "def action_templates(self) -> List[ActionTemplate]:\n return self._action_templates", "def get_display_items(self):\r\n items = []\r\n for child in self.get_children():\r\n items.extend(child.displayable_items())\r\n\r\n return items" ]
[ "0.68728095", "0.67529786", "0.66481596", "0.66481596", "0.65832484", "0.65597486", "0.65548855", "0.65548855", "0.65548855", "0.6481813", "0.64334065", "0.6417876", "0.6396768", "0.63655233", "0.63655233", "0.63300586", "0.63013625", "0.62966573", "0.62869656", "0.6238286", "0.6232387", "0.62233764", "0.6202355", "0.62009573", "0.6183358", "0.61517256", "0.6126485", "0.61223686", "0.61213213", "0.6086075", "0.60662514", "0.6041014", "0.6035981", "0.60132635", "0.6004226", "0.6000694", "0.59648514", "0.5927546", "0.5880087", "0.5853612", "0.58337814", "0.58337814", "0.58320487", "0.5831133", "0.5824669", "0.58216554", "0.58213633", "0.5800758", "0.57979155", "0.5794912", "0.578233", "0.5768574", "0.57646525", "0.57646525", "0.57400405", "0.57349074", "0.5700428", "0.56974775", "0.5691071", "0.5678353", "0.56766737", "0.56703115", "0.56305355", "0.5602183", "0.55939364", "0.55649865", "0.5562389", "0.55510443", "0.5546644", "0.5539827", "0.5539827", "0.5539827", "0.55074924", "0.54854894", "0.5456218", "0.54559094", "0.5438206", "0.5432096", "0.5430591", "0.54298407", "0.5408276", "0.5406794", "0.5397762", "0.53849244", "0.5372046", "0.5368236", "0.5365482", "0.5359254", "0.5358671", "0.5340753", "0.53293395", "0.53186345", "0.53186345", "0.53186345", "0.53145534", "0.53045714", "0.5288156", "0.52870035", "0.5282462", "0.5278252" ]
0.72453445
0
Set the exclusive state of the underlying control.
def set_exclusive(self, exclusive):
    self.widget.setExclusive(exclusive)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setExclusive(self, exclusive):\n super(QCustomActionGroup, self).setExclusive(exclusive)\n if exclusive:\n last = self._last_checked\n if last is not None:\n last.setChecked(True)\n for action in self.actions():\n if action is not last:\n action.setChecked(False)", "def set_exclusive_mouse(self, exclusive):\n super(Window, self).set_exclusive_mouse(exclusive)\n self.exclusive = exclusive", "def setReadOnly(self, state: bool) -> None:\n ...", "def setLocked( self, state = True ):\n self._xLocked = state\n self._yLocked = state", "def disable(self):\n self.colour_combo.config(state=tk.DISABLED)\n self.game_name_entry.config(state=tk.DISABLED)\n self.num_tickets_entry.config(state=tk.DISABLED)", "def disable(self):\r\n self.update(enabled=False)", "def setXLocked( self, state = True ):\n self._xLocked = state", "def setDisableWithLayer( self, state ):\n self._disableWithLayer = state\n self.setDirty()", "def set_exclusive_active(self, name):\n self.set_all_inactive()\n self.set_active(name)", "def disable(self):\n self.SetInteractive(0)", "def _disable(self):\n self.enabled = False", "def reenable(*args):\n self.controls.disabled = False\n self.disabled = False", "def toggle(self):\r\n self._variable.set(not self._variable.get()) \r\n self._activate()", "def set_disabled(self, val):\n self._disabled = val", "def set_disabled(self, val):\n self._disabled = val", "def set_state( self ):", "def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)", "def set_locked(self, *args):\n return _ida_hexrays.vdui_t_set_locked(self, *args)", "def __disableControls(self):\n self.ignoreAll()", "def _enable_disable_gui(self, state):\r\n self.mainWidget.standbyPushButton.setDisabled(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)", "def toggle_selected(self):\n\n self._selected = not self._selected", "def _onCheckBox(self, widget):\n widget.setStateCheck(not widget.getStateCheck())", "def disable_setup(self):\n self.high_ver_entry.config(state=\"disabled\")\n self.low_ver_entry.config(state=\"disabled\")\n self.left_hor_entry.config(state=\"disabled\")\n self.right_hor_entry.config(state=\"disabled\")", "def setYLocked( self, state = True ):\n self._yLocked = state", "def toggle(self):\n self._variable.set(not self._variable.get())\n self._activate()", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "def ToggleLock(self, event):\n pass", "def toggle(self):\n self.open = not self.open", "def toggle(self):\n if self._state in [STATE_OFF, STATE_IDLE, STATE_STANDBY]:\n self._state = STATE_ON\n else:\n self._state = STATE_OFF", "def disable(self):\n self.enabled = False", "def set_disabled_switch(self, disabled):\n self.disabled = disabled", "def __setstate__(self, state):\n return None", "def set_status(self, locked=None, exclusive=None):\n self.locked = locked\n self.exclusive = exclusive", "def ToggleSpinner(event, state, widget):\n if state == True:\n widget.Enable()\n else:\n widget.Disable()\n event.Skip()", "def set_Off(self):\n if not(self._locked):\n self.__dict__['statusOn']=False\n self._undo_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "def disable(self):\n self.enabled = False\n self.__store(self)", "def readonly(self, readonly):\n\n self._readonly = 
readonly", "def readonly(self, readonly):\n\n self._readonly = readonly", "def disable(self):\n self._enabled = False", "def lockSliderPanel(self, flag): \n\t\tself.doLockSliderPanel = flag", "def _disabled(self, *args, **kwargs):\n raise TypeError(\"'%s' does not support mutable operations.\" %\n self.__class__)", "def _disabled(self, *args, **kwargs):\n raise TypeError(\"'%s' does not support mutable operations.\" %\n self.__class__.__name__)", "def set_state(self, state):\n self.state = state\n self.config(fill=self.state)", "def toggle(self, **kwargs):\n self.on = False if self.on else True", "def disabled(self, disabled):\n self._disabled = disabled", "def disable(self):", "def toggleEnabled(self, guiElement):\n enabled = guiElement.isEnabled()\n guiElement.setEnabled(not enabled)", "def setReadOnly(self, readOnly):\n try:\n self._editor.setReadOnly(readOnly)\n except AttributeError:\n self._editor.setEditable(not readOnly)\n self.setPickerEnabled(not readOnly)", "def set_inactive(self):\n self.active = False", "def disableEditing(self, disable):\n self.disabled = disable", "def set_disabled(self, disabled):\n if disabled:\n self.__button_new_game.configure(state=DISABLED, text=\"Playing...\")\n else:\n self.__button_new_game.configure(state=ACTIVE, text=\"New Game\")", "def setEnabled(self, boo):\n if boo:\n self.mousePressEvent = self.mousePressEventEnabled\n self.mouseMoveEvent = self.mouseMoveEventEnabled\n self.mouseReleaseEvent = self.mouseReleaseEventEnabled\n else:\n self.mousePressEvent = self.notEnabledDummy\n self.mouseMoveEvent = self.notEnabledDummy\n self.mouseReleaseEvent = self.notEnabledDummy", "def React(self):\n ##Disable DesktopMode if Xplorer & Conductor == False\n #self.state.React(self.state.GetSurface(\"Xplorer\") == False and\n # self.state.GetSurface(\"Conductor\") == False,\n # \"DesktopMode\", False)\n if self.state.GetSurface(\"DesktopMode\"):\n self.rbXplorer.SetSelection(0)\n \n \n self.UpdateDisplay()\n return", "def toggle(self) -> None:\n ...", "def disable(self) -> None:", "def toggle(self) -> None:", "def toggle(self) -> None:", "def lock_gate(self):\n self.fsm_gate.clear()", "def standby(self):\n self._state = STATE_STANDBY", "def standby(self):\n self._state = STATE_STANDBY", "def reset(self):\n if self.right_frame.current:\n self.right_frame.setEnabled(True)\n self.right_frame.set_current(self.right_frame.current)\n else:\n self.right_frame.setEnabled(False)", "def setInverted(self, state=True):\n self.__inverted = state", "def set_disable(self, btn, state):\n if self._disabled_buttons is None:\n self._disabled_buttons = {}\n self._disabled_buttons[btn] = state", "def disable(self, index):\n self._action(index, StateVariable.enable, missingok=False, value=False)", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def enable(self):\n self.colour_combo.config(state=tk.NORMAL)\n self.game_name_entry.config(state=tk.NORMAL)\n self.num_tickets_entry.config(state=tk.NORMAL)", "def __setState(self, widget):\n\n import wx\n import fsleyes_widgets.bitmaptoggle as bmptoggle\n\n if isinstance(widget, wx.MenuItem):\n widget.Check(self.toggled)\n elif isinstance(widget, (wx.CheckBox,\n wx.ToggleButton,\n bmptoggle.BitmapToggleButton)):\n widget.SetValue(self.toggled)", "def action_lock(self):\n self.state = 'locked'", "def setIgnitorState(*args):\n args[0].Controls.IgnitorState.ignitor_state = args[1]", "def entryToggle(self):\n status = \"normal\" if self.optionVar.get() == 4 else \"disabled\"\n 
for i in range(3):\n self.entry[i].configure(state=status)", "def disable(self):\n if self.active != DISABLED:\n self.uimanager.remove_ui(self.active)\n self.uimanager.remove_action_group(self.action_group)\n self.active = DISABLED", "def toggleShowOnlySelection(self):\r\n\t\tself.showOnlySelection = not self.showOnlySelection", "def set_state(self, state: int):", "def set_control(self, value):\n self.control = value", "def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True", "def set_state(self, state):\n if self.state == CHANNEL_MOVE_STATE_NONE:\n self.state = state", "def unlock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')", "def disabled(self, disabled):\n\n self._disabled = disabled", "def cambiar_celeste(self):\r\n self.celeste.setDisabled(True)", "def setModifyState(self, bool):\n self._canShowModRect = bool\n if bool == False:\n self._modRect.hide()", "def deSelect(self):\n for i in range(len(self.__controlsChecks)):\n self.__controlsChecks[i].setChecked(False)", "def disable(self):\n super().disable()", "def state_locked_changed(self, state):\n self.door_interlock_state = state\n self.get_state()", "def _on_click(self, _):\n self.value = not self.value", "def setCheckBoxState( self, cCtrlName, nState ):\n oControl = self.getControl( cCtrlName )\n oControl.setState( nState )", "def _reset(self):\n self._click()\n if self._touch is None:\n self._state = STATE_INACTIVE", "def SetToggle(self, flag):\n\n self.up = not flag\n self.Refresh()", "def set_state(self):\n self.able = not self.able\n self.save()", "def state_not_changed(self, curstate, event, *args, **kwargs):", "def state_not_changed(self, curstate, event, *args, **kwargs):", "def disable(self):\n raise NotImplementedError", "def cambiar_verde(self):\r\n self.morado.setDisabled(True)", "def lock (self):\n self.locked = True\n self._changed = False", "def rc_set_toggle(self,rc_field,value=None):\n\n rc_val = getattr(self.rc,rc_field)\n if value is None:\n value = not rc_val\n setattr(self.rc,rc_field,value)", "def setUIChanged(self, value):\r\n with self.__uiChangedLock:\r\n self.__uiChanged = value", "def disableViewStateSync(self):\n self._updateViewStateSync(False)", "def disable(self):\n self.error_code = 'DISABLED'\n self.running = False", "def deactivate(widg, self):\n widg.set_sensitive(False)", "def disable(self):\n pass", "def pause(self):\n self.entry['state']=DGG.DISABLED\n self.ignoreAll()" ]
[ "0.6620294", "0.6611052", "0.65280485", "0.6495654", "0.6394166", "0.6288686", "0.62424856", "0.6240522", "0.61622685", "0.61020094", "0.6073353", "0.60687214", "0.604627", "0.5999525", "0.5999525", "0.59711045", "0.5961201", "0.5957905", "0.5943796", "0.59436667", "0.59285986", "0.5921416", "0.5919099", "0.5909097", "0.58939576", "0.5887179", "0.5882491", "0.5844941", "0.58358604", "0.58348805", "0.58251935", "0.57993853", "0.5787081", "0.5757152", "0.5754039", "0.57423437", "0.5738964", "0.5738964", "0.5735775", "0.5735368", "0.5730062", "0.5730022", "0.5689695", "0.56652373", "0.5662966", "0.5656633", "0.5650549", "0.5643232", "0.56336886", "0.5633552", "0.5628906", "0.56284153", "0.5627384", "0.56238604", "0.56180567", "0.5609217", "0.5609217", "0.5609052", "0.5590918", "0.5590918", "0.5586112", "0.558375", "0.5583679", "0.557544", "0.55753547", "0.55612475", "0.5561002", "0.55447865", "0.5543869", "0.5535502", "0.5535464", "0.55315626", "0.5527926", "0.55268145", "0.5521009", "0.55161846", "0.551584", "0.55154896", "0.5510223", "0.5507668", "0.55035466", "0.55006576", "0.5499652", "0.54921716", "0.54896724", "0.54788834", "0.5477662", "0.5469894", "0.54684526", "0.54684526", "0.5456836", "0.5456089", "0.54514706", "0.54503584", "0.54463214", "0.5440015", "0.54358864", "0.5407035", "0.53992563", "0.5392054" ]
0.7793201
0
Set the enabled state of the underlying control.
def set_enabled(self, enabled):
    self.widget.setEnabled(enabled)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setEnabled( self, cCtrlName, bEnabled=True ):\n self.setControlModelProperty( cCtrlName, \"Enabled\", bEnabled )", "def setEnabled(self, enable: bool) -> None:\n self.enabled = ...", "def set_enabled(self, enabled=True):\n self._enabled = enabled", "def enable(self):\r\n self.update(enabled=True)", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "def enable(self):\n self._enabled = True", "def enabled(self, enabled):\n \n self._enabled = enabled", "def enable(self):\n self.switch.enable()\n self._enabled = True", "def enabled(self, enabled):\n\n self._enabled = enabled", "def enabled(self, enabled):\n\n self._enabled = enabled", "def enabled(self, enabled: bool):\n\n self._enabled = enabled", "def enabled(self, enabled: bool):\n\n self._enabled = enabled", "def set_enabled(self, newval):\n rest_val = \"1\" if newval > 0 else \"0\"\n return self._setAttr(\"enabled\", rest_val)", "def enable(self):\n self.colour_combo.config(state=tk.NORMAL)\n self.game_name_entry.config(state=tk.NORMAL)\n self.num_tickets_entry.config(state=tk.NORMAL)", "def setEnabled(self, enabled):\n def do(toUpdateList):\n self.enabled = enabled\n self.actions.addAction(do)", "def setEnabled(self, enable):\n self.advancedWidget1D.setEnabled(enable)\n self.advancedWidget2D.setEnabled(enable)\n self.radialRange1D.setEnabled(enable)\n self.radialRange2D.setEnabled(enable)\n self.azimuthalRange2D.setEnabled(enable)\n self.ui.integrate1D.setEnabled(enable)\n self.ui.integrate2D.setEnabled(enable)\n self.ui.advanced1D.setEnabled(enable)\n self.ui.advanced2D.setEnabled(enable)", "def set_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmDev_SetEnabled', self.handle, bEnabled)", "def reenable(*args):\n self.controls.disabled = False\n self.disabled = False", "def set_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVirtNet_SetEnabled', self.handle, bEnabled)", "def set_disabled_switch(self, disabled):\n self.disabled = disabled", "def setPickerEnabled(self, enable):\n self.__button.setEnabled(enable)", "def setEditButtonEnabled(self, enabled):\n self.editCurrentConfigButton.setEnabled(enabled)", "async def set_enabled(self, enabled: bool) -> None:\n return await self.api.set_enabled(enabled)", "def shell_enabled_changed(self, enabled):\n self.set_enabled(enabled)", "def toggleEnabled(self, guiElement):\n enabled = guiElement.isEnabled()\n guiElement.setEnabled(not enabled)", "def enable_controls(self, boolean):\n\n self.enabled = boolean\n self.pageNumber.set_sensitive(boolean)\n self.pagePart.set_sensitive(boolean)\n self.btPagePrev.set_sensitive(boolean)\n self.btSubPagePrev.set_sensitive(boolean)\n self.btSubPageNext.set_sensitive(boolean)\n self.btPageNext.set_sensitive(boolean)", "def set_step_enabled(self, enabled):\r\n self.pushButton.setEnabled(enabled)", "def set_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlShare_SetEnabled', self.handle, bEnabled)", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def set_disabled(self, val):\n self._disabled = val", "def set_disabled(self, val):\n self._disabled = val", "def changeEnabled(self, val):\n logging.debug(\"Changing enabled to \" + str(val))\n self.filesList.setEnabled(val)\n self.tabArea.setEnabled(val)\n self.actionRemove.setEnabled(val)\n self.actionSave.setEnabled(val)", "def 
setEnabled(self, *args):\n return _libsbml.SBMLExtension_setEnabled(self, *args)", "def enabled(self, enabled: ConfigNodePropertyBoolean):\n\n self._enabled = enabled", "def setEnabled(self, boo):\n if boo:\n self.mousePressEvent = self.mousePressEventEnabled\n self.mouseMoveEvent = self.mouseMoveEventEnabled\n self.mouseReleaseEvent = self.mouseReleaseEventEnabled\n else:\n self.mousePressEvent = self.notEnabledDummy\n self.mouseMoveEvent = self.notEnabledDummy\n self.mouseReleaseEvent = self.notEnabledDummy", "def enable_motor(self, enabled):\r\n self.enabled = enabled\r\n\r\n # Set motors in neutral if disabling.\r\n if not self.enabled:\r\n self.set_neutral()", "def set_enabledAtPowerOn(self, newval):\n rest_val = \"1\" if newval > 0 else \"0\"\n return self._setAttr(\"enabledAtPowerOn\", rest_val)", "def enable(self, enable):\n\n self._enable = enable", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "def disabled(self, disabled):\n self._disabled = disabled", "def enabled(self, enabled):\n if not isinstance(enabled, bool):\n raise ValidationError(\"Enabled must be a bool, not a %s.\" % type(enabled).__name__)\n else:\n self._set('enabled', enabled)", "def _set_pool_enabled(self, enabled):\n if not isinstance(enabled, bool):\n raise AttributeError(\"The enabled value should be True or False.\")\n self.pooling_enabled = enabled", "def setEnabled(self, status):\r\n self._status = status\r\n\r\n if status:\r\n self._start()\r\n else:\r\n self._stop()\r\n\r\n for cb in self._statusListener:\r\n cb(self, status)", "def setDisableWithLayer( self, state ):\n self._disableWithLayer = state\n self.setDirty()", "def disabled(self, disabled):\n\n self._disabled = disabled", "def enable(self):\n self.enabled = True\n for child in self.children:\n child.enable()", "def enable_button(self, index):\n if index != 0:\n self.roll_dem_bones.setEnabled(True)", "def EnableTool(self, tool_id, state):\r\n\r\n tool = self.FindTool(tool_id)\r\n\r\n if tool:\r\n \r\n if state == True:\r\n tool.state &= ~AUI_BUTTON_STATE_DISABLED\r\n else:\r\n tool.state |= AUI_BUTTON_STATE_DISABLED", "def enableCheckBoxTriState( self, cCtrlName, bTriStateEnable ):\n oControl = self.getControl( cCtrlName )\n oControl.enableTriState( bTriStateEnable )", "def Enable(self, flag=True):\n \n for c in self.choices:\n c.Enable(flag)", "def SetWindowEnabled(self, enable=True):\r\n\r\n if not self._wnd:\r\n raise Exception(\"\\nERROR: This Item Has No Window Associated\")\r\n\r\n self._windowenabled = enable\r\n self._wnd.Enable(enable)", "def turnButtonsOn(self, enabled):\n if enabled:\n #connecting iface signals\n self.iface.currentLayerChanged.connect(self.acquire)\n for button in self.buttons:\n #disconnecting the clicked signal\n button.clicked.disconnect(self.reclassify)\n #changing button behavior\n button.setCheckable(True)\n else:\n #disconnecting iface signals\n self.disconnectLayerSignals()\n try:self.iface.currentLayerChanged.disconnect(self.acquire)\n except:pass\n for button in self.buttons:\n #connecting the clicked signal\n button.clicked.connect(self.reclassify)\n #changing button behavior\n button.setCheckable(False)", "def enabled(self):\n raise NotImplementedError", "def enable(self):\n pass", "def enable(self):\n for val in data:\n val.enable()\n self.enabled = True", "def set_disable(self, btn, state):\n if self._disabled_buttons is None:\n self._disabled_buttons = {}\n self._disabled_buttons[btn] = state", "def _disable(self):\n self.enabled = False", "def enable(self):\n 
self.SetInteractive(1)", "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "def enable(self):\n raise NotImplementedError", "def set_disabled(self, disabled):\n if disabled:\n self.__button_new_game.configure(state=DISABLED, text=\"Playing...\")\n else:\n self.__button_new_game.configure(state=ACTIVE, text=\"New Game\")", "def setActivated(self, activated):\r\n \r\n self.widget.setEnabled(activated)\r\n if activated:\r\n self.forwardAction.setEnabled(not activated)\r\n self.backwardAction.setEnabled(not activated)\r\n self.parentDirectoryAction.setEnabled(not activated)\r\n self.refreshAction.setEnabled(activated)\r\n else:\r\n self.forwardAction.setEnabled(activated)\r\n self.backwardAction.setEnabled(activated)\r\n self.parentDirectoryAction.setEnabled(activated)\r\n self.refreshAction.setEnabled(activated)", "def enable_btns(self):\n self.saveBtn.setEnabled(True)\n self.openVideoBtn.setEnabled(True)\n self.openAnnotationBtn.setEnabled(True)\n self.resetBtn.setEnabled(True)\n self.speedCombo.setEnabled(True)\n self.newFileBtn.setEnabled(True)\n self.HelpBtn.setEnabled(True)", "def enable(self, *args, **kwargs):\n pass", "def disable(self):\n self.colour_combo.config(state=tk.DISABLED)\n self.game_name_entry.config(state=tk.DISABLED)\n self.num_tickets_entry.config(state=tk.DISABLED)", "def enable(self, subsystem=False):\n self.__dict__[\"enabled\"] = True\n\n if subsystem:\n self.subsystem.enable()", "def add_option_enable(self):\n logger.debug(\"Adding enable option\")\n chkenable = ttk.Checkbutton(self.optsframe,\n variable=self.vars[\"enabled\"],\n text=\"Enable {}\".format(self.tabname),\n command=self.on_chkenable_change)\n chkenable.pack(side=tk.RIGHT, padx=5, anchor=tk.W)\n Tooltip(chkenable,\n text=\"Enable or disable {} display\".format(self.tabname),\n wraplength=200)", "def _set_action_enabled(self, action, index):\n action.setEnabled(index.flags() & QtCore.Qt.ItemIsEnabled)", "def enable(widget_list: list) -> None:\r\n\r\n for widget in widget_list:\r\n widget.configure(state='normal')", "def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True", "async def enable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(True)", "def set_device_menus_enabled(self, enabled):\n # self.save_image.setEnabled(enabled)\n self.format_combo.setEnabled(enabled)\n self.resolution_combo.setEnabled(enabled)\n self.fps_combo.setEnabled(enabled)\n\n # self.recording_action.setEnabled(enabled)", "def enable(self) -> bool:\n return self._enable", "def disable(self):\r\n self.update(enabled=False)", "def _enable_disable_gui(self, state):\r\n self.mainWidget.standbyPushButton.setDisabled(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)", "def testSetEnabled(self):\n self.mgr.setGoProEnabled(True)\n self.assertTrue(self.mgr.enabled)\n self.mockWrite.assert_called_with(\"GoProEnabled\", \"1\")\n self.mgr.setGimbalEnabledParam.assert_called_with()", "def setEnabled(self, *args):\n return _libsbml.SBMLExtensionRegistry_setEnabled(self, *args)", "def enableEditorsCheckFocusIn(self, enabled):\n self.editorsCheckFocusIn = enabled", "def set_enable(self, enable):\n\n with AutoUpdater._lock:\n if isinstance(enable, Bus):\n AutoUpdater.remove_link(self._enable)\n AutoUpdater.add_link(\n 
enable,\n self._enable)\n else:\n raise ValueError(\n \"ERROR: Invalid Enable input. Enable must be a \"\n \"1-bit Bus or a Connector.\")", "def __set__(self, obj, enabled):\n\n value = getattr(obj, self.base_attr)\n if enabled:\n value |= self.bitmask\n else:\n value &= ~self.bitmask\n setattr(obj, self.base_attr, value)", "def setAutomaticMode(self, enabling: bool) -> None:\n ...", "def EnableItem(self, n, flag=True):\n \n self.choices[n].Enable(flag)", "def enable(self) -> None:", "def disable(self):\n self.enabled = False", "def set_hardware_control(self, value):\n self.widgets['hardware_control'].setChecked(value)\n self._under_hardware_control = value", "def signal_enabled(self, signal_enabled):\n\n self._signal_enabled = signal_enabled", "def turn_on_buttons(self):\n self.edit_button.setEnabled(True)\n self.delete_button.setEnabled(True)", "def enable(self, delay=False) -> None:\n self.enabled = True\n if self.delayed == False:\n self.delayed = delay" ]
[ "0.7554714", "0.7493753", "0.739995", "0.7382345", "0.731879", "0.731879", "0.72874004", "0.7201384", "0.7166079", "0.71536714", "0.71536714", "0.7085178", "0.7085178", "0.7061498", "0.70269674", "0.68897516", "0.6885137", "0.6868981", "0.6791306", "0.67691064", "0.67643887", "0.67625135", "0.6749216", "0.67226136", "0.6692179", "0.66422737", "0.6565082", "0.656033", "0.65496635", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6545542", "0.6497882", "0.6497882", "0.64937204", "0.64848876", "0.6472041", "0.64420986", "0.64151895", "0.6391105", "0.6388836", "0.6370395", "0.6341966", "0.63218975", "0.62661195", "0.6253147", "0.62225187", "0.6218978", "0.62132263", "0.6211917", "0.6185707", "0.61772776", "0.6162104", "0.6137837", "0.6136755", "0.61219287", "0.6100548", "0.6092012", "0.60839975", "0.6061911", "0.60513085", "0.6046152", "0.6042618", "0.602444", "0.6014494", "0.60127276", "0.6003915", "0.5994694", "0.5965686", "0.5957307", "0.5955559", "0.59322816", "0.59287465", "0.59131444", "0.59104", "0.58996457", "0.5895541", "0.5853519", "0.5850276", "0.5846347", "0.583722", "0.5837067", "0.5835899", "0.58336616", "0.5829488", "0.5825668", "0.5784768", "0.5765386", "0.5757754", "0.57557905", "0.5754049" ]
0.81380314
1
Set the visible state of the underlying control.
def set_visible(self, visible):
    self.widget.setVisible(visible)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_visible(self, visible):\n widget = self.widget\n if visible:\n widget.setVisible(True)\n widget.raise_()\n widget.activateWindow()\n else:\n widget.setVisible(False)", "def set_visible(self, visible):\n # Make sure the 'visible' attribute is synced up as a result\n # of the method call. This may fire a notification, in which\n # case the change handler will call this method again. This\n # guard prevents that unneeded recursion.\n if guard.guarded(self, 'set_visible'):\n return\n else:\n with guard(self, 'set_visible'):\n self.visible = visible\n \n # Only set the visibility to True (which will show the window) \n # if the component is fully initialized.\n if not visible or self.initialized:\n self.abstract_obj.set_visible(visible)", "def set_visible(self, visible):\n if visible:\n self.widget.Open()\n else:\n self.widget.Close()", "def set_visible(self, state: bool):\n self.box.set_visible(state)\n if not state:\n self.add_box.set_visible(False)", "def setVisible( self, state ):\n self._visible = state\n \n super(XNode, self).setVisible(self.isVisible())\n \n self.dispatch.visibilityChanged.emit(state)\n self.setDirty()", "def visible(self, visible):\n\n self._visible = visible", "def set_visible(self, visible):\n self.ec._win.set_mouse_visible(visible)\n self.ec._win.set_mouse_platform_visible(visible) # Pyglet workaround\n self._visible = visible", "def ensure_visible(self):\n self.set_visible(True)", "def set_visible(self, is_visible):\n self._data['is_visible'] = 1 if is_visible else 0", "def ToggleVisible(self, event):\n pass", "def toggle_visibility(self):\n if self.is_visible():\n self.hide()\n else:\n self.show()", "def set_mouse_visible(self, visible):\n raise NotImplementedError", "def set_visible(self):\n\t\tself.hide()\n\t\tself.__sys_tray_icon.setVisible(True)", "def SetVisible(self, bVisible):\n msg = self.__GetMsg(RPCMessageID.IPCMsg_Widget_PlotVisible)\n msg.bool_data.append(bVisible)\n rs.data_service.PostMsgToClient(msg)", "def set_header_visible(self, visible):\n if visible:\n self.widget.setHeaderHidden(False)\n else:\n self.widget.setHeaderHidden(True)", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def set_visible(self, status):\n if isinstance(status, bool):\n if status:\n self._visible = True\n else:\n self._visible = False\n else:\n raise ValueError(\"Input must a bool.\")", "def is_visible(self, is_visible):\n\n self.container['is_visible'] = is_visible", "def SetOverflowVisible(self, visible):\r\n\r\n self._overflow_visible = visible\r\n if visible:\r\n self._agwStyle |= AUI_TB_OVERFLOW\r\n else:\r\n self._agwStyle &= ~AUI_TB_OVERFLOW\r\n\r\n self.Refresh(False)", "def SetGripperVisible(self, visible):\r\n\r\n self._gripper_visible = visible\r\n if visible:\r\n self._agwStyle |= AUI_TB_GRIPPER\r\n else:\r\n self._agwStyle &= ~AUI_TB_GRIPPER\r\n \r\n self.Realize()\r\n self.Refresh(False)", "def set_visible(self, visible):\n self._visible = visible\n for artist in self.artists:\n artist.set_visible(visible)", "def __grid_visibility_checkbox(self, c):\n self.grid_visibility(c.checked)\n self.__grid_visibility = c.checked", "def __grid_visibility_checkbox(self, c):\n self.grid_visibility(c.checked)\n self.__grid_visibility = c.checked", "def grid_visibility(self, is_visible):\n 
self.__graphics_grid.set_visibility(is_visible)", "def grid_visibility(self, is_visible):\n self.__graphics_grid.set_visibility(is_visible)", "def SetShown(self, shown):\r\n\r\n self._shown = shown\r\n return self", "def setPageControlVisible(*args):", "def visible(self, show):", "def ensure_hidden(self):\n self.set_visible(False)", "def update_visible(self, immediate=False):\n raise NotImplementedError", "def set_visible(self, target: bool) -> None:\n hidden = not target\n for ent in self.child_ents():\n ent.vis_shown = target\n ent.hidden = hidden\n for solid in ent.solids:\n solid.vis_shown = target\n solid.hidden = hidden\n\n for solid in self.child_solids():\n solid.vis_shown = solid.hidden = target\n solid.hidden = hidden", "def set_visible(self, value):\n for artist in self.artists:\n artist.set_visible(value)", "def SetTitleBarVisible(self, visible):\n if self._title_bar_visible != visible:\n self._title_bar_visible = visible\n def closure(pane):\n left = self._title_bar_orientation == wx.VERTICAL\n pane.CaptionVisible(visible, left)\n self._PaneInfoOperation(closure)", "def _set_visibility_and_animations( self ):\r\n import re\r\n pattern = [ \"control.hasfocus\\(([0-9]+)\\)\", \"control.isvisible\\(([0-9]+)\\)\" ]\r\n rvalue = [ \"control.hasfocus(##)\", \"control.isvisible(##)\" ]\r\n for key in self.win.controls.keys():\r\n visible = self.win.controls[ key ][ \"visible\" ][ 0 ]\r\n enable = self.win.controls[ key ][ \"enable\" ]\r\n visibleChanged = False\r\n enableChanged = False\r\n animChanged = False\r\n final_anim = []\r\n for cnt in range( len( pattern ) ):\r\n items = re.findall( pattern[ cnt ], visible )\r\n visible = re.sub( pattern[ cnt ], rvalue[ cnt ], visible )\r\n # fix Control.HasFocus(id) visibility condition and Control.IsVisible(id) visibility condition\r\n for item in items:\r\n visibleChanged = True\r\n if ( int( item ) in self.navigation and self.navigation[ int( item ) ][ 0 ] in self.win.controls and self.win.controls[ self.navigation[ int( item ) ][ 0 ] ][ \"id\" ] == int( item ) ):\r\n actualId = self.win.controls[ self.navigation[ int( item ) ][ 0 ] ][ \"controlId\" ]\r\n visible = visible.replace( \"##\", str( actualId ), 1 )\r\n items = re.findall( pattern[ cnt ], enable )\r\n enable = re.sub( pattern[ cnt ], rvalue[ cnt ], enable )\r\n # fix Control.HasFocus(id) enabled condition and Control.IsVisible(id) enabled condition\r\n for item in items:\r\n enableChanged = True\r\n if ( int( item ) in self.navigation and self.navigation[ int( item ) ][ 0 ] in self.win.controls and self.win.controls[ self.navigation[ int( item ) ][ 0 ] ][ \"id\" ]==int( item ) ):\r\n actualId = self.win.controls[ self.navigation[ int( item ) ][ 0 ] ][ \"controlId\" ]\r\n enable = enable.replace( \"##\", str( actualId ), 1 )\r\n # fix Control.HasFocus(id) animation condition and Control.IsVisible(id) animation condition\r\n for acnt in range( len( self.win.controls[ key ][ \"animation\" ] ) ):\r\n items = re.findall( pattern[ cnt ], self.win.controls[ key ][ \"animation\" ][ acnt ][ 1 ] )\r\n anim_attr = re.sub( pattern[ cnt ], rvalue[ cnt ], self.win.controls[ key ][ \"animation\" ][ acnt ][ 1 ] )\r\n for item in items:\r\n animChanged = True\r\n if ( int( item ) in self.navigation and self.navigation[ int( item ) ][ 0 ] in self.win.controls and self.win.controls[ self.navigation[ int( item ) ][ 0 ] ][ \"id\" ]==int( item ) ):\r\n actualId = self.win.controls[ self.navigation[ int( item ) ][ 0 ] ][ \"controlId\" ]\r\n anim_attr = anim_attr.replace( \"##\", str( actualId ), 1 
)\r\n if ( items ): final_anim += [ ( self.win.controls[ key ][ \"animation\" ][ acnt ][ 0 ], anim_attr, ) ]\r\n \r\n # set the controls new visible condition\r\n if ( visibleChanged ): self.win.controls[ key ][ \"visible\" ][ 0 ] = visible\r\n # set the controls new visible condition\r\n if ( enableChanged ): self.win.controls[ key ][ \"enable\" ] = enable\r\n # set the controls new animation condition\r\n if ( animChanged ): \r\n self.win.controls[ key ][ \"animation\" ] = final_anim\r\n # set the controls initial visibility\r\n if ( visible != \"false\" and visible != \"true\" ):\r\n self.win.controls[ key ][ \"control\" ].setVisibleCondition( visible, self.win.controls[ key ][ \"visible\" ][ 1 ] )\r\n else:\r\n self.win.controls[ key ][ \"control\" ].setVisible( xbmc.getCondVisibility( visible ) )\r\n if ( enable != \"false\" and enable != \"true\" ):\r\n self.win.controls[ key ][ \"control\" ].setEnableCondition( enable )\r\n else:\r\n self.win.controls[ key ][ \"control\" ].setEnabled( xbmc.getCondVisibility( enable ) )\r\n # set the controls animations\r\n if ( self.win.controls[ key ][ \"animation\" ] ): self.win.controls[ key ][ \"control\" ].setAnimations( self.win.controls[ key ][ \"animation\" ] )", "def unHide(self):\n self.visible = True", "def shell_header_visible_changed(self, visible):\n self.set_header_visible(visible)", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()", "def visibility(self, visibility):\n\n self._visibility = visibility", "def visibility(self, visibility):\n\n self._visibility = visibility", "def visibility(self, visibility):\n\n self._visibility = visibility", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def set_title_bar_visible(self, visible):\n self.widget.SetTitleBarVisible(visible)", "def hide(self):\n self.visible = False", "def hide(self, event=None):\n self.visible = 0\n self.withdraw()", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def isVisible(self):\n\t\treturn True", "def hide(self):\n self.set_visible(False)", "def show(self):\n self.frame.grid()\n self.visible = True", "def __robot_visibility_checkbox(self, c):\n if len(self.__robots) > 0:\n self.__robots[self.__selected_robot].set_robot_visibility(\n c.checked)", "def PinButton(self, visible=True):\r\n \r\n return self.SetFlag(self.buttonPin, visible)", "def setIsolateHidden( self, state ):\n self._isolatedHidden = state\n \n super(XNode, self).setVisible(self.isVisible())", "def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()", "def setSurfaceVisibility(visible='both'):\n vdict = {'both':'BOTH','top':'TOP','bottom':'BOTTOM'}\n dislin.survis(vdict[visible])", "def is_visible(self):", "def visible_to(self, visible_to):\n\n self._visible_to = visible_to", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def hidden(self, hidden):\n\n self._hidden = hidden", "def visibility_toggle(self, _):\n raise VisibilityToggleEvent", "def setGridVisible(self,visible=True):\n for line in self.items():\n if isinstance(line, QGraphicsLineItem):\n # ignore arrow\n if not hasattr(line, 'arrowHead'):\n line.setVisible(visible)", 
"def __update_visible(self) -> None:\n for i in range(0, 8):\n visible_row = self.__row_position + Labyrinth.ALL_ROW_MOVE[i]\n visible_col = self.__col_position + Labyrinth.ALL_COL_MOVE[i]\n if 0 <= visible_row < self.__labyrinth.labyrinth_height and \\\n 0 <= visible_col < self.__labyrinth.labyrinth_width:\n self.__labyrinth.visible_cells[visible_row][visible_col] = 1", "def is_visible(self):\n return self.proto.display_type == DISPLAY_TYPE.Visible.value", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def EnsureVisible(self, indx):\r\n\r\n self._tabs.MakeTabVisible(indx, self)", "def hidden(self, val):\n self.set_property(\"Hidden\", val)", "def visible(self):\n return self._visible", "def visible(self):\n return self._visible", "def __setClearButtonVisibility(self, text):\n\n\t\tif text:\n\t\t\tself.__clearButton.show()\n\t\telse:\n\t\t\tself.__clearButton.hide()", "def toggle_window_visibility(self):\r\n if self.isHidden():\r\n self.show_window()\r\n self.visibilityAction.setText(self.hideWindowString)\r\n else:\r\n self.hide_window()\r\n self.visibilityAction.setText(self.showWindowString)", "def setModifyState(self, bool):\n self._canShowModRect = bool\n if bool == False:\n self._modRect.hide()", "def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()", "def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()", "def is_visible(self):\n return self._visible", "def setWhitespaceVisible (self, visible):\n if visible:\n self.srcEditor.setWhitespaceVisible(visible)\n else:\n self.srcEditor.setWhitespaceVisible(visible)", "def hide_at_showing(self, hide_at_showing):\n self._hide_at_showing = hide_at_showing", "def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')", "def set_visible(ax, spine_bottom=False, spine_top=False, spine_left=False, spine_right=False,\n grid=False, tick=False, label=False):\n ax.spines['bottom'].set_visible(spine_bottom)\n ax.spines['top'].set_visible(spine_top)\n ax.spines['left'].set_visible(spine_left)\n ax.spines['right'].set_visible(spine_right)\n ax.grid(grid)\n ax.tick_params(bottom=tick, top=tick, left=tick, right=tick,\n labelbottom=label, labeltop=label, labelleft=label, labelright=label)" ]
[ "0.8229523", "0.8191319", "0.79702616", "0.7886644", "0.77734816", "0.7673497", "0.7571436", "0.7353849", "0.73125196", "0.71429527", "0.7117967", "0.711646", "0.70657074", "0.70528007", "0.68809277", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6869344", "0.6848557", "0.6820751", "0.679397", "0.67642343", "0.67077047", "0.6576891", "0.6576891", "0.657447", "0.657447", "0.6544712", "0.6506106", "0.6505238", "0.6462721", "0.6314283", "0.63006103", "0.62741345", "0.621319", "0.62044644", "0.6188408", "0.6152939", "0.60900396", "0.6083009", "0.6083009", "0.6083009", "0.60704505", "0.60704505", "0.6029361", "0.60183096", "0.60110456", "0.6007338", "0.6007338", "0.5950565", "0.5921211", "0.58970207", "0.5894665", "0.5889768", "0.58579165", "0.58259577", "0.5810671", "0.5807454", "0.58043545", "0.58018607", "0.58018607", "0.5793067", "0.5752579", "0.5743238", "0.57394034", "0.57387906", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57170594", "0.57097405", "0.5708166", "0.5679435", "0.5679435", "0.5669985", "0.566377", "0.5653756", "0.5644879", "0.5644879", "0.5637574", "0.5632742", "0.5630965", "0.5612292", "0.561123" ]
0.8712535
1
train the node2vec model
def load_node2vec_for_doc_collection(cls, preprocess_document_collection: PreprocessMultiFieldDocumentCollection,
                                     pretrain_node2vec_path=None,
                                     embedding_size=DEFAULT_EMBEDDING_SIZE,
                                     ):
    not_exist_vector = LoadUtil.get_unknown_vector(embedding_size)
    if pretrain_node2vec_path is not None:
        print("pretrained node2vec path is given, loading")
        full_node2vec_model = Word2VecKeyedVectors.load(pretrain_node2vec_path)
        new_node2vec_model = Word2VecKeyedVectors(vector_size=embedding_size)
        doc_id_str_list = []
        vector_list = []
        invalid_doc_id_count = 0
        doc_list = preprocess_document_collection.get_all_preprocess_document_list()
        for doc in doc_list:
            doc_id_str = str(doc.get_document_id())
            if doc_id_str not in full_node2vec_model.vocab:
                vector = not_exist_vector
                invalid_doc_id_count = invalid_doc_id_count + 1
            else:
                vector = full_node2vec_model[doc_id_str]
            doc_id_str_list.append(doc_id_str)
            vector_list.append(vector)
        if len(doc_id_str_list) != len(set(doc_id_str_list)):
            raise Exception("error when filtering the node2vec, duplicate doc id in input doc collection!")
        new_node2vec_model.add(entities=doc_id_str_list, weights=vector_list, replace=True)
        print("full node2vec size=%d, invalid id=%d new_size=%d" % (
            len(full_node2vec_model.vocab), invalid_doc_id_count, len(new_node2vec_model.vocab)))
        return new_node2vec_model
    else:
        print("pretrained node2vec path is not given, training")
        # print("Word2Vec Training...")
        # # using the CBOW model of word2vec, because we don't use it to predict
        # w2v_model = gensim.models.Word2Vec(sentences=corpus_clean_text, size=embedding_size, min_count=1)
        # print("Word2Vec Train complete")
        # self.node2vec_model = w2v_model.wv
        # todo: implement node2vec training here; currently this only loads a pretrained node2vec model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainNet():", "def main() -> None:\n\n # Load pickled (adj, feat) tuple\n with open(os.path.join(NETWORK_DIR, PICKLE_FILE), \"rb\") as file:\n adj, features = pickle.load(file)\n\n g = nx.Graph(adj) # Recreate graph using node indices (0 to num_nodes-1)\n\n # Draw the network\n # nx.draw_networkx(g, with_labels=False, node_size=50, node_color=\"r\")\n # plt.show()\n\n # Preprocessing (train/test split)\n np.random.seed(0) # make sure train-test split is consistent\n adj_sparse = nx.to_scipy_sparse_matrix(g)\n\n # Perform train-test split\n (\n adj_train,\n train_edges,\n train_edges_false,\n val_edges,\n val_edges_false,\n test_edges,\n test_edges_false,\n ) = mask_test_edges(adj_sparse, test_frac=0.3, val_frac=0.1)\n\n # new graph object with only non-hidden edges\n g_train = nx.from_scipy_sparse_matrix(adj_train)\n\n # Inspect train/test split\n print(\"Total nodes:\", adj_sparse.shape[0])\n\n # adj is symmetric, so nnz (num non-zero) = 2 * num_edges\n print(\"Total edges:\", int(adj_sparse.nnz / 2))\n print(\"Training edges (positive):\", len(train_edges))\n print(\"Training edges (negative):\", len(train_edges_false))\n print(\"Validation edges (positive):\", len(val_edges))\n print(\"Validation edges (negative):\", len(val_edges_false))\n print(\"Test edges (positive):\", len(test_edges))\n print(\"Test edges (negative):\", len(test_edges_false))\n\n # Train node2vec (Learn Node Embeddings)\n\n # node2vec settings\n # NOTE: When p = q = 1, this is equivalent to DeepWalk\n\n P = 1 # Return hyperparameter\n Q = 1 # In-out hyperparameter\n WINDOW_SIZE = 10 # Context size for optimization\n NUM_WALKS = 10 # Number of walks per source\n WALK_LENGTH = 80 # Length of walk per source\n DIMENSIONS = 128 # Embedding dimension\n DIRECTED = False # Graph directed/undirected\n WORKERS = 8 # Num. 
parallel workers\n ITER = 1 # SGD epochs\n\n # Preprocessing, generate walks\n\n # create node2vec graph instance\n g_n2v = node2vec.Graph(g_train, DIRECTED, P, Q)\n g_n2v.preprocess_transition_probs()\n walks = g_n2v.simulate_walks(NUM_WALKS, WALK_LENGTH)\n walks = [list(map(str, walk)) for walk in walks]\n\n # Train skip-gram model\n model = Word2Vec(\n walks,\n size=DIMENSIONS,\n window=WINDOW_SIZE,\n min_count=0,\n sg=1,\n workers=WORKERS,\n iter=ITER,\n )\n\n # Store embeddings mapping\n emb_mappings = model.wv\n\n print(emb_mappings)\n\n # Create node embeddings matrix (rows = nodes, columns = embedding features)\n emb_list = []\n for node_index in range(0, adj_sparse.shape[0]):\n node_str = str(node_index)\n node_emb = emb_mappings[node_str]\n emb_list.append(node_emb)\n emb_matrix = np.vstack(emb_list)\n\n def get_edge_embeddings(edge_list):\n \"\"\"\n Generate bootstrapped edge embeddings (as is done in node2vec paper)\n Edge embedding for (v1, v2) = hadamard product of node embeddings for\n v1, v2.\n \"\"\"\n embs = []\n for edge in edge_list:\n node1 = edge[0]\n node2 = edge[1]\n emb1 = emb_matrix[node1]\n emb2 = emb_matrix[node2]\n edge_emb = np.multiply(emb1, emb2)\n embs.append(edge_emb)\n embs = np.array(embs)\n return embs\n\n # Train-set edge embeddings\n pos_train_edge_embs = get_edge_embeddings(train_edges)\n neg_train_edge_embs = get_edge_embeddings(train_edges_false)\n train_edge_embs = np.concatenate(\n [pos_train_edge_embs, neg_train_edge_embs]\n )\n\n # Create train-set edge labels: 1 = real edge, 0 = false edge\n train_edge_labels = np.concatenate(\n [np.ones(len(train_edges)), np.zeros(len(train_edges_false))]\n )\n\n # Val-set edge embeddings, labels\n pos_val_edge_embs = get_edge_embeddings(val_edges)\n neg_val_edge_embs = get_edge_embeddings(val_edges_false)\n val_edge_embs = np.concatenate([pos_val_edge_embs, neg_val_edge_embs])\n val_edge_labels = np.concatenate(\n [np.ones(len(val_edges)), np.zeros(len(val_edges_false))]\n )\n\n # Test-set edge embeddings, labels\n pos_test_edge_embs = get_edge_embeddings(test_edges)\n neg_test_edge_embs = get_edge_embeddings(test_edges_false)\n test_edge_embs = np.concatenate([pos_test_edge_embs, neg_test_edge_embs])\n\n # Create val-set edge labels: 1 = real edge, 0 = false edge\n test_edge_labels = np.concatenate(\n [np.ones(len(test_edges)), np.zeros(len(test_edges_false))]\n )\n\n # Train logistic regression classifier on train-set edge embeddings\n edge_classifier = LogisticRegression(random_state=0)\n edge_classifier.fit(train_edge_embs, train_edge_labels)\n\n # Predicted edge scores: probability of being of class \"1\" (real edge)\n val_preds = edge_classifier.predict_proba(val_edge_embs)[:, 1]\n val_roc = roc_auc_score(val_edge_labels, val_preds)\n val_ap = average_precision_score(val_edge_labels, val_preds)\n\n # Predicted edge scores: probability of being of class \"1\" (real edge)\n test_preds = edge_classifier.predict_proba(test_edge_embs)[:, 1]\n test_roc = roc_auc_score(test_edge_labels, test_preds)\n test_ap = average_precision_score(test_edge_labels, test_preds)\n\n print(\"node2vec Validation ROC score: \", str(val_roc))\n print(\"node2vec Validation AP score: \", str(val_ap))\n print(\"node2vec Test ROC score: \", str(test_roc))\n print(\"node2vec Test AP score: \", str(test_ap))", "def train():\n pass", "def train():\n # YOUR TRAINING CODE GOES HERE", "def trainModel( self, featureTrain, classTrain):", "def train(args):\n\n log_header('Training network')\n\n train_retriever(args)", "def 
node2vec_embedding(\n G_training,\n dimensions=64,\n walk_length=10,\n num_walks=10,\n p=1,\n q=1.2\n):\n node2vec = Node2Vec(\n G_training,\n dimensions=dimensions,\n walk_length=walk_length,\n num_walks=num_walks,\n p=p,\n q=q\n )\n print(\"Fitting node2vec model...\")\n # Using skip-gram algorithm and negative sampling\n # instead of hierarchical softmax\n model = node2vec.fit(window=5, min_count=1, sg=1, hs=0)\n return model", "def train_naive(): # add arguments as needed\n pass", "def train(\n # fmt: off\n lang: (\"Model language\", \"positional\", None, str),\n output_path: (\"Output directory to store model in\", \"positional\", None, Path),\n train_path: (\"Location of JSON-formatted training data\", \"positional\", None, Path),\n dev_path: (\"Location of JSON-formatted development data\", \"positional\", None, Path),\n raw_text: (\"Path to jsonl file with unlabelled text documents.\", \"option\", \"rt\", Path) = None,\n base_model: (\"Name of model to update (optional)\", \"option\", \"b\", str) = None,\n pipeline: (\"Comma-separated names of pipeline components\", \"option\", \"p\", str) = \"tagger,parser,ner\",\n vectors: (\"Model to load vectors from\", \"option\", \"v\", str) = None,\n replace_components: (\"Replace components from base model\", \"flag\", \"R\", bool) = False,\n n_iter: (\"Number of iterations\", \"option\", \"n\", int) = 30,\n n_early_stopping: (\"Maximum number of training epochs without dev accuracy improvement\", \"option\", \"ne\", int) = None,\n n_examples: (\"Number of examples\", \"option\", \"ns\", int) = 0,\n use_gpu: (\"Use GPU\", \"option\", \"g\", int) = -1,\n version: (\"Model version\", \"option\", \"V\", str) = \"0.0.0\",\n meta_path: (\"Optional path to meta.json to use as base.\", \"option\", \"m\", Path) = None,\n init_tok2vec: (\"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.\", \"option\", \"t2v\", Path) = None,\n parser_multitasks: (\"Side objectives for parser CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"pt\", str) = \"\",\n entity_multitasks: (\"Side objectives for NER CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"et\", str) = \"\",\n noise_level: (\"Amount of corruption for data augmentation\", \"option\", \"nl\", float) = 0.0,\n orth_variant_level: (\"Amount of orthography variation for data augmentation\", \"option\", \"ovl\", float) = 0.0,\n eval_beam_widths: (\"Beam widths to evaluate, e.g. 
4,8\", \"option\", \"bw\", str) = \"\",\n gold_preproc: (\"Use gold preprocessing\", \"flag\", \"G\", bool) = False,\n learn_tokens: (\"Make parser learn gold-standard tokenization\", \"flag\", \"T\", bool) = False,\n textcat_multilabel: (\"Textcat classes aren't mutually exclusive (multilabel)\", \"flag\", \"TML\", bool) = False,\n textcat_arch: (\"Textcat model architecture\", \"option\", \"ta\", str) = \"bow\",\n textcat_positive_label: (\"Textcat positive label for binary classes with two labels\", \"option\", \"tpl\", str) = None,\n tag_map_path: (\"Location of JSON-formatted tag map\", \"option\", \"tm\", Path) = None,\n verbose: (\"Display more information for debug\", \"flag\", \"VV\", bool) = False,\n debug: (\"Run data diagnostics before training\", \"flag\", \"D\", bool) = False,\n # fmt: on\n):\n util.fix_random_seed()\n util.set_env_log(verbose)\n\n # Make sure all files and paths exists if they are needed\n train_path = util.ensure_path(train_path)\n dev_path = util.ensure_path(dev_path)\n meta_path = util.ensure_path(meta_path)\n output_path = util.ensure_path(output_path)\n if raw_text is not None:\n raw_text = list(srsly.read_jsonl(raw_text))\n if not train_path or not train_path.exists():\n msg.fail(\"Training data not found\", train_path, exits=1)\n if not dev_path or not dev_path.exists():\n msg.fail(\"Development data not found\", dev_path, exits=1)\n if meta_path is not None and not meta_path.exists():\n msg.fail(\"Can't find model meta.json\", meta_path, exits=1)\n meta = srsly.read_json(meta_path) if meta_path else {}\n if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:\n msg.warn(\n \"Output directory is not empty\",\n \"This can lead to unintended side effects when saving the model. \"\n \"Please use an empty directory or a different path instead. If \"\n \"the specified output path doesn't exist, the directory will be \"\n \"created for you.\",\n )\n if not output_path.exists():\n output_path.mkdir()\n msg.good(f\"Created output directory: {output_path}\")\n\n tag_map = {}\n if tag_map_path is not None:\n tag_map = srsly.read_json(tag_map_path)\n # Take dropout and batch size as generators of values -- dropout\n # starts high and decays sharply, to force the optimizer to explore.\n # Batch size starts at 1 and grows, so that we make updates quickly\n # at the beginning of training.\n dropout_rates = util.decaying(\n util.env_opt(\"dropout_from\", 0.2),\n util.env_opt(\"dropout_to\", 0.2),\n util.env_opt(\"dropout_decay\", 0.0),\n )\n batch_sizes = util.compounding(\n util.env_opt(\"batch_from\", 100.0),\n util.env_opt(\"batch_to\", 1000.0),\n util.env_opt(\"batch_compound\", 1.001),\n )\n\n if not eval_beam_widths:\n eval_beam_widths = [1]\n else:\n eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(\",\")]\n if 1 not in eval_beam_widths:\n eval_beam_widths.append(1)\n eval_beam_widths.sort()\n has_beam_widths = eval_beam_widths != [1]\n\n default_dir = Path(__file__).parent.parent / \"ml\" / \"models\" / \"defaults\"\n\n # Set up the base model and pipeline. If a base model is specified, load\n # the model and make sure the pipeline matches the pipeline setting. 
If\n # training starts from a blank model, intitalize the language class.\n pipeline = [p.strip() for p in pipeline.split(\",\")]\n msg.text(f\"Training pipeline: {pipeline}\")\n disabled_pipes = None\n pipes_added = False\n if use_gpu >= 0:\n activated_gpu = None\n try:\n activated_gpu = set_gpu(use_gpu)\n except Exception as e:\n msg.warn(f\"Exception: {e}\")\n if activated_gpu is not None:\n msg.text(f\"Using GPU: {use_gpu}\")\n else:\n msg.warn(f\"Unable to activate GPU: {use_gpu}\")\n msg.text(\"Using CPU only\")\n use_gpu = -1\n if base_model:\n msg.text(f\"Starting with base model '{base_model}'\")\n nlp = util.load_model(base_model)\n if nlp.lang != lang:\n msg.fail(\n f\"Model language ('{nlp.lang}') doesn't match language \"\n f\"specified as `lang` argument ('{lang}') \",\n exits=1,\n )\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n nlp.select_pipes(disable=[p for p in nlp.pipe_names if p not in pipeline])\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n if pipe not in nlp.pipe_names:\n msg.text(f\"Adding component to base model '{pipe}'\")\n nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n elif replace_components:\n msg.text(f\"Replacing component from base model '{pipe}'\")\n nlp.replace_pipe(pipe, nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n else:\n if pipe == \"textcat\":\n textcat_cfg = nlp.get_pipe(\"textcat\").cfg\n base_cfg = {\n \"exclusive_classes\": textcat_cfg[\"exclusive_classes\"],\n \"architecture\": textcat_cfg[\"architecture\"],\n \"positive_label\": textcat_cfg[\"positive_label\"],\n }\n if base_cfg != pipe_cfg:\n msg.fail(\n f\"The base textcat model configuration does\"\n f\"not match the provided training options. 
\"\n f\"Existing cfg: {base_cfg}, provided cfg: {pipe_cfg}\",\n exits=1,\n )\n msg.text(f\"Extending component from base model '{pipe}'\")\n disabled_pipes = nlp.select_pipes(\n disable=[p for p in nlp.pipe_names if p not in pipeline]\n )\n else:\n msg.text(f\"Starting with blank model '{lang}'\")\n lang_cls = util.get_lang_class(lang)\n nlp = lang_cls()\n\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"morphologizer\":\n config_loc = default_dir / \"morphologizer_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n pipe = nlp.create_pipe(pipe, config=pipe_cfg)\n nlp.add_pipe(pipe)\n\n # Update tag map with provided mapping\n nlp.vocab.morphology.tag_map.update(tag_map)\n\n # Multitask objectives\n multitask_options = [(\"parser\", parser_multitasks), (\"ner\", entity_multitasks)]\n for pipe_name, multitasks in multitask_options:\n if multitasks:\n if pipe_name not in pipeline:\n msg.fail(\n f\"Can't use multitask objective without '{pipe_name}' in \"\n f\"the pipeline\"\n )\n pipe = nlp.get_pipe(pipe_name)\n for objective in multitasks.split(\",\"):\n pipe.add_multitask_objective(objective)\n\n # Prepare training corpus\n msg.text(f\"Counting training words (limit={n_examples})\")\n corpus = GoldCorpus(train_path, dev_path, limit=n_examples)\n n_train_words = corpus.count_train()\n\n if base_model and not pipes_added:\n # Start with an existing model, use default optimizer\n optimizer = create_default_optimizer()\n else:\n # Start with a blank model, call begin_training\n cfg = {\"device\": use_gpu}\n optimizer = nlp.begin_training(lambda: corpus.train_examples, **cfg)\n nlp._optimizer = None\n\n # Load in pretrained weights (TODO: this may be broken in the config rewrite)\n if init_tok2vec is not None:\n components = _load_pretrained_tok2vec(nlp, init_tok2vec)\n msg.text(f\"Loaded pretrained tok2vec for: {components}\")\n\n # Verify textcat config\n if \"textcat\" in pipeline:\n textcat_labels = nlp.get_pipe(\"textcat\").cfg.get(\"labels\", [])\n if textcat_positive_label and textcat_positive_label not in textcat_labels:\n msg.fail(\n f\"The textcat_positive_label (tpl) '{textcat_positive_label}' \"\n f\"does not match any label in the training data.\",\n exits=1,\n )\n if textcat_positive_label and len(textcat_labels) != 2:\n msg.fail(\n \"A textcat_positive_label (tpl) '{textcat_positive_label}' was \"\n \"provided for 
training data that does not appear to be a \"\n \"binary classification problem with two labels.\",\n exits=1,\n )\n train_data = corpus.train_data(\n nlp,\n noise_level=noise_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n train_labels = set()\n if textcat_multilabel:\n multilabel_found = False\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1:\n multilabel_found = True\n if not multilabel_found and not base_model:\n msg.warn(\n \"The textcat training instances look like they have \"\n \"mutually-exclusive classes. Remove the flag \"\n \"'--textcat-multilabel' to train a classifier with \"\n \"mutually-exclusive classes.\"\n )\n if not textcat_multilabel:\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1 and not base_model:\n msg.warn(\n \"Some textcat training instances do not have exactly \"\n \"one positive label. Modifying training options to \"\n \"include the flag '--textcat-multilabel' for classes \"\n \"that are not mutually exclusive.\"\n )\n nlp.get_pipe(\"textcat\").cfg[\"exclusive_classes\"] = False\n textcat_multilabel = True\n break\n if base_model and set(textcat_labels) != train_labels:\n msg.fail(\n f\"Cannot extend textcat model using data with different \"\n f\"labels. Base model labels: {textcat_labels}, training data \"\n f\"labels: {list(train_labels)}\",\n exits=1,\n )\n if textcat_multilabel:\n msg.text(\n f\"Textcat evaluation score: ROC AUC score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n elif textcat_positive_label and len(textcat_labels) == 2:\n msg.text(\n f\"Textcat evaluation score: F1-score for the \"\n f\"label '{textcat_positive_label}'\"\n )\n elif len(textcat_labels) > 1:\n if len(textcat_labels) == 2:\n msg.warn(\n \"If the textcat component is a binary classifier with \"\n \"exclusive classes, provide '--textcat_positive_label' for \"\n \"an evaluation on the positive class.\"\n )\n msg.text(\n f\"Textcat evaluation score: F1-score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n else:\n msg.fail(\n \"Unsupported textcat configuration. 
Use `spacy debug-data` \"\n \"for more information.\"\n )\n\n # fmt: off\n row_head, output_stats = _configure_training_output(pipeline, use_gpu, has_beam_widths)\n row_widths = [len(w) for w in row_head]\n row_settings = {\"widths\": row_widths, \"aligns\": tuple([\"r\" for i in row_head]), \"spacing\": 2}\n # fmt: on\n print(\"\")\n msg.row(row_head, **row_settings)\n msg.row([\"-\" * width for width in row_settings[\"widths\"]], **row_settings)\n try:\n iter_since_best = 0\n best_score = 0.0\n for i in range(n_iter):\n train_data = corpus.train_dataset(\n nlp,\n noise_level=noise_level,\n orth_variant_level=orth_variant_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n if raw_text:\n random.shuffle(raw_text)\n raw_batches = util.minibatch(\n (nlp.make_doc(rt[\"text\"]) for rt in raw_text), size=8\n )\n words_seen = 0\n with tqdm.tqdm(total=n_train_words, leave=False) as pbar:\n losses = {}\n for batch in util.minibatch_by_words(train_data, size=batch_sizes):\n if not batch:\n continue\n try:\n nlp.update(\n batch,\n sgd=optimizer,\n drop=next(dropout_rates),\n losses=losses,\n )\n except ValueError as e:\n err = \"Error during training\"\n if init_tok2vec:\n err += \" Did you provide the same parameters during 'train' as during 'pretrain'?\"\n msg.fail(err, f\"Original error message: {e}\", exits=1)\n if raw_text:\n # If raw text is available, perform 'rehearsal' updates,\n # which use unlabelled data to reduce overfitting.\n raw_batch = list(next(raw_batches))\n nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)\n docs = [ex.doc for ex in batch]\n if not int(os.environ.get(\"LOG_FRIENDLY\", 0)):\n pbar.update(sum(len(doc) for doc in docs))\n words_seen += sum(len(doc) for doc in docs)\n with nlp.use_params(optimizer.averages):\n util.set_env_log(False)\n epoch_model_path = output_path / f\"model{i}\"\n nlp.to_disk(epoch_model_path)\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for beam_width in eval_beam_widths:\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n nwords = sum(len(ex.doc) for ex in dev_dataset)\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n if use_gpu < 0:\n gpu_wps = None\n cpu_wps = nwords / (end_time - start_time)\n else:\n gpu_wps = nwords / (end_time - start_time)\n with use_ops(\"numpy\"):\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n cpu_wps = nwords / (end_time - start_time)\n acc_loc = output_path / f\"model{i}\" / \"accuracy.json\"\n srsly.write_json(acc_loc, scorer.scores)\n\n # Update model meta.json\n meta[\"lang\"] = nlp.lang\n meta[\"pipeline\"] = nlp.pipe_names\n meta[\"spacy_version\"] = f\">={about.__version__}\"\n if beam_width == 1:\n meta[\"speed\"] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta.setdefault(\"accuracy\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"accuracy\"][metric] = scorer.scores[metric]\n 
else:\n meta.setdefault(\"beam_accuracy\", {})\n meta.setdefault(\"beam_speed\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"beam_accuracy\"][metric] = scorer.scores[metric]\n meta[\"beam_speed\"][beam_width] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta[\"vectors\"] = {\n \"width\": nlp.vocab.vectors_length,\n \"vectors\": len(nlp.vocab.vectors),\n \"keys\": nlp.vocab.vectors.n_keys,\n \"name\": nlp.vocab.vectors.name,\n }\n meta.setdefault(\"name\", f\"model{i}\")\n meta.setdefault(\"version\", version)\n meta[\"labels\"] = nlp.meta[\"labels\"]\n meta_loc = output_path / f\"model{i}\" / \"meta.json\"\n srsly.write_json(meta_loc, meta)\n util.set_env_log(verbose)\n\n progress = _get_progress(\n i,\n losses,\n scorer.scores,\n output_stats,\n beam_width=beam_width if has_beam_widths else None,\n cpu_wps=cpu_wps,\n gpu_wps=gpu_wps,\n )\n if i == 0 and \"textcat\" in pipeline:\n textcats_per_cat = scorer.scores.get(\"textcats_per_cat\", {})\n for cat, cat_score in textcats_per_cat.items():\n if cat_score.get(\"roc_auc_score\", 0) < 0:\n msg.warn(\n f\"Textcat ROC AUC score is undefined due to \"\n f\"only one value in label '{cat}'.\"\n )\n msg.row(progress, **row_settings)\n # Early stopping\n if n_early_stopping is not None:\n current_score = _score_for_model(meta)\n if current_score < best_score:\n iter_since_best += 1\n else:\n iter_since_best = 0\n best_score = current_score\n if iter_since_best >= n_early_stopping:\n msg.text(\n f\"Early stopping, best iteration is: {i - iter_since_best}\"\n )\n msg.text(\n f\"Best score = {best_score}; Final iteration score = {current_score}\"\n )\n break\n except Exception as e:\n msg.warn(f\"Aborting and saving final best model. Encountered exception: {e}\")\n finally:\n best_pipes = nlp.pipe_names\n if disabled_pipes:\n disabled_pipes.restore()\n with nlp.use_params(optimizer.averages):\n final_model_path = output_path / \"model-final\"\n nlp.to_disk(final_model_path)\n meta_loc = output_path / \"model-final\" / \"meta.json\"\n final_meta = srsly.read_json(meta_loc)\n final_meta.setdefault(\"accuracy\", {})\n final_meta[\"accuracy\"].update(meta.get(\"accuracy\", {}))\n final_meta.setdefault(\"speed\", {})\n final_meta[\"speed\"].setdefault(\"cpu\", None)\n final_meta[\"speed\"].setdefault(\"gpu\", None)\n meta.setdefault(\"speed\", {})\n meta[\"speed\"].setdefault(\"cpu\", None)\n meta[\"speed\"].setdefault(\"gpu\", None)\n # combine cpu and gpu speeds with the base model speeds\n if final_meta[\"speed\"][\"cpu\"] and meta[\"speed\"][\"cpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"cpu\"], meta[\"speed\"][\"cpu\"]]\n )\n final_meta[\"speed\"][\"cpu\"] = speed\n if final_meta[\"speed\"][\"gpu\"] and meta[\"speed\"][\"gpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"gpu\"], meta[\"speed\"][\"gpu\"]]\n )\n final_meta[\"speed\"][\"gpu\"] = speed\n # if there were no speeds to update, overwrite with meta\n if (\n final_meta[\"speed\"][\"cpu\"] is None\n and final_meta[\"speed\"][\"gpu\"] is None\n ):\n final_meta[\"speed\"].update(meta[\"speed\"])\n # note: beam speeds are not combined with the base model\n if has_beam_widths:\n final_meta.setdefault(\"beam_accuracy\", {})\n final_meta[\"beam_accuracy\"].update(meta.get(\"beam_accuracy\", {}))\n final_meta.setdefault(\"beam_speed\", {})\n final_meta[\"beam_speed\"].update(meta.get(\"beam_speed\", {}))\n srsly.write_json(meta_loc, final_meta)\n msg.good(\"Saved model to output directory\", 
final_model_path)\n with msg.loading(\"Creating best model...\"):\n best_model_path = _collate_best_model(final_meta, output_path, best_pipes)\n msg.good(\"Created best model\", best_model_path)", "def trainNewModel():\n print \"Creating feature vectors for trainset...\"\n trainDependencies = getDependency(trainDepFilename)\n trainLabel, trainFeatureVectors = \\\n createFeatureVectors(trainFilename, trainDependencies)\n print \"Length of feature vector for trainset: %d\" \\\n % len(trainFeatureVectors[0])\n if not len(addTrainsetList) == 0:\n print \"Combining feature vectors of additional trainset...\"\n trainLabel, trainFeatureVectors = \\\n combineAdditionalTrainset(\n addTrainsetList, trainLabel, trainFeatureVectors)\n print \"Feature vectors of trainset created.\"\n SVMTrain(trainLabel, trainFeatureVectors, modelFilename)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self, X, y):", "def main():\n\tif(len(sys.argv) < 6) :\n\t\tprint('Usage : python node_representation.py graphfile node2vec_file text_file word2vec_size, size_walk')\n\t\texit()\n\n\tgraph, node2vec_file, text_file, size_w2v, size_walk = sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), int(sys.argv[5])\t\n\t#loads graph, word2vec corpora and node2vec model\n\tfile = open(graph, 'rb')\n\tG = pickle.load(file) \n\tfile.close()\n\tnode2vec = load_node2vec(node2vec_file)\n\tdf_user_text, text, w2v_model = load_text(text_file, size_w2v)\n\n\tG = G.to_directed()\n\tG = nx.convert_node_labels_to_integers(G)\n\n\tnodes = list(G.nodes)\n\tgraph_data = {}\n\n\tprint('# iterating... ')\n\tfor node in nodes:\n\t\tusername = node\n\t\temb = node2vec[node]\n\t\tfeatures = get_topics_w2v(username, size_w2v, df_user_text, text, w2v_model)\n\t\twalk_nodes = random_walk_sampling_simple(G, node, size_walk)\n\t\twalk = []\n\t\tfor n in walk_nodes:\n\t\t\twalk.append(node2vec[n]) \n\t\tgraph_data[node] = Node(username, emb, features, walk)\n\n\tprint('ok!')\t\n\toutfile = open(graph+'_features','wb')\n\tpickle.dump(graph_data, outfile)", "def train(self, trainfile):", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train_reg(self, sess, pre, model, edges, G, cluster_negtivate=False, nodeid2cluster={}, iter=1, chunksize=150):\n assert model.node_embedding.dtype == np.float32\n\n log.info(\"O1 training model with %i workers on %i vocabulary and %i features and 'negative sampling'=%s\" %\n (self.workers, len(model.vocab), model.layer1_size, self.negative))\n\n if not model.vocab:\n raise RuntimeError(\"you must first build vocabulary before training the model\")\n\n edges = RepeatCorpusNTimes(edges, iter)\n total_node = edges.corpus.shape[0] * edges.corpus.shape[1] * edges.n\n log.debug('total edges: %d' % total_node)\n start, next_report, node_count = time.time(), [5.0], [0]\n\n #int(sum(v.count * v.sample_probability for v in self.vocab.values()))\n jobs = Queue(maxsize=2*self.workers) # buffer ahead only a limited number of jobs.. 
this is the reason we can't simply use ThreadPool :(\n lock = threading.Lock()\n\n\n def worker_train():\n \"\"\"Train the model, lifting lists of paths from the jobs queue.\"\"\"\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n #lr = self.lr \n job_words = 0\n #pre=self.build_model(len(model.vocab), model.layer1_size, lamda = 0.0, learning_rate=lr)\n for edge in job:\n if edge is not None:\n x = []\n y = []\n x.append([edge[0].index, edge[1].index])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n #for i in range(int(10 * (weight)) * self.negative):\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if nodeidx != edge[0].index:\n x.append([edge[0].index, nodeidx])\n y.append(0.0)\n feed_dict = {\n pre.x: x,\n pre.y: y,\n pre.node_embeddings_init: model.node_embedding\n }\n #saver = tf.train.Saver()\n _, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n feed_dict=feed_dict)\n model.node_embedding[edge[0].index] = node_embeddings[edge[0].index]\n x = []\n y = []\n x.append([edge[1].index, edge[0].index])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if edge[1].index != nodeidx:\n x.append([edge[1].index, nodeidx])\n y.append(0.0)\n feed_dict = {\n pre.x: x,\n pre.y: y,\n pre.node_embeddings_init: model.node_embedding\n }\n \n #saver = tf.train.Saver()\n _, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n feed_dict=feed_dict)\n\n #model.node_embedding = node_embeddings\n model.node_embedding[edge[1].index] = node_embeddings[edge[1].index]\n job_words += len(x)\n \n #log.info(\"train_loss: {}, node_embeddings = {}\".format(loss, model.node_embedding))\n \n #saver.restore(sess, INNER_MODEL_FILE)\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n #job_words = len(x)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()\n\n\n workers = [threading.Thread(target=worker_train, name='thread_'+str(i)) for i in range(self.workers)]\n for thread in workers:\n thread.daemon = True # make interrupting the process with ctrl+c easier\n thread.start()\n\n\n # convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue\n for job_no, job in enumerate(chunkize_serial(prepare_sentences(model, edges), chunksize)):\n jobs.put(job)\n\n\n for _ in range(self.workers):\n jobs.put(None) # give the workers heads up that they can finish -- no more work!\n\n for thread in workers:\n thread.join()\n \n elapsed = time.time() - 
start\n log.info(\"training on %i words took %.1fs, %.0f words/s\" %\n (node_count[0], elapsed, node_count[0]/ elapsed if elapsed else 0.0))", "def init_word2vec():\n start = time()\n if not os.path.exists('/cs/engproj/314/proj2/trained_model/GoogleNews-vectors-negative300.bin.gz'):\n raise ValueError(\"SKIP: You need to download the google news model\")\n model = KeyedVectors.load_word2vec_format('/cs/engproj/314/proj2/trained_model/GoogleNews-vectors-negative300.bin.gz', binary=True)\n print('Cell took %.2f seconds to run.' % (time() - start))\n # model.init_sims(replace=True)\n global trained_model\n trained_model = model\n return", "def train(self, model, edges, G, cluster_negtivate=False, nodeid2cluster={}, chunksize=150, iter=1):\n assert model.node_embedding.dtype == np.float32\n\n log.info(\"O1 training model with %i workers on %i vocabulary and %i features and 'negative sampling'=%s\" %\n (self.workers, len(model.vocab), model.layer1_size, self.negative))\n\n if not model.vocab:\n raise RuntimeError(\"you must first build vocabulary before training the model\")\n\n edges = RepeatCorpusNTimes(edges, iter)\n total_node = edges.corpus.shape[0] * edges.corpus.shape[1] * edges.n\n log.debug('total edges: %d' % total_node)\n start, next_report, node_count = time.time(), [5.0], [0]\n\n #int(sum(v.count * v.sample_probability for v in self.vocab.values()))\n jobs = Queue(maxsize=2*self.workers) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(\n lock = threading.Lock()\n\n def worker_train():\n \"\"\"Train the model, lifting lists of paths from the jobs queue.\"\"\"\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n #lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n lr = self.lr \n job_words = 0\n for edge in job:\n if edge is not None:\n if cluster_negtivate:\n node_set = set()\n if model.vocab_t[edge[0].index] not in nodeid2cluster:\n cls1 = -1\n else:\n cls1 = nodeid2cluster[model.vocab_t[edge[0].index]]\n node_set.add(cls1)\n if model.vocab_t[edge[1].index] not in nodeid2cluster:\n cls2 = -1\n else:\n cls2 = nodeid2cluster[model.vocab_t[edge[1].index]]\n node_set.add(cls2)\n neg_l = []\n #选择的负样本的node必须是有明确类别归属的\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if model.vocab_t[nodeidx] not in nodeid2cluster:\n i-=1\n continue\n else:\n cls_n = nodeid2cluster[model.vocab_t[nodeidx]]\n #加入不同边限制 G 里存放的是nodeid,不是idx\n if cls_n not in node_set and model.vocab_t[nodeidx] not in G[model.vocab_t[edge[0].index]] \\\n and model.vocab_t[nodeidx] not in G[model.vocab_t[edge[1].index]]:\n neg_l.append(nodeidx)\n neg_np = np.asarray(neg_l)\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n if weight > 0.0 and len(neg_np) > 0:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, int(10 * (weight)) * self.negative, neg_np,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n elif len(neg_np) == 0:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, 0, neg_np,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += 
sum(train_o1(model.node_embedding, edge, weight, lr, 0, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n else:\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n if weight >= 0.1:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, int(10 * (weight)) * self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, model.table,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n #for i in range(int(10 * weight)))\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()\n\n\n workers = [threading.Thread(target=worker_train, name='thread_'+str(i)) for i in range(self.workers)]\n for thread in workers:\n thread.daemon = True # make interrupting the process with ctrl+c easier\n thread.start()\n\n # convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue\n for job_no, job in enumerate(chunkize_serial(prepare_sentences(model, edges), chunksize)):\n jobs.put(job)\n\n for _ in range(self.workers):\n jobs.put(None) # give the workers heads up that they can finish -- no more work!\n\n for thread in workers:\n thread.join()\n\n elapsed = time.time() - start\n log.info(\"training on %i words took %.1fs, %.0f words/s\" %\n (node_count[0], elapsed, node_count[0]/ elapsed if elapsed else 0.0))", "def train(args):\r\n print('Create generators')\r\n generators = train_valid_test_generators(\r\n valid_proportion=args.valid_proportion,\r\n test_proportion=args.test_proportion,\r\n seed=args.seed,\r\n shape=(args.height, args.width),\r\n batch_size=args.batch_size,\r\n shuffle=True\r\n )\r\n print('Create model')\r\n model = create_mobilenetv2(\r\n input_shape=(args.height, args.width, 3),\r\n alpha=args.alpha,\r\n depth_multiplier=args.depth_multiplier,\r\n l2_reg=args.l2_reg,\r\n seed=args.seed\r\n )\r\n\r\n print('Training freezed model')\r\n freeze_model(model, 'global_max_pooling2d_1')\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'early_stopping',\r\n 'tensorboard',\r\n ],\r\n model_mask='mobilenetv2_multiclassification_freezed'\r\n )\r\n model = train_pipeline(\r\n model,\r\n generators['hard_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n epochs=args.epochs\r\n )\r\n\r\n print('Training unfreezed model')\r\n unfreeze_model(model)\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'best_model_checkpoint',\r\n 'early_stopping',\r\n 'tensorboard',\r\n 'learning_rate_scheduler'\r\n ],\r\n model_mask='mobilenetv2_multiclassification'\r\n )\r\n model = train_pipeline(\r\n model,\r\n generators['easy_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n 
epochs=3 * args.epochs\r\n )\r\n\r\n print('Save test evaluation')\r\n results = model.evaluate_generator(generators['test_generator'])\r\n pd.DataFrame({\r\n 'MetricsNames': model.metrics_names,\r\n 'Results': results\r\n }).to_csv(os.path.join('../logs/solution_1_test_generator_evaluation.csv'), index=False)", "def train(self, test_vector):\n\t\twith open(self.PATH + '/src/data/train_emma.csv', 'rt') as f:\n\t\t\treader = csv.reader(f)\n\n\t\t\ttrain_data = dict()\n\t\t\ttrain_data_labels = list()\n\t\t\ttrain_data_list = []\n\t\t\ttrain_data_labels_list = []\n\n\t\t\tnext(reader, None)\n\t\t\tfor row in reader:\n\t\t\t\tfor idx in range(len(row)):\n\t\t\t\t\tif idx == 0:\n\t\t\t\t\t\ttrain_data['file'] = row[idx]\n\t\t\t\t\tif idx == 1:\n\t\t\t\t\t\ttrain_data['line'] = int(row[idx])\n\t\t\t\t\tif idx == 2:\n\t\t\t\t\t\ttrain_data['timestamp'] = row[idx]\n\t\t\t\t\tif idx == 3:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\t\t\t\t\tif idx == 4:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\n\t\t\t\ttrain_data_list.append(train_data)\n\t\t\t\ttrain_data_labels_list.append(train_data_labels)\n\t\t\t\ttrain_data = dict()\n\t\t\t\ttrain_data_labels = list()\n\n\t\t\tC = 0.8\n\t\t\tdict_vectorizer = DictVectorizer(sparse=False)\n\t\t\ttrain_data_trasformed = dict_vectorizer.fit_transform(train_data_list)\n\t\t\ttest_vector_transformed = dict_vectorizer.transform(test_vector)\n\n\t\t\t# print(dict_vectorizer.get_feature_names())\n\t\t\t# print(dict_vectorizer.inverse_transform(train_data_trasformed))\n\n\t\t\t# print('Inverse transformation !!!')\n\t\t\t# print(test_vector)\n\t\t\t# inv_trans = dict_vectorizer.inverse_transform(test_vector_transformed)\n\n\t\t\t# fit LinearSVC\n\t\t\t# multi label binarizer to convert iterable of iterables into processing format\n\t\t\tmlb = MultiLabelBinarizer()\n\t\t\ty_enc = mlb.fit_transform(train_data_labels_list)\n\n\t\t\ttrain_vector = OneVsRestClassifier(svm.SVC(probability=True))\n\t\t\tclassifier_rbf = train_vector.fit(train_data_trasformed, y_enc)\n\n\t\t\t# test_vecc = cnt_vectorizer.fit_transform(X[:, 0])\n\t\t\t# # todo use pickle to persist\n\t\t\t# test_vector_reshaped = np.array(test_vector.ravel()).reshape((1, -1))\n\t\t\tprediction = classifier_rbf.predict(test_vector_transformed)\n\n\n\t\t\tprint(\"Predicted usernames: \\n\")\n\t\t\t# print(prediction)\n\t\t\t# print(mlb.inverse_transform(prediction))\n\n\t\t\tusers = self.parse_prediction(mlb.inverse_transform(prediction))\n\t\t\tprint(users)\n\t\t\treturn users", "def train():\n if os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. 
Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if os.path.isfile(os.path.join(train_dir, 'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, 
test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n weightsOfParams = lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)", "def train(self, training_steps=10):", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def __init__(self,nodes=[],model=KFModel()):\n self.nodes = nodes\n self.model = model\n self.status = 'none'\n return", "def train_model(schema,fieldsToRead = None):\n\tif not fieldsToRead:\n\t\tfieldsToRead = schema[\"fields\"].keys()\n\n\tif(\"vector_size\" in schema):\n\t\tvectorSize = schema[\"vector_size\"]\n\telse:\n\t\tvectorSize = DEFAULT_VECTOR_SIZE\n\n\tsentences = []\n\t# build sentences:\n\tprint 
\"Building Feature vectors...\"\n\n\tread_sentences(schema, lambda x : sentences.append(merge_sentences_to_single_sentence(x, fieldsToRead)))\n\tprint \"Read \" + str(len(sentences)) + \" documents\"\n\tprint \"Training Model...\"\n\tmodelPath = model_path(schema)\n\tweightMatrixPath = weight_matrix_path(schema)\n\tsentences = transpose_sentences(sentences)\n\tmodel = Word2Vec(sentences, size=vectorSize, window=5, min_count=1, workers=4)\n\tmodel.save(modelPath)\n\tmodel.save_word2vec_format(weightMatrixPath)\n\tprint \"Finished training\"\n\treturn model", "def train(self)->None:", "def train(self, **kwargs):\n self.solver.train(**kwargs)", "def train_mobilenetv2():\n\n # load data\n training_sets = load_augmented_dataset()\n\n # build models\n model_mobile = build_mobilenetv2()\n\n # store base weights\n baseWeights_t = model_mobile.get_weights()\n\n # NOTE: You can still leave this alone if you've only downloaded the fully augmented set.\n for training_set in training_sets:\n print(\" Starting training for set {}\".format(str(training_set)))\n model_mobile.set_weights(baseWeights_t) # Resets model\n train_x = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][0]))\n train_y = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][1]))\n\n early_stopping_monitor = EarlyStopping(patience=2)\n history = model_mobile.fit(train_x, train_y, batch_size=32, epochs=20, verbose=1, validation_split=0.2,\n shuffle=True,\n callbacks=[early_stopping_monitor])\n\n mpu.plot_accuracy_loss(history,\n \"./model_cache/train_data/{}_mobilenetv2_plots.png\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_mobilenetv2_plots.png\".format(str(training_set)),\n \"model_charts/{}_mobilenetv2_plots.png\".format(str(training_set)))\n\n model_mobile.save(\"./model_cache/train_data/{}_mobilenetv2.h5\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_mobilenetv2.h5\".format(str(training_set)),\n \"saved_models/{}_mobilenetv2.h5\".format(str(training_set)))", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train_SVM(data: np.array, labels: np.array)->None:\n print(\"SVM is not implemented yet!\")", "def training(train_data, dev_data, param):\n text_to_vec = TextToVec(**param)\n\n # Fit with both train and dev data\n text_to_vec.fit(train_data['data'] + dev_data['data'])\n word_vec_map = text_to_vec.vectorizer.get_feature_names()\n train_vec = text_to_vec.transform(train_data['data'])\n dev_vec = text_to_vec.transform(dev_data['data'])\n logger.info(f\"train vec size:{train_vec.shape}, dev vec size:{dev_vec.shape}\")\n\n # # apply weights on tfidf based on whether the word appear in multiple classes\n # tt_occ = Counter(train_data['encoded_label'])\n # weight_list = []\n # for i in range(train_vec.shape[1]): # For every feature\n # occ = Counter(train_data['encoded_label'][train_vec[:, i] > 0.0])\n # for key, value in occ.items():\n # occ[key] = value/tt_occ[key]\n # weight_list.append(np.std(list(occ.values()))/0.35)\n # weight = np.array(weight_list).reshape(1, -1)\n # weight = weight/np.max(weight)\n # train_vec = np.multiply(train_vec, weight)\n\n # Perform oversampling on training data\n if param['balanced'] not in ['Bootstrap', 'Handsample']:\n logger.info(f\"class info before resampling: {sorted(Counter(train_data['encoded_label']).items())}\")\n train_vec, train_data['encoded_label'] = resample(X_train=train_vec, 
y_train=train_data['encoded_label'], balance=param['balanced'])\n logger.info(f\"class info after resampling:{sorted(Counter(train_data['encoded_label']).items())}\")\n\n # Fit model\n if param['classifier'] == 'MultinomialNB':\n clf = MultinomialNB()\n elif param['classifier'] == 'LDA':\n clf = LinearDiscriminantAnalysis()\n else:\n clf = svm.LinearSVC()\n\n if param['multiclass'] == 'OnevsOne':\n model = OneVsOneClassifier(clf)\n else:\n model = OneVsRestClassifier(clf)\n\n if param['classifier'] == 'LinearSVM' or param['multiclass'] == 'OnevsOne':\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['encoded_label'])\n train_prediction = model.predict(train_vec)\n dev_prediction = model.predict(dev_vec)\n else:\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['binary_label'])\n train_prediction = np.argmax(model.predict(train_vec), axis=1)\n dev_prediction = np.argmax(model.predict(dev_vec), axis=1)\n\n\n return train_prediction, dev_prediction, train_vec.shape, dev_vec.shape, model, word_vec_map", "def train_with_control_vec_pretrianing(builder, train_ds, eval_ds):\n num_train_examples = builder.info.splits['train'].num_examples\n \n task = {'name': 'extr', 'excluded_label': None}\n num_classes = builder.info.features[task['name']].num_classes \n task['num_classes'] = num_classes\n\n model = models_lib.Model(num_classes=num_classes)\n\n main_task = {'name': 'label', 'excluded_label': 3}\n num_classes = builder.info.features[main_task['name']].num_classes - 1\n main_task['num_classes'] = num_classes \n\n lr_scheduler = tf.keras.experimental.CosineDecayRestarts(\n initial_learning_rate=0.0001, \n first_decay_steps=10*(num_train_examples//FLAGS.pretrain_bs),\n t_mul=2.0,\n m_mul=0.9,\n alpha=0.1)\n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)\n\n if FLAGS.ckpt: \n model, optimizer, ckpt, ckpt_manager = load_model(FLAGS.ckpt, model, optimizer)\n else: \n if FLAGS.save_model: \n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir+'/pretrain', \n max_to_keep=3 \n )\n else: \n ckpt=None\n ckpt_manager=None\n\n print('==========CONTROL VECTOR PRETRAIN==========')\n for epoch in range(FLAGS.pretrain_epochs):\n print('==========EPOCH: %s==========' % epoch)\n control_vec_pretrain(\n pretrain_ds=train_ds,\n model=model,\n optimizer=optimizer,\n task=task,\n epochs=1,\n lineareval_epochs=0,\n lineareval_task=None,\n eval_ds=None,\n ckpt_manager=ckpt_manager\n )\n\n head = model.sh\n model.sh = models_lib.SupervisedHead(main_task['num_classes'])\n linear_eval(train_ds, model, main_task, FLAGS.lineareval_epochs, eval_ds=eval_ds)\n\n model.sh = head\n\n model.sh = models_lib.SupervisedHead(main_task['num_classes'])\n linear_eval(train_ds, model, main_task, 30, eval_ds=eval_ds)\n evaluate(eval_ds, model, main_task)", "def __init__(self, word2vec_model):\n self._model = word2vec_model", "def train(self, X, y):\n pass", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def train(self, uInputInfo):\n if self.node_sharing:\n ## train just one node,\n self.pipes[0][0].send((\"train\", uInputInfo))\n self.pipes[0][0].recv()\n \n else:\n ## start each node's training\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].send((\"train\", uInputInfo))\n\n ## wait for the training to be finished\n for i in range(len(self.nodes)):\n for j in 
range(len(self.nodes[i])):\n self.pipes[i][j].recv()", "def train(self, data):\n pass", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], 
codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def run(args):\n ####################\n # Data Processing #\n ####################\n\n words, counts = load_input(args.train_path)\n trie = marisa_trie.Trie(counts.keys())\n indices = load_indices('word', words, counts)\n args.nb_classes = len(indices.keys())\n print(len(indices.keys()))\n timeseries = make_embedding(args.train_path, words, indices)\n\n ###################\n # Construct model #\n ###################\n\n if os.path.exists(name+'.h5'):\n model = keras_load_model(name+'.h5')#', custom_objects={'perplexity': perplexity})\n model.summary()\n else:\n model = densenet.DenseNet(args.nb_classes,\n args.img_dim,\n args.depth,\n args.nb_dense_block,\n args.growth_rate,\n args.nb_filter,\n dropout_rate=args.dropout_rate,\n weight_decay=args.weight_decay)\n # Model output\n model.summary()\n\n # Build optimizer\n opt = Nadam(lr=args.learning_rate)\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['mae', 'accuracy'])\n\n ####################\n # Network training #\n ####################\n\n train(model, timeseries, indices, words, args)\n\n #words, counts = load_input(args.test_path)\n #timeseries = make_embedding(args.test_path, words, indices)\n #trie = load_trie(counts)\n #charinds = load_indices('char')\n #char_to_bpc(model, timeseries, indices, charinds, 
words, args, trie) #cel\n #word_to_perplexity(model, timeseries, indices, words, args) #nll", "def main(args):\n input_path = args.input\n output_path = args.output\n model_type = args.model_type\n dimensions = args.dimensions\n walk_length = args.walk_length\n num_walks = args.num_walks\n window_size = args.window_size\n itr = args.iter\n workers = args.workers\n p = args.p\n q = args.q\n is_weighted = args.weighted\n is_directed = args.directed\n\n graph = node2vec.Node2Vec(dimensions=dimensions, walk_length=walk_length, num_walks=num_walks,\n window_size=window_size, itr=itr, workers=workers, p=p, q=q,\n is_weighted=is_weighted, is_directed=is_directed)\n graph.train(inputs=input_path, model_type=model_type)\n graph.save(output_path=output_path)", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n from sklearn import svm\n \n \"*** YOUR CODE HERE ***\"\n self.sklearn_svm = svm.SVC(C=5, kernel='rbf', gamma=0.005, decision_function_shape='ovo')\n self.sklearn_svm.fit(trainingData, trainingLabels)", "async def train(self):", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs 
softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]", "def train_reg_struct(self, sess, pre, model, edges, G, cluster_negtivate=False, nodeid2cluster={}, iter=1, chunksize=150):\n assert model.node_embedding.dtype == np.float32\n\n log.info(\"O1 training model with %i workers on %i vocabulary and %i features and 'negative sampling'=%s\" %\n (self.workers, len(model.vocab), model.layer1_size, self.negative))\n\n if not model.vocab:\n raise RuntimeError(\"you must first build vocabulary before training the model\")\n\n edges = RepeatCorpusNTimes(edges, iter)\n total_node = edges.corpus.shape[0] * edges.corpus.shape[1] * edges.n\n log.debug('total edges: %d' % total_node)\n start, next_report, node_count = time.time(), [5.0], [0]\n\n #mean field\n #print(\"model.node_embedding = \", model.node_embedding)\n #print(\"model.w2.T = \", model.w2.T)\n node_emb_tmp = np.zeros((model.vocab_size, model.layer1_size), dtype=np.float32)\n loop = 100\n log.info(\"i = 0, model.node_embedding = {}\".format(model.node_embedding[1]))\n for i in range(loop):\n for node in G.nodes():\n #node_emb = tf.nn.embedding_lookup(self.node_embeddings, model.vocab[node])\n #tmp = np.zeros(model.layer1_size, dtype=np.float32)\n #for nnodeid in G[node]:\n # nodeidx = model.vocab[nnodeid].index\n # tmp = tmp + model.node_embedding[nodeidx]\n tmp = np.sum([G[node][nodeid]['weight'] * \n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[node]], axis=0)\n #print(\"near nodeidx = \", nodeidx, \", emb = \", model.node_embedding[nodeidx])\n #print(\"np.matmul(tmp, model.w2.T) = \", np.matmul(tmp, model.w2.T))\n #model.node_embedding[model.vocab[node].index] = np.maximum(0, np.matmul(tmp, model.w2.T))\n #print(\"ori nodeidx = \", model.vocab[node].index, \", emb = \", model.node_embedding[model.vocab[node].index])\n #node_emb_tmp[model.vocab[node].index] = 1 / (1 + np.exp(-np.matmul(tmp, model.w2.T)))\n node_emb_tmp[model.vocab[node].index] = 10 * np.tanh(np.matmul(tmp, model.w2.T))\n #node_emb_tmp[model.vocab[node].index] = np.exp(np.matmul(tmp, model.w2.T))\n #node_emb_tmp[model.vocab[node].index] = np.matmul(tmp, model.w2.T)\n #e_x = np.exp(np.matmul(tmp, model.w2.T) - np.max(np.matmul(tmp, model.w2.T)))\n #node_emb_tmp[model.vocab[node].index] = e_x / e_x.sum()\n #print(\"node_emb_tmp = \", node_emb_tmp)\n #print(\"model.node_embedding = \", model.node_embedding)\n model.node_embedding = self.normal(node_emb_tmp.copy())\n if i == loop - 1 or i == loop - 2:\n log.info(\"i = {}, node_emb_tmp = {}\".format(i, node_emb_tmp[1]))\n # print(\"i = \", i,\", model.node_embedding = \", model.node_embedding)\n\n #int(sum(v.count * v.sample_probability for v in self.vocab.values()))\n 
jobs = Queue(maxsize=2*self.workers) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(\n train_jobs = Queue(maxsize=2*self.workers)\n lock = threading.Lock()\n \n\n def worker_train():\n \"\"\"Train the model, lifting lists of paths from the jobs queue.\"\"\"\n #py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n \n #log.info(\"thread start!\")\n #lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n lr = self.lr \n job_words = 0\n #pre=self.build_model(len(model.vocab), model.layer1_size, lamda = 0.0, learning_rate=lr)\n x1 = []\n x2 = []\n y = []\n #cur = 0\n for edge in job:\n if edge is not None:\n #cur+=1\n #if cur % 100 == 0:\n # log.info(\"edge[0].index = {}\".format(edge[0].index))\n #x.append([edge[0].index, edge[1].index)\n edge_0_emb = np.sum([G[model.vocab_t[edge[0].index]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[edge[0].index]]], axis=0)\n #edge_0_emb = model.node_embedding[edge[0].index] \n edge_1_emb = np.sum([G[model.vocab_t[edge[1].index]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[edge[1].index]]], axis=0)\n #edge_1_emb = model.node_embedding[edge[1].index]\n x1.append(edge_0_emb)\n #print(\"edge[0].index = \", edge[0].index)\n #print(\"0 nebor g = \", [nodeid for nodeid in G[model.vocab_t[edge[0].index]]])\n #print(\"edge[1].index = \", edge[1].index)\n #print(\"1 nebor g = \", [nodeid for nodeid in G[model.vocab_t[edge[1].index]]])\n #print(\"model.vocab_t = \", model.vocab_t)\n x2.append(edge_1_emb)\n \n #print(\"1 nebor g = \", G[model.vocab_t[edge[1].index]])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n #for i in range(int(10 * (weight)) * self.negative):\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size - 1)]\n if nodeidx != edge[0].index and \\\n (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[0].index]]\n or (model.connected_path[model.vocab_t[edge[0].index]][model.vocab_t[nodeidx]][0] < 0.1)):\n x1.append(edge_0_emb)\n x2.append(np.sum([G[model.vocab_t[nodeidx]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[nodeidx]]], axis=0)\n )\n #x2.append(model.node_embedding[nodeidx]) \n y.append(0.0)\n else:\n i -= 1\n if nodeidx != edge[1].index and \\\n (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[1].index]]\n or (model.connected_path[model.vocab_t[edge[1].index]][model.vocab_t[nodeidx]][0] < 0.1)):\n x1.append(edge_1_emb)\n x2.append(np.sum([G[model.vocab_t[nodeidx]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[nodeidx]]], axis=0)\n )\n #x2.append(model.node_embedding[nodeidx]) \n y.append(0.0)\n else:\n i-=1\n #log.info(\"edge end!\")\n #print(\"model.node_embedding = \", model.node_embedding)\n \n #for i in range(1):\n # feed_dict = {\n # pre.x1: x1,\n # pre.x2: x2,\n # pre.y: y,\n # pre.w2_init: model.w2\n # }\n #saver = tf.train.Saver()\n #print(\"model.w2 = \", model.w2)\n #_, loss, node_embeddings, w2 = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings, pre.w2],\n #_, loss, w2, mut, mut_ori = 
sess.run([pre.d_updates, pre.reg_loss, pre.w2, pre.mut, pre.mut_ori],\n # feed_dict=feed_dict)\n #log.info(\"iter = {}, loss = {}\".format(i, loss))\n #if i == loop - 1:\n # print(\"y = \", y)\n # print(\"mut = \", mut)\n # print(\"w2.T = \", w2.T)\n #print(\"mut_ori = \", mut_ori)\n #loop = 10\n #for i in range(loop):\n # for node in G.nodes():\n # #node_emb = tf.nn.embedding_lookup(self.node_embeddings, model.vocab[node])\n # tmp = np.zeros(model.layer1_size, dtype=np.float32)\n # for nnodeid in G[node]:\n # nodeidx = model.vocab[nnodeid].index\n # #print(\"nodeidx = \", nodeidx)\n # tmp = tmp + model.node_embedding[nodeidx]\n # #model.node_embedding[model.vocab[node].index] = np.maximum(0, np.matmul(tmp, w2.T))\n # #model.node_embedding[model.vocab[node].index] = np.exp(np.matmul(tmp, w2.T))\n # model.node_embedding[model.vocab[node].index] = np.matmul(tmp, w2.T)\n # #e_x = np.exp(np.matmul(tmp, w2.T) - np.max(np.matmul(tmp, w2.T)))\n # #model.node_embedding[model.vocab[node].index] = e_x / e_x.sum()\n # #model.node_embedding = node_embeddings\n #print(\"model.node_embedding_next = \", model.node_embedding)\n #model.w2 = w2\n #print(\"model.w2_next = \", model.w2)\n #x = []\n #y = []\n #x.append([edge[1].index, edge[0].index])\n #weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n #y.append(1.0)\n #for i in range(self.negative):\n # nodeidx = model.table[np.random.randint(model.table_size)]\n # if edge[1].index != nodeidx:\n # x.append([edge[1].index, nodeidx])\n # y.append(0.0)\n #feed_dict = {\n # pre.x: x,\n # pre.y: y,\n # pre.node_embeddings_init: model.node_embedding\n #}\n \n #saver = tf.train.Saver()\n #_, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n # feed_dict=feed_dict)\n\n #model.node_embedding = node_embeddings\n #model.node_embedding[edge[1].index] = node_embeddings[edge[1].index]\n #log.info(\"thread end!\")\n job_words += len(y)\n \n #log.info(\"train_loss: {}, node_embeddings = {}\".format(loss, model.node_embedding))\n \n #saver.restore(sess, INNER_MODEL_FILE)\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n #job_words = len(x)\n #log.info(\"train_jobs put!\")\n #log.info(\"train_jobs_full = {}\".format(train_jobs.full()))\n train_jobs.put([x1, x2, y])\n #train_jobs.put([x1, x2, y], block=False)\n #log.info(\"train_jobs put end!\")\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n #log.info(\"jobs.qsize() = {}, train_jobs.qsize() = {}\".format(jobs.qsize(), train_jobs.qsize()))\n #train_jobs.put([x1, x2, y])\n finally:\n lock.release()\n \n def worker_train_tf():\n while True:\n #log.info(\" start1, train_jobs = {}\".format(train_jobs.qsize()))\n job = train_jobs.get(block=True)\n if job is None: # data finished, exit\n train_jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n #log.info(\" start2, train_jobs = {}\".format(train_jobs.qsize()))\n x1, x2, y = job\n for i in range(1):\n feed_dict = {\n pre.x1: x1,\n pre.x2: x2,\n 
pre.y: y,\n pre.w2_init: model.w2\n } \n #_, loss, w2, mut, mut_ori = sess.run([pre.d_updates, pre.reg_loss, pre.w2, pre.mut, pre.mut_ori],\n #_, loss, w2, lr = sess.run([pre.d_updates, pre.reg_loss, pre.w2, pre.learning_rate],\n _, loss, w2 = sess.run([pre.d_updates, pre.reg_loss, pre.w2],\n feed_dict=feed_dict)\n log.info(\"iter = {}, loss = {}\".format(i, loss))\n model.w2 = w2\n train_jobs.task_done()\n #log.info(\"train_jobs.qsize() = {}\".format(train_jobs.qsize()))\n\n\n workers = [threading.Thread(target=worker_train, name='thread_'+str(i)) for i in range(self.workers)]\n for thread in workers:\n thread.daemon = True # make interrupting the process with ctrl+c easier\n thread.start()\n #log.info(\"thread = {} start!\".format(thread))\n \n train_worker = [threading.Thread(target=worker_train_tf, name='train_thread_'+str(i)) for i in range(1)]\n for thread in train_worker:\n thread.daemon = True # make interrupting the process with ctrl+c easier\n thread.start()\n #log.info(\"train thread = {} start!\".format(thread))\n\n # convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue\n for job_no, job in enumerate(chunkize_serial(prepare_sentences(model, edges), chunksize)):\n jobs.put(job)\n #log.info(\"jobs.qsize() = {}\".format(jobs.qsize()))\n\n for _ in range(self.workers):\n jobs.put(None) # give the workers heads up that they can finish -- no more work!\n \n for _ in range(1):\n train_jobs.put(None) # give the workers heads up that they can finish -- no more work!\n\n for thread in workers:\n thread.join()\n \n for thread in train_worker:\n thread.join()\n \n elapsed = time.time() - start\n log.info(\"training on %i words took %.1fs, %.0f words/s\" %\n (node_count[0], elapsed, node_count[0]/ elapsed if elapsed else 0.0))", "def to_train(self):\n for _m in self.modules.values():\n _m.train()", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def run_model_three():\n\n pos_map = pickle.load(open('pos_map.p', \"rb\"))\n vocab = build_vocab(training_directory)\n word2vec = get_w2vec(wordtovec)\n\n train_out_list = prep_data_lexsub(training_directory, \"mod3_train_prepped.txt\", 3, pos_map, vocab, word2vec)\n test_out_list = prep_data_lexsub(testing_directory, \"mod3_test_prepped.txt\", 3, pos_map, vocab, word2vec)\n\n posset = get_posset()\n vocab.add(\"<s>\")\n vocab.add(\"<UNK>\")\n convert_to_svm('mod3_train_prepped.txt', \"mod3_train.svm\", posset, vocab)\n convert_to_svm('mod3_test_prepped.txt', \"mod3_test.svm\", posset, vocab)\n\n p_labels, p_acc, p_vals = train_test_model(\"mod3_train.svm\", \"mod3_test.svm\")\n with open(\"modelscores.txt\", \"a\") as text_file:\n text_file.write(str(p_acc[0]))\n text_file.write(\"\\n\")\n pickle.dump(p_labels, open('mod3_p_labels.p', 'wb'))", "def train(args):\n \n\n train_generator, validation_generator, num_training, num_validation, num_classes = generate(args)\n print(\"{} classes found\".format(num_classes))\n\n model = MobileNetV2((args.input_size, args.input_size, 3), num_classes, args.plot_model)\n\n opt = tf.keras.optimizers.Adam()\n earlystop = tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=30, verbose=1, mode='auto')\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])\n\n hist = model.fit_generator(\n train_generator,\n validation_data=validation_generator,\n steps_per_epoch=num_training // args.batch_size,\n validation_steps=num_validation // args.batch_size,\n epochs=args.epochs,\n callbacks=[earlystop])\n\n 
if not os.path.exists('model'):\n os.makedirs('model')\n\n df = pd.DataFrame.from_dict(hist.history)\n df.to_csv('model/hist.csv', encoding='utf-8', index=False)\n if not os.path.exists('model/output'):\n os.makedirs('model/output')\n model.save('model/output')", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def _train_model(self):\n raise NotImplementedError()", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train_model(self, length, mod_name):\n X = self.X[:length]\n y = self.y[:length]\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(X, y, test_size=0.33, random_state=42)\n self.build_pipeline(self.X_train, self.y_train, mod_name)", "def build_model_mobilenet(num_classes):", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", loss_weights)", "def train(\n model_path=\"./trained_model/\",\n model_file_name=\"model.h5\",\n training_data_path=\"./train.csv\",\n):\n config = SConfig(training_data_path=training_data_path)\n s2s = Seq2Seq(config)\n s2s.fit()\n s2s.save_model(path_to_model=model_path, model_file_name=model_file_name)", "def train_network(self, batch_size, epochs):\n\n if self.eq_train: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights_eq) \n else: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights)", "def train_word2vec_model(data_folder, algorithm='skipgram'):\n assert algorithm in ['skipgram', 'cbow']\n\n sg = 1 if algorithm is 'skipgram' else 0\n\n # Read data\n\n segments = torch.load(os.path.join(data_folder, 'word2vec_data.pth.tar'))\n\n\n # 모든 문서의 sentence들을 통채로 합해주기\n sentences = list(itertools.chain.from_iterable(list(itertools.chain.from_iterable(segments))))\n # Activate logging for verbose training\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n # Initialize and train the model (this will take some time)\n model = 
gensim.models.word2vec.Word2Vec(sentences=sentences, size=200, workers=8, window=10, min_count=5,\n sg=sg)\n\n # Normalize vectors and save model\n model.init_sims(True)\n model.wv.save(os.path.join(data_folder, 'word2vec_model'))", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0", "def train(self):\n raise NotImplementedError", "def create_train_model(self):\n st = LancasterStemmer()\n with open(self.data_path, encoding='utf8') as f_name:\n sentences = [[st.stem(w) for w, t in pos_tag(line.lower().split()) if 'N' in t] for line in f_name]\n sentences = [filter(lambda x: len(x) > 2, (word.strip(punctuation) for word in sentences)) for sent in sentences]\n model = Word2Vec(sentences,\n min_count=self.min_count,\n size=self.size,\n window=self.window,\n workers=4)\n model.save(self.model_path)", "def train(self, ):\n raise NotImplementedError", "def train(self, X, y):\n self.model.fit(X, y)", "def training(data_to_train, network_dimensions=np.array([5,5]), n_iterations=2000, init_learning_rate=0.01, normalise_data=True, normalise_by_column=False):\n #transforma em float para fazer normalizacao\n raw_data = np.float64(data_to_train)\n\n #tamanho baseado nos dados\n m = raw_data.shape[0]\n n = raw_data.shape[1]\n\n #matriz de pesos tem que ter o mesmo tamanho do vetor de entrada (RGB = 3 entradas) \n #para cada neuronio no mapa (mapa 5x5)\n #inicializa pesos com valores aleatorios\n net = np.random.random((network_dimensions[0], network_dimensions[1], m))\n\n #raio da vizinhanca inicial (qual distancia eu procuro por vizinhos para atualizar)\n init_radius = max(network_dimensions[0], network_dimensions[1]) / 2\n #quanto o raio ira diminuir\n time_constant = n_iterations / np.log(init_radius)\n\n #cria matriz auxiliar caso precise normalizar\n data = raw_data\n\n if normalise_data:\n data = normalise(raw_data, normalise_by_column)\n \n #PROCESSO DE APRENDIZADO:\n #1. Encontra o neuronio com o vetor 3D mais proximo do vetor 3D do dataset - Best Matching Unit\n #\n #2. Move o vetor do neuronio BMU mais proximo do vetor de entrada no espaco\n #\n #3. Identifica os neuronios vizinhos do BMU e move os vetores mais proximos\n #\n #4. 
Reduz taxa de aprendizado\n for i in range(n_iterations):\n #seleciona um exemplo aleatorio do dataset\n t = data[:, np.random.randint(0,n)].reshape(np.array([m, 1]))\n\n #encotra o Best Matching Unit\n bmu, bmu_index = find_bmu(t, net, m)\n\n #diminui parametros de aprendizado usando\n #usa exponetial decay sigma_t = sigma_0 * exp(-t / lambda)\n #sigma_t eh o novo valor\n #sigma_0 eh o valor anterior\n #t eh o instante de tempo\n #lamba eh o time_constant\n r = decay_radius(init_radius, i, time_constant)\n l = decay_learning_rate(init_learning_rate, i, n_iterations)\n\n #move o BMU e seus vizinhos mais perto\n #atualizando pesos do BMU: w_t+1 = w_t + L_t * (V_i - w_t)\n #peso atual mais diferenca entre vetor de entrada e peso atual multipicado pela taxa de aprendiz\n #movendo o BMU mais perto do vetor de entrada\n #\n #depois, encontra outros neuronios dentro do raio definido\n #atualiza peso desses neuronios proporcionalmente a distancia ate o BMU (gaussiana)\n #para calcular essa influencia usa i_t = exp(-d^2 / (2 * sigma^2_t))\n #onde d eh a distancia entre os neuronios e sigma eh o raio no tempo atual\n\n for x in range(net.shape[0]):\n for y in range(net.shape[1]):\n w = net[x, y, :].reshape(m, 1) #pesos do neuronio atual\n #pega distancia euclidiana quadrada entre\n #posicao do neuronio atual e indice do bmu\n w_dist = np.sum((np.array([x, y]) - bmu_index) ** 2)\n #se a distancia eh menor que o raio atual (ao quadrado pq a distancia eh quadrada)\n if w_dist <= r**2:\n #calcula influencia do neuronio\n influence = calculate_influence(w_dist, r)\n #atualiza pesos do neuronio\n #w_novo = w_atual + (aprendizado * influencia * delta)\n #delta = entrada - w_atual\n new_w = w + (l * influence * (t - w))\n #coloca novo peso na matriz\n net[x, y, :] = new_w.reshape(1, 1)\n\n \n return net", "def svm():", "def train(self):\n\t\traise NotImplementedError", "def train(self):\n return", "def train(self, input_vects):\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n #Train with each vector one by one\n if iter_no % 20 == 0:\n print(iter_no)\n for input_vect in input_vects:\n self._sess.run(self._training_op,\n feed_dict={self._vect_input: input_vect,\n self._iter_input: iter_no})\n \n #Store a centroid grid for easy retrieval later on\n centroid_grid = [[] for i in range(self._m)]\n self._weightages = list(self._sess.run(self._weightage_vects))\n self._locations = list(self._sess.run(self._location_vects))\n for i, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._weightages[i])\n self._centroid_grid = centroid_grid\n \n self._trained = True", "def train(self, features, labels):\n pass", "def train_model(kernel, label):\n clf = svm.SVC(kernel='precomputed')\n clf.fit(kernel,label)\n return clf", "def TrainOneStep(self):\n pass", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def train():\n args = 
arguments_st_train()\n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n \n if args.use_random_seed:\n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size=input_size,\n random_scale=args.random_scale,\n random_mirror=args.random_mirror,\n random_crop=args.random_crop,\n ignore_label=args.ignore_label,\n img_mean=IMG_MEAN,\n coord=coord,\n task=args.task)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n with slim.arg_scope(vgg_arg_scope(weight_decay=args.weight_decay, use_batch_norm=True, is_training=True)):\n if args.network == 'vgg_16_deeplab_st':\n net, end_points = vgg_16_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n elif args.network == 'vgg_16_shortcut_deeplab_st':\n net, end_points = vgg_16_shortcut_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n else:\n raise Exception('network name is not recognized!')\n \n \n # Predictions.\n raw_output = end_points['vgg_16/fc8_voc12']\n\n # gt labels\n raw_gt = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes,\n one_hot=False, task=args.task) # [batch_size, h, w]\n\n # losses\n if args.task == 'normal':\n loss = get_normal_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n elif args.task == 'seg':\n loss = get_seg_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Image summary for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes, args.task], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes, args.task], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n train_op, step_ph = create_train_ops_st(reduced_loss, args)\n \n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n\n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n load_st(sess, args)\n \n # Saver for storing checkpoints of the model.\n save_op = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=args.max_to_keep)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(save_op, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def worker_train():\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n #lr = self.lr \n job_words = 0\n #pre=self.build_model(len(model.vocab), model.layer1_size, lamda = 0.0, learning_rate=lr)\n for edge in job:\n if edge is not None:\n x = []\n y = []\n x.append([edge[0].index, edge[1].index])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n #for i in range(int(10 * (weight)) * self.negative):\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if nodeidx != edge[0].index:\n x.append([edge[0].index, nodeidx])\n y.append(0.0)\n feed_dict = {\n pre.x: x,\n pre.y: y,\n pre.node_embeddings_init: model.node_embedding\n }\n #saver = tf.train.Saver()\n _, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n feed_dict=feed_dict)\n model.node_embedding[edge[0].index] = node_embeddings[edge[0].index]\n x = []\n y = []\n x.append([edge[1].index, edge[0].index])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if edge[1].index != nodeidx:\n x.append([edge[1].index, nodeidx])\n y.append(0.0)\n feed_dict = {\n pre.x: x,\n pre.y: y,\n pre.node_embeddings_init: model.node_embedding\n }\n \n #saver = tf.train.Saver()\n _, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n feed_dict=feed_dict)\n\n #model.node_embedding = node_embeddings\n model.node_embedding[edge[1].index] = node_embeddings[edge[1].index]\n job_words += len(x)\n \n #log.info(\"train_loss: {}, node_embeddings = {}\".format(loss, model.node_embedding))\n \n #saver.restore(sess, INNER_MODEL_FILE)\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n #job_words = 
len(x)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()", "def Subtask4_pre_train_5():\n with open(PATH + 'pre_train_4_Subtask4.txt', encoding='utf-8') as fi:\n evi = eval(fi.read())\n\n train_data = np.load(PATH + 'pre_train_2_Subtask4.npy', allow_pickle=True).item()\n model = word2vec.KeyedVectors.load_word2vec_format(PATH + \"data/GoogleNews-vectors-negative300.bin\", binary=True)\n\n with open(PATH + 'pre_train_3_Subtask4.txt', encoding='utf-8') as f:\n document = eval(f.read())\n\n with open(PATH + 'traindata_Subtask4.txt', 'w') as fp:\n for data in train_data.items():\n claim = data[0]\n claim = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", claim)\n claim = claim.split(' ')\n claim = list(filter(lambda x: x in model.vocab, claim))\n Vi = []\n for i in range(len(claim)):\n Vi.append(model[claim[i]])\n\n V = np.zeros(len(Vi[0]))\n for i in range(len(claim)):\n for j in range(len(Vi[0])):\n V[j] = V[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V[i] * V[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V[i] = V[i] / rms\n V = V.astype(str).tolist()\n\n for doc in data[1]:\n lines = document[doc].split('\\n')\n for k in range(len(lines)):\n label = [data[0], doc, k]\n line = document[doc].split('\\n')[k]\n if line != str(k) + '\\t':\n line = line.replace(str(k) + '\\t', '')\n line = line.split('\\t')[0]\n line = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", line)\n line = line.split(' ')\n line = list(filter(lambda x: x in model.vocab, line))\n if len(line) != 0:\n Vi = []\n for i in range(len(line)):\n Vi.append(model[line[i]])\n\n V1 = np.zeros(len(Vi[0]))\n for i in range(len(line)):\n for j in range(len(Vi[0])):\n V1[j] = V1[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V1[i] * V1[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V1[i] = V1[i] / rms\n V1 = V1.astype(str).tolist()\n\n if label in evi:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 1' + '\\n')\n else:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 0' + '\\n')", "def train(self, X):\n self.X = X", "def train(self, trainData):\n pass", "def train(self, training_data):\n pass", "def train(self, batch):\n pass", "def train(articles, force_create=False):\n global _trained\n # Setup\n bag_of_words.train(articles, force_create)\n term_frequency.train(articles, force_create)\n _get_idfs(articles)\n _trained = True\n for article in articles:\n get_vector(article, force_create, True)", "def mnd_train(x_train, y_train, model_root_dir, n_gpu=4, n_cpu=10):\n\n # Horovod: initialize Horovod\n hvd.init()\n\n K.clear_session()\n gc.collect()\n # config = tf.ConfigProto(device_count={'GPU': n_gpu, 'CPU': n_cpu})\n # Horovod: pin GPU to be used to process local rank(one GPU perprocess)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list = str(hvd.local_rank())\n sess = tf.Session(config=config)\n K.set_session(sess)\n\n n_samples, n_win_size, n_feat = x_train.shape\n n_class = y_train.shape[1]\n\n # search-dimension\n # dim_nb_batchs = Categorical(categories=[128, 256, 512, 1024], 
name='batch_size')\n # dim_nb_epochs = Categorical(categories=[20, 30, 40, 50], name='epoch')\n # dim_lrs = Categorical(categories=[0.1, 0.01, 0.001], name='learn_rate')\n # dim_lr_decays = Categorical(categories=[0.1, 0.5, 0.8], name='learn_rate_decay')\n # dim_init_filters = Categorical(categories=[16, 32, 64, 128], name='filters')\n # dim_drops = Categorical(categories=[0.2, 0.3, 0.4, 0.5], name='drop')\n # dim_fc_sizes = Categorical(categories=[32, 64, 128, 256], name='fc_size')\n # dim_net_blocks = Categorical(categories=[(4, 1), (4, 3), (4, 4, 1), (4, 4, 3), (4, 4, 4, 1), (4, 4, 4, 3)],\n # name='blocks')\n # search_dim = [dim_nb_batchs,\n # dim_nb_epochs,\n # dim_lrs,\n # dim_lr_decays,\n # dim_init_filters,\n # dim_drops,\n # dim_fc_sizes,\n # dim_net_blocks]\n # default_param = [256, 20, 0.1, 0.8, 16, 0.2, 64, (4, 1)]\n\n dim_nb_batchs = Categorical(categories=[256], name='batch_size')\n dim_nb_epochs = Categorical(categories=[5], name='epoch')\n dim_lrs = Categorical(categories=[0.1, 0.01], name='learn_rate')\n dim_lr_decays = Categorical(categories=[0.8], name='learn_rate_decay')\n dim_init_filters = Categorical(categories=[128], name='filters')\n dim_drops = Categorical(categories=[0.5], name='drop')\n dim_fc_sizes = Categorical(categories=[256], name='fc_size')\n dim_net_blocks = Categorical(categories=[(4, 1)],\n name='blocks')\n search_dim = [dim_nb_batchs,\n dim_nb_epochs,\n dim_lrs,\n dim_lr_decays,\n dim_init_filters,\n dim_drops,\n dim_fc_sizes,\n dim_net_blocks]\n default_param = [256, 5, 0.1, 0.8, 16, 0.2, 64, (4, 1)]\n\n _model_dir = os.path.join(model_root_dir, 'models3/model_weight')\n if not os.path.isdir(_model_dir):\n os.mkdir(_model_dir)\n _tb_dir = os.path.join(model_root_dir, 'models3/logs')\n if not os.path.isdir(_tb_dir):\n os.mkdir(_tb_dir)\n _csvlogger_dir = os.path.join(model_root_dir, 'models3/model_metrics')\n if not os.path.isdir(_csvlogger_dir):\n os.mkdir(_csvlogger_dir)\n\n def out_name(batch_size, epoch, learn_rate, learn_rate_decay, filters, drop, fc_size, blocks):\n str_blocks = [str(x) for x in blocks]\n str_blk = ''.join(str_blocks)\n\n return 'b{0}_e{1}_lr{2:.3f}_lrd{3:.1f}_flt{4}_dr{5:.1f}_fc{6}_blk{7}'.format(batch_size,\n epoch,\n learn_rate,\n learn_rate_decay,\n filters,\n drop,\n fc_size,\n str_blk)\n\n # y_train_labels = np.argmax(y_train, axis=1)\n # skf = StratifiedKFold(n_splits=1, random_state=123, shuffle=True)\n\n # Horovod: print logs on the first worker.\n verbose = 1 if hvd.rank() == 0 else 0\n\n permutation = list(np.random.permutation(n_samples))\n\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n\n @use_named_args(dimensions=search_dim)\n def gp_fitness(batch_size, epoch, learn_rate, learn_rate_decay, filters, drop, fc_size, blocks):\n print('batch_size: {}'.format(batch_size))\n print('epoch: {}'.format(epoch))\n print('learn rate: {0:.3f}'.format(learn_rate))\n print('learn rate decay: {0:.1f}'.format(learn_rate_decay))\n print('filters: {}'.format(filters))\n print('drop ratio: {0:.1f}'.format(drop))\n print('fc size: {}'.format(fc_size))\n print('blocks: {}'.format(blocks))\n\n tmp_out_name = out_name(batch_size, epoch, learn_rate,\n learn_rate_decay, filters, drop, fc_size, blocks)\n\n val_acc_arr = []\n\n # for i, (train_idx, val_idx) in enumerate(skf.split(x_train, y_train_labels)):\n # ix_train1, ix_val1 = x_train[train_idx], x_train[val_idx]\n # iy_train1, iy_val1 = y_train[train_idx], y_train[val_idx]\n for i in range(1):\n ix_train1, ix_val1, iy_train1, iy_val1 = train_test_split(x_train, 
y_train, test_size=0.2,\n shuffle=False)\n nb_trains = ix_train1.shape[0] // batch_size\n nb_examples = batch_size * nb_trains\n k_x_train = ix_train1[:nb_examples]\n k_y_train = iy_train1[:nb_examples]\n k_x_val = np.concatenate((ix_val1, ix_train1[nb_examples:]), axis=0)\n k_y_val = np.concatenate((iy_val1, iy_train1[nb_examples:]), axis=0)\n\n del ix_train1, ix_val1, iy_train1, iy_val1\n # gc.collect()\n\n model_fn = os.path.join(_model_dir, '{0}-k{1}.hdf5'.format(tmp_out_name, i))\n tensorboard_fn = os.path.join(_tb_dir, '{0}-tb_k{1}'.format(tmp_out_name, i))\n csvlogger_fn = os.path.join(_csvlogger_dir, '{0}-csvlogger_k{1}'.format(tmp_out_name, i))\n\n model = cnv_net(n_win_size, n_feat, n_class,\n filters=filters, kernel_size=16, strides=1, pool_size=2,\n pool_stride=2, drop=drop, blocks=blocks, fc_size=fc_size, m_name=tmp_out_name)\n\n callbacks = [\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n\n # # Horovod: average metrics among workers at the end of every epoch.\n # #\n # # Note: This callback must be in the list before the ReduceLROnPlateau,\n # # TensorBoard, or other metrics-based callbacks.\n # hvd.callbacks.MetricAverageCallback(),\n #\n # # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final\n # # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during\n # # the first five epochs. See https://arxiv.org/abs/1706.02677 for details.\n # hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, verbose=verbose),\n #\n # # Horovod: after the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=5, end_epoch=30, multiplier=1.),\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=30, end_epoch=60, multiplier=1e-1),\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=60, end_epoch=80, multiplier=1e-2),\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=80, multiplier=1e-3),\n ]\n\n # Horovod: save checkpoints only on the first worker to prevent other workers from corrupting them.\n if hvd.rank() == 0:\n callbacks.append(EarlyStopping(monitor='val_acc', patience=5, verbose=1))\n callbacks.append(AdvancedLearnignRateScheduler(monitor='val_acc', patience=1, verbose=1, mode='auto',\n decayRatio=learn_rate_decay))\n callbacks.append(MultiGPUCheckpointCallback(model_fn, base_model=model, monitor='val_acc',\n save_best_only=True, verbose=1, save_weights_only=True))\n callbacks.append(TensorBoard(tensorboard_fn, batch_size=batch_size, histogram_freq=2))\n callbacks.append(CSVLogger(csvlogger_fn))\n\n # Horovod: adjust learning rate based on number of GPUs.\n opt = keras.optimizers.Adam(lr=learn_rate)\n # Horovod: add Horovod Distributed Optimizer.\n opt = hvd.DistributedOptimizer(opt)\n\n model.compile(optimizer=opt,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n hist = model.fit(k_x_train, k_y_train, validation_data=(k_x_val, k_y_val), verbose=verbose,\n epochs=epoch, batch_size=batch_size, callbacks=callbacks)\n\n i_val_acc = hist.history['val_acc'][-1]\n print(\"Accuracy: {0:.6%}\".format(i_val_acc))\n val_acc_arr.append(i_val_acc)\n\n del model\n del k_x_train, k_y_train, k_x_val, k_y_val\n\n K.clear_session()\n gc.collect()\n i_config = tf.ConfigProto()\n 
i_config.gpu_options.allow_growth = True\n i_config.gpu_options.visible_device_list = str(hvd.local_rank())\n i_sess = tf.Session(config=i_config)\n K.set_session(i_sess)\n\n cv_mean_val_acc = np.mean(val_acc_arr)\n\n global best_accuracy\n if cv_mean_val_acc > best_accuracy:\n best_accuracy = cv_mean_val_acc\n\n return -cv_mean_val_acc\n\n search_result = gp_minimize(func=gp_fitness,\n dimensions=search_dim,\n acq_func='EI', # Expected Improvement.\n n_calls=40,\n x0=default_param)\n\n with open(os.path.join(model_root_dir, 'models3/gp_search_res.pickle'), 'wb') as f:\n pickle.dump(search_result, f)", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n import sklearn\n from sklearn import svm\n\n \"*** YOUR CODE HERE ***\"\n self.sklearn_classifier = svm.SVC(C=2, gamma=0.025, decision_function_shape='ovo', tol=0.015)\n self.sklearn_classifier.fit(trainingData, trainingLabels)", "def set_train(self):\n self.model.train()", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def train_CNN(self,member,input_data):\n trainX,trainY,validX,validY = input_data\n \n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n \n \n model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'\n print(model_file)\n if not os.path.exists(model_file):\n # Clear graphs\n tf.keras.backend.clear_session()\n \n #Initiliaze Convolutional Neural Net (CNN)\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n \n #First layer: input shape (y,x,# variables) \n #Add noise\n model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))\n for filters in [32,64,128]:\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n \n #Flatten the last convolutional layer \n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4,activation='softmax'))\n #Compile neural net\n model.compile(optimizer='adam',loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n #fit neural net\n n_epochs = 10\n bs = 256\n\n #augment data\n aug = imagedatagenerator(\n rotation_range=10,zoom_range=0.15,\n width_shift_range=0.2,height_shift_range=0.2,\n fill_mode=\"nearest\")\n \n train_generator = aug.flow(trainx,trainy,batch_size=bs)\n conv_hist = model.fit(\n train_generator,steps_per_epoch=len(trainx) // bs,\n epochs=n_epochs,verbose=1,class_weight=self.class_percentages)\n #save trained model\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n\n del trainY,trainX\n \n threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'\n if os.path.exists(threshold_file): \n del validX,validY\n return\n \n self.validate_CNN(model,validX,validY,threshold_file)\n return", "def train_word2vec_from_ES(es_config, query, model_file):\n q_docs = QueryResultDocs(es_config, query)\n model = gensim.models.Word2Vec(q_docs, workers=40)\n model.save(model_file)\n print 'model trained & saved'\n 
return model" ]
[ "0.7426452", "0.66420937", "0.663728", "0.65732425", "0.6568101", "0.6489874", "0.64146864", "0.63706493", "0.6360775", "0.6325146", "0.63113385", "0.63113385", "0.63113385", "0.63113385", "0.63113385", "0.63027394", "0.628448", "0.6281508", "0.6272129", "0.62691456", "0.62691456", "0.6252732", "0.6246681", "0.6242653", "0.62422115", "0.62092584", "0.6206204", "0.61987746", "0.6190185", "0.61890763", "0.6173638", "0.6159238", "0.6158883", "0.61262375", "0.6123606", "0.60995454", "0.6054154", "0.6046515", "0.6035221", "0.602109", "0.60210776", "0.6017739", "0.6007881", "0.60055375", "0.59985644", "0.59947324", "0.5987474", "0.5980413", "0.59795696", "0.5968323", "0.5963164", "0.59630436", "0.5956964", "0.59493583", "0.594854", "0.59443414", "0.5941502", "0.59110045", "0.5909317", "0.5909317", "0.5906959", "0.59011763", "0.5879335", "0.5878964", "0.5877036", "0.5872545", "0.5867514", "0.5860477", "0.58603513", "0.58556527", "0.58543706", "0.5848088", "0.5847642", "0.5841535", "0.5837225", "0.58343667", "0.58333695", "0.58321977", "0.5830288", "0.58274865", "0.5825301", "0.5824148", "0.5823583", "0.5811841", "0.5811542", "0.5811056", "0.58096695", "0.58068275", "0.5803611", "0.5803329", "0.5799822", "0.5799426", "0.57979465", "0.5794984", "0.5793588", "0.57864374", "0.5783147", "0.5778272", "0.5774596", "0.57743984" ]
0.6219104
25
All the harvesters and utilities use a common parser.
def setup_common_parser(id, description=None, epilog=None):
    parser = argparse.ArgumentParser(
        description=description,
        epilog=epilog,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    help = f"Verbosity level of log file {id}.log"
    choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
    parser.add_argument('-v', '--verbosity', choices=choices, default='INFO',
                        help=help)

    help = "Limit number of documents retrieved to this number."
    parser.add_argument('--num-documents', type=int, default=-1, help=help)

    help = (
        "Limit documents retrieved to those whose URL match this regular "
        "expression."
    )
    parser.add_argument('--regex', help=help)

    help = "Limit number of workers operating asynchronously to this number."
    parser.add_argument('--num-workers', type=int, default=1, help=help)

    help = (
        "Limit number of errors to this number. This number is not exact, "
        "because if the number of asynchronous workers is more than one, it "
        "is possible that the threshold is passed simultaneously by more than "
        "one worker."
    )
    parser.add_argument('--max-num-errors', type=int, default=1, help=help)

    return parser
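A minimal usage sketch for the document field above, showing how a harvester script might build on the shared parser. This example is not part of the dataset row; the module name `common`, the harvester id `my_harvester`, and the logging setup are assumptions for illustration only.

# Hypothetical harvester entry point built on setup_common_parser.
import logging

from common import setup_common_parser  # assumed module location


def main():
    parser = setup_common_parser('my_harvester',
                                 description='Harvest documents from one site.')
    args = parser.parse_args()

    # Each harvester logs to <id>.log at the requested verbosity.
    logging.basicConfig(filename='my_harvester.log',
                        level=getattr(logging, args.verbosity))
    logging.info('workers=%d, max errors=%d, document limit=%d',
                 args.num_workers, args.max_num_errors, args.num_documents)


if __name__ == '__main__':
    main()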
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_parser(self, parser):", "def select_parser():\n\n try:\n select_texttools_parser()\n except ImportError:\n select_python_parser()", "def test_simple_parse(self):\n pass", "def test_basic_parsers():", "def __init__(self, parser):\n if parser == \"csv\":\n self._parser = CSVParser()\n elif parser == \"static\":\n self._parser = StaticParser()\n else:\n raise NotImplementedError", "def _parse(self):\n pass", "def build_parser():\n desc = (\"Scrape Hearthstone decks from HearthPwn, then build a SQLite \"\n \"database of the results. Also integrates with omgvamp's Mashape \"\n \"Hearthstone API (http://hearthstoneapi.com/) to build a table of \"\n \"card data that can be used to make more advanced queries.\")\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('--buildcards', action='store_true',\n help='(re)build card database from Mashape')\n parser.add_argument('--builddecks', action='store_true',\n help='(re)build deck database from HearthPwn')\n parser.add_argument('--perclass', action='store_true',\n help='get the same number of decks for each class')\n parser.add_argument('--count', type=int,\n help='number of decks to retrieve (per class, if'\n ' --perclass is set)')\n parser.add_argument('--filtering',\n help='the HearthPwn filter used when finding decks, '\n 'as seen in the HearthPwn URL')\n parser.add_argument('--sorting',\n help='the HearthPwn sorting used when finding '\n 'decks, as seen in the HearthPwn URL after '\n '\"&sort=\"')\n parser.add_argument('--patch', type=int,\n help='the HearthPwn patch ID used when finding '\n 'decks, as seen in the HearthPwn URL after '\n '\"&filter-build=\"')\n parser.add_argument('--results', action='store_true',\n help='for all cards, print the: cardname, total decks '\n 'using the card, percentage of decks '\n 'using the card, and average number of the card '\n 'in decks using the card')\n return parser", "def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser", "def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)", "def setupParserOptions(self):\n\t\treturn", "def subparser( parser, subparsers ):", "def test_gen_parser(self):\n 
pass", "def getParser():\n\n parser = OptionParser()\n parser.add_option(\"-d\",\"--db\",dest='dbname',default=\"pyConTextWeb.db\",\n help='name of db containing reports to parse')\n parser.add_option(\"-o\",\"--odb\",dest='odbname',\n help='name of db containing results', default=\"pyConTextWeb.db\")\n #help='name of db containing results', default=\"testOutput.db\")\n #parser.add_option(\"-s\",\"--save_dir\",dest='save_dir',default='critFinderResults',\n parser.add_option(\"-s\",\"--save_dir\",dest='save_dir',default='critFinderResults',\n help='directory in which to store graphs of markups')\n parser.add_option(\"-t\",\"--table\",dest='table',default='pyConTextKit_report',\n help='table in database to select data from')\n parser.add_option(\"-i\",\"--id\",dest='id',default='rowid',\n help='column in table to select identifier from')\n parser.add_option(\"-g\", \"--graph\",action='store_true', dest='doGraphs',default=False)\n parser.add_option(\"-r\",\"--report\",dest='report_text',default='impression',\n help='column in table to select report text from')\n parser.add_option(\"-c\",\"--category\",dest='category',default='ALL',\n help='category of critical finding to search for. If ALL, all categories are processed')\n parser.add_option(\"-u\",\"--uncertainty_allowed\",dest=\"allow_uncertainty\",\n action=\"store_true\",default=False)\n parser.add_option(\"-a\",\"--dataset\",dest=\"dataset\",default='ALL',\n help='report dataset to analyze')\n parser.add_option(\"-b\",\"--rcat\",dest=\"rcat\",default='',\n help='report category to analyze')\n parser.add_option(\"-n\",\"--number\",dest=\"number\",default=20,\n help='number of reports to analyze')\n return parser", "def get_parser_test():\n copy.get_parser()", "def run(self, parsed):", "def __init__(self, parser=None):", "def test_parser(self, url):\n return self.get_meta(url)", "def __parser__(self):\n return self", "def _makeParser_search() :\n parser = argparse.ArgumentParser(\n description = SCRIPT_DESCRIPTION_SEARCH)\n parser.add_argument(\"-c\", \"--count\", action = \"store_true\",\n help = \"Just return the number of records, no fetch\")\n # Required named arguments (http://stackoverflow.com/questions/24180527/argparse-required-arguments-listed-under-optional-arguments)\n required = parser.add_argument_group(\"required named arguments\")\n # --email\n required.add_argument(\"-e\", \"--email\", type = str,\n help = \"User's email (required by Entrez)\")\n # --listId\n required.add_argument(\"-l\", \"--listId\", type = str,\n help = \"File containing one GenBank identifier per \"\n \"line. Use - for reading from stdin. \"\n \"Exactly one of --listId or \"\n \"--query must be specified, but not both.\")\n # --query\n required.add_argument(\"-q\", \"--query\", type = str,\n help = \"Query string for GenBank search. \"\n \"Exactly one of --listId or \"\n \"--query must be specified, but not both.\",\n metavar = \"SEARCH_TERM\")\n # Download options\n download = parser.add_argument_group(\"download-related options\")\n # --retmax\n download.add_argument(\"-r\", \"--retmax\", type = int, default = 0,\n help = \"Maximum number of entries to retrieve from \"\n \"GenBank, comprised between 1 and 10000. Use 0 for \"\n \"unlimited number of returned entries. 
(default: 0)\")\n # --download\n download.add_argument(\"-d\", \"--download\", action = \"store_true\",\n help = \"Download the full GenBank records\")\n # --forceDownload\n download.add_argument(\"-f\", \"--forceDownload\", action = \"store_true\",\n help = \"Download record even if file already exists \"\n \"(implies --download)\")\n # --fullWGS\n download.add_argument(\"--fullWGS\", action = \"store_true\",\n help = \"Also download full WGS sequence data when \"\n \"WGS trace reference is present in a GenBank record \"\n \"(only works if the original GenBank record is to be \"\n \"downloaded too or if --forceDownload is used)\")\n # --outputDir\n download.add_argument(\"-o\", \"--outputDir\", type = str, default = \".\",\n help = \"Destination folder for downloaded records \"\n \"(default: current directory)\")\n # --batchSize\n download.add_argument(\"-b\", \"--batchSize\", type = int, default = 5,\n help = \"Batch size for full record retrieval \"\n \"(default: 5)\")\n # --delay\n download.add_argument(\"--delay\", type = int, default = 15,\n help = \"Delay in seconds between successive batch \"\n \"retrieval of the full records (default: 15)\")\n return parser", "def setup_parser(iam='gfind', parser=None):\n\n if parser is None:\n parser = argparse.ArgumentParser(description=\"Locate available data.\")\n\n parser = gargs.common_args(parser, iam)\n parser.add_argument(\"--alt\", \"--gaper\", action=\"store_true\",\n dest=\"gaper\", default=False,\n help=\"Format the output so that it can be copied and\"\n \" pasted directly into a gMap or gAperture command\"\n \" line?\")\n parser.add_argument(\"--total\", \"--exponly\", action=\"store_true\",\n dest=\"exponly\", default=False, help=\"Report only the\"\n \" total raw exposure time available in the database.\")\n parser.add_argument(\"--quiet\", action=\"store_true\", dest=\"quiet\",\n help=\"Suppress all information to STDOUT.\",\n default=False)\n\n return parser", "def setup_parser(self, parser, args):\r\n\r\n pass", "def main():\n test_network_connection()\n parser()", "def __init__(self, parser: Any = None):", "def __init__(self,**kwargs):\r\n self.__dict__ = dict(list(kwargs.items()) + list(self.__dict__.items()))\r\n self.driver = kwargs.get('driver', None)\r\n self.scraper_url = self.state_storage_get_prop('scraper_url') #kwargs.get('scraper_url', None)\r\n self.scraper_url = self.reformat_scraper_url()\r\n self.retry_count = kwargs.get('retry_count', HitParadeBot.DEFAULT_RETRY)\r\n self.command = kwargs.get('command', None)\r\n self.open_url = self.state_storage_get_prop('data_selectors').get('open_url', True) #kwargs.get('data_selectors', {}).get('open_url', True)\r\n self.cache_manager = kwargs.get('cache_manager', None)\r\n parser_kwargs = {'driver' : self.driver}\r\n self.parser_kwargs = kwargs\r\n try:\r\n self.default_parser = kwargs.get('default_parser', 'BeautifulSoupParser')\r\n if self.default_parser is None:\r\n self.default_parser = self.cache_manager.cache_output_component_func(kwargs.get('default_parser', 'BeautifulSoupParser'), **kwargs)\r\n except:\r\n print('exception making parser')\r\n traceback.print_exc()\r\n self.use_once = kwargs.get('use_once', False)\r\n self.use_until_failure = kwargs.get('use_until_failure', False)\r\n self.web_driver = kwargs.get('web_driver', None)\r\n self.force_refresh = kwargs.get('force_refresh', False)\r\n self.get_external_ip_addressesss = kwargs.get('get_external_ip_adressesss', None)", "def create_parser():\n pass", "def parse(self):\n pass", "def parse(self):\n pass", "def 
parse(self):\n pass", "def parse(self):\n pass", "def parse(self):", "def parsing():\n # User Agents\n user_agents = '''\nAvailable User-Agents:\n winxpie60 Internet Explorer 6.0 (Windows XP)\n winxpie61 Internet Explorer 6.1 (Windows XP)\n winxpie70 Internet Explorer 7.0 (Windows XP)\n winxpie80 Internet Explorer 8.0 (Windows XP)\n winxpchrome20 Chrome 20.0.1132.47 (Windows XP)\n winxpfirefox12 Firefox 12.0 (Windows XP)\n winxpsafari5 Safari 5.1.7 (Windows XP)\n win2kie60 Internet Explorer 6.0 (Windows 2000)\n win2kie80 Internet Explorer 8.0 (Windows 2000)\n win7ie80 Internet Explorer 8.0 (Windows 7)\n win7ie90 Internet Explorer 9.0 (Windows 7)\n win7chrome20 Chrome 20.0.1132.47 (Windows 7)\n win7firefox3 Firefox 3.6.13 (Windows 7)\n win7safari5 Safari 5.1.7 (Windows 7)\n osx10safari5 Safari 5.1.1 (MacOS X 10.7.2)\n osx10chrome19 Chrome 19.0.1084.54 (MacOS X 10.7.4)\n galaxy2chrome18 Chrome 18.0.1025.166 (Samsung Galaxy S II,\\\nAndroid 4.0.3)\n galaxy2chrome25 Chrome 25.0.1364.123 (Samsung Galaxy S II,\\\nAndroid 4.0.3)\n linuxchrome26 Chrome 26.0.1410.19 (Linux)\n linuxfirefox19 Firefox 19.0 (Linux)\n '''\n\n # Description of Command Line arguments\n parser = argparse.ArgumentParser(description='Distributed Pure Python \\\nHoneyclient Implementation',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n usage='python %(prog)s [ thug-options ] url',\n fromfile_prefix_chars='@',\n epilog=user_agents)\n\n def link(urls):\n links = urls.strip().split(',')\n for url in links:\n try:\n if 'http://' not in url:\n url = 'http://' + url\n urlopen(url)\n except:\n raise argparse.ArgumentTypeError(\"%s doesn't exist\"%url)\n return urls\n\n def link_file(fn):\n fobj = open(fn, 'r')\n url = fobj.readline().strip()\n urls = []\n while url:\n try:\n if 'http://' not in url:\n url = 'http://' + url\n urlopen(url)\n urls.append(url)\n except:\n raise argparse.ArgumentTypeError(\"%s doesn't exist\"%url)\n url = fobj.readline().strip()\n return urls\n\n # Mutually Exclusive Group for URL's\n links = parser.add_argument_group('URL Options')\n url = links.add_mutually_exclusive_group(required=True)\n url.add_argument('-U', '--url',\n metavar='',\n type=link,\n nargs='+',\n help=\"Enter Single/Multiple URL's to Analyze\")\n url.add_argument('-uf', '--url-file',\n metavar='',\n type=link_file,\n help=\"File containing bunch of URL's(1 per line)\")\n\n def qfile(fn):\n fobj = open(fn, 'r')\n queues = fobj.readlines()\n queues = map((lambda x: x.replace('\\n', '')), queues)\n return queues\n\n # ThugD Options\n thugd = parser.add_argument_group('Thug Distributed Options')\n thugd.add_argument('-ia', '--include-agent',\n action='store_const',\n const=agents_priority,\n help='Display Thug Version')\n # Queues Mutually Exclusive Group\n queue = thugd.add_mutually_exclusive_group(required=False)\n queue.add_argument('-qu', '--queue',\n nargs='+',\n metavar='',\n default='generic',\n help=\"Specify Queue/Queues to route URL's \\\n(*Single Queue: URL's will be routed to specified Queue, \\\n*Multiple Queues: URL's will be routed to ALL specified Queues)\")\n queue.add_argument('-qf', '--queue-file',\n metavar='',\n type=qfile,\n help=\"Specify File name containing Queue names(1 per \\\nline)\")\n\n # Thug Options\n thug = parser.add_argument_group('Thug Options')\n thug.add_argument('-V', '--version',\n action='store_true',\n help='Display Thug Version')\n thug.add_argument('-u', '--useragent',\n metavar='',\n default='winxpie60',\n help='Select a user agent(see below for values, \\\ndefault: winxpie60)')\n 
thug.add_argument('-e', '--events',\n metavar='',\n help='Enable comma-separated specified DOM events \\\nhandling')\n thug.add_argument('-w', '--delay',\n metavar='',\n help='Set a maximum setTimeout/setInterval delay value \\\n(in milliseconds)')\n thug.add_argument('-n', '--logdir',\n metavar='',\n help='Set the log output directory')\n thug.add_argument('-o', '--output',\n metavar='',\n help='Log to a specified file')\n thug.add_argument('-r', '--referer',\n metavar='',\n help='Specify a referer')\n thug.add_argument('-p', '--proxy',\n metavar='',\n help='Specify a proxy (see below for format and \\\nsupported schemes)')\n thug.add_argument('-l', '--local',\n action='store_true',\n help='Analyze a locally saved page')\n thug.add_argument('-x', '--local-nofetch',\n action='store_true',\n help='Analyze a locally saved page and prevent remote\\\ncontent fetching')\n thug.add_argument('-v', '--verbose',\n action='store_true',\n help='Enable verbose mode')\n thug.add_argument('-d', '--debug',\n action='store_true',\n help='Enable debug mode')\n thug.add_argument('-q', '--quiet',\n action='store_true',\n help='Disable console logging')\n thug.add_argument('-m', '--no-cache',\n action='store_true',\n help='Disable local web cache')\n thug.add_argument('-a', '--ast-debug',\n action='store_true',\n help='Enable AST debug mode (requires \\\ndebug mode)')\n thug.add_argument('-t', '--threshold',\n metavar='',\n help='Maximum pages to fetch')\n thug.add_argument('-E', '--extensive',\n action='store_true',\n help='Extensive fetch of linked pages')\n thug.add_argument('-T', '--timeout',\n metavar='',\n help='Timeout in minutes')\n\n # Plugins\n plugin = parser.add_argument_group('Plugins')\n plugin.add_argument('-A', '--adobepdf',\n metavar='',\n default='9.1.0',\n help='Specify the Adobe Acrobat Reader version \\\n(default: 9.1.0)')\n plugin.add_argument('-P', '--no-adobepdf',\n action='store_true',\n help='Disable Adobe Acrobat Reader Plugin')\n plugin.add_argument('-S', '--shockwave',\n metavar='',\n default='10.0.64.0',\n help='Specify the Shockwave Flash version \\\n(default: 10.0.64.0)')\n plugin.add_argument('-R', '--no-shockwave',\n action='store_true',\n help='Disable Shockwave Flash Plugin')\n plugin.add_argument('-J', '--javaplugin',\n metavar='',\n default='1.6.0.32',\n help='Specify the Java Plugin version (default: \\\n1.6.0.32)')\n plugin.add_argument('-K', '--no-javaplugin',\n action='store_true',\n help='Disable Java Plugin')\n\n # Classifier\n classifier = parser.add_argument_group('Classifiers')\n classifier.add_argument('-Q', '--urlclassifier',\n metavar='',\n help='Specify a list of additional (comma \\\nseparated) URL classifier rule files')\n classifier.add_argument('-W', '--jsclassifier',\n metavar='',\n help='Specify a list of additional (comma \\\nseparated) JS classifier rule files')\n\n return parser.parse_args()", "def parse_args():\n parser = MyParser(description='Data processing and analytics library \\\n for OpenStack Browbeat perf data')\n\n parser.add_argument('-s', '--summary', dest=\"days\", type=int, default=-1,\n help='-s N summary of last N days of results')\n\n parser.add_argument('--summary-uuid', dest=\"summary_uuid\", type=str,\n default=None,\n help='--summary-uuid UUID summary of a specific uuid')\n\n parser.add_argument('--short-summary', dest=\"short_days\", type=int,\n default=-1,\n help='--short-summary N gives \\\n summary of last N days of results but uses cockroach \\\n db so only provides with basic summary')\n\n 
parser.add_argument('--upload-timesummary', dest=\"timeseries_uuid\",\n type=str, default=None,\n help='--upload-timesummary UUID \\\n uploads the features computed from data obtained from\\\n graphite. ')\n\n parser.add_argument('--upload-logsummary', dest=\"loggin_uuid\",\n type=str, default=None,\n help='--upload-logsummary UUID \\\n uploads the log summary to crdb \\\n currently just summarizes over entire timeperiod. ')\n\n parser.add_argument('-u', '--update-db', dest='update', type=bool,\n default=False,\n help='-u True pushes data to cockroach db')\n\n parser.add_argument('--update-clf', dest=\"clf_days\", type=int,\n default=-1,\n help='--update-clf 60 will update all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days')\n\n parser.add_argument('--test-clf', dest=\"test_days\", type=int,\n default=-1,\n help='--test-clf 60 will train all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days and then test it \\\n and display metrics')\n\n parser.add_argument('-v', '--osp-version', dest='version', type=str,\n default=None,\n help='-v 11-tripleo only returns hits for that \\\n OpenStack version, \\\n only supported by summary right now')\n\n parser.add_argument('-c', '--config', dest='config', type=str,\n default=pkg_resources.resource_filename('bml',\n \"config.yml\"),\n help='-c <config file path> use custom config file')\n\n args = parser.parse_args()\n return args", "def set_parser():\n\n print('\\n\\nLoading Options and Configurations\\n' + 72 * '~' + '\\n')\n parser = ArgumentParser( \\\n description=('''\\n\nUsed to validate the TELEMAC system against a benchmark of test cases for\na certain rank, and a certain tag'''))\n\n parser = add_runcode_argument(parser)\n parser.add_argument( \\\n \"-b\", \"--bypass\", action=\"store_true\", dest=\"bypass\", default=False,\n help=\"will bypass execution failures and try to carry on \"\\\n \"(final report at the end)\")\n # Combine with all filters above, \"rank\" now controls everything\n # and Jenkins can control \"rank\"\n parser.add_argument( \\\n \"-k\", \"--rank\", dest=\"rank\", type=int, default=4,\n help=\"specify the ranks to be validated all rank lower or equal to \"\n \"the value will be run\")\n parser.add_argument( \\\n \"--tags\", dest=\"tags\", default='all',\n help=\\\n \"specify tags (; separated) to run \"\\\n \" '-tag' will do the opposite and \"\\\n \"tag1+tag2 will run cases that has both tag1 and tag2), \"\\\n \"default is all of them\")\n parser.add_argument( \\\n \"--valrootdir\", dest=\"val_root\", default='',\n help=\"specify the directory in which to search the validation cases, \"\\\n \"default is taken from config file\")\n parser.add_argument( \\\n \"--vnv-pre\", action=\"store_true\", dest=\"vnv_pre\", default=False,\n help=\"Only do pre-treatment\")\n parser.add_argument( \\\n \"--vnv-run\", action=\"store_true\", dest=\"vnv_run\", default=False,\n help=\"Only do execution for each study\")\n parser.add_argument( \\\n \"--vnv-check\", action=\"store_true\", dest=\"vnv_check\", default=False,\n help=\"Only do check of results (epsilons)\")\n parser.add_argument( \\\n \"--vnv-post\", action=\"store_true\", dest=\"vnv_post\", default=False,\n help=\"Only do post-treatment\")\n parser.add_argument( \\\n \"--report-name\", dest=\"report_name\", default='',\n help=\"will create a csv containing information on the validation \"\\\n \"such as execution time, rank, if it passed...\")\n parser.add_argument( \\\n \"--clean\", 
action=\"store_true\", dest=\"cleanup\", default=False,\n help=\"will erase all object, executable, result files \"\\\n \"from subfolders for the actual configuration\")\n parser.add_argument( \\\n \"--full-clean\", action=\"store_true\", dest=\"full_cleanup\", default=False,\n help=\"will erase all vnv study folders regarding of configurations\")\n\n # Options for notebook\n parser.add_argument(\n \"--notebook\",\n dest=\"notebook\",\n action=\"store_true\", default=False,\n help=\"Run validation of notebook\")\n parser.add_argument(\n \"--notebook-timeout\",\n dest=\"nb_timeout\", type=int, default=60000,\n help=\"Time after whihc the notebook will be killed if still running\")\n parser.add_argument(\n \"--notebook-update\",\n dest=\"nb_update\",\n action=\"store_true\", default=False,\n help=\"Update notebook file with the runned one\")\n parser.add_argument(\n \"--verbose\",\n dest=\"verbose\",\n action=\"store_true\", default=False,\n help=\"More verbose validation\")\n\n # Options for api\n parser.add_argument(\n \"--api\",\n dest=\"api\",\n action=\"store_true\", default=False,\n help=\"Run validation of api\")\n\n parser.add_argument(\"args\", metavar='Python file(s)', nargs='*')\n options = parser.parse_args()\n\n # Conversion of options.tags (replacing all by list) and checking that the\n # value is valid\n # Removing quotes\n tmp_tag = options.tags.strip(\"'\\\"\")\n options.tags = tmp_tag\n # Checking that tags are valid\n for tag in options.tags.split(';'):\n if '+' in tag:\n for and_tag in tag.split('+'):\n # Removing - if in tag\n ttag = and_tag[1:] if and_tag[0] == '-' else and_tag\n if ttag not in TAGS:\n raise TelemacException(\\\n \"Unknow tag: {tag}\\nTags available: {tags}\"\\\n .format(tag=ttag, tags=';'.join(TAGS)))\n else:\n if tag == 'all':\n continue\n # Removing - if in tag\n ttag = tag[1:] if tag[0] == '-' else tag\n if ttag not in TAGS:\n raise TelemacException(\\\n \"Unknow tag: {tag}\\nTags available: {tags}\"\\\n .format(tag=ttag, tags=';'.join(TAGS)))\n\n # Replacing all by list of tags\n if 'all' in options.tags.split(';'):\n options.tags = options.tags.replace('all', ';'.join(TAGS))\n\n # If pre, run, post are all false switching them to true\n if not(options.vnv_pre or options.vnv_run or\n options.vnv_check or options.vnv_post):\n options.vnv_pre = True\n options.vnv_run = True\n options.vnv_check = True\n options.vnv_post = True\n\n return options", "def configure_parser(sub_parsers):\n\n parser = sub_parsers.add_parser(\n 'ants',\n description='Solve a traveling salesman problem using ant colony optimization',\n help='Ant colony optimization for the traveling salesman problem')\n\n parser.add_argument(\n '-r',\n '--rho',\n type=float,\n default=.5,\n help='Evaporation rate (default 0.5)')\n parser.add_argument(\n '-a',\n '--alpha',\n type=float,\n default=.5,\n help='Relative importance of the pheromone (default 0.5)')\n parser.add_argument(\n '-b',\n '--beta',\n type=float,\n default=.5,\n help='Relative importance of the heuristic information (default 0.5)')\n parser.add_argument(\n '-q',\n '--q',\n type=float,\n default=1.,\n help='Constant Q. 
Used to calculate the pheromone, laid down on an edge (default 1)')\n parser.add_argument(\n '-n',\n '--iteration-number',\n type=int,\n default=10,\n help='Number of iterations to execute (default 10)')\n parser.add_argument(\n '-o',\n '--two-opt',\n action='store_true',\n default=False,\n help='Enable to use 2-opt local search after each iteration (default off)')\n parser.add_argument(\n '-t',\n '--tsp-file',\n type=str,\n default=path.join(path.abspath(path.dirname(inspect.getfile(inspect.currentframe()))), 'resources/burma14.tsp'),\n help='Path of the tsp file that shall be loaded (default loads the built-in burma14.tsp)')\n\n parser.add_argument(\n 'ant_number',\n type=int,\n help='Number of ants used for solving')\n\n parser.set_defaults(func=_run_aco4tsp)", "def make_parser():\n\tlogging.info(\"Constructing parser\")\n\tdescription = \"Store and retrieve snippets of text\"\n\tparser = argparse.ArgumentParser(description = description)\n\n\tsubparsers = parser.add_subparsers(dest=\"command\", help=\"Available commands\")\n\n\t# Subparser for the put command\n\tlogging.debug(\"Constructing put subparser\")\n\tput_parser = subparsers.add_parser(\"put\", help = \"Store a snippet\")\n\tput_parser.add_argument(\"name\", help=\"The name of the snippet\")\n\tput_parser.add_argument(\"snippet\", help=\"The snippet\")\n\tput_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", help=\"The snippet filename\")\n\t\n\t# Subparser for the get command\n\tlogging.debug(\"Constructing get subparser\")\n\tget_parser = subparsers.add_parser(\"get\", help=\"Get a snippet\")\n\tget_parser.add_argument(\"name\", help=\"The name of the snippet\")\n\tget_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\n\t# Subparser for the search command\n\tlogging.debug(\"Constructing search subparser\")\n\tsearch_parser = subparsers.add_parser(\"search\", help=\"Search for a snippet\")\n\tsearch_parser.add_argument(\"snippet_portion\", help=\"The snippet you're searching for\")\n\tsearch_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\n\t# Subparser for the update command\n\tlogging.debug(\"Constructing update subparser\")\n\tupdate_parser = subparsers.add_parser(\"update\", help=\"Search for a snippet\")\n\tupdate_parser.add_argument(\"snippet_original\", help=\"The snippet you're searching for\")\n\tupdate_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\n\t# Subparser for the update2 command\n\tlogging.debug(\"Constructing update2 subparser\")\n\tupdate2_parser = subparsers.add_parser(\"update2\", help=\"Search for a snippet\")\n\tupdate2_parser.add_argument(\"snippet_original\", help=\"The snippet you're searching for\")\n\tupdate2_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\tupdate2_parser.add_argument(\"change\", help=\"The snippet you want to change it to\")\n\n\treturn parser", "def test_parser():\n return parser(\"Testing\", \"Use this from a test\", \"\")", "def setup_parser_arguments(parser):\n parser._optionals.title = \"Common options\"\n parser.add_argument(\n '-H', '--host',\n help=\"Host to load test in the following format: http://10.21.32.33\"\n )\n # Number of Locust users\n parser.add_argument(\n '-c', '--clients',\n type=int,\n dest='num_clients',\n default=1,\n help=\"Number of concurrent Locust users. 
Only used together with --headless\"\n )\n # User hatch rate\n parser.add_argument(\n '-r', '--hatch-rate',\n type=float,\n default=1,\n help=\"The rate per second in which clients are spawned. Only used together with --headless\"\n )\n # Time limit of the test run\n parser.add_argument(\n '-t', '--run-time',\n help=\"Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless\"\n )\n # List locust commands found in loaded locust files/source files\n parser.add_argument(\n '-l', '--list',\n action='store_true',\n dest='list_commands',\n help=\"Show list of possible locust classes and exit\"\n )\n \n web_ui_group = parser.add_argument_group(\"Web UI options\")\n web_ui_group.add_argument(\n '--web-host',\n default=\"\",\n help=\"Host to bind the web interface to. Defaults to '*' (all interfaces)\"\n )\n web_ui_group.add_argument(\n '--web-port', '-P',\n type=int,\n default=8089,\n help=\"Port on which to run web host\"\n )\n # if we should print stats in the console\n web_ui_group.add_argument(\n '--headless',\n action='store_true',\n help=\"Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified.\"\n )\n web_ui_group.add_argument(\n '--web-auth',\n type=str,\n dest='web_auth',\n default=None,\n help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'\n )\n \n master_group = parser.add_argument_group(\n \"Master options\", \n \"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.\",\n )\n # if locust should be run in distributed mode as master\n master_group.add_argument(\n '--master',\n action='store_true',\n help=\"Set locust to run in distributed mode with this process as master\"\n )\n master_group.add_argument(\n '--master-bind-host',\n default=\"*\",\n help=\"Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces).\"\n )\n master_group.add_argument(\n '--master-bind-port',\n type=int,\n default=5557,\n help=\"Port that locust master should bind to. Only used when running with --master. Defaults to 5557.\"\n )\n master_group.add_argument(\n '--expect-workers',\n type=int,\n default=1,\n help=\"How many workers master should expect to connect before starting the test (only when --headless used).\"\n )\n master_group.add_argument(\n '--expect-slaves',\n action='store_true',\n help=configargparse.SUPPRESS\n )\n \n worker_group = parser.add_argument_group(\n \"Worker options\", \n textwrap.dedent(\"\"\"\n Options for running a Locust Worker node when running Locust distributed. \n Only the LOCUSTFILE (-f option) need to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.\n \"\"\"),\n )\n # if locust should be run in distributed mode as worker\n worker_group.add_argument(\n '--worker',\n action='store_true',\n help=\"Set locust to run in distributed mode with this process as worker\"\n )\n worker_group.add_argument(\n '--slave',\n action='store_true',\n help=configargparse.SUPPRESS\n )\n # master host options\n worker_group.add_argument(\n '--master-host',\n default=\"127.0.0.1\",\n help=\"Host or IP address of locust master for distributed load testing. Only used when running with --worker. 
Defaults to 127.0.0.1.\"\n )\n worker_group.add_argument(\n '--master-port',\n type=int,\n default=5557,\n help=\"The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557.\"\n )\n \n stats_group = parser.add_argument_group(\"Request statistics options\")\n # A file that contains the current request stats.\n stats_group.add_argument(\n '--csv', '--csv-base-name',\n dest='csvfilebase',\n help=\"Store current request stats to files in CSV format.\",\n )\n # Adds each stats entry at every iteration to the _stats_history.csv file.\n stats_group.add_argument(\n '--csv-full-history',\n action='store_true',\n default=False,\n dest='stats_history_enabled',\n help=\"Store each stats entry in CSV format to _stats_history.csv file\",\n ) \n # if we should print stats in the console\n stats_group.add_argument(\n '--print-stats',\n action='store_true',\n help=\"Print stats in the console\"\n )\n # only print summary stats\n stats_group.add_argument(\n '--only-summary',\n action='store_true',\n help='Only print the summary stats'\n )\n stats_group.add_argument(\n '--reset-stats',\n action='store_true',\n help=\"Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode\",\n )\n \n log_group = parser.add_argument_group(\"Logging options\")\n # skip logging setup\n log_group.add_argument(\n '--skip-log-setup',\n action='store_true',\n dest='skip_log_setup',\n default=False,\n help=\"Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults.\"\n )\n # log level\n log_group.add_argument(\n '--loglevel', '-L',\n default='INFO',\n help=\"Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.\",\n )\n # log file\n log_group.add_argument(\n '--logfile',\n help=\"Path to log file. If not set, log will go to stdout/stderr\",\n )\n \n step_load_group = parser.add_argument_group(\"Step load options\")\n # Enable Step Load mode\n step_load_group.add_argument(\n '--step-load',\n action='store_true',\n help=\"Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified.\"\n )\n # Number of clients to incease by Step\n step_load_group.add_argument(\n '--step-clients',\n type=int,\n default=1,\n help=\"Client count to increase by step in Step Load mode. Only used together with --step-load\"\n )\n # Time limit of each step\n step_load_group.add_argument(\n '--step-time',\n help=\"Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load\"\n )\n \n \n other_group = parser.add_argument_group(\"Other options\")\n # Display ratio table of all tasks\n other_group.add_argument(\n '--show-task-ratio',\n action='store_true',\n help=\"Print table of the locust classes' task execution ratio\"\n )\n # Display ratio table of all tasks in JSON format\n other_group.add_argument(\n '--show-task-ratio-json',\n action='store_true',\n help=\"Print json data of the locust classes' task execution ratio\"\n )\n # Version number (optparse gives you --version but we have to do it\n # ourselves to get -V too. 
sigh)\n other_group.add_argument(\n '--version', '-V',\n action='version',\n help=\"Show program's version number and exit\",\n version='%(prog)s {}'.format(version),\n )\n # set the exit code to post on errors\n other_group.add_argument(\n '--exit-code-on-error',\n type=int,\n default=1,\n help=\"Sets the process exit code to use when a test result contain any failure or error\"\n )\n other_group.add_argument(\n '-s', '--stop-timeout',\n action='store',\n type=int,\n dest='stop_timeout',\n default=None,\n help=\"Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed.\"\n )\n \n locust_classes_group = parser.add_argument_group(\"Locust user classes\")\n locust_classes_group.add_argument(\n 'locust_classes',\n nargs='*',\n metavar='LocustClass',\n help=\"Optionally specify which Locust classes that should be used (available Locust classes can be listed with -l or --list)\",\n )", "def get_parser():\n # Parent and only parser.\n parser = argparse.ArgumentParser(\n add_help=True,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('mode', action='store',\n choices=range(len(MODES)),\n type=int,\n help='Select mode of file download.\\n'\n ' e.g: 0(rated) or 1(list).')\n parser.add_argument('torr_page', action='store',\n choices=range(len(TORRENTS)),\n type=int,\n help='Select tracking page to download from.\\n'\n ' e.g: 0 to .. ' + str(len(TORRENTS)-1) + '.')\n parser.add_argument('str_search', action='store',\n type=str,\n help='Input torrent string to search.\\n'\n ' e.g: \"String search\"')\n return(parser)", "def extend_parser(self, parser):\n return parser", "def parser():\n fetch_all_news_codes()\n load_config_key()\n\n if not sys.argv[1:]:\n print(\"Arguments needed. 
Use argument --help/-h for more information.\")\n else:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--show_all\", \"-sa\", action=\"store_true\",\n help=\"Shows all available news channel codes.\")\n parser.add_argument(\"--categories\", \"-c\", action=\"store_true\",\n help=\"Shows all available news categories.\")\n parser.add_argument(\"--show\", \"-s\", action=\"store\",\n help=\"Shows all news channel codes for a specified category.\")\n parser.add_argument(\"--news\", \"-n\", type=str, help=\"Shows news articles \"\n \"for a specified news channel code.\")\n args = parser.parse_args()\n\n if args.show_all:\n show_sources_all()\n elif args.categories:\n show_categories()\n elif args.show:\n show_sources_category(args.show)\n elif args.news:\n if args.news in news_codes:\n show_news(args.news, BASE_URL)\n else:\n print(\"Invalid news code.\")\n sys.exit(1)", "def get_parser():\n # Get parsers for various model architectures.\n model_parser = ModelFactory.get_all_parsers()\n # Get parsers for various optimizers.\n optimizer_parser = OptimizerFactory.get_all_parsers()\n # Add parent parsers.\n parent_parsers = model_parser + optimizer_parser\n parser = argparse.ArgumentParser(parents=parent_parsers)\n\n # Generic options\n parser.add_argument('--checkpoint-step', type=int, default=1,\n help='Number of epochs between successive checkpoint creations')\n parser.add_argument('--config-file', type=str, default=[], nargs='*',\n help='File(s) to read the command-line arguments from')\n parser.add_argument('--continue', action='store_true',\n help='Continue the execution of the last experiment saved into the export directory')\n parser.add_argument('--debug', action='store_true', help='Show debug messages')\n parser.add_argument('--export-dir', type=str, required=True, help='Export directory')\n parser.add_argument('--no-gpu', action='store_true', help='Use CPU')\n \n parser.add_argument(\"--wandb-directory\", type=str, default=\"../wandb\")\n parser.add_argument(\"--disable-wandb\", action=\"store_true\", help=\"No Wandb logging\")\n\n # Data options\n parser.add_argument('--batch-size', type=int, default=[16], nargs='*', help='Batch size(s)')\n parser.add_argument('--dataset', type=str, default=[consts.SIGMORPHON2020], nargs='*',\n choices=[consts.SIGMORPHON2020], help='Dataset(s) to train on')\n parser.add_argument('--sigmorphon2020-root', type=str, help='Root directory for the SIGMORPHON 2020 dataset')\n\n # Language options\n parser.add_argument('--language-families', type=str, nargs='*', default=None,\n help='The families of languages to load the data for.'\n ' If not provided, all available families will be used.')\n parser.add_argument('--language-info-file', type=str, default='lang_config.tsv',\n help='The language information file.')\n parser.add_argument('--languages', type=str, nargs='*', default=None,\n help='The languages to load the data for.'\n ' If not provided, all available languages will be used.')\n\n # Optimizer options\n parser.add_argument('--optimizer', type=str, default=[OptimizerFactory.optimizers[0]],\n choices=OptimizerFactory.optimizers, nargs='*', help='Optimizer algorithm(s)')\n parser.add_argument('--num-epochs', type=int, default=30, help='Number(s) of epochs')\n\n # Model options\n parser.add_argument('--model-architecture', type=str, default=[ModelFactory.architectures[0]], nargs='*',\n choices=ModelFactory.architectures, help='Model architecture(s)')\n \n # Parallelism Optoions, affect various\n parser.add_argument('--loader-threads', 
type=int, default=0, help='Data loading threads. Default to 0 (load in main)')\n parser.add_argument('--use-dataparallel', action='store_true', help='Use torch.nn.DataParallel to wrap the model?')\n\n return parser", "def setup_parser():\n parser = HelpfulParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('infile', type=str, help=\"input data file\")\n\n parser.add_argument('-u', '--usage', action=\"help\",\n help=\"show this help message and exit\")\n parser.add_argument('-h', '--host', metavar='HOST', type=str,\n default='localhost', help='Server hostname')\n parser.add_argument('-p', '--port', metavar='PORT', type=int,\n default='3000', help='Server port')\n parser.add_argument('-U', '--user', metavar='USER', type=str,\n default=None, help='Username')\n parser.add_argument('-P', '--passwd', metavar='PW', type=str,\n default=None, help='Password')\n parser.add_argument('-n', '--nspace', metavar='NS', type=str,\n default='test', help='Namespace')\n parser.add_argument('-s', '--set', metavar='SET', type=str,\n default='osm', help='Set name')\n return parser", "def common_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--prefix\", type=str, default=\"scatter-\",\n help=\"The prefix of the ouput files. Output will be \"\n \"named like: <PREFIX><N>.bed, in which N is an \"\n \"incrementing number. Default 'scatter-'.\")\n parser.add_argument(\"input\", metavar=\"INPUT\", type=str,\n help=f\"The input file. The format is detected by the \"\n f\"extension. Supported extensions are: \"\n f\"{SUPPORTED_EXTENSIONS_STRING}.\")\n parser.add_argument(\"-P\", \"--print-paths\", action=\"store_true\",\n help=\"If set prints paths of the output files to \"\n \"STDOUT. This makes the program usable in \"\n \"scripts and worfklows.\")\n return parser", "def fill_parser(self, parser):\n parser.add_argument(\n \"library\",\n nargs=\"?\",\n help=\"Library to fetch (e.g. 
charms.mycharm.v2.foo.); optional, default to all\",\n )", "def build_parser():\n \n parser = argparse.ArgumentParser(\n description='Interfaces with the Synapse repository.')\n parser.add_argument(\n '--version',\n action='version',\n version='Synapse Client %s' % synapseclient.__version__)\n parser.add_argument(\n '-u', '--username',\n dest='synapseUser',\n help='Username used to connect to Synapse')\n parser.add_argument(\n '-p', '--password',\n dest='synapsePassword',\n help='Password used to connect to Synapse')\n parser.add_argument(\n '--debug',\n dest='debug',\n action='store_true')\n parser.add_argument(\n '-s', '--skip-checks',\n dest='skip_checks',\n action='store_true',\n help='suppress checking for version upgrade messages and endpoint redirection')\n\n\n subparsers = parser.add_subparsers(\n title='commands',\n description='The following commands are available:',\n help='For additional help: \"synapse <COMMAND> -h\"')\n\n \n parser_get = subparsers.add_parser(\n 'get',\n help='downloads a dataset from Synapse')\n parser_get.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired data object')\n parser_get.set_defaults(func=get)\n\n \n parser_store = subparsers.add_parser(\n 'store',\n help='depending on the arguments supplied, '\n 'store will either create, add, or update')\n group = parser_store.add_mutually_exclusive_group()\n group.add_argument(\n '--id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of the Synapse object to update')\n group.add_argument(\n '--parentid',\n metavar='syn123', type=str,\n help='Synapse ID of project or folder where to upload new data.')\n parser_store.add_argument(\n '--name',\n type=str, nargs=\"+\",\n help='Name of data object in Synapse')\n parser_store.add_argument(\n '--description',\n type=str, nargs=\"+\",\n help='Description of data object in Synapse.')\n parser_store.add_argument(\n '--type',\n type=str, default='File',\n help='Type of object, such as \"File\", \"Folder\", or '\n '\"Project\", to create in Synapse. 
Defaults to \"File\"')\n parser_store.add_argument(\n '--used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_store.add_argument(\n '--executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_store.add_argument(\n '--file',\n type=str,\n help='file to be added to synapse.')\n parser_store.set_defaults(func=store)\n\n\n parser_delete = subparsers.add_parser(\n 'delete',\n help='removes a dataset from Synapse')\n parser_delete.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired data object')\n parser_delete.set_defaults(func=delete)\n\n\n parser_query = subparsers.add_parser(\n 'query',\n help='Performs SQL like queries on Synapse')\n parser_query.add_argument(\n 'queryString',\n metavar='string',\n type=str, nargs='*',\n help='A query string, see https://sagebionetworks.jira.com/wiki/'\n 'display/PLFM/Repository+Service+API#'\n 'RepositoryServiceAPI-QueryAPI for more information')\n parser_query.set_defaults(func=query)\n \n \n parser_submit = subparsers.add_parser(\n 'submit',\n help='submit an entity or a file for evaluation')\n parser_submit.add_argument(\n '--evaluationID', '--evalID',\n type=str,\n help='Evaluation ID where the entity/file will be submitted')\n parser_submit.add_argument(\n '--evaluationName', '--evalN',\n type=str,\n help='Evaluation Name where the entity/file will be submitted')\n parser_submit.add_argument(\n '--evaluation',\n type=str,\n help=argparse.SUPPRESS) #mainly to maintain the backward compatibility\n parser_submit.add_argument(\n '--entity', '--eid',\n type=str,\n help='Synapse ID of the entity to be submitted')\n parser_submit.add_argument(\n '--file', '-f',\n type=str,\n help='File to be submitted to the challenge')\n parser_submit.add_argument(\n '--parentId', '--pid',\n type=str,\n help='Synapse ID of project or folder where to upload data')\n parser_submit.add_argument(\n '--name',\n type=str,\n help='Name of the submission')\n parser_submit.add_argument(\n '--teamName', '--team',\n type=str,\n help='Publicly displayed name of team for the submission[defaults to username]')\n parser_submit.add_argument(\n '--used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_submit.add_argument(\n '--executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_submit.set_defaults(func=submit)\n\n \n parser_get = subparsers.add_parser(\n 'show',\n help='show metadata for an entity')\n parser_get.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired synapse object')\n parser_get.set_defaults(func=show)\n\n \n parser_cat = subparsers.add_parser(\n 'cat',\n help='prints a dataset from Synapse')\n parser_cat.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired data object')\n parser_cat.set_defaults(func=cat)\n\n\n parser_set_provenance = subparsers.add_parser(\n 'set-provenance',\n help='create provenance records')\n parser_set_provenance.add_argument(\n '-id',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of entity whose provenance we are accessing.')\n parser_set_provenance.add_argument(\n '-name',\n metavar='NAME', type=str, required=False,\n help='Name of the activity that generated the entity')\n 
parser_set_provenance.add_argument(\n '-description',\n metavar='DESCRIPTION', type=str, required=False,\n help='Description of the activity that generated the entity')\n parser_set_provenance.add_argument(\n '-o', '-output',\n metavar='OUTPUT_FILE', dest='output',\n const='STDOUT', nargs='?', type=str,\n help='Output the provenance record in JSON format')\n parser_set_provenance.add_argument(\n '-used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_set_provenance.add_argument(\n '-executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_set_provenance.set_defaults(func=setProvenance)\n\n\n parser_get_provenance = subparsers.add_parser(\n 'get-provenance',\n help='show provenance records')\n parser_get_provenance.add_argument(\n '-id',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of entity whose provenance we are accessing.')\n parser_get_provenance.add_argument(\n '-o', '-output',\n metavar='OUTPUT_FILE', dest='output',\n const='STDOUT', nargs='?', type=str,\n help='Output the provenance record in JSON format')\n parser_get_provenance.set_defaults(func=getProvenance)\n\n parser_add = subparsers.add_parser(\n 'add',\n help='uploads and adds a dataset to Synapse')\n parser_add.add_argument(\n '-parentid', '-parentId',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of project or folder where to upload data.')\n parser_add.add_argument(\n '-name',\n metavar='NAME', type=str, required=False,\n help='Name of data object in Synapse')\n parser_add.add_argument(\n '-description',\n metavar='DESCRIPTION', type=str,\n help='Description of data object in Synapse.')\n parser_add.add_argument(\n '-type',\n type=str, default='File',\n help='Type of object to create in synapse. 
Defaults to \"File\".')\n parser_add.add_argument(\n '-used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_add.add_argument(\n '-executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_add.add_argument(\n 'file',\n type=str,\n help='file to be added to synapse.')\n parser_add.set_defaults(func=add)\n\n\n parser_create = subparsers.add_parser(\n 'create',\n help='Creates folders or projects on Synapse')\n parser_create.add_argument(\n '-parentid', '-parentId',\n metavar='syn123', type=str, required=False,\n help='Synapse ID of project or folder where to place folder [not used with project]')\n parser_create.add_argument(\n '-name',\n metavar='NAME', type=str, required=True,\n help='Name of folder/project.')\n parser_create.add_argument(\n '-description',\n metavar='DESCRIPTION', type=str,\n help='Description of project/folder')\n parser_create.add_argument(\n 'type',\n type=str,\n help='Type of object to create in synapse one of {Project, Folder}')\n parser_create.set_defaults(func=create)\n\n\n parser_update = subparsers.add_parser(\n 'update',\n help='uploads a new file to an existing Synapse Entity')\n parser_update.add_argument(\n '-id',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of entity to be updated')\n parser_update.add_argument(\n 'file',\n type=str,\n help='file to be added to synapse.')\n parser_update.set_defaults(func=update)\n\n\n parser_onweb = subparsers.add_parser(\n 'onweb',\n help='opens Synapse website for Entity')\n parser_onweb.add_argument(\n 'id',\n type=str,\n help='Synapse id')\n parser_onweb.set_defaults(func=onweb)\n\n\n ## the purpose of the login command (as opposed to just using the -u and -p args) is\n ## to allow the command line user to cache credentials\n parser_login = subparsers.add_parser(\n 'login',\n help='login to Synapse and (optionally) cache credentials')\n parser_login.add_argument(\n '-u', '--username',\n dest='synapseUser',\n help='Username used to connect to Synapse')\n parser_login.add_argument(\n '-p', '--password',\n dest='synapsePassword',\n help='Password used to connect to Synapse')\n parser_login.add_argument(\n '--rememberMe', '--remember-me',\n dest='rememberMe',\n action='store_true',\n default=False,\n help='Cache credentials for automatic authentication on future interactions with Synapse')\n parser_login.set_defaults(func=login)\n\n\n return parser", "def _parse(self, args):\n parser = self._create_parser()\n return parser.parse(args)", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def get_parser():\n parser = argparse.ArgumentParser(description='Parser des liens sur les sites Jahia et Wordpress.')\n parser.add_argument('ficher_des_sites', help='le fichier contenant les sites a parser.')\n parser.add_argument('-v', '--version', help='affiche la version du parser',\n action='version', version='%(prog)s ' + __version__)\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(description='Parser des liens sur les sites Jahia et Wordpress.')\n parser.add_argument('ficher_des_sites', help='le fichier contenant les 
sites a parser.')\n parser.add_argument('-v', '--version', help='affiche la version du parser',\n action='version', version='%(prog)s ' + __version__)\n return parser", "def setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--url\", dest='url', required=True,\n help=\"Falkonry Edge URL\")\n parser.add_argument(\"-i\", \"--input_file\", dest='input', required=True,\n help=\"Input data file to feed into Falkonry Edge Analyzer\")\n parser.add_argument(\"-o\", \"--output_file\", dest='output', required=True,\n help=\"File name to write Falkonry Edge Analyzer output\")\n parser.add_argument(\"-t\", \"--time_column\", dest='time', type=int, required=True,\n help=\"Time column index starting with 0\")\n parser.add_argument(\"-z\", \"--time_zone\", dest='zone', required=True,\n help=\"Time zone\")\n parser.add_argument(\"-f\", \"--time_format\", dest='format', required=True,\n help=\"Timestamp format\")\n parser.add_argument(\"-e\", \"--entity_column\", dest='entity', type=int,\n help=\"Entity column index starting with 0\")\n parser.add_argument(\"-b\", \"--batch_column\", dest='batch', type=int,\n help=\"Batch column index starting with 0\")\n parser.add_argument(\"-r\", \"--input_feed_rate\", dest='rate', type=int, default=1000,\n help=\"Number of records to send to edge per second.\")\n\n return parser", "def ImportParsers(cls, import_dir):\n sys.path.append(import_dir)\n cls.elf_parser = importlib.import_module(\n \"vts.utils.python.library.elf_parser\")\n cls.vtable_parser = importlib.import_module(\n \"vts.utils.python.library.vtable_parser\")", "def _scrape(self):", "def __init__(self):\n self.parser = RequestParser()\n self.parser.add_argument(\"location\", type=str, required=True,\n help=\"location field is missing\")\n self.parser.add_argument(\"images\", type=str, required=True,\n help=\"Image field is missing\")\n self.parser.add_argument(\"topic\", type=str, required=True,\n help=\"Topic field is missing\")\n self.parser.add_argument(\"happeningOn\", type=str, required=True,\n help=\"Date field is missing\")\n self.parser.add_argument(\"tags\", type=str, required=True,\n help=\"Tags field seems to be missing\")", "def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser", "def parse(self, url):\n pass", "def _parser(self, request, *args, **kwargs):\n\n self.request = request\n\n # parse header\n self.header = {k[5:]: v for k, v in request.META.items() if k.startswith('HTTP_')}\n self.header['CONTENT_TYPE'] = request.META.get('CONTENT_TYPE')\n\n # parse boby\n if request.method not in ['GET', 'HEAD']:\n\n # TODO: serve other body format\n if 'multipart/form-data' in self.header['CONTENT_TYPE']:\n self.body = request.POST.dict()\n\n else:\n # default: application/json\n if self.request.body:\n try:\n self.body = json.loads(self.request.body)\n except Exception as e:\n raise Exception('parse json body error')\n \n # parse query\n self.query = request.GET.dict()\n\n # parse cookie\n self.cookie = {k: v for k, v in request.COOKIES.items()}", "def __init__(self, *args, **kw):\n self.parser = Parser(*args, **kw)", "def __init__(self):\r\n super(TestParser, 
self).__init__([self.TestHandler()])", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the library file (e.g. 'db')\")", "def parser():\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser", "def parse(self, args):\n pass", "def create_basic_parse():\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--path_cover', type=str, required=True,\n help='path to the csv cover file')\n parser.add_argument('-d', '--path_dataset', type=str, required=False,\n help='path to the dataset location, '\n 'if missing in cover', default=None)\n parser.add_argument('-o', '--path_out', type=str, required=True,\n help='path to the output directory')\n parser.add_argument('--unique', dest='unique', action='store_true',\n help='whether each experiment have unique time stamp')\n parser.add_argument('--visual', dest='visual', action='store_true',\n help='whether visualise partial results')\n parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true',\n help='run computation benchmark on the end')\n parser.add_argument('--nb_workers', type=int, required=False, default=1,\n help='number of registration running in parallel')\n return parser", "def import_additional_parser():\n # Parse arguments\n try:\n global add_parser\n import add_parser\n except ImportError as e:\n print('No additional parser found.')\n pass", "def parse():\n parser = argparse.ArgumentParser(\n description='Calculates statistics of sampled data')\n parser.add_argument(\n '--platform', default=None,\n help='the platform to use. \"r\" for reddit and \"s\" for stack overflow. Leave blank to do both')\n parser.add_argument(\n '--rq', default=None,\n help='the research question to answer. 1, 2, or 3', type=int)\n parser.add_argument(\n '--frequency',\n action='store_true',\n help='Compute the frequency distributions of urls, subreddits, and tags (slow)')\n parser.add_argument(\n '--tags',\n action='store_true',\n help='Only compute tags frequency dist. Overrides other options.')\n parser.add_argument(\n '--explain',\n action='store_true',\n help='custom helper. 
Check code not docs.')\n parser.add_argument(\n '--bootstrap',\n type=int,default=None,\n help='use stats bootstrapping')\n parser.add_argument(\n '--sample_num',\n default=None,\n help='select a sample number')\n args = parser.parse_args()\n if args.tags:\n tags_frequency_distribution(\n SampledStackOverflowPost.objects.all())\n tags_frequency_distribution(\n SampledStackOverflowPost.objects.filter(has_wiki_link=True))\n elif args.explain:\n explain()\n else:\n if args.platform is None:\n platforms = ['r', 's', ]\n else:\n platforms = [args.platform]\n if args.rq is None:\n rqs = [1, 2, 3]\n else:\n rqs = [args.rq]\n for platform in platforms:\n for rq in rqs:\n main(platform, rq, args.frequency, args.bootstrap, args.sample_num)", "def setup_parser():\n\n psr_desc=\"cfdi engine service interface\"\n psr_epi=\"select a config profile to specify defaults\"\n\n psr = argparse.ArgumentParser(\n description=psr_desc, epilog=psr_epi)\n\n psr.add_argument('-nmp', action='store_true', dest='nmp',\n help='unique process approach (useful in development)')\n\n psr.add_argument('-d', action='store_true', dest='debug',\n help='print debug information')\n\n psr.add_argument('-c', '--config', action='store',\n dest='config',\n help='load an specific config profile')\n\n psr.add_argument('-p', '--port', action='store',\n dest='port',\n help='launches service on specific port')\n\n return psr.parse_args()", "def generate_parser():\n description = \"%(prog)s -- Data handling, normalization, manipulation, and plotting for HiC and 5C experimental data\"\n epilog = \"For command line options of each command, type: %(prog)s <COMMAND> -h\"\n parser = ap.ArgumentParser(description=description, epilog=epilog)\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s %(version_num)s\" % {'prog':parser.prog, 'version_num':VERSION})\n subparsers = parser.add_subparsers(dest='subcommand')\n\n add_connect_subparser(subparsers)\n add_fragments_subparser(subparsers)\n add_fivecdataset_subparser(subparsers)\n add_fivecproject_subparser(subparsers)\n add_fivecnormalize_subparser(subparsers)\n add_complete_fivec_subparser(subparsers)\n add_fivec_heatmap_subparser(subparsers)\n add_fivec_interval_subparser(subparsers)\n add_fivec_combine_replicates_subparser(subparsers)\n add_fends_subparser(subparsers)\n add_hicdataset_subparser(subparsers)\n add_hicproject_subparser(subparsers)\n add_hicnormalize_subparser(subparsers)\n add_complete_hic_subparser(subparsers)\n add_hic_heatmap_subparser(subparsers)\n add_hic_mrheatmap_subparser(subparsers)\n add_hic_interval_subparser(subparsers)\n add_hic_combine_replicates_subparser(subparsers)\n add_quasar_subparser(subparsers)\n return parser", "def _parser(s, remainder):\n parser = _ArgumentParser()\n def run():\n return s.parse(parser, remainder)\n parser.run = run\n return parser", "def get_parser():\n _program_name = Path(__file__).stem\n example = f''' Example: >> {_program_name} sample.odb\\n '''\n parser = ArgumentParser(description=__doc__.split('..')[0], # Don't include module author part of doc string\n formatter_class=ArgumentDefaultsHelpFormatter, epilog=example, prog=_program_name)\n parser.add_argument(nargs=1,\n dest='input_file',\n type=str,\n help='odb or odbreport file for extracting data',\n metavar='sample.odb')\n parser.add_argument('-o', '--output-file',\n dest='output_file',\n type=str,\n help='file for printing output',\n metavar='sample.h5')\n parser.add_argument('-f', '--output-file-type',\n dest='output_type',\n choices=['yaml', 'json', 
'h5'],\n type=str,\n default='h5',\n help='Type of file in which to store output data',\n metavar='h5')\n parser.add_argument('-r', '--odb-report-args',\n dest='odb_report_args',\n type=str,\n help='Arguments to give to the odbreport command. Require the ``option=value`` interface style.',\n metavar='\"step=step1 results\"')\n parser.add_argument('-a', '--abaqus-command',\n dest='abaqus_command',\n type=str,\n default=_settings._default_abaqus_command,\n help='Abaqus command to use',\n metavar='/path/to/abaqus')\n parser.add_argument('-d', '--delete-report-file',\n action=\"store_true\",\n dest='delete_report_file',\n default=False,\n help='Delete after parsing the file created by the odbreport command')\n parser.add_argument('-v', '--verbose',\n action=\"store_true\",\n dest='verbose',\n default=False,\n help='Print all messages')\n return parser", "def common_arg_parser():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--log_dir', type=str, default='logs/', help='root folder to save experimental logs.')\n parser.add_argument('--game_name', type=str, default='', help='run one game only.')\n parser.add_argument('--mode', type=str, help='specify which code to run.')\n parser.add_argument('--gpu_ids', default=[0, 1, 2, 3], nargs='+', help='gpu ids to run different games')\n parser.add_argument('--game_groups', default=[0, 1, 2, 3], nargs='+', help='game groups to run')\n parser.add_argument('--seed', type=int, default='0', help='random seeds for those games')\n parser.add_argument('--routine_ablation', type=str, default=\"\", help='Name of the ablated routines.')\n return parser", "def setUp(self):\n self.parser = echo.create_parser()", "def setUp(self):\n self.parser = echo.create_parser()", "def setup_parser():\n PARSER = argparse.ArgumentParser(description='Running GSI')\n\n PARSER.add_argument('analysis_datetime', type=str, help=\"analysis_datetime\")\n PARSER.add_argument('gsi_dir', type=str, help=\"gsi_dir\")\n PARSER.add_argument('gsi_processor', type=int, help=\"gsi_processor\")\n PARSER.add_argument('cycle_interval', type=int, help=\"cycle_interval\")\n PARSER.add_argument('model_vertical_level', type=int, help=\"model_vertical_level\")\n PARSER.add_argument('background_data', type=str, help=\"background_data\")\n PARSER.add_argument('crtm_root', type=str, help=\"crtm_root\")\n PARSER.add_argument('gsi_root', type=str, help=\"gsi_root\")\n \n PARSER.add_argument('--f_prepbufr', type=str, dest=\"f_prepbufr\", default='')\n PARSER.add_argument('--f_1bamua', type=str, dest=\"f_1bamua\", default='')\n PARSER.add_argument('--f_1bhrs4', type=str, dest=\"f_1bhrs4\", default='')\n PARSER.add_argument('--f_1bmhs', type=str, dest=\"f_1bmhs\", default='')\n PARSER.add_argument('--f_gpsro', type=str, dest=\"f_gpsro\", default='')\n PARSER.add_argument('--f_radwnd', type=str, dest=\"f_radwnd\", default='')\n PARSER.add_argument('--f_refInGSI', type=str, dest=\"f_refInGSI\", default='')\n PARSER.add_argument('--model_core', type=str, dest=\"model_core\", default='ARW')\n PARSER.add_argument('--cv_option', type=str, dest=\"cv_option\", default='NAM')\n PARSER.add_argument('--computing_platform', type=str, dest=\"computing_platform\", default='LINUX_PBS')\n PARSER.add_argument('--new_run', type=str, dest=\"new_run\", default='True')\n PARSER.add_argument('--outer_loop', type=int, dest=\"outer_loop\", default=2)\n PARSER.add_argument('--inner_loop', type=int, dest=\"inner_loop\", default=50)\n PARSER.add_argument('--if_clean', 
type=str, dest=\"if_clean\", default='no')\n\n '''\n python Main_Script.py 2017082112 /mnt/WRF/gsi_test/practice_11 4 1 50 /mnt/WRF/wrf_1FMTHf/wrfinput_d01 /opt/miniconda2/envs/wrf/crtm-2.2.3/CRTM_2.2.3 /opt/miniconda2/envs/wrf/comGSIv3.5_EnKFv1.1 --f_prepbufr /opt/miniconda2/envs/wrf/bufr_stuff/bin/test.bufr\n return PARSER.parse_args(['2017082112', '/home/szhang/gsi_directory/practice_10', \n 4, 1, 50,\n '/home/szhang/gsi_directory/practice_10/background_data', \n '/home/szhang/gsi_directory/practice_10/crtm_root', \n '/home/szhang/gsi_directory/practice_10/gsi_root', \n '--f_prepbufr', '/home/szhang/gsi_directory/practice_10/f_prepbufr'])\n '''\n return PARSER.parse_args()", "def get_parser():\n\tparser = argparse.ArgumentParser(description=\"Twitter Searcher\")\n\tparser.add_argument(\"-q\",\n\t\t\t\t\t\t\"--query\",\n\t\t\t\t\t\tdest=\"query\",\n\t\t\t\t\t\thelp=\"Query/Filter\",\n\t\t\t\t\t\tdefault='*')\n\tparser.add_argument(\"-d\",\n\t\t\t\t\t\"--data-dir\",\n\t\t\t\t\tdest=\"city\",\n\t\t\t\t\thelp=\"Output/Data Directory\")\n\treturn parser", "def haiku_string_parser():\n pass", "def get_argument_parser(self):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n fetch_parser = subparsers.add_parser('fetch', help='fetches and displays a release from discogs')\n fetch_parser.add_argument('discogs_id', help='the ID of the release')\n rip_parser = subparsers.add_parser('rip', help='rips the current CD to WAV')\n rip_parser.add_argument('--destination', help='optional destination for the CD rip')\n search_parser = subparsers.add_parser(\n 'search',\n prog='search',\n help='performs a very simple search on discogs')\n search_parser.add_argument('term', help='the term to search for')\n encode_parser = subparsers.add_parser(\n 'encode', help='Encodes a CD or a set of WAV files to mp3.')\n encode_parser.add_argument(\n 'encoding_from', choices=['cd', 'wav'], help='The source to encode from.')\n encode_parser.add_argument(\n 'encoding_to', choices=['mp3', 'flac'], help='The destination to encode to.')\n encode_parser.add_argument(\n '--source', help='The destination of the source wav file. This can be a file or directory.')\n encode_parser.add_argument(\n '--destination', help='The destination of the resulting mp3 or flac. This can be a file or directory.')\n encode_parser.add_argument(\n '--keep-source', action='store_true', help='If encoding from wav, use this to keep the original wav being removed.')\n encode_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n encode_parser.add_argument(\n '--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the encoded files.')\n decode_parser = subparsers.add_parser('decode', help='Decodes a set of FLAC or MP3 files to WAV.')\n decode_parser.add_argument(\n 'decode_from', choices=['flac', 'mp3'], help='The source to decode from.')\n decode_parser.add_argument(\n '--source', help='The destination of the source file. This can be a file or directory.')\n decode_parser.add_argument(\n '--destination', help='The destination of the resulting wav. This can be a file or directory.')\n tag_parser = subparsers.add_parser('tag', help='Tags an audio file')\n tag_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The tagging action to be performed. 
A tag can be added or removed.')\n tag_parser.add_argument(\n 'format', choices=['mp3', 'flac'], help='The file format of the audio file being tagged.')\n tag_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n tag_parser.add_argument(\n '--source',\n help='The source audio files to tag. This can be a file or a directory. If the source is omitted, the files in the current working directory will be used.')\n tag_parser.add_argument('--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the tagged files.')\n tag_parser.add_argument('--artist', help='The artist to use for the tag.')\n tag_parser.add_argument('--album-artist', help='The album artist to use for the tag.')\n tag_parser.add_argument('--album', help='The album to use for the tag.')\n tag_parser.add_argument('--title', help='The title to use for the tag.')\n tag_parser.add_argument('--year', help='The year to use for the tag.')\n tag_parser.add_argument('--genre', help='The year to use for the tag.')\n tag_parser.add_argument('--track-number', help='The track number to use for the tag.')\n tag_parser.add_argument('--track-total', help='The track total to use for the tag.')\n tag_parser.add_argument('--disc-number', help='The disc number to use for the tag.')\n tag_parser.add_argument('--disc-total', help='The disc total to use for the tag.')\n tag_parser.add_argument('--comment', help='The comment for the tag.')\n artwork_parser = subparsers.add_parser('artwork', help='adds or removes artwork from a file')\n artwork_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The artwork action to be performed. The artwork can be added or removed.')\n artwork_parser.add_argument(\n 'type', choices=['mp3', 'flac'], help='The type of file to apply the artwork to.')\n artwork_parser.add_argument(\n '--source', help='The destination file or directory to apply the artwork to. If there is no source then any artwork in the current directory will be used.')\n artwork_parser.add_argument(\n '--destination', help='The destination file or directory to apply the artwork to. 
If there is no destination then the current directory will be used.')\n mix_parser = subparsers.add_parser('mix', help='adds a mix')\n mix_parser.add_argument('source', help='the source of the mix')\n mix_parser.add_argument('--artist', help='The artist to use for the tag.')\n mix_parser.add_argument('--album', help='The album to use for the mix.')\n mix_parser.add_argument('--title', help='The title to use for the mix.')\n mix_parser.add_argument('--year', help='The year to use for the mix.')\n mix_parser.add_argument('--comment', help='The comment for the mix.')\n return parser", "def parse(self):\n raise NotImplementedError(\"Parse not specified!\")", "def build_parser(self, parser: ArgumentParser) -> None:", "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--version', action='version',\n version=\"iswitch version \" + get_version())\n\n # Can't require a subparser because of need to maintain\n # backwards compatibility with Python 3.6\n subparsers = parser.add_subparsers(\n dest='command', help='command')\n\n pwd_parser = subparsers.add_parser(\"pwd\",\n help='Print working directory/collection')\n pwd_parser.add_argument('--verbose', '-v', action='store_true', default=False,\n help='Print verbose information for troubleshooting')\n\n cd_parser = subparsers.add_parser(\"cd\",\n help='Change working directory/collection')\n cd_parser.add_argument('--verbose', '-v', action='store_true', default=False,\n help='Print verbose information for troubleshooting')\n cd_parser.add_argument('directory', default=None, nargs='?',\n help='Directory to change to')\n\n ls_parser = subparsers.add_parser(\"ls\",\n help='List collections or data objects')\n ls_parser.add_argument('--verbose', '-v', action='store_true', default=False,\n help='Print verbose information for troubleshooting')\n ls_parser.add_argument('queries', default=None, nargs='*',\n help='Collection, data object or data object wildcard')\n ls_parser.add_argument(\"-m\", \"--format\", dest='format', default='plain',\n help=\"Output format\", choices=['plain', 'json', 'csv', \"yaml\"])\n ls_parser.add_argument(\"-s\", \"--sort\", dest=\"sort\", default='name',\n help=\"Propery to use for sorting\", choices=['name', 'ext', 'size', 'date', \"unsorted\"])\n ls_parser.add_argument(\"-H\", \"--hr-size\", default='default', dest=\"hrsize\",\n help=\"Whether to print human-readable sizes [yes,no,default].\" +\n \"By default, enable human-readable for text output, disable for other formats.\",\n choices=['default', 'yes', 'no'])\n ls_parser.add_argument('--recursive', '-r', action='store_true', default=False,\n help='Include contents of subcollections')\n ls_parser.add_argument('-l', action='store_true', default=False,\n help='Display replicas with size, resource, owner, date')\n ls_parser.add_argument('-L', action='store_true', default=False,\n help='like -l, but also display checksum and physical path')\n\n help_hrs = \" (you can optionally use human-readable sizes, like \\\"2g\\\" for 2 gigabytes)\"\n find_parser = subparsers.add_parser(\"find\",\n help='Find data objects by property')\n find_parser.add_argument('--verbose', '-v', action='store_true', default=False,\n help='Print verbose information for troubleshooting')\n find_parser.add_argument('queries', default=None, nargs='*',\n help='Collection, data object or data object wildcard')\n find_parser.add_argument('--print0', '-0', action='store_true', default=False,\n help='Use 0 byte delimiters between results')\n find_parser.add_argument(\n 
\"--dname\",\n help=\"Wildcard filter for data object name\")\n find_parser.add_argument(\n \"--owner-name\",\n help=\"Filter for data object owner name (excluding zone)\")\n find_parser.add_argument(\"--owner-zone\",\n help=\"Filter for data object owner zone\")\n find_parser.add_argument(\"--resc-name\",\n help=\"Filter for data object resource\")\n find_parser.add_argument(\n \"--minsize\",\n help=\"Filter for minimum data object size\" +\n help_hrs)\n find_parser.add_argument(\n \"--maxsize\",\n help=\"Filter for maximum data object size\" +\n help_hrs)\n find_parser.add_argument(\n \"--size\",\n help=\"Filter for (exact) data object size\" +\n help_hrs)\n\n if len(sys.argv) == 1:\n parser.print_help()\n parser.exit()\n\n return vars(parser.parse_args())", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. -qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def _parse(self, cleaner=None):\n self.num_results = 0\n self._parse_lxml(cleaner)\n\n # try to parse the number of results.\n attr_name = self.searchtype + '_search_selectors'\n selector_dict = getattr(self, attr_name, None)\n\n # get the appropriate css selectors for the num_results for the keyword\n num_results_selector = getattr(\n self,\n 'num_results_search_selectors',\n None\n )\n\n self.num_results_for_query = self.first_match(\n num_results_selector,\n self.dom\n )\n if not self.num_results_for_query:\n logger.debug(''''{}: Cannot parse num_results from serp page\n with selectors {}\n '''.format(self.__class__.__name__, num_results_selector))\n\n # get the current page we are at.\n try:\n self.page_number = int(\n self.first_match(self.page_number_selectors, self.dom)\n )\n except ValueError:\n self.page_number = -1\n\n # let's see if the search query was shitty (no results for that query)\n self.effective_query = self.first_match(\n self.effective_query_selector,\n self.dom\n )\n if self.effective_query:\n logger.debug('''{}: There was no search hit for the search query.\n Search engine used {} instead.\n '''.format(self.__class__.__name__, self.effective_query))\n else:\n self.effective_query = ''\n\n # the element that notifies the user about no results.\n self.no_results_text = self.first_match(\n self.no_results_selector,\n self.dom\n )\n\n # get the stuff that is of interest in SERP pages.\n if not selector_dict and not isinstance(selector_dict, dict):\n raise Exception('''There is no such attribute: {}. No selectors found\n '''.format(attr_name))\n\n for result_type, selector_class in selector_dict.items():\n # might be \"key:ads_main, value:{us_ip: {...}, de_ip: {...}}\"\n # for example, one iteration of this loop would handle all the \"ads_main\" items\n self.search_results[result_type] = []\n self.related_keywords[result_type] = []\n\n for _, selectors in selector_class.items():\n # each key will be \"us_ip, de_ip, etc\"\n # each value (selectors) is yet another dict, the key is name of selector (e.g. 
\"container\")\n # and the values in \"selectors\" is the actual css selector (e.g. \"#center_col\")\n\n # this means the us_ip selectors AND the de_ip selectors will be used, but duplicates are not logged\n if 'result_container' in selectors and selectors['result_container']:\n css = '{container} {result_container}'.format(**selectors)\n else:\n css = selectors['container']\n results = self.dom.xpath(\n self.css_to_xpath(css)\n )\n\n to_extract = set(selectors.keys()) - {'container', 'result_container'}\n selectors_to_use = {key: selectors[key] for key in to_extract if key in selectors.keys()}\n\n # if you skip an item for a real reason (e.g. prerender links)\n # then increment num_rightfully_skipped to avoid messing up rank calcs\n num_rightfully_skipped = 0\n\n for index, result in enumerate(results):\n # Let's add primitive support for CSS3 pseudo selectors\n serp_result = {}\n # key are for example 'link', 'snippet', 'visible-url', ...\n # selector is the selector to grab these items\n for key, selector in selectors_to_use.items():\n serp_result[key] = self.advanced_css(selector, result)\n\n # # skip prerender links\n # has_prerender = self.advanced_css('link::attr(rel)', result)\n # if has_prerender == 'prerender':\n # num_rightfully_skipped += 1\n # continue\n \n\n # only add items that have not None links.\n # Avoid duplicates. Detect them by the link.\n # If statement below: Lazy evaluation.\n # The more probable case first.\n found_container = False\n serp_result['rank'] = index + 1 - num_rightfully_skipped\n for key in ['isTweetCarousel', 'isMapsPlaces', 'isMapsLocations', 'isNewsCarousel', 'isKnowledgeBox']:\n if serp_result.get(key):\n serp_result[key] = True\n found_container = True\n if serp_result.get('isKnowledgeBox'):\n all_content = self.advanced_css('*', result)\n serp_result['misc'] = all_content\n if (\n found_container\n ) or (\n 'link' in serp_result and serp_result['link'] and\n not [e for e in self.search_results[result_type]\n if e['link'] == serp_result['link'] and not e.get('isKnowledgeBox')]\n ) or (\n result_type in [\n 'knowledge_panel', 'tweets',\n 'maps_places', 'maps_locations',\n ] or serp_result.get('isKnowledgeBox')\n ):\n self.search_results[result_type].append(serp_result)\n self.num_results += 1\n elif 'keyword' in serp_result and serp_result['keyword']:\n self.related_keywords[result_type].append(serp_result)\n else:\n num_rightfully_skipped += 1", "def extended_parse(self):\n\t\t## Do the initial parsing\n\t\tself.parse()\n\n\t\t## First, cycle through the hosts, and append hostgroup information\n\t\tindex = 0\n\t\tfor host in self.data['all_host']:\n\t\t\tif host.has_key('register') and host['register'] == '0': continue\n\t\t\tif not host.has_key('host_name'): continue\n\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t## Append any hostgroups that are directly listed in the host definition\n\t\t\tif host.has_key('hostgroups'):\n\t\t\t\tfor hostgroup_name in self._get_list(host, 'hostgroups'):\n\t\t\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\t\t\t\t\tif hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)\n\n\t\t\t## Append any services which reference this host\n\t\t\tservice_list = []\n\t\t\tfor service in 
self.data['all_service']:\n\t\t\t\tif service.has_key('register') and service['register'] == '0': continue\n\t\t\t\tif not service.has_key('service_description'): continue\n\t\t\t\tif host['host_name'] in self._get_active_hosts(service):\n\t\t\t\t\tservice_list.append(service['service_description'])\n\t\t\tself.data['all_host'][index]['meta']['service_list'] = service_list\n\t\t\t\t\t\n\n\t\t\t## Increment count\n\t\t\tindex += 1\n\n\t\t## Loop through all hostgroups, appending them to their respective hosts\n\t\tfor hostgroup in self.data['all_hostgroup']:\n\n\t\t\tfor member in self._get_list(hostgroup,'members'):\n\t\t\t\tindex = 0\n\t\t\t\tfor host in self.data['all_host']:\n\t\t\t\t\tif not host.has_key('host_name'): continue\n\n\t\t\t\t\t## Skip members that do not match\n\t\t\t\t\tif host['host_name'] == member:\n\n\t\t\t\t\t\t## Create the meta var if it doesn' exist\n\t\t\t\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t\t\t\tif hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])\n\n\t\t\t\t\t## Increment count\n\t\t\t\t\tindex += 1\n\n\t\t## Expand service membership\n\t\tindex = 0\n\t\tfor service in self.data['all_service']:\n\t\t\tservice_members = []\n\n\t\t\t## Find a list of hosts to negate from the final list\n\t\t\tself.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)\n\n\t\t\t## Increment count\n\t\t\tindex += 1", "def init_parser():\n parser = OptionParser()\n parser.add_option(\"-n\", \"--interactive\", action=\"store_true\", help=\"run in interactive (non-daemon) mode\")\n parser.add_option(\"-r\", \"--run\", action=\"store_true\", help=\"starts process identified by -app parameter\")\n parser.add_option(\"-k\", \"--kill\", action=\"store_true\", help=\"kill process identified by -app parameter\")\n parser.add_option(\"-a\", \"--app\", action=\"store\", help=\"application to start (process name)\")\n parser.add_option(\"-q\", \"--query\", action=\"store_true\", help=\"query application's state\")\n parser.add_option(\"-i\", \"--install_ve\", action=\"store_true\", help=\"install a virtualenv for the runtime to use\")\n parser.add_option(\"-s\", \"--shell\", action=\"store_true\", help=\"run an ipython shell within the virtualenv\")\n parser.add_option(\"-t\", \"--tests\", action=\"store_true\", help=\"run tests\")\n parser.add_option(\"-x\", \"--xunit\", action=\"store_true\", help=\"run tests with coverage and xunit output for Jenkins\")\n parser.add_option(\"-z\", \"--analyze\", action=\"store_true\", help=\"run pylint on project\")\n parser.add_option(\"-l\", \"--list\", action=\"store_true\", help=\"list available applications\")\n parser.add_option(\"-o\", \"--outfile\", action=\"store\", help=\"save results from a report to a file\")\n return parser", "def mock_parser_fcn(s):", "def arg_parsing():\n\n description = '''\n Parse fb message archive csv to a trainable format or aggregate stats for a given target user.\n '''\n import argparse\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n subparsers = parser.add_subparsers(\n help='Parse fb message archive csv to a trainable format or aggregate stats for a given target user.',\n title='SubCommands', description='Valid SubCommands')\n 
parse_to_deep_qa_args_parsing(subparsers)\n aggregate_stats_for_target_usr_args_parsing(subparsers)\n argu = parser.parse_args()\n return argu", "def _setup_parser():\n parser = argparse.ArgumentParser(add_help=True)\n parser.add_argument('--eval_model', type=str, default=None)\n parser.add_argument('--stack', type=int, default=1)\n parser.add_argument('--flare', action='store_true')\n parser.add_argument('--mixreg', action='store_true')\n\n env_group = parser.add_argument_group(\"Env Args\")\n env_group.add_argument('--env_name', type=str, default=ENV_NAME)\n env_group.add_argument('--num_envs', type=int, default=NUM_ENVS)\n env_group.add_argument('--num_levels', type=int, default=NUM_LEVELS)\n env_group.add_argument('--start_level', type=int, default=START_LEVEL)\n\n agent_group = parser.add_argument_group(\"Agent Args\")\n PPOAgent.add_to_argparse(agent_group)\n\n model_group = parser.add_argument_group(\"Model Args\")\n ImpalaPPO.add_to_argparse(model_group)\n\n return parser", "def parse_from_tree(self, parse):\n pass", "def parser(self, q, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'parser')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def get_parser():\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter,\n prog='pv2')\n subparsers = parser.add_subparsers(dest='cmd')\n # subparsers.add_parser('selfcheck',\n # add_help=False,\n # help=\"Self-check of the sst toolkit.\")\n # parser.add_argument('--version',\n # action='version',\n # version=('sst %s' % str(sst.__version__)))\n subparsers.add_parser('eval',\n add_help=False,\n parents=[evaluate.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Evaluate a single image\"))\n subparsers.add_parser('train',\n add_help=False,\n parents=[train.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Train a new model.\"))\n subparsers.add_parser('plot',\n add_help=False,\n parents=[plot.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Plot summary information.\"))\n return parser", "def test_parse_valid(self):\n mock_scraper = MockCtdScraper()\n scrape_gen = mock_scraper.scrape(TEST_CHUNKSIZE)\n self.parser.parse(next(scrape_gen))", "def get_parser(subparsers):\n parser = subparsers.add_parser('collect', help='Collector server')\n\n parser.add_argument('-d', '--data-dir', dest='data_dir', help='Data store root directory')\n parser.add_argument('-U', '--db-url', dest='db_url', help='Database URL (SQLAlchemy form)',\n default=None)\n parser.add_argument('--verbose', dest='store_verbose', action='store_true',\n help='Make the EventStore to log more verbose output.')\n parser.add_argument('--rdbs-store', dest='rdbs_store', action='store_true',\n help='Use RDBS EventStore instead of NaiveEventStore.' +\n 'The RDBS store keeps the events in a relational database.')\n parser.add_argument('--live', dest='live_mode', action='store_true',\n help='Run the collector in live (non persistent) mode. 
Events will not be stored.')\n\n return parser", "def run_parsing(self):\n\n if self.version:\n print(f'\"{VERSION}\"')\n return VERSION\n elif self.limit is not None and self.limit <= 0:\n print(\"Limit must be greater than 0!\")\n return \"Limit must be greater than 0!\"\n elif self.date:\n if len(str(self.date)) != 8:\n print(\"Wrong date format!\")\n else:\n self.print_if_verbose(\n f\"Method 'run_parsing' is working: \\n\"\n f\"'run_parsing' method calls 'get_content_from_cache' method: \\n\")\n self.rss_feed = self.get_content_from_cache()\n if self.rss_feed:\n if self.json:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_json_content' method: \\n\")\n self.print_json_content(self.rss_feed)\n else:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_content_from_cache' method: \\n\")\n self.print_content_from_cache(self.rss_feed)\n\n if self.to_html_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_html' method: \\n\")\n self.save_to_html(self.rss_feed)\n\n if self.to_fb2_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_fb2' method: \\n\")\n self.save_to_fb2(self.rss_feed)\n\n else:\n self.print_if_verbose(\n f\"Method 'run_parsing' is working: \\n\"\n f\"'run_parsing' method calls 'get_content' method: \\n\")\n self.content = self.get_content()\n\n if self.content:\n self.print_if_verbose(f\"'run_parsing' method calls 'process_content' method: \\n\")\n self.rss_feed = self.process_content(self.content)\n\n self.print_if_verbose(f\"'run_parsing' method calls 'save_news_to_cache' method: \\n\")\n self.save_news_to_cache(self.rss_feed)\n\n if self.json:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_json_content' method: \\n\")\n self.print_json_content(self.rss_feed)\n else:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_content' method: \\n\")\n self.print_content(self.rss_feed)\n\n if self.to_html_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_html' method: \\n\")\n self.save_to_html(self.rss_feed)\n\n if self.to_fb2_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_fb2' method: \\n\")\n self.save_to_fb2(self.rss_feed)\n\n self.print_if_verbose(f\"Program execution completed!\")\n\n return \"Program execution completed!\"", "def parser ( self ):\n return self._parser", "def func_PARSE(self):\n self.parsed_url = parse.urlparse(\"http://{0}:{1}{2}\".format(args.HTTP_HOST, args.HTTP_PORT, self.path).lower())\n self.parsed_param = parse.parse_qs(self.parsed_url[4])", "def test_parse(self): \n\n results = self.parser.parse()\n self.assertEqual(results, test_case_data['parse_output'])", "def parse(self): \n pass", "def parseOptions(self):\n\n\t\tparser = OptionParser()\n parser.add_option(\n \"-u\",\n \"--user\",\n dest=\"user\",\n help=\"enter a user or 'all'\"\n )\n\n parser.add_option(\n \"-p\",\n \"--projects\",\n dest=\"projects\",\n help=\"enter a project or 'all'\"\n )\n (self.options, self.args) = parser.parse_args()", "def parser(*args, **kwargs):\n return NotImplementedError", "def parse(self) -> None:\n pass", "def parser():\n \n \n parser = ap.ArgumentParser(description='Parsing some file names in various forms')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-f','--filepaths',dest='filepaths',metavar='PATH1,PATH2,...',type=str,\n help='Input a string or list of strings. ex. 
'\n '\"C:\\\\Users\\\\Matt\\\\Desktop\\\\FireEyeCodingSample\\\\learn.txt\",'\n '\"foo\\\\bar\\\\fudge\"''Note: On Unix systems, you need quotes or you need to escape your escape characters.'\n 'Otherwise, your resultant tokens may be incorrect.')\n group.add_argument('-i','--input',dest='file',metavar='TEXTFILE',type=str,\n help='Input a valid file path to a text file of '\n 'paths. ex. '\n '\"C:\\\\Users\\\\Matt\\\\Desktop\\\\FireEyeCodingSample\\\\learn.txt\"')\n \n \n args = parser.parse_args()\n\n if args.file and not os.path.isfile(args.file):\n raise OSError('This file does not exist - please input a valid file path')\n \n return args", "def get_first_available_parser():\n if sys.platform == 'cli':\n try:\n from bridge.parser.bridge_dotnet import Parser\n return Parser\n except ImportError:\n pass\n elif sys.platform[:4] == 'java':\n try:\n from bridge.parser.bridge_java import Parser\n return Parser\n except ImportError:\n pass\n \n from bridge.parser.bridge_default import Parser\n \n return Parser" ]
[ "0.67136407", "0.64401156", "0.63453275", "0.62447006", "0.6202729", "0.61242324", "0.61228955", "0.6109933", "0.60615706", "0.60477245", "0.60369617", "0.6022127", "0.5999023", "0.59757316", "0.594359", "0.5933972", "0.5901211", "0.5888715", "0.5855673", "0.584835", "0.58434206", "0.5835599", "0.58333915", "0.5823001", "0.58172643", "0.5811238", "0.5811238", "0.5811238", "0.5811238", "0.57858247", "0.57849103", "0.5783757", "0.57663697", "0.57532525", "0.57504904", "0.57388043", "0.57353586", "0.57321817", "0.573194", "0.5729684", "0.57083803", "0.56951946", "0.5684494", "0.56614417", "0.5655278", "0.5640913", "0.5620748", "0.561934", "0.561934", "0.5611568", "0.56082374", "0.5607097", "0.557305", "0.5568087", "0.5567446", "0.5566886", "0.5552377", "0.55496335", "0.5541994", "0.5536724", "0.55278456", "0.55271316", "0.5519984", "0.55085653", "0.5506521", "0.55049026", "0.54924697", "0.54907185", "0.54822415", "0.5481964", "0.5481964", "0.5476497", "0.5475554", "0.5473654", "0.5473421", "0.54654795", "0.5453522", "0.544699", "0.54461753", "0.5442354", "0.5433491", "0.5425148", "0.54189265", "0.5418169", "0.5417165", "0.5410269", "0.5408461", "0.54072446", "0.54040366", "0.5402341", "0.5399568", "0.53917813", "0.53586155", "0.5357924", "0.53549683", "0.5354343", "0.53529954", "0.53513336", "0.5349722", "0.53447235" ]
0.56508
45
These are options that only make sense for actual harvesters, such as ARM. They do not make sense for utilities that do not perform harvesting, such as the sitemap checker.
def add_harvesting_options(parser, id):
    help = (
        "Ignore the last harvest time. Use this switch to attempt to "
        "harvest records that may have failed for some reason on a recent "
        "harvest attempt. The regex option may also be useful here."
    )
    parser.add_argument('--ignore-harvest-time', action='store_true', help=help)

    help = (
        "Retrieve and process all records, but do not attempt to harvest "
        "the records to GMN."
    )
    parser.add_argument('--no-harvest', action='store_true', help=help)

    help = "Retry a failed record this number of times."
    parser.add_argument('--retry', type=int, default=1, help=help)

    help = "Harvest records to this DataOne member node."
    parser.add_argument('--host', default='localhost', help=help)

    help = "DataOne member node SSL port."
    parser.add_argument('--port', default=443, type=int, help=help)

    help = 'Path to dataone client-side certificate.'
    parser.add_argument('--certificate', default=None, help=help)

    help = 'Path to dataone host client-side key.'
    parser.add_argument('--private-key', default=None, help=help)

    parser.description = f"Harvest metadata from {id.upper()}."

    parser.epilog = (
        "Not supplying an argument to both the certificate and key arguments "
        "will disable client side authentication."
    )
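A minimal usage sketch for the add_harvesting_options helper above, assuming the function is defined or imported in the same module; the harvester id "arm", the host name, and the certificate paths are illustrative placeholders rather than values taken from the dataset.

import argparse

# Assumes add_harvesting_options (defined above) is in scope.
parser = argparse.ArgumentParser()
add_harvesting_options(parser, "arm")

# Hypothetical invocation: harvest records to a DataONE member node.
args = parser.parse_args([
    "--host", "gmn.example.org",
    "--certificate", "/etc/dataone/client.crt",
    "--private-key", "/etc/dataone/client.key",
    "--retry", "3",
])
print(args.host, args.port, args.retry, args.ignore_harvest_time)

Because the harvesting flags are attached by a helper rather than hard-coded into each command line tool, they stay out of utilities that do not harvest, such as the sitemap checker mentioned above.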
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def options(self) -> list[str]:\n return [BYPASS, ARMED]", "def options(self): # pragma: no cover\r\n return ''", "def getServerOptions(self):\n pass", "def _options(self):\n return", "def __options(self):\n\t\ta = 1 if self.random else 0\n\t\tb = 2 if self.topoftheday else 0\n\t\tc = 4 if self.offline else 0\n\t\treturn a+b+c", "def scanOptions(self, options):", "def options(self, parser, env):\n pass", "def list_opts():\n return [('ironic_lib', utils_opts)]", "def _additional_option(self):\n pass", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def getRouterOptions(self):\n pass", "def options(self, parser):\n pass", "def supported_options(self):\n return ['person','filename','speed','pitch','volume']", "def setDefaultOpts(self):\n\t\tself.getopt.s = ['h']\n\t\tself.getopt.l = ['help']\n\t\tself.getopt.s.extend([('x:', 'screen')])\n\t\tself.getopt.l.extend([('xml=', 'screen')])\n\t\treturn", "def _rsp_options(self, tool: T.Union['Compiler', 'StaticLinker', 'DynamicLinker']) -> T.Dict[str, T.Union[bool, RSPFileSyntax]]:\n options = {'rspable': tool.can_linker_accept_rsp()}\n if options['rspable']:\n options['rspfile_quote_style'] = tool.rsp_file_syntax()\n return options", "def SupportsOptions(self, browser_options):\n raise NotImplementedError()", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def help_opt(self):\n print(OPTIONS)", "def options(self, *args, **kwargs):\n self.request(\"options\", *args, **kwargs)", "def get_options(self):\n return []", "def optionHelp(self):\n return {}", "def _get_default_options():\n return {\n \"library_folders\": [],\n \"verbose\": False,\n \"check_balanced\": True,\n \"mtime_check\": True,\n \"cache\": False,\n \"codegen\": False,\n \"expand_mx\": False,\n \"unroll_loops\": True,\n \"inline_functions\": True,\n \"expand_vectors\": False,\n \"resolve_parameter_values\": False,\n \"replace_parameter_expressions\": False,\n \"replace_constant_expressions\": False,\n \"eliminate_constant_assignments\": False,\n \"replace_parameter_values\": False,\n \"replace_constant_values\": False,\n \"eliminable_variable_expression\": None,\n \"factor_and_simplify_equations\": False,\n \"detect_aliases\": False,\n \"allow_derivative_aliases\": True,\n \"reduce_affine_expression\": False,\n }", "def option_user_agent(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionUserAgent/')))", "def default_capabilities(self):", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def set_options(self, **options):\n self.source = options['source'] or settings.MEDIA_ROOT\n self.container = options['container'] or ls.AZURE_DEFAULT_CONTAINER\n self.verbosity = int(options.get('verbosity', 1))\n ignore_patterns = options['ignore_patterns']\n if options['use_default_ignore_patterns']:\n ignore_patterns += ['.*', '*~']\n self.ignore_patterns = list(set(ignore_patterns))\n self.dir = options['dir']", "def _usage_options_example(self):\n pass", "def initialize_options(self):", "def allowOption(path=None, **kwa):\n return {}", "def 
test_error_html_using_options(self):\n pass", "def list_opts():\n return [(constants.MLNX_BAREMETAL_DRIVER_GROUP_NAME, DRIVER_OPTS)]", "def setRouterOptions(self, options):\n pass", "def options(self, parser, env):\n super(ReportPortalPlugin, self).options(parser, env)\n parser.add_option('--rp-config-file',\n action='store',\n default=env.get('NOSE_RP_CONFIG_FILE'),\n dest='rp_config',\n help='config file path')\n\n parser.add_option('--rp-launch',\n action='store',\n default=None,\n dest='rp_launch',\n help='postfix of launch name in report portal')\n\n parser.add_option('--rp-mode',\n action='store',\n default=\"DEFAULT\",\n dest='rp_mode',\n help='level of logging')\n\n parser.add_option('--rp-launch-description',\n action='store',\n default=\"\",\n dest='rp_launch_description',\n help='description of a launch')\n\n parser.add_option('--ignore-loggers',\n action='store',\n default=[],\n dest='ignore_loggers',\n help='logger filter')", "def options(self):\n return self.__options", "def option_skip_url_string(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionSkipURLString/')))", "def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n options.update_options(\n circuit_order=\"RIRIRI\",\n )\n return options", "def setup(self, optparser):\n\t\tpass", "def browser_options():\n opts = Options()\n opts.add_argument('--disable-dev-shm-usage')\n if settings.SELENIUM_DEBUG: opts.add_argument('--auto-open-devtools-for-tabs')\n if settings.USER_AGENT != 'default': opts.add_argument(f'user-agent={settings.USER_AGENT}')\n # Fallback, falls Chrome Installation in Program Files installiert ist\n if settings.CHROME_PATH: opts.binary_location = settings.CHROME_PATH\n if os.environ.get('DOCKER_ENV'):\n opts.add_argument('--no-sandbox')\n return opts", "def test_options_flags(self):\n opts = []\n\n # Handle \"--failed\" as a special case: we want to re-run only\n # the tests that failed within our Django apps\n # This sets the --last-failed flag for the pytest command, so this\n # functionality is the same as described in the pytest documentation\n if self.failed_only:\n opts.append(\"--last-failed\")\n\n # This makes it so we use pytest's fail-fast feature in two cases.\n # Case 1: --fail-fast is passed as an arg in the paver command\n # Case 2: The environment variable TESTS_FAIL_FAST is set as True\n env_fail_fast_set = (\n 'TESTS_FAIL_FAST' in os.environ and os.environ['TEST_FAIL_FAST']\n )\n\n if self.fail_fast or env_fail_fast_set:\n opts.append(\"--exitfirst\")\n\n if self.with_wtw:\n opts.extend([\n '--wtw',\n f'{COVERAGE_CACHE_BASEPATH}/{WHO_TESTS_WHAT_DIFF}',\n '--wtwdb',\n f'{COVERAGE_CACHE_BASEPATH}/{COVERAGE_CACHE_BASELINE}'\n ])\n\n return opts", "async def get_options(self):", "def parseOptions():\n \n parser=op.OptionParser(usage=\"Usage %prog SERVER\"\n ,version=\"%prog 1.0\",description=\"Sets up wordpress.\"\n +\"SERVER is the base url for the server, this should be your domain name \"\n +\"which points to your machine's IP, or your machine's IP if you don't have \"\n +\"a domain name. 
This script should probably be run with sudo as it will \"\n +\"likely have to edit and read files which aren't editable or perhaps \"\n +\"not even readable by standard users.\")\n \n parser.add_option(\"--dry-run\",dest=\"dryRun\",action=\"store_true\",default=False\n ,help=\"If set will not actually do anything, only print out what it would \"\n +\"have done [not default]\")\n return parser.parse_args()", "def build_options(self):\n opts = [\n \"-k rpm.rpmva=off\",\n \"-k apache.log=True\",\n ]\n\n sensitive_keys = {\n self._engine_plugin: 'sensitive_keys',\n 'ovirt_engine_dwh': 'dwh_sensitive_keys',\n }\n if self.configuration['include_sensitive_data']:\n for plugin in sensitive_keys:\n self.configuration[sensitive_keys[plugin]] = ':'\n\n for plugin in sensitive_keys:\n if self.configuration.get(sensitive_keys[plugin]):\n opts.append(\n '-k {plugin}.sensitive_keys={keys}'.format(\n plugin=plugin,\n keys=self.configuration.get(sensitive_keys[plugin]),\n )\n )\n\n if self.configuration.get(\"ticket_number\"):\n opts.append(\n \"--ticket-number=%s\" % self.configuration.get(\"ticket_number\")\n )\n\n if self.sos_version < '30':\n opts.append('--report')\n\n if self.configuration.get(\"log_size\"):\n opts.append(\n \"--log-size=%s\" %\n self.configuration.get('log_size')\n )\n else:\n if self.sos_version < '30':\n opts.append('--report')\n opts.append(\"-k general.all_logs=True\")\n elif self.sos_version < '32':\n opts.append(\"-k logs.all_logs=True\")\n else:\n opts.append(\"--all-logs\")\n\n if self.configuration.get(\"upload\"):\n opts.append(\"--upload=%s\" % self.configuration.get(\"upload\"))\n return \" \".join(opts)", "def options(self):\r\n return self._options", "def generate_options(self):\n super(MachineLookup, self).generate_options()\n options = [\"generic_filters\", \"meta_filters\"]\n for option in self.command_options:\n if option['dest'] in options:\n option['action'] = \"append\"", "def linkOptions(self):\n linker_options = [x.text if x.text is not None else \"\" for x in\n self.subdoc.find(\"link-options\").findall(\"option\")]\n if not ld_option_verifier.verify(linker_options):\n env.error(u\"Linker option verification \"\n \"failed for bundle {} ({})\".format(\n self.input,\n ld_option_verifier.error_msg))\n if linker_options.count(\"-execute\") != 0:\n self.is_executable = True\n\n # make sure linker has a none zero version min for watchos.\n try:\n # check watchos version.\n version_min = linker_options.index(\"-watchos_version_min\")\n # if valid version min location, check if it is 0.0\n if version_min < (len(linker_options) - 1) and linker_options[version_min + 1] == \"0.0.0\":\n # write a default watchos version.\n if self.is_translate_watchos:\n linker_options[version_min + 1] = \"5.0.0\"\n else:\n linker_options[version_min + 1] = \"2.0.0\"\n self.deployment_target = linker_options[version_min + 1]\n except ValueError:\n # if watchos is not specified during translate, add default deployment target.\n if self.is_translate_watchos:\n linker_options.extend([\"-watchos_version_min\", \"5.0.0\"])\n\n if self.platform is not None and self.platform != \"Unknown\":\n linker_options.extend([\"-syslibroot\", env.getSDK()])\n if self.sdk_version is not None and self.sdk_version != \"NA\":\n linker_options.extend([\"-sdk_version\", self.sdk_version])\n return linker_options", "def options(self):\n self.match = None\n self.endpoint = self.EndpointURI()\n return super(MatchResource, self).options()", "def initialize_options(self):\n pass", "def initialize_options(self):\n 
pass", "def _modified_option_defaults(self) -> Dict[str, Any]:\n return {\n # Change 'debug.traceback' default to True if debug logging is enabled.\n 'debug.traceback': logging.getLogger('pyocd').isEnabledFor(logging.DEBUG),\n }", "def options(self) -> Mapping[str, str]:\n return pulumi.get(self, \"options\")", "def initialize_options(self):\n pass", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "def requested_config_vals():\n return {'transfer_stats_per_file':'opt'}", "def _set_defaults(self):\n self._opts = {\n \"insecure\": [],\n \"header\": [],\n \"verbose\": [],\n \"nobody\": [],\n \"proxy\": [],\n \"resume\": [],\n \"ctimeout\": [\"--connect-timeout\", str(self.ctimeout)],\n \"timeout\": [\"-m\", str(self.timeout)],\n \"other\": [\"-s\", \"-q\", \"-S\"]\n }\n if self.insecure:\n self._opts[\"insecure\"] = [\"-k\"]\n if Msg().level > Msg.DBG:\n self._opts[\"verbose\"] = [\"-v\"]\n self._files = {\n \"url\": \"\",\n \"error_file\": FileUtil(\"execurl_err\").mktmp(),\n \"output_file\": FileUtil(\"execurl_out\").mktmp(),\n \"header_file\": FileUtil(\"execurl_hdr\").mktmp()\n }", "def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options", "def otherOptionsFullScreen(self):\n\n # Set Storage List\n storageList = []\n # Create Intel explain menu\n menuDisplay = \"\"\"\n \\n\n [*] Information Verbose:\n Ontop of Asking for the Username and \n Password Should we Gather Even\n More Information about the User such as \n GEOIP / ISP / User Agent etc. etc. \n This Requires Curl to be installed or \n file_get_contents in PHP on selected Server \n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n # Set Verbose of Intel Gather\n self.results = input(\n \"\\nWould you like to Build a More In-depth Intel Report on Victim ( y Or n ): \")\n if self.results.lower()[0] == \"y\" or self.results.lower() == \"yes\":\n storageList.append(\"INTEL_VERBOSE_LOUD\")\n elif self.results.lower()[0] == \"n\" or self.results.lower() == \"no\":\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n else:\n # Anything Else lets just Hush it then\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n # Redirect Ask\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Redirect URL Which is the Same \n = URL of the Full-Screen Attack \n = you picked. For Instance If \n = it was AOL Full-Screen Attack\n = the default URL redirect would \n = be https://my.screenname.aol.com\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"After the Victim Inputs Info Where Should the Script Redirect?: \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"REDIRECT_DEFAULT\")\n else:\n # No Checking on URL Let Them Use Whatever lol there bad i guess\n # Append Default Redirect Naaaow\n storageList.append(self.results)\n\n # Spoof link\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the URL Link to be spoofed\n = to? This will be displayed when the user\n = rolls over the link. 
Basically tricking\n = them making them think they are going\n = to that URL..\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL be spoofed to? (ex: https://my.screenname.aol.com): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_SPOOF\")\n else:\n # Append specified spoof url now\n storageList.append(self.results)\n\n # link name\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the Actual URL name\n = to be?\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL name be? (ex: Aol Login): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_URL_NAME\")\n else:\n # Append url name\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = name of Index.php If you feel \n = the need to change the name please \n = do not add the actual extension .php \n = along with it only add whatever crazy \n = name you come up with\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What Should the Main Index PHP File Be Called? ( ex: login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"INDEX_DEFAULT\")\n else:\n check = self.results.find(\".\")\n # if it doesn't return a -1 it found a decimal\n if check != -1:\n # Throw Error we found a dot\n self.errorOutput(\n \"[*] Error - Didn't We Say Not to Add an Extension, WOW...\", \"yellow\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Title of the Webpage.\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"blue\")\n self.results = input(\n \"What Should the Title of the Page be? 
(ex: AOL Login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"TITLE_DEFAULT\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n # Return Storage List for Processing\n return storageList", "def options():\n\n parser = argparse.ArgumentParser(description=\"PlantCV Clowder image analysis script for the DDPSC indoor system.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n #parser.add_argument(\"-v\", \"--vis\", help=\"Input VIS/RGB image.\", required=True)\n #parser.add_argument(\"-n\", \"--nir\", help=\"Input NIR image.\", required=True)\n #parser.add_argument(\"-p\", \"--perspective\", help=\"Camera perspective (side-view, top-view)\", required=True)\n parser.add_argument(\"-d\", \"--dataset\", help=\"Clowder Dataset key.\", required=True)\n parser.add_argument(\"-u\", \"--url\", help=\"Clowder URL.\", required=True)\n parser.add_argument(\"-U\", \"--username\", help=\"Clowder username.\", required=True)\n parser.add_argument(\"-p\", \"--password\", help=\"Clowder password.\", required=True)\n\n args = parser.parse_args()\n\n # if not os.path.exists(args.vis):\n # raise IOError(\"File does not exist: {0}\".format(args.vis))\n # if not os.path.exists(args.nir):\n # raise IOError(\"File does not exist: {0}\".format(args.nir))\n\n return args", "def options(self, a: str) -> typing.Any:", "def test_list_options(self):\n pass", "def getopt():\n raise NotImplementedError()", "def define_options(self) -> Optional[Any]:\n return {\n 'basename': OptionDef(required=True, default_value='promtail', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='monitoring', allowed_types=[str]),\n 'config': {\n 'prometheus_annotation': OptionDef(required=True, default_value=False, allowed_types=[bool]),\n 'promtail_config': OptionDef(allowed_types=[str, ConfigFile]),\n 'loki_url': OptionDef(allowed_types=[str]),\n 'authorization': {\n 'serviceaccount_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'serviceaccount_use': OptionDef(allowed_types=[str]),\n 'roles_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'roles_bind': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n },\n },\n 'container': {\n 'promtail': OptionDef(required=True, default_value='grafana/promtail:2.0.0', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'daemonset': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def set_global_flags(self):\n\n import rpki.http, rpki.x509, rpki.sql, rpki.async, rpki.log\n\n try:\n rpki.http.debug_http = self.getboolean(\"debug_http\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.want_persistent_client = self.getboolean(\"want_persistent_client\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.want_persistent_server = self.getboolean(\"want_persistent_server\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.use_adns = self.getboolean(\"use_adns\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.enable_ipv6_clients = self.getboolean(\"enable_ipv6_clients\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.enable_ipv6_servers = self.getboolean(\"enable_ipv6_servers\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.CMS_object.debug_cms_certs = self.getboolean(\"debug_cms_certs\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.sql.sql_persistent.sql_debug = 
self.getboolean(\"sql_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.timer.gc_debug = self.getboolean(\"gc_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.timer.run_debug = self.getboolean(\"timer_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get(\"dump_outbound_cms\"))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get(\"dump_inbound_cms\"))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.gc_summary(self.getint(\"gc_summary\"), self.getint(\"gc_summary_threshold\", 0))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.log.enable_tracebacks = self.getboolean(\"enable_tracebacks\")\n except ConfigParser.NoOptionError:\n pass", "def definearguments(self, customparser):\n if not customparser:\n return\n customparser.add_option(\n '--url',\n dest='url',\n help=\"Use the provided iLO URL to login.\",\n default=None,\n )\n customparser.add_option(\n '-u',\n '--user',\n dest='user',\n help=\"If you are not logged in yet, including this flag along\"\\\n \" with the password and URL flags can be used to log into a\"\\\n \" server in the same command.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-p',\n '--password',\n dest='password',\n help=\"\"\"Use the provided iLO password to log in.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-e',\n '--enc',\n dest='encode',\n action='store_true',\n help=SUPPRESS_HELP,\n default=False,\n )", "def initialize_options(self):\n self.all = False\n self.coverage = False\n super(test, self).initialize_options()", "def get_extra_options(self):\n # Options change depending on the pdf generator..\n try:\n transform_module = getattr(transforms, self.pdf_generator)\n except AttributeError:\n return []\n\n options = []\n tool_options = self.pdf_tool.make_options()\n adapter_options, adapter_overrides = self._get_adapter_options()\n\n opts_order = [self.request, tool_options]\n if adapter_overrides:\n opts_order.insert(0, adapter_options)\n else:\n opts_order.append(adapter_options)\n\n # First we check the options for which no value is\n # needed.\n # For each one, it is possible to define a --no-xxx\n # option.\n for opt_name in transform_module.simple_options:\n for opts in opts_order:\n if opts.get('--no-%s' % opt_name):\n break\n\n if opts.get(opt_name, None):\n options.append('--%s' % opt_name)\n break\n # Then we check values that expect a value.\n for opt_name in transform_module.valued_options:\n for opts in opts_order:\n opt_val = opts.get(opt_name, None)\n\n if opt_val is None:\n continue\n\n # Value is put before the option name as we\n # insert them after in another list using l.insert(2, opt)\n if isinstance(opt_val, list):\n for x in reversed(opt_val):\n options.append(str(x))\n else:\n options.append(str(opt_val))\n\n options.append('--%s' % opt_name)\n break\n\n return options", "def __init__(self):\n self.config = get_config()\n self.options, self.arguments = get_options(self.config)\n if self.get_bool(\"cache\") and self.get_bool(\"cache_search\") \\\n and not self.get_bool(\"longlist\"):\n integrate_search_cache(\n self.config,\n self.get(\"cachedir\"),\n self.get(\"setpath\")\n )\n if not self.arguments:\n if \"id\" in self.options.__dict__ \\\n and self.options.__dict__[\"id\"]:\n self.arguments.append( self.options.__dict__[\"id\"] )\n del( self.options.__dict__[\"id\"] )\n import sys\n 
message = \"WARNING: the --id option is deprecated and will eventually be removed\\n\"\n sys.stderr.write(message)\n elif \"city\" in self.options.__dict__ \\\n and self.options.__dict__[\"city\"] \\\n and \"st\" in self.options.__dict__ \\\n and self.options.__dict__[\"st\"]:\n self.arguments.append(\n \"^%s city, %s\" % (\n self.options.__dict__[\"city\"],\n self.options.__dict__[\"st\"]\n )\n )\n del( self.options.__dict__[\"city\"] )\n del( self.options.__dict__[\"st\"] )\n import sys\n message = \"WARNING: the --city/--st options are deprecated and will eventually be removed\\n\"\n sys.stderr.write(message)", "def poss_opt(self):\n return ('ip', 'debug')", "def pytest_addoption(parser):\n parser.addoption(\"--address\", action=\"store\",\n default=\"http://192.168.145.130/\", help=\"Opencart web address\")\n parser.addoption(\"--browser\", action=\"store\", default=\"chrome\", help=\"Browser name\")\n parser.addoption(\"--wait\", action=\"store\", default=10, help=\"Implicity wait\")", "def set_option_user_agent(self, string, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionUserAgent/', {'String': string, 'apikey': apikey})))", "def create_options(self):\n return []", "def setOptions(self):\n self.parser.add_option( \"--outputdir\",\n dest = \"outdir\",\n default = None,\n help = \"Directory to write JSON summary to.\" )\n\n self.parser.add_option( \"--dbs\",\n dest = \"usedbs\",\n default = 'no',\n help = \"Use information in DBS to build the input lumi lists and the output lumi lists.\"+\\\n \" Allowed values are yes/no. Default is no.\" )", "def add_options(parser):\n parser.add_option(\"\", \"--excess-bw\", type=\"float\", default=_def_excess_bw,\n help=\"set RRC excess bandwith factor [default=%default] (PSK)\")\n parser.add_option(\"\", \"--no-gray-code\", dest=\"gray_code\",\n action=\"store_false\", default=_def_gray_code,\n help=\"disable gray coding on modulated bits (PSK)\")", "def setup_parser_arguments(parser):\n parser._optionals.title = \"Common options\"\n parser.add_argument(\n '-H', '--host',\n help=\"Host to load test in the following format: http://10.21.32.33\"\n )\n # Number of Locust users\n parser.add_argument(\n '-c', '--clients',\n type=int,\n dest='num_clients',\n default=1,\n help=\"Number of concurrent Locust users. Only used together with --headless\"\n )\n # User hatch rate\n parser.add_argument(\n '-r', '--hatch-rate',\n type=float,\n default=1,\n help=\"The rate per second in which clients are spawned. Only used together with --headless\"\n )\n # Time limit of the test run\n parser.add_argument(\n '-t', '--run-time',\n help=\"Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless\"\n )\n # List locust commands found in loaded locust files/source files\n parser.add_argument(\n '-l', '--list',\n action='store_true',\n dest='list_commands',\n help=\"Show list of possible locust classes and exit\"\n )\n \n web_ui_group = parser.add_argument_group(\"Web UI options\")\n web_ui_group.add_argument(\n '--web-host',\n default=\"\",\n help=\"Host to bind the web interface to. Defaults to '*' (all interfaces)\"\n )\n web_ui_group.add_argument(\n '--web-port', '-P',\n type=int,\n default=8089,\n help=\"Port on which to run web host\"\n )\n # if we should print stats in the console\n web_ui_group.add_argument(\n '--headless',\n action='store_true',\n help=\"Disable the web interface, and instead start the load test immediately. 
Requires -c and -t to be specified.\"\n )\n web_ui_group.add_argument(\n '--web-auth',\n type=str,\n dest='web_auth',\n default=None,\n help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'\n )\n \n master_group = parser.add_argument_group(\n \"Master options\", \n \"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.\",\n )\n # if locust should be run in distributed mode as master\n master_group.add_argument(\n '--master',\n action='store_true',\n help=\"Set locust to run in distributed mode with this process as master\"\n )\n master_group.add_argument(\n '--master-bind-host',\n default=\"*\",\n help=\"Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces).\"\n )\n master_group.add_argument(\n '--master-bind-port',\n type=int,\n default=5557,\n help=\"Port that locust master should bind to. Only used when running with --master. Defaults to 5557.\"\n )\n master_group.add_argument(\n '--expect-workers',\n type=int,\n default=1,\n help=\"How many workers master should expect to connect before starting the test (only when --headless used).\"\n )\n master_group.add_argument(\n '--expect-slaves',\n action='store_true',\n help=configargparse.SUPPRESS\n )\n \n worker_group = parser.add_argument_group(\n \"Worker options\", \n textwrap.dedent(\"\"\"\n Options for running a Locust Worker node when running Locust distributed. \n Only the LOCUSTFILE (-f option) need to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.\n \"\"\"),\n )\n # if locust should be run in distributed mode as worker\n worker_group.add_argument(\n '--worker',\n action='store_true',\n help=\"Set locust to run in distributed mode with this process as worker\"\n )\n worker_group.add_argument(\n '--slave',\n action='store_true',\n help=configargparse.SUPPRESS\n )\n # master host options\n worker_group.add_argument(\n '--master-host',\n default=\"127.0.0.1\",\n help=\"Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1.\"\n )\n worker_group.add_argument(\n '--master-port',\n type=int,\n default=5557,\n help=\"The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557.\"\n )\n \n stats_group = parser.add_argument_group(\"Request statistics options\")\n # A file that contains the current request stats.\n stats_group.add_argument(\n '--csv', '--csv-base-name',\n dest='csvfilebase',\n help=\"Store current request stats to files in CSV format.\",\n )\n # Adds each stats entry at every iteration to the _stats_history.csv file.\n stats_group.add_argument(\n '--csv-full-history',\n action='store_true',\n default=False,\n dest='stats_history_enabled',\n help=\"Store each stats entry in CSV format to _stats_history.csv file\",\n ) \n # if we should print stats in the console\n stats_group.add_argument(\n '--print-stats',\n action='store_true',\n help=\"Print stats in the console\"\n )\n # only print summary stats\n stats_group.add_argument(\n '--only-summary',\n action='store_true',\n help='Only print the summary stats'\n )\n stats_group.add_argument(\n '--reset-stats',\n action='store_true',\n help=\"Reset statistics once hatching has been completed. 
Should be set on both master and workers when running in distributed mode\",\n )\n \n log_group = parser.add_argument_group(\"Logging options\")\n # skip logging setup\n log_group.add_argument(\n '--skip-log-setup',\n action='store_true',\n dest='skip_log_setup',\n default=False,\n help=\"Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults.\"\n )\n # log level\n log_group.add_argument(\n '--loglevel', '-L',\n default='INFO',\n help=\"Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.\",\n )\n # log file\n log_group.add_argument(\n '--logfile',\n help=\"Path to log file. If not set, log will go to stdout/stderr\",\n )\n \n step_load_group = parser.add_argument_group(\"Step load options\")\n # Enable Step Load mode\n step_load_group.add_argument(\n '--step-load',\n action='store_true',\n help=\"Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified.\"\n )\n # Number of clients to incease by Step\n step_load_group.add_argument(\n '--step-clients',\n type=int,\n default=1,\n help=\"Client count to increase by step in Step Load mode. Only used together with --step-load\"\n )\n # Time limit of each step\n step_load_group.add_argument(\n '--step-time',\n help=\"Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load\"\n )\n \n \n other_group = parser.add_argument_group(\"Other options\")\n # Display ratio table of all tasks\n other_group.add_argument(\n '--show-task-ratio',\n action='store_true',\n help=\"Print table of the locust classes' task execution ratio\"\n )\n # Display ratio table of all tasks in JSON format\n other_group.add_argument(\n '--show-task-ratio-json',\n action='store_true',\n help=\"Print json data of the locust classes' task execution ratio\"\n )\n # Version number (optparse gives you --version but we have to do it\n # ourselves to get -V too. sigh)\n other_group.add_argument(\n '--version', '-V',\n action='version',\n help=\"Show program's version number and exit\",\n version='%(prog)s {}'.format(version),\n )\n # set the exit code to post on errors\n other_group.add_argument(\n '--exit-code-on-error',\n type=int,\n default=1,\n help=\"Sets the process exit code to use when a test result contain any failure or error\"\n )\n other_group.add_argument(\n '-s', '--stop-timeout',\n action='store',\n type=int,\n dest='stop_timeout',\n default=None,\n help=\"Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. 
This parameter only needs to be specified for the master process when running Locust distributed.\"\n )\n \n locust_classes_group = parser.add_argument_group(\"Locust user classes\")\n locust_classes_group.add_argument(\n 'locust_classes',\n nargs='*',\n metavar='LocustClass',\n help=\"Optionally specify which Locust classes that should be used (available Locust classes can be listed with -l or --list)\",\n )", "def parse_options():\n\n parser = optparse.OptionParser(usage=USAGE, version=VERSION)\n\n parser.add_option(\"-f\", \"--file\",\n action=\"store\", default=Utils.getConfig(\"defaultFile\"), dest=\"file\",\n help=\"Read the site name from external file\")\n\n parser.add_option(\"-s\", \"--site-name\",\n action=\"store\", default=\"\", dest=\"sitename\",\n help=\"Get links for specified url only\")\n\n opts, args = parser.parse_args()\n\n return opts, args", "def get_driver_options():\n # Define Browser Options\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\") # Hides the browser window\n chrome_options.add_experimental_option('prefs', {'intl.accept_languages': 'en,en_US'})\n return chrome_options", "def parse_options():\n description = \"\"\"DDoS_Wall is designed to mitigate common types of DDoS attacks. It offers system\n monitoring and will enable TCP cookies if the system is under attack, this helps\n mitigate SYN flood attacks. It also provides protection against HTTP based attacks which it\n will automatically detect and the offending IP addresses will be blocked. ddos_wall must be run\n with root privileges\"\"\"\n parser = optparse.OptionParser(description=description)\n parser.add_option('-c', '--cpu_orange', default=0, help='orange threshold for CPU utilisation', metavar='<ARG>')\n parser.add_option('-C', '--cpu_red', default=0, help='red threshold for CPU utilisation', metavar='<ARG>')\n parser.add_option('-m', '--memory_orange', default=0, help='orange threshold for RAM usage', metavar='<ARG>')\n parser.add_option('-M', '--memory_red', default=0, help='red threshold for RAM usage', metavar='<ARG>')\n parser.add_option('-n', '--network_orange', default=0, help='orange threshold for Network usage', metavar='<ARG>')\n parser.add_option('-N', '--network_red', default=0, help='red threshold for Network usage', metavar='<ARG>')\n parser.add_option('-p', '--port', default=1234, help='port that proxy listens on', metavar='<ARG>')\n parser.add_option('-a', '--ip_address', help='MANDATORY - ip address of server', metavar='<ARG>')\n parser.add_option('-I', '--interface', default='eth0', help='the interface forwarding traffic', metavar='<ARG>')\n parser.add_option('-t', '--time', default=10, help='the number of minutes that threshold is calculated over',\n metavar='<ARG>')\n parser.add_option('-i', '--interval', default=10, help='the interval between polling the server', metavar='<ARG>')\n parser.add_option('-s', '--setup', action='store_true', default=False,\n help='setup DDoS_Wall')\n parser.add_option('-r', '--reset', action='store_true', default=False, help='resets DDoS_Wall')\n\n opts, args = parser.parse_args()\n\n # IP address must be supplied\n if opts.ip_address is None:\n print(\"Please supply an IP Address for the server e.g --ip_address 10.10.10.10\")\n exit(-1)\n\n options = dict()\n options['port'] = opts.port # port that proxy listens on\n options['ip_address'] = opts.ip_address # IP address of server\n options['interface'] = opts.interface # the network interface\n options['cpu_orange_threshold'] = float(opts.cpu_orange)\n 
options['cpu_red_threshold'] = float(opts.cpu_red)\n options['ram_orange_threshold'] = float(opts.memory_orange)\n options['ram_red_threshold'] = float(opts.memory_red)\n options['network_orange_threshold'] = float(opts.network_orange)\n options['network_red_threshold'] = float(opts.network_red)\n options['time_period'] = opts.time # how long in minutes the running average for the monitoring should be\n options['interval'] = opts.interval # length of tim in seconds between polling resource\n options['setup'] = opts.setup # If setup needs running\n options['reset'] = opts.reset # Reset DDoS_Wall\n\n return options", "def _parse_options(self):\n parser = argparse.ArgumentParser(prog=self._program,\n formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, width=132))\n parser.add_argument(\"--debug\", action='store_true', default=self._debug, help=\"The debug flag. (Default: {0})\".format(self._debug))\n parser.add_argument(\"--drives\", default=None, help=\"The drives to display. (Default: {0})\".format(self._drives))\n parser.add_argument(\"--exclude\", default=None, help=\"The drives to exclude. (Default: {0})\".format(self._exclude))\n parser.add_argument(\"--force_spt\", action='store_true', help=\"Force using spt (debug). (Default: {0})\".format(self._force_spt))\n parser.add_argument(\"--json\", action='store_true', default=self._json_format, help=\"Enable JSON format. (Default: {0})\".format(self._json_format))\n parser.add_argument(\"--long\", action='store_true', default=self._long_format, help=\"Enable long format. (Default: {0})\".format(self._long_format))\n parser.add_argument(\"--noencs\", action='store_false', default=self._include_enclosures, help=\"Exclude enclosures. (Default: {0})\".format(not self._include_enclosures))\n parser.add_argument(\"--noheader\", action='store_false', default=self._report_header, help=\"Exclude headers. (Default: {0})\".format(not self._report_header))\n parser.add_argument(\"--power_on_hours\", action='store_true', default=self._power_on_hours, help=\"Include power on hours. (Default: {0})\".format(not self._power_on_hours))\n # Filters for spt:\n parser.add_argument(\"--firmware_version\", default=None, help=\"The firmware version. (Default: {0})\".format(self.firmware_version))\n parser.add_argument(\"--product_name\", default=None, help=\"The product name. (Default: {0})\".format(self.product_name))\n parser.add_argument(\"--vendor_name\", default=None, help=\"The vendor name. (Default: {0})\".format(self.vendor_name))\n parser.add_argument(\"--serial_number\", default=None, help=\"The serial number. (Default: {0})\".format(self.serial_number))\n parser.add_argument(\"--sas_address\", default=None, help=\"The SAS address. (Default: {0})\".format(self.target_port))\n parser.add_argument(\"--target_port\", default=None, help=\"The target port. (Default: {0})\".format(self.target_port))\n parser.add_argument(\"--use_lsscsi\", action='store_true', help=\"Find devices via lsscsi. (Default: {0})\".format(self._use_lsscsi))\n parser.add_argument(\"--spt_path\", default=None, help=\"The spt tool path. 
(Default: {0})\".format(self.tool))\n\n args = parser.parse_args()\n\n self._debug = args.debug\n if self._debug:\n self.log_level = logging.DEBUG\n self._json_format = args.json\n self._long_format = args.long\n if args.drives:\n self._drives = args.drives.split(',')\n if args.exclude:\n self._exclude = args.exclude.split(',')\n if not args.noencs:\n self._include_enclosures = False\n if not args.noheader:\n self._report_header = False\n if args.power_on_hours:\n self._power_on_hours = True\n if args.firmware_version:\n self.firmware_version = args.firmware_version\n if args.product_name:\n self.product_name = args.product_name\n if args.vendor_name:\n self.vendor_name = args.vendor_name\n if args.serial_number:\n self.serial_number = args.serial_number\n if args.sas_address:\n self.target_port = args.sas_address\n if args.target_port:\n self.target_port = args.target_port\n if args.force_spt:\n self._force_spt = args.force_spt\n if args.use_lsscsi:\n self._use_lsscsi = args.use_lsscsi\n if args.spt_path:\n self.tool = args.spt_path", "def __init__(self):\n self.timeout = Config.conf['timeout']\n self.ctimeout = Config.conf['ctimeout']\n self.download_timeout = Config.conf['download_timeout']\n self.agent = Config.conf['http_agent']\n self.http_proxy = Config.conf['http_proxy']\n self.cache_support = False\n self.insecure = Config.conf['http_insecure']\n self._curl_exec = Config.conf['use_curl_executable']\n self._select_implementation()", "def _CommonOptions(self, p):\n super()._CommonOptions(p, opt_v=False)", "def setup_options(parser):\n help_str = \"Turn on debug\"\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\", default=False,\n help=help_str)", "def parse_options(self, options):\n pass", "def web_python_options(self, dot_cookietemple: Optional[dict]):\n self.web_struct.command_line_interface = cookietemple_questionary_or_dot_cookietemple(function='select',\n question='Choose a command line library',\n choices=['Click', 'Argparse', 'No command-line interface'],\n default='Click',\n dot_cookietemple=dot_cookietemple,\n to_get_property='command_line_interface')\n self.web_struct.testing_library = cookietemple_questionary_or_dot_cookietemple(function='select',\n question='Choose a testing library',\n choices=['pytest', 'unittest'],\n default='pytest',\n dot_cookietemple=dot_cookietemple,\n to_get_property='testing_library')", "def definearguments(self, customparser):\n\n customparser.add_option(\n '--disable',\n action=\"store_false\",\n dest=\"enableFeature\",\n help=\"Disable the Scalable Persistent Memory feature. 
Warning: \"\\\n \"any pending configuration changes will be lost.\"\n )", "def runoptions(self):\n # outstanding = self.missing_required()\n # if outstanding:\n # raise TypeError('Module missing required parameter: %s' % ', '.join(outstanding))\n return self._runopts", "def get_rd_kafka_opts(self):\n raise NotImplementedError()", "def extra_options():\n extra_vars = {\n 'withsampledata': [False, \"Include sample data\", CUSTOM],\n 'bwapluginver': [None, \"BWA pugin version\", CUSTOM],\n 'RSEMmod': [False, \"Enable RSEMmod\", CUSTOM],\n }\n return EasyBlock.extra_options(extra_vars)", "def CrawlOptions(self, u:URL)->CrawlerOptions:\n return self._options", "def _get_run_options(self, cmdp, exec_engine=None):\n cmdp.declare_options(\"-v= -e= -w= -u= -p= -i -t -a -P\")\n cmd_options = {\n \"netcoop\": {\n \"fl\": (\"-P\", \"--publish-all\", \"--netcoop\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"portsmap\": {\n \"fl\": (\"-p=\", \"--publish=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"novol\": {\n \"fl\": (\"--novol=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"vol\": {\n \"fl\": (\"-v=\", \"--volume=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"env\": {\n \"fl\": (\"-e=\", \"--env=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"envfile\": {\n \"fl\": (\"--env-file=\",), \"act\": 'E',\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"user\": {\n \"fl\": (\"-u=\", \"--user=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cwd\": {\n \"fl\": (\"-w=\", \"--workdir=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"entryp\": {\n \"fl\": (\"--entrypoint=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cpuset\": {\n \"fl\": (\"--cpuset-cpus=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostauth\": {\n \"fl\": (\"--hostauth\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"containerauth\": {\n \"fl\": (\"--containerauth\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nosysdirs\": {\n \"fl\": (\"--nosysdirs\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostenv\": {\n \"fl\": (\"--hostenv\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"bindhome\": {\n \"fl\": (\"--bindhome\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nometa\": {\n \"fl\": (\"--nometa\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dri\": {\n \"fl\": (\"--dri\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cmd\": {\n \"fl\": (\"P+\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"volfrom\": {\n \"fl\": (\"--volumes-from=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dns\": {\n \"fl\": (\"--dns=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dnssearch\": {\n \"fl\": (\"--dns-search=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"kernel\": {\n \"fl\": (\"--kernel=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"devices\": {\n \"fl\": (\"--device=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"nobanner\": {\n \"fl\": (\"--nobanner\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"platform\": {\n \"fl\": (\"--platform=\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"pull\": {\n \"fl\": (\"--pull=\"), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n }\n }\n for option, cmdp_args in 
list(cmd_options.items()):\n last_value = None\n for cmdp_fl in cmdp_args[\"fl\"]:\n option_value = cmdp.get(cmdp_fl, cmdp_args[\"p2\"],\n cmdp_args[\"p3\"])\n if not exec_engine:\n continue\n if cmdp_args[\"act\"] == \"R\": # action is replace\n if option_value or last_value is None:\n exec_engine.opt[option] = option_value\n elif cmdp_args[\"act\"] == \"E\": # action is extend\n # if option == \"env\":\n # print (type(option_value))\n # print (option_value)\n exec_engine.opt[option].extend(option_value)\n last_value = option_value", "def add_extra_args(self):\n self.parser.add_argument(\"--region\", required=False)\n self.parser.add_argument(\"--zone\", required=False)\n self.parser.add_argument(\"--network\", required=False)", "def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args", "def required_options():\n return [\n 'projects',\n 'old_milestone_names',\n 'new_milestone_name',\n 'statuses',\n 'bugs_importance',\n 'maximum'\n ]", "def help_option(args, run):\n pass", "def SoapOptions(self) -> SoapOption:", "def settable(self):\r\n return Options([i for i in list(self.items())\r\n if i[0] in Options.versatileOptions()])", "def setup(self, options, results):", "def get_options(cls, player, context={}):\n\t\traise NotImplementedError()", "def get_options():\n # pass in the access_token via commandline\n parser = OptionParser()\n parser.add_option(\"--data-dir\", default='/tmp',\n action=\"store\", type=\"string\", dest=\"data_dir\",\n help=\"Directory where DBs exist\")\n parser.add_option(\"--malicious\",\n action=\"store_true\", default=False, dest=\"malicious\",\n help=\"Check malicious\")\n parser.add_option(\"--suspicious\",\n action=\"store_true\", default=False, dest=\"suspicious\",\n help=\"Check suspicious\")\n parser.add_option(\"--predicted\",\n action=\"store_true\", default=False, dest=\"predicted\",\n help=\"Check predicted\")\n (options, _) = parser.parse_args()\n if(not options.malicious and\n not options.predicted and\n not options.suspicious):\n parser.error(\"Please specify at least one category\")\n return options", "def options(self, value):\n self._options = value\n if self._options.get(\"legacy\"):\n self._options[\"extended\"] = False", "def plugin_options(request):\n options = (\n '--rabbit-amqp-uri',\n '--rabbit-api-uri'\n )\n\n args = [\n \"{}={}\".format(opt, request.config.getoption(opt)) for opt in options\n ]\n return args", "def test_defaultValues(self):\n argV = []\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 392)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], 4.23)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], None)\n self.failUnlessEqual(self.usage.opts['eggfloat'], None)" ]
[ "0.63878936", "0.62212753", "0.61612546", "0.61248803", "0.5934255", "0.5776161", "0.5709554", "0.5666998", "0.56532615", "0.564446", "0.55798537", "0.5551045", "0.55506915", "0.5514824", "0.5505621", "0.5467913", "0.54607224", "0.5432014", "0.54278624", "0.54107505", "0.54085046", "0.5388811", "0.5381081", "0.5376547", "0.53600115", "0.5337669", "0.53351045", "0.5334484", "0.532459", "0.53186107", "0.5311", "0.530926", "0.5307612", "0.5292016", "0.526731", "0.5230788", "0.5230129", "0.5205747", "0.5192822", "0.51865095", "0.5183396", "0.5172937", "0.51682353", "0.5159441", "0.51586044", "0.51572615", "0.51460826", "0.51460826", "0.5139298", "0.51372254", "0.51284665", "0.5126493", "0.5104045", "0.50983715", "0.50892985", "0.5086267", "0.5081976", "0.5063545", "0.5062421", "0.50586325", "0.50578386", "0.5031648", "0.50310177", "0.50123256", "0.5009964", "0.50009364", "0.49922374", "0.49890685", "0.49885643", "0.49853826", "0.49834442", "0.49809703", "0.49783677", "0.4973042", "0.49703294", "0.49702826", "0.4967851", "0.49674886", "0.496216", "0.49526465", "0.49428567", "0.4935988", "0.49316356", "0.49271423", "0.49231008", "0.49171877", "0.49148044", "0.4914439", "0.4910749", "0.49088585", "0.48949325", "0.48843324", "0.48828664", "0.4878013", "0.48770717", "0.48708755", "0.48658854", "0.48629394", "0.48601896", "0.48486525" ]
0.490981
89
Validate an XML document.
def validate():
    description = f"Validate XML metadata."
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    help = "XML file or URL"
    parser.add_argument('infile', help=help)
    help = (
        "Format ID for metadata standard. If this argument is supplied, "
        "only that format ID will be checked. If not, all format IDs will be "
        "checked."
    )
    parser.add_argument('--format-id', help=help,
                        choices=d1_scimeta.util.get_supported_format_id_list())
    help = "Verbosity of log messages."
    choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
    parser.add_argument('-v', '--verbosity', help=help, choices=choices, default='INFO')
    args = parser.parse_args()
    validator = XMLValidator(verbosity=args.verbosity)
    validator.validate(args.infile, format_id=args.format_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateDocument(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlValidateDocument(self._o, doc__o)\n return ret", "def validate(self) :\n\t\tif self.doc is not None :\n\t\t\tparser = etree.XMLParser(recover=True, strip_cdata=True)\n\t\t\ttree = etree.XML(self.doc.toxml(), parser)\n\t\t\tdtdFile = self._getDTDFile()\n\t\t\tif dtdFile is not None :\n\t\t\t\tif _existFile(dtdFile) :\n\t\t\t\t\tdtd = etree.DTD(dtdFile)\n\t\t\t\t\tif dtd.validate(tree) :\n\t\t\t\t\t\tself._enrichXML()\n\t\t\t\t\t\treturn True\n\t\t\t\t\telse :\n\t\t\t\t\t\tprint(dtd.error_log.filter_from_errors()[0])\n\t\t\t\t\t\treturn False\n\t\t\t\telse :\n\t\t\t\t\tprint('Unable to find the DTD file ',dtdFile)\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tself._enrichXML()\n\t\t\t\treturn True\n\t\telse :\n\t\t\treturn False", "def validate(self, xmlfilename):\n\t\tdata = open(self.xsdfilename, 'rb') \n\t\tschema_root = etree.XML(data.read())\n\t\tschema = etree.XMLSchema(schema_root)\n\t\txmlparser = etree.XMLParser(schema=schema)\n\t\ttry:\n\t\t\twith open(xmlfilename, 'rb') as f:\n\t\t\t\tetree.fromstring(f.read(), xmlparser)\n\t\t\tprint(\"XML file was parsed without errors\")\n\t\t\treturn True\n\t\texcept etree.XMLSchemaError:\n\t\t\tprint(\"Error parsing XML file\")\n\t\t\ttraceback.print_tb()\n\t\t\treturn False", "def xml_validator(self,xml_string):\r\n\r\n try:\r\n schema = etree.XMLSchema(file=XSD_FILE_PATH)\r\n parser = objectify.makeparser(schema=schema)\r\n objectify.fromstring(xml_string, parser)\r\n print(\"XML file has been validated.\")\r\n return True\r\n except XMLSyntaxError:\r\n #handle exception here\r\n print(\"XML file cannot be validated.\")\r\n return False", "def validate_etree(self, etree_xml):\n valid = self.xml_schema.validate(etree_xml)\n return SchemaValidationResult(valid, self.xml_schema.error_log)", "def validateDocument(self, ctxt):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)\n return ret", "def validate(self, document):\n self.validator.validate(document)", "def _validateXML(self, to_be_validated, xsd_model):\n #We parse the XSD model\n xsd_model = StringIO.StringIO(xsd_model)\n xmlschema_doc = etree.parse(xsd_model)\n xmlschema = etree.XMLSchema(xmlschema_doc)\n\n string_to_validate = StringIO.StringIO(to_be_validated)\n\n try:\n document = etree.parse(string_to_validate)\n except (etree.XMLSyntaxError, etree.DocumentInvalid) as e: # pylint: disable=catching-non-exception\n LOG('SlapTool::_validateXML', INFO, \n 'Failed to parse this XML reports : %s\\n%s' % \\\n (to_be_validated, e))\n return False\n\n if xmlschema.validate(document):\n return True\n\n return False", "def validate(self, doc, schemaloc=False):\n if not (schemaloc or self._schemalocs):\n raise errors.ValidationError(\n \"No schemas to validate against! 
Try instantiating \"\n \"XmlValidator with use_schemaloc=True or setting the \"\n \"schema_dir param in __init__\"\n )\n\n root = utils.get_etree_root(doc)\n xsd = self._build_uber_schema(root, schemaloc)\n is_valid = xsd.validate(root)\n\n return XmlValidationResults(is_valid, xsd.error_log)", "def validate(xml_document, schema=None, cls=None, path=None, schema_path=None,\n use_defaults=True, namespaces=None, locations=None, base_url=None,\n defuse='remote', timeout=300, lazy=False):\n source, schema = get_context(\n xml_document, schema, cls, locations, base_url, defuse, timeout, lazy\n )\n schema.validate(source, path, schema_path, use_defaults, namespaces)", "def schemaValidateDoc(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlSchemaValidateDoc(self._o, doc__o)\n return ret", "def validateRoot(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlValidateRoot(self._o, doc__o)\n return ret", "def validate(self):\n\n # clear any previous xml errors\n clear_error_log()\n if self.schema_file is not None:\n try:\n # Attempt parsing the schema file\n schdoc = parse(self.schema_file)\n except XMLSyntaxError as e:\n # The schema was not parsable XML\n logging.warning('The schema XML file could not be parsed.')\n for item in e.error_log:\n logging.info(item)\n\n return False\n\n try:\n schema = XMLSchema(schdoc)\n except XMLSchemaParseError as e:\n # The schema document is XML, but it's not a schema\n logging.warning(\n 'The schema XML file was parsed, but it does not appear to be a valid XML Schema document.'\n )\n for item in e.error_log:\n logging.info(item)\n\n return False\n\n try:\n # Attempt parsing the data file\n data = parse(self.datafile)\n except XMLSyntaxError as e:\n # The data was not parsable XML\n logging.warning('The data XML file could not be parsed.')\n for item in e.error_log:\n logging.info(item)\n\n return False\n\n if self.schema_file is not None:\n if schema.validate(data):\n self.data = data\n return True\n\n logging.warning(\n 'The data does not conform to the provided schema.')\n for item in schema.error_log:\n logging.info(item)\n\n return False\n\n self.data = data\n\n return True", "def validate(self, doc):\n return self.schema.validate(doc)", "def validate_string(self, xml_string):\n etree_xml = parse(xml_string, allow_file=False)\n return self.validate_etree(etree_xml)", "def ValidateXML(file):\n #TODO validate against DTD\n re_escape_quotes=re.compile('\"')\n s=re_escape_quotes.sub('\\\\\"', f)\n return getstatusoutput(\"echo \\\"%s\\\" | xmllint --valid - 2>&1 > /dev/null\" % s)[0]", "def validate(self, schema=os.path.join(os.path.dirname(__file__), 'am.xsd')):\n return validate_xml(schema, self.path, from_path=True)", "def schemaValidateDoc(self, ctxt):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.xmlSchemaValidateDoc(ctxt__o, self._o)\n return ret", "def test_validate_invalid(self):\r\n self.assertEqual(get_tree_and_validate(self.invalid_xml, open(self.SCHEMA, 'r').read()), 0)", "def recipe12_8():\n from xml.parsers.xmlproc import utils, xmlval, xmldtd\n def validate_xml_file(xml_filename, app=None, dtd_filename=None):\n # build validating parser object with appropriate error handler\n parser=xmlval.Validator()\n parser.set_error_handler(utils.ErrorPrinter(parser))\n if dtd_filename is None:\n # DTD fiel specified, laod and set it as the DTD to use\n dtd=xmldtd.load_dtd(dtd_filename)\n parser.val.dtd = parser.dtd = parser.ent = dtd\n if app is not 
None:\n # Application processing requested, set application object\n parser.set_application(app)\n # everything being set correctly, finally perform the parsing\n parser.parse_resource(xml_filename) \n # if XML data is in a string s, use instead\n # parser.feed(s)\n # parser.close(s)", "def test_valid_xml(self):\r\n self.build_problem()\r\n self.assertTrue(True)", "def validate(self):\n import os\n\n if self.kind == KDM.INTEROP:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'interop.xsd'), 'r') as f:\n schema = f.read()\n elif self.kind == KDM.SMPTE:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'smpte.xsd'), 'r') as f:\n schema = f.read()\n\n base_dir = os.getcwd()\n os.chdir(os.path.join(os.path.dirname(__file__), 'xsd'))\n try:\n schema = ET.XMLSchema(ET.XML(schema))\n xmlparser = ET.XMLParser(schema=schema)\n ET.fromstring(self.raw, xmlparser)\n finally:\n os.chdir(base_dir)", "def assertValid(self, doc):\n return self.schema.assertValid(doc)", "def validate_XML(xml):\n tree = etree.XML(xml)\n schema_tree = etree.XML(SCHEMA_TEMPLATE)\n # Find all unique instances of 'xsi:schemaLocation=\"<namespace> <path-to-schema.xsd> ...\"'\n schema_locations = set(tree.xpath(\"//*/@xsi:schemaLocation\", namespaces={'xsi': XSI}))\n for schema_location in schema_locations:\n # Split namespaces and schema locations ; use strip to remove leading\n # and trailing whitespace.\n namespaces_locations = schema_location.strip().split()\n # Import all found namspace/schema location pairs\n for namespace, location in zip(*[iter(namespaces_locations)] * 2):\n xs_import = etree.Element(XS + \"import\")\n xs_import.attrib['namespace'] = namespace\n xs_import.attrib['schemaLocation'] = location\n schema_tree.append(xs_import)\n # Contstruct the schema\n schema = etree.XMLSchema(schema_tree)\n # Validate!\n schema.assertValid(tree)", "def validate_xml(content, schema_content):\n xml_schema_doc = etree.parse(schema_content)\n xml_schema = etree.XMLSchema(xml_schema_doc)\n xml = etree.parse(StringIO.StringIO(content))\n\n # Validate the content against the schema.\n try:\n xml_schema.assertValid(xml)\n except etree.DocumentInvalid:\n return xml_schema.error_log\n\n return ''", "def validate_xml(self):\n log.info(\"Validating FluoView Mosaic XML...\")\n tree = etree.parse(self.infile[\"full\"])\n root = tree.getroot()\n if not root.tag == \"XYStage\":\n raise TypeError(\"Unexpected value: %s\" % root.tag)\n # find() raises an AttributeError if no such element is found:\n xdir = root.find(\"XAxisDirection\").text\n ydir = root.find(\"YAxisDirection\").text\n # WARNING: 'mcount' is the HIGHEST INDEX number, not the total count!\n mcount = int(root.find(\"NumberOfMosaics\").text)\n # currently we only support LTR and TTB experiments:\n if xdir != \"LeftToRight\" or ydir != \"TopToBottom\":\n raise TypeError(\"Unsupported Axis configuration\")\n self.supplement = {\"xdir\": xdir, \"ydir\": ydir, \"mcount\": mcount}\n log.info(\"Finished validating XML.\")\n return tree", "def validate(self):\n self.logger.debug(\"In validate.\")\n\n document = self._get_raw_doc()\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n (valid, error_message) = session.get_osdf().validate_node(document)\n\n problems = []\n\n if not valid:\n self.logger.info(\"Validation did not succeed for %s.\", __name__)\n problems.append(error_message)\n\n if 'prepared_from' not in self._links.keys():\n problems.append(\"Must add a 'prepared_from' link to a sample.\")\n\n self.logger.debug(\"Number 
of validation problems: %s.\", len(problems))\n return problems", "def validate_document(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"]: \n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(assignment|problem|year|title|name|blurb|due))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tMake sure the tags you are using are correct.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&amp;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed at all.\".format(settings.filename))\n print color(\"\\tAre you sure all tags are closed?\", color_code(YELLOW))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n try:\n document = Document(settings.filename)\n document.parse_tree(tree)\n document.validate()\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n for i, version in enumerate(document.versions):\n print color(\"\\n\\nProblem {}: {}\\n\".format(i+1, version.filename),\n color_code(BLUE))\n validate_version(version, failed)", "def validate(file_in) :\n\tname = str(file_in.name)\n\tif name[-4:] != \".xml\" and name[-4:] != \".XML\" :\n\t\treturn False\n\txsd = open('wcdb/WorldCrises.xsd.xml', 'r')\n\txmlFile = open('wcdb/temp.xml', 'w')\n\txmlFile.write(file_in.read())\n\txmlFile = open('wcdb/temp.xml', 'r')\n\ttry:\n\t\tpsvi = pyxsval.parseAndValidate(\"wcdb/temp.xml\",\n\t\t\t\"wcdb/WorldCrises.xsd.xml\", xmlIfClass=pyxsval.XMLIF_ELEMENTTREE)\n\t\ttree = psvi.getTree()\n\texcept pyxsval.XsvalError, e:\n\t\treturn 'Validation aborted. ' + str(e)\n\texcept GenXmlIfError, e:\n\t\treturn 'Parsing aborted. ' + str(e)\n\texcept Exception as e:\n\t\t# catch all\n\t\treturn 'Exception. 
' + str(e)\n\t#handle invalid case\n\treturn tree", "def validate(self):\n self.logger.debug(\"In validate.\")\n\n document = self._get_raw_doc()\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n (valid, error_message) = session.get_osdf().validate_node(document)\n\n problems = []\n if not valid:\n self.logger.info(\"Validation did not succeed.\")\n problems.append(error_message)\n\n if 'associated_with' not in self._links.keys():\n problems.append(\"Must have a 'associated_with' link to a subject.\")\n\n self.logger.debug(\"Number of validation problems: %s.\", len(problems))\n return problems", "def validateElement(self, doc, elem):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlValidateElement(self._o, doc__o, elem__o)\n return ret", "def test_does_validate_valid_xml_file(self):\n xml_file = join(\n getcwd(), 'testdata', 'newstest2019-defr-src-ts.de.FIXED.xml'\n )\n doc = valitest.ValidatableTestSet(xml_file)\n self.assertEqual(doc.setid, \"newstest2019\")\n self.assertEqual(doc.srclang, \"any\")", "def validateSBML(self):\n return _libsbml.SBMLDocument_validateSBML(self)", "def validate(self, document) -> None:\n if not len(document.text) > 0:\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )", "def assertValidXML(self, data):\r\n # Just try the load. If it throws an exception, the test case will fail.\r\n self.serializer.from_xml(data)", "def validate(cls,corpus,target):\n printWarning(cls,inspect.stack()[0][3],\n \"Preparing data for xsd validation..\")\n xmlstring = corpus.writeToString()\n printWarning(cls,inspect.stack()[0][3],\n \"Prepared\")\n xsd = Validator.validateXSD(xmlstring,target)\n semantic = Validator.validateSemantic(corpus,target)\n valid = (xsd and semantic)\n if not valid:\n printError(cls,inspect.stack()[0][3],\n \"Data not valid\")\n return(valid)", "def validate(self, node):", "def validate_schema(doc_xml, schema_xml=None):\n doc_dml = deepcopy(doc_xml)\n\n doc_new = etree.Element(doc_xml.tag, nsmap={None: 'http://www.sii.cl/SiiDte'})\n doc_new[:] = doc_xml[:] # move children into new root\n doc_new.attrib.update(doc_xml.attrib) # copy attributes of the root node\n\n # reload xml\n buff = BytesIO(etree.tostring(doc_new, method='c14n'))\n xml = etree.parse(buff).getroot()\n\n if not schema_xml:\n schema_pth = resolve_schema(doc_xml)\n\n with open(schema_pth, 'rb') as fh:\n schema_dml = etree.parse(fh)\n\n schema = etree.XMLSchema(schema_xml)\n schema.assertValid(dml)\n\n return True # if no assertion gets thrown above, we can safely assume a `True` validity. 
", "def schemaValidateFile(self, filename, options):\n ret = libxml2mod.xmlSchemaValidateFile(self._o, filename, options)\n return ret", "def SchemaValidate(self, xsd):\n ret = libxml2mod.xmlTextReaderSchemaValidate(self._o, xsd)\n return ret", "def validate(self):\n return self._validate(self.root)", "def validate(self, document) -> None:\n if not self._re.match(document.text):\n raise ValidationError(\n message=self._message, cursor_position=document.cursor_position\n )", "def parse(self, file, validate = False):\n\t\tif _existFile(file) :\n\t\t\tself.doc = parse(file)\n\t\t\tself.documentElement = self.doc.documentElement\n\t\t\tif validate :\n\t\t\t\tif self.validate() :\n\t\t\t\t\treturn True\n\t\t\t\telse :\n\t\t\t\t\tself._enrichXML()\n\t\t\t\t\treturn False\n\t\t\telse :\n\t\t\t\tself._enrichXML()\n\t\t\t\treturn True", "def validate(self, document) -> None:\n path = Path(document.text).expanduser()\n if self._is_file and not path.is_file():\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )\n elif self._is_dir and not path.is_dir():\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )\n elif not path.exists():\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )", "def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise", "def validate(self, *args):\n return _libsbml.SBMLValidator_validate(self, *args)", "def validateXSD(cls,xmlstring,target):\n printMessage(cls,inspect.stack()[0][3],\n \"Validating against '%s' XSD..\"%(target))\n\n curdir = os.path.dirname(globals()['__file__'])\n if target==\"new\":\n xsd=\"%s/../bioinfer.xsd\"%curdir\n elif target==\"relaxed\":\n xsd=\"%s/../bioinfer.relaxed.xsd\"%curdir\n elif target==\"compatible\":\n xsd=\"%s/../bioinfer.relaxed.xsd\"%curdir\n else:\n printError(cls,inspect.stack()[0][3],\"Cannot validate '%s' format\"%target)\n return(False)\n \n doc = L.parseDoc(xmlstring)\n schemaCtxt = L.schemaNewParserCtxt(xsd)\n schema = schemaCtxt.schemaParse()\n validatorCtxt = schema.schemaNewValidCtxt()\n\n exitstatus = validatorCtxt.schemaValidateDoc(doc)\n valid = (exitstatus==0)\n if valid:\n printMessage(cls,inspect.stack()[0][3],\"Valid XML\")\n else:\n printError(cls,inspect.stack()[0][3],\"Invalid XML\")\n return(valid)", "def validate(self, namespace):\n pass", "def validateFromString(cls,xmlstring,target):\n corpus = Corpus()\n if corpus.readFromString(xmlstring):\n return( Validator.validate(corpus,target) ) \n else:\n return(False)", "def parseString(self, xml, validate = False):\n\t\tself.doc = parseString(xml)\n\t\tself.documentElement = self.doc.documentElement\n\t\tif validate :\n\t\t\tif self.validate() :\n\t\t\t\treturn True\n\t\t\telse :\n\t\t\t\tself._enrichXML()\n\t\t\t\treturn False\n\t\telse :\n\t\t\tself._enrichXML()\n\t\t\treturn True", "def relaxNGValidateDoc(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlRelaxNGValidateDoc(self._o, doc__o)\n return ret", "def _check_xml_syntax_error(self):\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if 
self.msg_args:\n return False\n return True", "def validate(self, document) -> None:\n\n # document.text will have value in two cases, after we pressed enter in the prompt or when navigating down\n # the autocomplete commands list. In the second case there is no need to press enter to trigger this method,\n # but in those cases self.validation_type == ''\n typed = document.text\n\n if typed:\n if self.validation_type == \"number\":\n regex = r\"^-?\\d+$\"\n\n if not re.search(regex, typed):\n\n raise ValidationError(\n message=\"Please input a positive or negative number.\"\n )\n elif self.validation_type == \"yes_no\":\n regex = r\"^[yYnN]$\"\n\n if not re.search(regex, typed):\n raise ValidationError(message=\"Please type y, n, Y or N.\")\n elif self.validation_type == \"text_max_len\":\n if len(typed) > 100:\n raise ValidationError(message=\"La oración debe tener menos de 100 caracteres.\")\n else:\n raise ValidationError(message=\"Internal Error: Wrong validation type\")", "def check_xml_constraints(checker, xml_path):\n # Follow the XML spec precicely for the definition of XMLDecl, except for:\n # VersionNum := '1.0'\n # EncName := 'UTF-8'\n # EncodingDecl not optional\n # SDDecl must have 'no'\n RE_XML_S = r'([\\x20\\x09\\x0D\\x0A])'\n RE_XML_Eq = '(' + RE_XML_S + '?=' + RE_XML_S + '?)'\n RE_XML_SDDecl = '(' + RE_XML_S + 'standalone' + RE_XML_Eq + r'(\\'no\\'|\"no\"))'\n RE_XML_EncName = r'(UTF\\-8)'\n RE_XML_EncodingDecl = '(' + RE_XML_S + 'encoding' + RE_XML_Eq + '(\"' + RE_XML_EncName + r'\"|\\'' + RE_XML_EncName + r'\\'))'\n RE_XML_VersionNum = r'(1\\.0)'\n RE_XML_VersionInfo = '(' + RE_XML_S + 'version' + RE_XML_Eq + r'(\\'' + RE_XML_VersionNum + r'\\'|\"' + RE_XML_VersionNum + '\"))'\n RE_XML_XMLDecl = r'<\\?xml' + RE_XML_VersionInfo + RE_XML_EncodingDecl + RE_XML_SDDecl + '?' + RE_XML_S + '?' + r'\\?>'\n\n try:\n with open(xml_path, encoding='utf-8-sig') as file:\n xml_file = file.read()\n newlines = file.newlines\n except IOError as e:\n get_log().error(\"Error opening XML file {} : {}\".format(xml_path, str(e)))\n return\n\n if re.match('\\ufeff', xml_file):\n checker.error(\"BOM not allowed in XML file\", \"constraints_bom\")\n\n if not (re.match(RE_XML_XMLDecl, xml_file) or re.match('\\ufeff' + RE_XML_XMLDecl, xml_file)):\n checker.error(\"Invalid XML Declaration\", \"constraints_declaration\")\n\n # Some files might not have newlines at all (single line)\n if not newlines in ['\\n', '\\r\\n', None]:\n checker.error(\n \"XML file has invalid ending: {}\".format(repr(file.newlines)),\n \"constraints_line_ending\"\n )", "def validateDocumentFinal(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlValidateDocumentFinal(self._o, doc__o)\n return ret", "def validate_file(self, file_location):\n\n with open(file_location, 'r') as f:\n etree_xml = parse(f, allow_file=True)\n\n return self.validate_etree(etree_xml)", "def is_valid(self):\n self.logger.debug(\"In is_valid.\")\n\n document = self._get_raw_doc()\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n (valid, _error_message) = session.get_osdf().validate_node(document)\n\n if 'associated_with' not in self._links.keys():\n valid = False\n\n self.logger.debug(\"Valid? 
%s\", str(valid))\n\n return valid", "def VerifyVersion(root):\r\n global VERSION\r\n global REQUIRED_VERSION\r\n try:\r\n if root.get(\"version\") < REQUIRED_VERSION:\r\n raise XmlVersionError(\"XML Version must be %s or above, we found %s!\" \\\r\n % (REQUIRED_VERSION, root.get(\"version\")))\r\n except KeyError:\r\n raise MalformedXmlError()", "def assertValidXMLResponse(self, resp):\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp['Content-Type'].startswith('application/xml'))\r\n self.assertValidXML(resp.content)", "def isValid(self):\n ret = libxml2mod.xmlParserGetIsValid(self._o)\n return ret", "def _validate_document(self, config):\n if 'gathering_phase' in self.config:\n self._validate_gathering_phase(self.config['gathering_phase'])\n else:\n comm.abort('ERROR: invalid config file',\n 'The required gathering_phase was not in the config', 1)\n\n if 'inclusion_phase' in self.config:\n self._validate_inclusion_phase(self.config['inclusion_phase'])\n\n if 'action_phase' in self.config:\n self._validate_action_phase(self.config['action_phase'])\n else:\n comm.abort('ERROR: invalid config file',\n 'The required action_phase was not in the config', 1)", "def __validate_against_dtd(self):\n # Opens the DTD file and loads it into a lxml DTD object.\n dtd_file = open(self._dtd_filename, 'r')\n dtd_object = etree.DTD(dtd_file)\n\n\n # .validate() checks if the config file complies to the schema. If it doesn't, this condition is entered.\n if not dtd_object.validate(self._config_file):\n dtd_file.close()\n raise ConfigReaderError(\"DTD validation failed on {0}: {1}\".format(self._config_filename,\n dtd_object.error_log.filter_from_errors()[0]))\n\n # If we get here, the validation passed, so we can close the DTD file object.\n dtd_file.close()", "def check_config_xml(self, contents):\n self.log(u\"Checking contents XML config file\")\n self.result = ValidatorResult()\n if self._are_safety_checks_disabled(u\"check_config_xml\"):\n return self.result\n contents = gf.safe_bytes(contents)\n self.log(u\"Checking that contents is well formed\")\n self.check_raw_string(contents, is_bstring=True)\n if not self.result.passed:\n return self.result\n self.log(u\"Checking required parameters for job\")\n job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True)\n self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters)\n if not self.result.passed:\n return self.result\n self.log(u\"Checking required parameters for task\")\n tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False)\n for parameters in tasks_parameters:\n self.log([u\"Checking required parameters for task: '%s'\", parameters])\n self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters)\n if not self.result.passed:\n return self.result\n return self.result", "def validate_kml():\n from pykml.parser import parse\n from optparse import OptionParser\n\n parser = OptionParser(\n usage=\"usage: %prog FILENAME_or_URL\",\n version=\"%prog 0.1\",\n )\n parser.add_option(\"--schema\", dest=\"schema_uri\",\n help=\"URI of the XML Schema Document used for validation\")\n (options, args) = parser.parse_args()\n if len(args) != 1:\n parser.error(\"wrong number of arguments\")\n else:\n uri = args[0]\n\n try:\n # try to open as a file\n fileobject = open(uri)\n except IOError:\n try:\n fileobject = urllib2.urlopen(uri)\n except ValueError:\n raise ValueError('Unable to load URI {0}'.format(uri))\n except:\n raise\n\n doc = parse(fileobject, schema=None)\n\n if 
options.schema_uri:\n schema = Schema(options.schema_uri)\n else:\n # by default, use the OGC base schema\n sys.stdout.write(\"Validating against the default schema: {0}\\n\".format(OGCKML_SCHEMA))\n schema = Schema(OGCKML_SCHEMA)\n\n sys.stdout.write(\"Validating document...\\n\")\n if schema.validate(doc):\n sys.stdout.write(\"Congratulations! The file is valid.\\n\")\n else:\n sys.stdout.write(\"Uh-oh! The KML file is invalid.\\n\")\n sys.stdout.write(schema.assertValid(doc))\n # close the fileobject, if needed\n try:\n fileobject\n except NameError:\n pass #variable was not defined\n else:\n fileobject.close", "def assert_valid(format_id, xml):\n if isinstance(xml, lxml.etree._Element) or isinstance(xml, lxml.etree._ElementTree):\n validate_tree(format_id, xml)\n elif isinstance(xml, bytes):\n validate_bytes(format_id, xml)\n elif isinstance(xml, str):\n validate_path(format_id, xml)\n else:\n raise d1_scimeta.util.SciMetaError(\n \"xml must be a path, bytes or an lxml.etree.\"\n )", "def validate(self, soapmsg):\n return self.xsd_validator.validate(soapmsg.body)", "def is_valid(self):\n self.logger.debug(\"In is_valid.\")\n\n document = self._get_raw_doc()\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n # _error_message is intentionally unused\n (valid, _error_message) = session.get_osdf().validate_node(document)\n\n if 'prepared_from' not in self._links.keys():\n self.logger.error(\"Must have a 'prepared_from' linkage.\")\n valid = False\n\n self.logger.debug(\"Valid? %s\", str(valid))\n\n return valid", "def parse_xml_data(xml_string):\n\n try:\n ET.fromstring(xml_string)\n except ET.ParseError as e:\n logging.error(\"Error unable to parse the XML.\")\n raise XmlValidationError", "def schemaIsValid(self):\n ret = libxml2mod.xmlSchemaIsValid(self._o)\n return ret", "def validate(self):\n return _libsbml.SBMLExternalValidator_validate(self)", "def validateOneElement(self, doc, elem):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o)\n return ret", "def validateElement(self, ctxt, elem):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlValidateElement(ctxt__o, self._o, elem__o)\n return ret", "def validateRoot(self, ctxt):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.xmlValidateRoot(ctxt__o, self._o)\n return ret", "def test_invalid_xml_box(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n jp2k = Jp2k(self._bad_xml_file)\n\n self.assertEqual(jp2k.box[3].box_id, 'xml ')\n self.assertEqual(jp2k.box[3].offset, 77)\n self.assertEqual(jp2k.box[3].length, 28)\n self.assertIsNone(jp2k.box[3].xml)", "def validateDesignspaceDoc(dsDoc, **kwArgs):\n if dsDoc.sources:\n for src in dsDoc.sources:\n if not os.path.exists(src.path):\n raise DesignSpaceDocumentError(\n f\"Source file {src.path} does not exist\")\n else:\n raise DesignSpaceDocumentError(\"Designspace file contains no sources.\")", "def isValid(self):\n return _libsbml.XMLError_isValid(self)", "def validate(tree):\n return rvalidate(tree.root, None, None, None, None, 0, set())", "def __init__(self, xml_path):\n # Load XSD schema for document validation\n self.__schema = xmlschema.XMLSchema('valitest.xsd')\n\n try:\n self.__schema.validate(xml_path)\n self.__xmldoc = xml.etree.ElementTree.parse(xml_path)\n 
self.__xml_path = basename(xml_path)\n\n # pylint: disable-msg=bad-continuation\n except (\n xmlschema.XMLSchemaValidationError,\n xml.etree.ElementTree.ParseError,\n ) as error:\n raise ValueError(error)\n\n # XML is valid, so setid and srclang do exist\n _root = self.__xmldoc.getroot()\n self.__setid = _root.attrib['setid']\n self.__srclang = _root.attrib['srclang']", "def relaxNGValidateDoc(self, ctxt):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.xmlRelaxNGValidateDoc(ctxt__o, self._o)\n return ret", "def test_negative_file_and_xml(self):\n xml_object = ET.parse(self.xmlfile)\n with self.assertRaises((IOError, OSError)):\n glymur.jp2box.XMLBox(filename=self.xmlfile, xml=xml_object)", "def schemaValidateOneElement(self, ctxt):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.xmlSchemaValidateOneElement(ctxt__o, self._o)\n return ret", "def validateFromET(cls,root,target):\n corpus = Corpus()\n if corpus.readFromET(root):\n return( Validator.validate(corpus,target) )\n else:\n return(False)", "def validate_document(validation, document):\n LOG.info(\"oidc-auth-apps: validating %s\", validation['name'])\n if validation['validation'] != 'children':\n LOG.warning(\"oidc-auth-apps: root validation should be\"\n \" children not %s\", validation['validation'])\n result = recurse_validate_document(validation['children'], document)\n if 'accepted' in validation:\n if not validate_accepted(validation['accepted'], document):\n return False\n if validation['optional']:\n LOG.warning(\"oidc-auth-apps: root validation is optional\")\n return True\n return result", "def IsValid(self):\n ret = libxml2mod.xmlTextReaderIsValid(self._o)\n return ret", "def is_good_enough_xml(self, resp):\n content_type = resp.headers['Content-Type'].lower()\n \n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('xml') > -1)", "def _check_for_errors(etree: ET.ElementTree):\n if etree.getroot().tag == 'error':\n raise APIError(etree.getroot().text)", "def validateDtd(self, doc, dtd):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if dtd is None: dtd__o = None\n else: dtd__o = dtd._o\n ret = libxml2mod.xmlValidateDtd(self._o, doc__o, dtd__o)\n return ret", "def xsd_schema_check(file_path):\n\n xsd_path = 'xsd/ASCMHL.xsd'\n xsd = etree.XMLSchema(etree.parse(xsd_path))\n\n # pass a file handle to support the fake file system used in the tests\n file = open(file_path, 'rb')\n result = xsd.validate(etree.parse(file))\n\n if result:\n logger.info(f'validated: {file_path}')\n else:\n logger.error(f'ERROR: {file_path} didn\\'t validate against XSD!')\n logger.info(f'Issues:\\n{xsd.error_log}')\n raise errors.VerificationFailedException", "def validate(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"] and not(settings.skip_permissions):\n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = 
re.compile(\"<(?!/?(problem|usedin|version|authors?|year|topics?|types?|param|deps?|dependency|dependencies|body|solution|rubric|resource))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n if len(string.rstrip(line)) > 80:\n print_warning(\"Line {} longer than 80 characters (has {})\".format(num+1, len(string.rstrip(line))))\n failed = True\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tA literal < can be escaped using \\\"&lt;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&amp;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed.\".format(settings.filename))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n if tree.getroot().tag == 'assignment':\n print_error(\"This looks like an assignment xml file. Did you mean 22edit validate_doc?\")\n exit(1)\n try:\n problem = Problem(settings.filename)\n problem.parse_tree(tree, False)\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n firstProblem = True\n for version in problem.get_versions():\n if not version.standalone and not firstProblem:\n continue\n firstProblem = False\n \n print color(\"\\n\\nVERSION {}:\\n\".format(version.vid),\n color_code(BLUE))\n validate_version(version, failed)", "def schema_valid(arch, **kwargs):\n validator = relaxng(arch.tag)\n if validator and not validator.validate(arch):\n result = True\n for error in validator.error_log:\n _logger.error(tools.ustr(error))\n result = False\n return result\n return True", "def validate_nrml(request):\n xml_text = request.POST.get('xml_text')\n if not xml_text:\n return HttpResponseBadRequest(\n 'Please provide the \"xml_text\" parameter')\n try:\n xml_text = xml_text.replace('\\r\\n', '\\n').replace('\\r', '\\n')\n _do_validate_nrml(xml_text)\n except (HTTPError, ValueError) as e:\n exc = e.args[0]\n return _make_response(error_msg=exc['message'],\n error_line=exc['lineno'],\n valid=False)\n except Exception as exc:\n # get the exception message\n exc_msg = exc.args[0]\n if isinstance(exc_msg, bytes):\n exc_msg = exc_msg.decode('utf-8') # make it a unicode object\n elif isinstance(exc_msg, unicode):\n pass\n else:\n # if it is another kind of object, it is not obvious a priori how\n # to extract the error line from it\n # but we can attempt anyway to extract it\n error_line = _get_error_line(unicode(exc_msg))\n return _make_response(\n error_msg=unicode(exc_msg), error_line=error_line,\n valid=False)\n error_msg = exc_msg\n error_line = _get_error_line(exc_msg)\n return _make_response(\n error_msg=error_msg, error_line=error_line, 
valid=False)\n else:\n return _make_response(error_msg=None, error_line=None, valid=True)", "def validate(validator, document):\n try:\n validator.validate(document)\n except jsonschema.ValidationError as ex:\n raise wsgi_errors.HTTPBadRequestBody(\n '{0}: {1}'.format(ex.args, ex.message)\n )", "def validate(self, document) -> None:\n try:\n if self._float_allowed:\n float(document.text)\n else:\n int(document.text)\n except ValueError:\n raise ValidationError(\n message=self._message, cursor_position=document.cursor_position\n )", "def validate(self, validate):\n libxml2mod.xmlParserSetValidate(self._o, validate)", "def isXML(self):\n return _libsbml.XMLError_isXML(self)", "def is_valid(self, soapmsg):\n return self.xsd_validator.is_valid(soapmsg.body)", "def schemaValidateOneElement(self, elem):\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlSchemaValidateOneElement(self._o, elem__o)\n return ret", "def validate():", "def debugCheckDocument(self, output):\n ret = libxml2mod.xmlDebugCheckDocument(output, self._o)\n return ret", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')" ]
[ "0.77251816", "0.73353755", "0.7165082", "0.71011734", "0.70915216", "0.7084776", "0.6905823", "0.6782283", "0.67535055", "0.6746288", "0.6682557", "0.65831435", "0.65698", "0.6530995", "0.651539", "0.64867383", "0.64745414", "0.6468379", "0.64606", "0.6451013", "0.6378289", "0.63704634", "0.6350096", "0.6300347", "0.6229214", "0.6213598", "0.61862004", "0.61759806", "0.61677855", "0.6144643", "0.6141892", "0.6132827", "0.61111474", "0.61016434", "0.60672724", "0.6061575", "0.60584277", "0.6023172", "0.59539473", "0.59531593", "0.59351236", "0.5902633", "0.5902168", "0.5898643", "0.58972", "0.5892097", "0.5884069", "0.587871", "0.5864888", "0.58555305", "0.5827304", "0.5810235", "0.5777268", "0.57686013", "0.5739816", "0.5726393", "0.5694955", "0.56793547", "0.5669185", "0.56682366", "0.56200117", "0.56174725", "0.56057733", "0.5604135", "0.55845493", "0.5584303", "0.5547346", "0.5545595", "0.5524704", "0.5524545", "0.5504059", "0.54969627", "0.54740185", "0.54486", "0.5419503", "0.5404093", "0.5404073", "0.5394168", "0.539348", "0.53901076", "0.5387825", "0.5374749", "0.53651416", "0.53598905", "0.53501314", "0.5331984", "0.5329215", "0.53171176", "0.5306726", "0.5295068", "0.52927494", "0.5291625", "0.5275573", "0.5258068", "0.52531695", "0.52482826", "0.52346224", "0.5220747", "0.51921517", "0.51779985" ]
0.64024895
20
Get fields for a schema or instance context element.
def get_fields(self, element_node: ElementNode, namespaces: Optional[NamespacesType] = None, decoders: Optional[Tuple[XsdAttribute, ...]] = None) -> IdentityCounterType: fields: List[IdentityFieldItemType] = [] def append_fields() -> None: if isinstance(value, list): fields.append(tuple(value)) elif isinstance(value, bool): fields.append((value, bool)) elif not isinstance(value, float): fields.append(value) elif math.isnan(value): fields.append(('nan', float)) else: fields.append((value, float)) result: Any value: Union[AtomicValueType, None] for k, field in enumerate(self.fields): if field.token is None: msg = f"identity field {field} is not built" raise XMLSchemaNotBuiltError(self, msg) context = XPathContext(element_node) result = field.token.get_results(context) if not result: if decoders is not None and decoders[k] is not None: value = decoders[k].value_constraint if value is not None: if decoders[k].type.root_type.name == XSD_QNAME: value = get_extended_qname(value, namespaces) append_fields() continue if not isinstance(self, XsdKey) or 'ref' in element_node.elem.attrib and \ self.schema.meta_schema is None and self.schema.XSD_VERSION != '1.0': fields.append(None) elif field.target_namespace not in self.maps.namespaces: fields.append(None) else: msg = _("missing key field {0!r} for {1!r}") raise XMLSchemaValueError(msg.format(field.path, self)) elif len(result) == 1: if decoders is None or decoders[k] is None: fields.append(result[0]) else: if decoders[k].type.content_type_label not in ('simple', 'mixed'): msg = _("%r field doesn't have a simple type!") raise XMLSchemaTypeError(msg % field) value = decoders[k].data_value(result[0]) if decoders[k].type.root_type.name == XSD_QNAME: if isinstance(value, str): value = get_extended_qname(value, namespaces) elif isinstance(value, datatypes.QName): value = value.expanded_name append_fields() else: msg = _("%r field selects multiple values!") raise XMLSchemaValueError(msg % field) return tuple(fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_fields(self):\n return self._fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n\n\t\treturn self.__fields", "def get_fields(self):\r\n return self.fields", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def getFields(context, interface=None, annotation=None):\n if interface is None:\n domain_model = proxy.removeSecurityProxy(context.domain_model)\n interface = utils.get_derived_table_schema(domain_model)\n if annotation is None:\n annotation = utils.get_descriptor(interface)\n for field_name in annotation.listing_columns:\n yield interface[field_name]\n # !+FIELD_KEYERROR(mr, jul-2012) throws a KeyError when field_name is \n # not part of the interface e.g. if we use a \"field property\" that is \n # implemented as a domain_model.{property}.", "def get_all_fields(context):\n\n schema = zope.component.getUtility(\n IDexterityFTI, name=context.portal_type).lookupSchema()\n fields = dict((fieldname, schema[fieldname]) for fieldname in schema)\n\n assignable = IBehaviorAssignable(context)\n for behavior in assignable.enumerateBehaviors():\n behavior_schema = behavior.interface\n fields.update((name, behavior_schema[name])\n for name in behavior_schema)\n\n return fields", "def get_fields_in_model(instance):\n assert isinstance(instance, Document)\n return instance._fields", "def fields(self):\n if self._fields is None:\n self._init_fields()\n return self._fields", "def fields(cls):\n if not hasattr(cls, '_fields'):\n cls.parse_attributes()\n return cls._fields", "def get_fields(node):\r\n return dict(iter_fields(node))", "def get_fields(self):\n \n return self.metadata.keys()", "def Fields(self):\n return self._fields", "def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "def listFields(self):\n return self.get_json('/field')", "def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise", "def getEventFields(self):\n return [(x, getattr(self, x)) for x in self._fields]", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def get_fields(self):\n return list(self.metadata.keys())", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def get_fields(cls):\n return cls.fields.values()", "def fields(self) -> Dict[str, Field]:\n return self._fields", "def get_fields(self, request, obj=None):\n if obj:\n return self.fields\n return self.add_fields", "def fields(self):\r\n return self._by_name.iteritems()", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()", "def fields(self) -> Mapping[str, str]:\n return pulumi.get(self, \"fields\")", "def iter_fields(node):\r\n for field in getattr(node, '_fields', ()) or ():\r\n try:\r\n yield field, getattr(node, field)\r\n except AttributeError:\r\n pass", "def get_all_xml_fields(context):\n\n 
fields = get_all_fields(context)\n return [field for field in fields.values() if isinstance(field, (XMLText, XMLBinary, XMLImage))]", "def get_fields(self) -> Iterable[fields.Field]:\n for attr_name in dir(self):\n attr = getattr(self, attr_name)\n if isinstance(attr, fields.Field):\n yield attr", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def fields(self):\r\n pass", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def f(self):\r\n return self.fields()", "def read_field_attributes(self, fieldname):\n return self.read_field(fieldname).attributes", "def __fields(self):\n return [self.__class__.__dict__[f] for f in self.__class__._fields]", "def fields(self) -> List[SingleField]:\n return self._fields", "def fields(proto):\n return [x[0].name for x in proto.ListFields()]", "def get_fields(self):\n fields = []\n for items in self.order_items:\n fields += items.get_fields()\n \n fields = list(set(fields))\n \n field_order = ['recordId', 'orderId', 'itemId', 'collectionId']\n \n out_fields = field_order\n \n for f in fields:\n if f not in field_order:\n out_fields.append(f)\n \n return out_fields", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def input_fields(self):\r\n return self.input.fields", "def fields(cls):\n return cls._nameToValue", "def fields(self):", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def get_fields(self):\n fields = {}\n allowed_types = (\n SerializerMethodField,\n Field,\n Serializer,\n )\n for attr in dir(self):\n if attr == 'data':\n continue\n\n if isinstance(getattr(self, attr), allowed_types):\n fields[attr] = getattr(self, attr)\n\n return fields", "def getFields(iface):\n return getFieldsInOrder(iface)", "def get_fields(ds):\n\n # Get layer\n layer = ds.GetLayer(0)\n # feature.GetFieldCount()\n layer_defn = layer.GetLayerDefn()\n field_names = [layer_defn.GetFieldDefn(i).GetName() for i in range(layer_defn.GetFieldCount())]\n\n return field_names", "def _get_fields(self):\n if self.data is not None:\n return self.begin, self.end, self.data\n else:\n return self.begin, self.end", "def _get_fields(self):\n if self.data is not None:\n return self.begin, self.end, self.data\n else:\n return self.begin, self.end", "async def get_fields(self) -> List[Field]:\n schema = await self.get_schema()\n fields = []\n if schema:\n # The faust-avro parser expects a json-parsed avro schema\n # https://github.com/masterysystems/faust-avro/blob/master/faust_avro/parsers/avro.py#L20\n parsed_schema = self._parse(json.loads(schema))\n for field in parsed_schema.fields:\n fields.append(Field(field.name, field.type.python_type))\n\n return fields", "def fields(self):\n ...", "def get_fields(self):\n for field in self.fields_box.children:\n if isinstance(field, MyTextField):\n yield field", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def _get_fields(self):\n table = self.ui.tableFields\n rows = table.rowCount()\n cols = table.columnCount()\n fields = []\n for i in range(rows):\n fields.append(\n tuple(map(lambda x: table.item(i, x).text(), range(cols)))\n )\n return fields", "def fields(self):\n assert self.is_block()\n assert self.tag () != 
OCamlValue.DOUBLE_ARRAY_TAG # FIXME not implemented\n\n words = self.size_words()\n if words is None:\n return [None]\n\n a = []\n for i in range(int(words)):\n field = self._unsafe_field(i)\n a.append(field)\n if field is None:\n break # Append a single invalid value to indicate out-of-bounds to the user\n return a", "def get_fields(self, dm_name):\n dm = self.get_dm(dm_name)\n return dm['mdmFields']", "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def get_fields(self):\n return self._devices.keys()", "def field_values(self):\n return self.proto.field_values", "def _getFields(obj, tree=None, retval=None, fileobj=None):\n fieldAttributes = {'/FT': 'Field Type', '/Parent': 'Parent', '/T': 'Field Name', '/TU': 'Alternate Field Name',\n '/TM': 'Mapping Name', '/Ff': 'Field Flags', '/V': 'Value', '/DV': 'Default Value'}\n if retval is None:\n retval = OrderedDict()\n catalog = obj.trailer[\"/Root\"]\n # get the AcroForm tree\n if \"/AcroForm\" in catalog:\n tree = catalog[\"/AcroForm\"]\n else:\n return None\n if tree is None:\n return retval\n\n obj._checkKids(tree, retval, fileobj)\n for attr in fieldAttributes:\n if attr in tree:\n # Tree is a field\n obj._buildField(tree, retval, fileobj, fieldAttributes)\n break\n\n if \"/Fields\" in tree:\n fields = tree[\"/Fields\"]\n for f in fields:\n field = f.getObject()\n obj._buildField(field, retval, fileobj, fieldAttributes)\n\n return retval", "def _fields(self, doclet):\n FIELD_TYPES = OrderedDict([('params', _params_formatter),\n ('properties', _params_formatter),\n ('exceptions', _exceptions_formatter),\n ('returns', _returns_formatter)])\n for field_name, callback in iteritems(FIELD_TYPES):\n for field in doclet.get(field_name, []):\n description = field.get('description', '')\n unwrapped = sub(r'[ \\t]*[\\r\\n]+[ \\t]*', ' ', description)\n yield callback(field, unwrapped)", "def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields", "def product_fields(self):\r\n return products.ProductFields(self)", "def get_model_fields(self):\n fields = []\n\n for field in self.model._meta.get_fields():\n fields.append(field.name)\n\n return fields", "def get_fields(model, fields=None):\n include = [f.strip() for f in fields.split(',')] if fields else None\n return utils.get_fields(\n model,\n include\n )", "def get_fields(self):\n field_list = []\n for field in self._meta.local_fields:\n if not field.primary_key:\n field_list.append([field.verbose_name.title(),\n self.__getattribute__(field.name),\n field.get_internal_type()])\n return field_list", "def fields(self) -> Optional[Sequence['outputs.PropertyDefinitionResponse']]:\n return pulumi.get(self, \"fields\")", "def fields(class_or_instance: Union[Type[_DT], _DT]) -> Tuple[Var[Any, Any]]:\n # Might it be worth caching this, per class?\n try:\n fields = getattr(class_or_instance, \"fields\")\n meta = getattr(class_or_instance, \"meta\")\n meta_vars = meta[\"vars\"]\n except AttributeError or KeyError:\n raise TypeError(\"must be called with a declared type or instance\")\n\n # Exclude pseudo-fields. 
Note that fields is sorted by insertion\n # order, so the order of the tuple is as the fields were defined.\n out = []\n for f in fields:\n var = meta_vars[f]\n out.append(var)\n return tuple(out)", "def audit_fields(elem, fields):\r\n errs = []\r\n parsed = {}\r\n for field, field_type, dict_field in fields:\r\n if field not in elem.attrib:\r\n errs.append(('missing value', field))\r\n else:\r\n value = ensure_type(elem.get(field), field_type)\r\n if not value:\r\n errs.append(('wrong type', field))\r\n else:\r\n parsed[dict_field] = value\r\n \r\n if errs:\r\n parsed = None\r\n return parsed, errs", "def GetAllFields(self, run_unsafe=False):\n\n if not (self.inherited_fields_expanded or run_unsafe):\n raise RuntimeError(f'Type {self.typename} has not been expanded')\n if self._all_fields is None:\n tmp = self.local_field_names.copy()\n tmp.update(self.inherited_field_names)\n if run_unsafe:\n return tmp\n self._all_fields = tmp\n return self._all_fields", "def get_fields(self, resource):\n\n def _get_fields_key(resource):\n \"\"\"Returns the fields key from a resource dict\n\n \"\"\"\n if resource['code'] in [HTTP_OK, HTTP_ACCEPTED]:\n if (MODEL_RE.match(resource_id) or\n ANOMALY_RE.match(resource_id)):\n return resource['object']['model']['model_fields']\n elif CLUSTER_RE.match(resource_id):\n return resource['object']['clusters']['fields']\n elif CORRELATION_RE.match(resource_id):\n return resource['object']['correlations']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif LOGISTIC_REGRESSION_RE.match(resource_id):\n return resource['object']['logistic_regression']['fields']\n elif ASSOCIATION_RE.match(resource_id):\n return resource['object']['associations']['fields']\n elif SAMPLE_RE.match(resource_id):\n return dict([(field['id'], field) for field in\n resource['object']['sample']['fields']])\n else:\n return resource['object']['fields']\n return None\n\n if isinstance(resource, dict) and 'resource' in resource:\n resource_id = resource['resource']\n elif (isinstance(resource, basestring) and (\n SOURCE_RE.match(resource) or DATASET_RE.match(resource) or\n MODEL_RE.match(resource) or PREDICTION_RE.match(resource))):\n resource_id = resource\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n else:\n LOGGER.error(\"Wrong resource id\")\n return\n # Tries to extract fields information from resource dict. 
If it fails,\n # a get remote call is used to retrieve the resource by id.\n fields = None\n try:\n fields = _get_fields_key(resource)\n except KeyError:\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n fields = _get_fields_key(resource)\n\n return fields", "def get_field_names(self):\n return {rv[0] for rv in self.iter_fields()}", "def get_fields(self):\n for child in self.children:\n if isinstance(child, MyTextField):\n yield child", "def get_fields():\n return jsonify(result=Tree.fields())", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def list_meta_fields():\n ret = {}\n status, result = _query(action=\"meta\", command=\"fields\")\n root = ET.fromstring(result)\n for field in root:\n field_id = None\n field_ret = {\"name\": field.text}\n for item in field.items():\n field_ret[item[0]] = item[1]\n if item[0] == \"id\":\n field_id = item[1]\n ret[field_id] = field_ret\n return ret", "def fields_dict(self):\n return self._declared_fields", "def get_fields(self):\n \n fields = []\n for order in self.order_lst:\n fields += order.get_fields()\n \n fields = list(set(fields))\n \n out_fields = self.eod.sort_fields(fields)\n \n return out_fields", "def get_fields(\n schema: Union[Config, Schema], types: Union[Type, Tuple[Type]] = None\n) -> List[Tuple[str, BaseField]]:\n\n fields = list(schema._fields.items())\n if isinstance(schema, Config):\n fields += list(schema._schema._fields.items())\n\n if types:\n fields = [item for item in fields if isinstance(item[1], types)]\n return fields", "def get_fields(schema: BaseModel, exclude_dump_only: bool = False) -> dict:\n if hasattr(schema, \"fields\"):\n fields = schema.fields\n elif hasattr(schema, \"_declared_fields\"):\n fields = copy.deepcopy(schema._declared_fields)\n else:\n raise ValueError(\n \"{!r} doesn't have either `fields` or `_declared_fields`.\".format(schema)\n )\n Meta = getattr(schema, \"Meta\", None)\n return filter_excluded_fields(fields, Meta, exclude_dump_only)", "def get_fieldlist(cls):\n return cls.fieldlist", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def get_all_fields(\n schema: Union[Schema, Config]\n) -> List[Tuple[str, Schema, BaseField]]:\n if isinstance(schema, Config):\n schema = schema._schema\n\n ret = []\n prefix = schema._key + \".\" if schema._key else \"\"\n for key, field in schema._fields.items():\n ret.append((prefix + key, schema, field))\n if isinstance(field, Schema):\n ret.extend(\n [\n (prefix + subkey, schema, subfield)\n for subkey, schema, subfield in get_all_fields(field)\n ]\n )\n return ret", "def readAccessedFields(self):\n pass", "def get_proto_fields():\n raise NotImplementedError()", "def get_field_names() -> Sequence[str]:\n raise NotImplementedError", "def get_fields_in_model(instance: Any) -> List:\n from auditlog.registry import auditlog\n\n attrs = object_mapper(instance).iterate_properties\n model_attrs = auditlog.get_model_fields(instance.__class__)\n if model_attrs['include_fields']:\n attrs = (attr for attr in attrs if attr.key in model_attrs['include_fields'])\n if model_attrs['exclude_fields']:\n attrs = (attr for attr in attrs if attr.key not in model_attrs['exclude_fields'])\n\n return attrs", "def get_field_names(self):\n return self._keys", "def get_field_names(self, declared_fields, info):\n return self._requested_fields", "def field_names(self):\n ...", "def _retrieve_fields(self, scope, fields):\r\n if scope == Scope.user_state:\r\n return self._chunked_query(\r\n StudentModule,\r\n 
'module_state_key__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n course_id=self.course_id,\r\n student=self.user.pk,\r\n )\r\n elif scope == Scope.user_state_summary:\r\n return self._chunked_query(\r\n XModuleUserStateSummaryField,\r\n 'usage_id__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.preferences:\r\n return self._chunked_query(\r\n XModuleStudentPrefsField,\r\n 'module_type__in',\r\n set(descriptor.scope_ids.block_type for descriptor in self.descriptors),\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.user_info:\r\n return self._query(\r\n XModuleStudentInfoField,\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n else:\r\n return []", "def get(self):\n return parse_fields(raw=self._get())", "def get_returnable_fields(result, verbose=False):\n check_result(result)\n result_info = get_result(result)\n returnable_fields = result_info[\"returnable_fields\"]\n if verbose:\n pprint(returnable_fields)\n return returnable_fields", "def all_fields(item):\n return scom.all_fields(item)", "def fields(class_or_instance):\n\n # Might it be worth caching this, per class?\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError(\"must be called with a dataclass type or instance\")\n\n return fields_dict", "def objectFields(self):\n raise NotImplementedError", "def fieldsIterator(self):\n for name, field in self.fields.items():\n renderer = self.renderers.get(name)\n if renderer:\n value = renderer(self.instance)\n else:\n value = getattr(self.instance, name)\n yield field.verbose_name, value", "def get_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ('nwr_rev', 'description', 'works')" ]
[ "0.69482595", "0.6679385", "0.6679385", "0.6660547", "0.66575557", "0.6621774", "0.65962017", "0.65450466", "0.65231115", "0.64742976", "0.64686394", "0.6364058", "0.63173836", "0.6307222", "0.62955815", "0.6285253", "0.62815905", "0.62411195", "0.6234588", "0.6227274", "0.62023854", "0.6198303", "0.61835945", "0.61832476", "0.61804104", "0.6130501", "0.6118148", "0.6081832", "0.6079705", "0.6071686", "0.60590136", "0.60539013", "0.59853905", "0.5956627", "0.5949404", "0.5939712", "0.5937226", "0.5932651", "0.59309936", "0.5919275", "0.59147584", "0.5905379", "0.5883258", "0.5847378", "0.58467525", "0.5846653", "0.582409", "0.58151037", "0.58103085", "0.58087134", "0.58087134", "0.57949805", "0.578417", "0.5782764", "0.5778648", "0.57712", "0.5770905", "0.576455", "0.57515764", "0.5747818", "0.5738625", "0.57263297", "0.5722453", "0.57090396", "0.5708652", "0.5706784", "0.5690035", "0.5680561", "0.56405133", "0.56335294", "0.5629985", "0.5624228", "0.5612187", "0.5609566", "0.559908", "0.5598249", "0.5596746", "0.55952567", "0.5594726", "0.5566083", "0.5564089", "0.55593854", "0.5521704", "0.55059123", "0.5502136", "0.55019194", "0.54950726", "0.5491809", "0.5489606", "0.54895437", "0.5489237", "0.5487133", "0.5486337", "0.54818743", "0.5471533", "0.54599446", "0.54570615", "0.5446413", "0.541354", "0.53951114" ]
0.5588493
79
What are the most popular three articles of all time?
def print_top_articles(): create_view_top_articles = ( "CREATE VIEW top_articles AS " + "SELECT COUNT(path) AS num, path " + "FROM log GROUP BY path ORDER BY num DESC;") get_popular_articles_names = ( "SELECT title, num " + "FROM top_articles, articles " + "WHERE top_articles.path = '/article/' || articles.slug limit 3;") print("\nRunning Task: " + print_top_articles.__doc__ + "\n") conn, cur = connect() cur.execute(create_view_top_articles) cur.execute(get_popular_articles_names) results = cur.fetchall() for title, views in results: print('\t{} - {} views'.format(title, views)) disconnect(conn, cur)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def most_popular_articles():\n\n results = query_database(QUERIES[0])\n print('\\nWhat are the most popular three articles of all time?\\n')\n for title, views in results:\n print(' * \"{}\" -- {} views'.format(title, views))", "def most_popular_articles():\n print '1. The most popular articles are...'\n return (\"\"\"SELECT articles.title, COUNT(*) as num FROM articles, log\"\"\"\n \"\"\" WHERE SUBSTRING (log.path FROM 10) = articles.slug and\"\"\"\n \"\"\" log.path != '/' Group By articles.title ORDER By num\"\"\"\n \"\"\" DESC LIMIT 3;\"\"\")", "def three_most_popular_articles():\n\n # To print information\n information_string = '1. The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def get_mostViewedArticles():\n\n query = \"\"\"\n SELECT articles.title,COUNT(*) as views\n FROM articles JOIN log\n ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY articles.title\n ORDER BY views DESC\n LIMIT 3\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWhat are the most popular three articles of all time?')\n for title, views in posts:\n print(title + \" - \" + str(views) + \" views\")", "def popular_articles():\n query = \"\"\"SELECT articles.title,count(*) AS total_views FROM articles,log WHERE log.path like concat('/article/',articles.slug)\n group by articles.title order by total_views desc limit 3\"\"\"\n result = get_data(query)\n print(\" 1. The most popular three articles of all time:\")\n print(\"\")\n for record in result :\n print(' ' + '\\\"' + str(record[0]) + '\\\"' + '-' + ' ' + str(record[1]) + ' '+ 'views')\n print(\" \")", "def get_most_popular_articles():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"select t2.title, count(*) as total from log as t1,articles as t2 where t1.path=concat('/article/',t2.slug) group by t2.title order by total desc limit 3 ;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data", "def top3_articles():\n\n cur.execute(\"\"\"\n SELECT title, COUNT(*) AS article_title\n FROM article_summary\n GROUP BY title\n ORDER BY article_title DESC\n LIMIT 3;\n \"\"\")\n result = cur.fetchall()\n return result", "def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles", "def query_article():\r\n conn, cur = connect()\r\n query1 = (\"select * from article limit 3\")\r\n cur.execute(query1)\r\n res = cur.fetchall()\r\n conn.close()\r\n print (\"\\nThe most popular three articles of all time:\\n\")\r\n for i in range(0, len(res), 1):\r\n print (res[i][0] + \" --> \" + str(res[i][1]) + \" views\")", "def popular_articles():\n db = psycopg2.connect(\"dbname=news\")\n \"\"\"Connect to news database.\"\"\"\n c = db.cursor()\n \"\"\"Open a cursor to perform database operation.\"\"\"\n query = \"\"\"select title, count(path) as view from articles, log \n where '/article/' || articles.slug = log.path group by title, path 
\n order by view desc limit 3;\"\"\"\n \"\"\"The cursor runs query and fetches result.\"\"\"\n c.execute(query)\n \"\"\"Execute query using cursor.\"\"\"\n rows = c.fetchall()\n print \"Most popular three articles of all time: \"\n print \"---------------------------------------- \"\n for row in rows:\n print row[0], \"--\", row[1], \" views\"\n db.close()", "def article_rank():\n db, c = connect(DBNAME)\n c.execute(\"select title, count(title) as views from \\\"pathslug\\\" \"\n \"group by title order by views desc limit 3\")\n article_table = c.fetchall()\n db.close()\n print \"\\nThree Most Popular Articles All Time:\"\n for article in article_table:\n print str(article[0]) + \" - \" + str(article[1]) + \" views\"", "def printTopThreeArticles():\n query = \"\"\"\n SELECT author_article_popularity_view.article,\n author_article_popularity_view.views\n FROM author_article_popularity_view\n LIMIT 3;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop 3 articles of all time: \")\n for i, result in enumerate(results):\n print(\"{}. \\\"{}\\\" - {:,} views\".format(i + 1, result[0], result[1]))", "def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results", "def most_viewed_articles():\n query = \"\"\"\n SELECT articles.title, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n WHERE log.status ='200 OK'\n GROUP BY articles.title ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed articles:\")\n for result in results:\n print '{article} - {count} views'.format(\n article=result[0], count=result[1])", "def print_top_articles():\n\n output = get_query_results(\n '''SELECT articles.title, COUNT(path) AS views\n FROM articles\n JOIN log\n ON log.path=CONCAT('/article/', articles.slug)\n GROUP BY title\n ORDER BY views DESC\n LIMIT 3;'''\n )\n print(\"\\nMost Popular Articles: \\n\")\n for title, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(title, views))", "def get_top_3_articles():\n query1 = \"\"\"select title, count(*) as views\n from articles, log\n where log.path like '%' || articles.slug\n group by title\n order by views desc\n limit 3;\"\"\"\n results = execute_query(query1)\n for result in results:\n print(\"- \\\"%s\\\" — %s views\" % (result[0], result[1]))", "def getPopularArticles():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count (*) as views, title from articles \"\n + \"left join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \"group by title order by views desc limit 3\")\n views = c.fetchall()\n db.close()\n return views", "def print_popular_articles():\n print(\"3 most popular articles\\n\")\n popularity_data = get_query_results(POPULARITY_QUERY)\n article_row_format = '\"{}\" — {} views'\n for title, views in popularity_data:\n print(article_row_format.format(title, views))", "def get_popular_titles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select articles.title, count(*) as views \"\n \"from articles, log \"\n \"where log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by articles.title \"\n \"order by views desc limit 3\")\n results = c.fetchall()\n text_file = 
open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The three most popular articles of all time are:\\n\\n\")\n # for loop to print each article\n for title, views in results:\n text_file.write(\"\\\"\" + title + \"\\\"\" + \" - \" + str(views) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def most_popular_article_authors():\n\n # To print information\n information_string = '2. The most popular article ' \\\n 'authors of all time are:\\n'\n\n # Query string\n query = \"\"\" select x.author , count(1) as qtd from (\n SELECT b.name as author\n FROM articles a join authors b on(a.author = b.id)\n join log c on(c.path = '/article/' ||a.slug)\n ) x group by x.author order by 2 desc;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t' + str(result[0]) + ' - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def top_three_articles():\n query = \"\"\"select articles.title,\n article_path_views.views\n from articles, article_path_views\n where '/article/' || articles.slug = article_path_views.path\n order by views desc\n limit 3\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"The Top Three of Most Viewed Articles:\\n\" + report", "def get_most_popular_authors():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\" select t1.name,count(*) as total from authors as t1, articles as t2,log as t3 where t3.path=concat('/article/',t2.slug) and t1.id=t2.author group by t1.name order by total desc limit 3;\")\n\tdata = c.fetchall()\n\tdb.close()\n\n\treturn data", "def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DESC\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWho are the most popular article authors of all time?')\n for author, views in posts:\n print(author + \" - \" + str(views) + \" views\")", "def print_popular_articles(articles):\n for (title, views) in articles:\n print \"\\\"%s\\\" - %d views\" % (title, views)", "def top_three_articles(cursor):\n top_articles = 'No articles found'\n try:\n cursor.execute(\"\"\"select title, count(*) as hits\n from articles, log\n where path = ('/article/' || slug)\n group by title\n order by hits desc\n limit 3\n \"\"\")\n article_views = cursor.fetchall()\n # If no articles were found, return\n if len(article_views) <= 0:\n return article_views\n\n except psycopg2.Error as e:\n print('Fetching top articles by views: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any articles, return the results.\n else:\n top_articles = 'Top articles by views: \\r\\n'\n for result in article_views:\n top_articles += ' \"{0} - {1} views\"\\r\\n'.format(result[0],\n result[1])\n return top_articles", "def popular_authors() :\n query = \"\"\"SELECT authors.name,count(*) AS total_views FROM authors,articles,log WHERE log.path like concat ('/article/',articles.slug)\n AND articles.author=authors.id group by authors.name order by total_views desc\"\"\"\n result = get_data(query)\n print(\" 2. 
The most popular articles authors of all time:\")\n print(\"\")\n for record in result :\n print(' ' +' ' + str(record[0]) + ' -' + ' ' + str(record[1]) + ' ' +'views')\n print(\" \")", "def get_popular_article():\n query_command = \"SELECT * from popular_posts LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def print_top_articles(popular_articles):\n\n print('\\nThe three top most articles viewed are:\\n')\n for article in popular_articles:\n print(article[0] + '\\t-\\t' + str(article[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def keywords(articles, top_n=25):\n\n # compute term idfs\n token_docs = [lemma_tokenize(clean(a.text)) for a in articles]\n local_term_idf = IDF(token_docs)\n\n token_docs, phrases = extract_phrases(token_docs, [a.text for a in articles], global_term_idf)\n\n titles = [a.title for a in articles]\n title_tokens = [lemma_tokenize(clean(t)) for t in titles]\n term_counts = defaultdict(int)\n for doc in token_docs:\n for t in set(doc):\n if t:\n term_counts[t] += 1\n\n title_terms = set()\n for title_tks in title_tokens:\n title_terms = title_terms | set(title_tks)\n for ph in phrases:\n if any(ph in title.lower() for title in titles):\n title_terms.add(ph)\n\n # Score terms\n term_scores = []\n for t, count in term_counts.items():\n # Ignore numbers, they are very specific to a particular event and\n # introduce noise\n try:\n float(t)\n continue\n except ValueError:\n # TODO This is a troublesome token, not sure why it's not filtered out by\n # IDF. needs more investigation\n if t == 'n\\'t':\n continue\n score = count * (global_term_idf[t] - local_term_idf[t])\n if t in title_terms:\n score *= 1.5\n term_scores.append((t, score))\n\n return sorted(term_scores, key=lambda t: t[1], reverse=True)[:top_n]", "def top_authors():\n\n cur.execute(\"\"\"\n SELECT author, count(*) AS article_author\n FROM article_summary\n GROUP BY author\n ORDER BY article_author DESC;\n \"\"\")\n result = cur.fetchall()\n return result", "def most_popular_authors():\n\n results = query_database(QUERIES[1])\n print('\\nWho are the most popular article authors of all time?\\n')\n for author, views in results:\n print(' * {} -- {} views'.format(author, views))", "def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]", "def most_popular_authors():\n print '2. 
The most popular authors are...'\n return (\"\"\"SELECT authors.name, count(*) as num from\"\"\"\n \"\"\" authors, articles, log WHERE SUBSTRING (log.path FROM 10)\"\"\"\n \"\"\" = articles.slug and articles.author = authors.id and\"\"\"\n \"\"\" log.path != '/' Group By authors.name ORDER by num\"\"\"\n \"\"\" DESC LIMIT 20;\"\"\")", "def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count", "def pubs_by_articles_published( data ) :\n # let's be Pythonic and use counter\n result = [ (k,v) for k,v in Counter([x['SO'] for x in data]).iteritems() ]\n # now sort\n result.sort( lambda a,b : cmp(b[1],a[1]) )\n return result", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select authors.name, count(*) as num \"\n \"from articles, authors, log \"\n \"where articles.author = authors.id \"\n \"and log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by authors.name order by num desc\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The most popular authors of all time are:\\n\\n\")\n # for loop to print each author\n for name, num in results:\n text_file.write(\"\\\"\" + name + \"\\\"\" + \" - \" + str(num) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def top_controversial(self, n):\n return top_movies", "def most_viewed_authors():\n query = \"\"\"\n SELECT authors.name, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n JOIN authors\n ON authors.id = articles.author\n WHERE log.status ='200 OK'\n GROUP BY authors.name ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed authors:\")\n for result in results:\n print '{author} - {count} views'.format(\n author=result[0], count=result[1])", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY 
aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors", "def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # necessary artificial delay when a new article has just been persisted to the datastore\n if update:\n time.sleep(2)\n\n articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')\n articles = list(articles)\n memcache.set(key, articles)\n\n logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))\n return articles", "def most_popular(self, n):\n return popular_tags", "def recommend_for_new_user(titles=False, n_max=10):\n return reader.UserList().get_most_popular_articles(titles=titles)[: n_max]", "def get_mostFrequent(self, n=5):\r\n pass", "def get_mostFrequent(self, n=5):\r\n pass", "def top_articles_by_views(articles, top_x):\n p = PageviewsClient()\n\n # create date string based on previous month\n now = datetime.datetime.now()\n previous_month = str(now.month - 1).zfill(2)\n if previous_month == \"00\": previous_month = \"12\"\n start_date = str(now.year) + previous_month + \"0100\"\n end_date = str(now.year) + previous_month + \"2800\"\n\n # get views\n result = p.article_views('en.wikipedia', articles, \n granularity='monthly', start=start_date, end=end_date)\n # clean results (six is used for backwards compatibility with python 2\n result = six.next(six.itervalues(result))\n sorted_articles = sorted(result.items(), \n key=operator.itemgetter(1), reverse=True)\n return sorted_articles[:top_x]", "def top_keywords(urls, count=10):\n try:\n res = Counter()\n for url in urls:\n res += Counter(get_keyword_dict(url))\n return [w[0] for w in res.most_common(count)]\n except:\n print('Error finding top keywords')", "def get_popular_authors():\n query_command = \"SELECT * from popular authors LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def getMostPlausibleArticleClasses(aSoup, **kwargs):\n if 'minNbrOccurrences' in kwargs:\n regularMin = kwargs.get('minNbrOccurrences')\n # without this line, can cause TypeError for duplicate keyword argument error \n kwargs.pop('minNbrOccurrences')\n else:\n regularMin = 6\n #\n classes,nbrs = filterPossibleArticleRelatedClasses(aSoup, minNbrOccurrences=regularMin, **kwargs)\n try:\n # articles have the same classes for different tags in the hierarchy\n mostReccurentSetOfClassEff = statistics.mode(nbrs)\n except statistics.StatisticsError:\n _dNsEffs = effectif(nbrs)\n _ns = list(_dNsEffs)\n _effs = [_dNsEffs[key] for key in _ns]\n mostReccurentSetOfClassEff = _ns[_effs.index( max(_effs) )]\n articleRelatedClasses = [classes[i] for i,n in enumerate(nbrs) if n==mostReccurentSetOfClassEff]\n articleRelatedClassesOccurrences = [nbrs[i] for i,n in enumerate(nbrs) if n==mostReccurentSetOfClassEff]\n return articleRelatedClasses, articleRelatedClassesOccurrences", "def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n limit,\n date\n )\n\n return res", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def 
popular_authors():\n query = \"\"\"select authors.name, sum(views)\n from authors, articles, article_path_views\n where authors.id = articles.author\n and '/article/' || articles.slug = article_path_views.path\n group by authors.name\n order by sum desc\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"Authors and Their Articles' Total Views:\\n\" + report", "def getBestStories(self):\n source = self.getSource(\"http://news.ycombinator.com/best\")\n stories = self.getStories(source)\n return stories", "def get_popular(lookups):\n return {k: lookups.link_counts[k] for k in lookups.movie_to_idx}", "def dashboard_content_article_tag_cloud():\n tag_stats = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n for tag in result.get('tags', list()):\n tag_stats[tag] = tag_stats.get(tag, 0) + 1\n tags_sorted = sorted(tag_stats.items(), key=operator.itemgetter(1),\n reverse=True)[:50]\n data = list()\n for item in tags_sorted:\n data.append({'name': item[0], 'weight': item[1]})\n return jsonify(data)", "def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists", "def count_words(all_articles):\n total_words = 0\n for title in all_articles:\n total_words += all_articles[title]['word-count']\n print(f\"There are {total_words} words written.\")", "def author_rank():\n db, c = connect(DBNAME)\n c.execute(\"select name, count(name) as views from \\\"authorpath\\\" \"\n \"group by name order by views desc\")\n author_table = c.fetchall()\n db.close()\n print \"\\nMost Popular Article Authors of All Time:\"\n for author in author_table:\n print str(author[0]) + \" - \" + str(author[1]) + \" views\"", "def count_by_author(all_articles):\n author_count = {}\n\n for title in all_articles:\n author = \", \".join(all_articles[title]['authors'])\n if author not in author_count:\n author_count[author] = 1\n else:\n author_count[author] = author_count[author] + 1\n \n print_all_items_in_dict(author_count)", "def topAuthors():\n c = db.cursor()\n c.execute(\"select name, sum(hits) as hits\\\n from authorhits group by name\\\n order by hits desc;\")\n results = c.fetchall()\n c.close()\n return results", "def analyze(url):\n\n #Note : Using the function to count repeated words and sorted by value\n\n print('\\n\\nVisiting',url)\n print('The most 25 common word')\n print('\\n{:30} {:6}\\n'.format('Word','Count'))\n\n content = urlopen(url).read().decode()\n collector = Collector(url)\n collector.feed(content)\n urls = collector.getLinks()\n\n words_lst = collector.getdata()\n print(words_lst)\n # word_count = Counter(words_lst) # use collection\n # most_25_common = word_count.most_common(25) #\n\n word_count = frequency(words_lst)\n sorted_word_count = 
sorted(word_count.items(), key = lambda x : x[1],reverse= True)\n\n for word,count in sorted_word_count[:25]:\n print ('{:30}{:5}'.format(word,count))\n\n #return word_count\n\n # for word,count in most_25_common:\n # print('{:30} {:5}'.format(word,count))\n # return urls", "def article_stats(s_list,subject):\n word_dicts = {languages[0]:{},languages[1]:{}}\n stats = [subject]\n for i,article in enumerate(s_list):\n word_dicts[languages[i]] = get_words(article)\n wc = total_wc(word_dicts[languages[i]])\n stats.append((wc,avg_word_length(article, wc),avg_par_length(article)))\n stats.append(compute_similarity(word_dicts[languages[0]],word_dicts[languages[1]]))\n return stats", "def query1():\n\n print(\"1. What are the most popular three articles of all time? Which \" +\n \"articles have been accessed the most?\\n\")\n\n query = \"\"\"\n SELECT articles.title, subq.hits FROM articles\n LEFT JOIN\n (SELECT COUNT(log.path) AS hits, log.path FROM log\n WHERE log.path LIKE '/article/%'\n AND log.status = '200 OK' AND log.method = 'GET'\n GROUP BY log.path) AS subq\n ON subq.path LIKE '/article/'||articles.slug\n ORDER BY subq.hits DESC LIMIT 3;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"hits\" with comma\n # seperator. Print output.\n j = list(j)\n j[1] = str(format(j[1], ',d'))\n print(\" Title: '{}' - {} views\".format(*j))", "def get_pybites_top_tags_using_feedparser(n=10):\n # TODO: For some reason this function gives one single false count:\n # All counts are according to the tests EXCEPT \"python\". This function\n # gives a count of 78, whereas the tests expect 79.\n # Opening the raw xml file in an editor we see indeed 79 matches for\n # \"<category>python</category>\".\n # Solution: rewrite the function to just do a text search like the text\n # editor. ^-^\n\n feed = feedparser.parse(content)\n tags_counter = Counter()\n for entry in feed.entries:\n for tag in entry.tags:\n tags_counter.update([tag.term])\n return tags_counter.most_common(n)", "def printTopAuthors():\n query = \"\"\"\n SELECT author_article_popularity_view.author,\n SUM(author_article_popularity_view.views) AS total_views\n FROM author_article_popularity_view\n GROUP BY author_article_popularity_view.author\n ORDER BY total_views DESC;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop authors of all time: \")\n for i, result in enumerate(results):\n print(\"{}. 
{} - {:,} views\".format(i + 1, result[0], result[1]))", "def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)", "def popular():\r\n d = data_loader.vid_patient_tuples_dict\r\n most_popular_videos = []\r\n for k in sorted(d, key=lambda k: len(d[k]), reverse=True):\r\n most_popular_videos.append(k)\r\n return most_popular_videos", "def print_popular_authors(authors):\n for (author, views) in authors:\n print \"%s - %d views\" % (author, views)", "def latest_content(request):\n latest_articles = Article.published_articles()[:5]\n latest_comments = Comment.objects.all().order_by('-pub_date')[:5]\n tags = Tag.objects.annotate(num_articles=Count('article')).order_by(\n '-num_articles')\n contributors = Contributor.objects.annotate(\n num_articles=Count('article')).order_by('-num_articles')\n return {'latest_articles': latest_articles,\n 'latest_comments': latest_comments,\n 'tags': tags,\n 'contributors': contributors,\n }", "def most_frequent_train(train_data):\n ### YOUR CODE HERE\n tags_counts_for_each_word = {}\n # Filling a dictionary from words and tag tags to their counters\n # Going over the words and counting their tags appearances\n for sentance in train_data:\n for word, tag in sentance:\n # If first time seeing word, adding it's tags count dictionary\n if word not in tags_counts_for_each_word:\n tags_counts_for_each_word[word] = {}\n # Fetching word tags count dictionary\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n # If tag not in word's tags dictionary, initializing the counter\n if tag not in word_tags_count_dictionary:\n word_tags_count_dictionary[tag] = 0\n # Incrementing word tag counter\n word_tags_count_dictionary[tag] += 1\n \n words_maximal_tags = {}\n # Going over each word and finding it's maximal tag\n for word in tags_counts_for_each_word:\n # Fetching all word tags counts\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n \n maximal_tag, maximal_tag_counter = '', 0\n # Finding word tag with maximal tag counter\n for curent_tag, current_counter in word_tags_count_dictionary.items():\n if current_counter > maximal_tag_counter:\n maximal_tag, maximal_tag_counter = curent_tag, current_counter\n \n # Setting the maximal tag for current word\n words_maximal_tags[word] = maximal_tag\n \n return words_maximal_tags\n ### END CODE HERE", "def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists", "def get_articles(self):\n\t\tarticles = Blog.objects.all()\\\n\t\t\t.filter(publication_date__lte=datetime.date.today())\\\n\t\t\t.order_by('publication_date')\n\t\ti = random.randint(0, articles.count()-1)\n\t\treturn articles, articles[i]", "def process_article(title):\n strings = []\n for lang in languages:\n strings.append(get_page(title,lang))\n return article_stats(strings,title)", "def get_most_popular_posts():\n popular_posts_ids = [post.id for post in Post.objects.popular()]\n return Post.objects.filter(id__in=popular_posts_ids). \\\n add_comments_count(). \\\n prefetch_related('author'). \\\n prefetch_with_tags_and_likes(). 
\\\n order_by('likes_count')", "def get_most_popular_talks_by_views(videos):\r\n return sorted(videos, key=lambda x: int(x.metrics['viewCount']), reverse=True)", "def top_by_ratings(self, n, metric=average):\n return top_movies", "def testArticleCount(self):\n\n self.articleCount(17)", "def top_python_questions(url=cached_so_url):\n content = load_page(url)\n soup = BeautifulSoup(content)\n questions = [(question.select_one('a.question-hyperlink').string.strip(),\n int(question.select_one('span.vote-count-post').string.strip()))\n for question in soup.find_all(class_='question-summary')\n if question.select_one('div.views').string.strip().endswith('m views')]\n return sorted(questions, key=lambda x: -x[1])", "def summary(self, *args, **kwargs):\n article = self.get_object()\n summary_data = self.get_serializer(article).data\n\n keywords = summary_data['keywords']\n related_articles = \\\n Article.objects.filter(Q(keywords__contains=keywords[:1])\n | Q(keywords__contains=keywords[1:2])\n | Q(keywords__contains=keywords[2:3])) \\\n .order_by('-publish_time')[:11] \\\n .values('identifier', 'title', 'images', 'site_name', 'domain', 'publish_time')\n\n related_articles = [related for related in list(related_articles)\n if related['identifier'] != article.identifier]\n\n summary_data['related'] = related_articles\n\n return Response(summary_data)", "def get_top_keywords_from_articles(self, kwords_list):\n _all_keywords = []\n for a in kwords_list:\n if a != []:\n for w in a:\n _all_keywords.append([w['keyword'],w['weight'],w['label']])\n _df_g = pd.DataFrame(_all_keywords, columns=[\"Keyword\", \"Count\",\"Label\"])\n _df_g.sort_values(by=\"Count\", inplace=True, ascending=False)\n _df_g.reset_index(drop=True, inplace=True)\n _df_g.to_csv('test.csv')\n print(len(_df_g))\n\n _df_g['Keyword'] = _df_g['Keyword'].apply(self.remove_repeat_words)\n _df_g.dropna(axis=0, inplace=True)\n p1,p2 = self.pos_taggers(_df_g)\n _df_g['c_POS'] = p1\n _df_g['s_POS'] = p2\n _df_g['c_POS_score'] = _df_g['c_POS'].apply(self.combine_pos_score)\n _df_g['s_POS_score'] = _df_g['s_POS'].apply(self.specific_pos_score)\n _df_g['Count'] = _df_g['Count'] + _df_g['c_POS_score'] + _df_g['s_POS_score'] \n print(len(_df_g))\n _df_g.sort_values(by='Count',inplace=True, ascending=False)\n print(len(_df_g))\n _df_g = _df_g.reset_index(drop=True)\n _df_g = _df_g[:10]\n response_dict = dict()\n response_dict['nc'] = \", \".join(_df_g['Keyword'].to_list())\n return response_dict", "def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words", "def get_top_news_and_the_rest(self):\n queryset = self.news.order_by('-marked', '-publication_date')\n return queryset.first(), queryset[1:]", "def most_words(self, n):\n return big_tags", "def get_pageranks(articles, skip_zeros=False, ambiguous_only=False, ambiguous_forms=set()):\n pageranks = {}\n pagerank_frequency=defaultdict(int)\n\n pr_uniq_sets=defaultdict(set)\n for article in articles:\n for mention in article.entity_mentions:\n if ambiguous_only and mention.mention not in ambiguous_forms:\n continue\n h=int(mention.gold_pr/1)\n if not skip_zeros or h!=0:\n pagerank_frequency[h]+=1\n pr_uniq_sets[h].add(mention.gold_link)\n pageranks[mention.gold_link]=h\n pr_uniq=defaultdict(int)\n for k,v in pr_uniq_sets.items():\n pr_uniq[k]=len(v)\n return 
pagerank_frequency, pr_uniq, pageranks", "def top_python_questions(url=cached_so_url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n ge_1m = []\n lt_1m = []\n\n question_summary = soup.find_all(\"div\", class_=\"question-summary\")\n for question in question_summary:\n views = int(question.find(\"div\", class_=\"views\").get(\"title\").split(\" \")[0].replace(\",\", \"\"))\n _question = question.find(\"a\", class_=\"question-hyperlink\").get_text()\n votes = int(question.find(\"span\", class_=\"vote-count-post\").get_text())\n \n if views >= 1000000:\n ge_1m.append((_question, votes))\n else:\n lt_1m.append((_question, votes))\n\n return sorted(ge_1m, key=lambda x: x[1], reverse=True)", "def get_relevant_articles_tf_idf(self, title, k):\n\n\n inner_product=0\n distances=list()\n for article in self.tf_idf:\n if not article==title:\n angle=self.angle_finder(self.tf_idf[title], self.tf_idf[article])\n distances.append((article, math.acos(angle)))\n distances=sorted(distances, key=lambda tup: tup[1])\n print (distances[:k])\n return distances[:k]", "def get_most_popular_talks_by_like_ratio(videos):\n return sorted(videos, key=get_ratio, reverse=True)", "def num_articles(self):\n\t\treturn len(index)", "def get_top_nationalities(result, n=5):\n nat_freq=pd.DataFrame(result['country'].value_counts())\n ratios=nat_freq[:n]/nat_freq.sum()*100\n res='The most common visitors are from'\n for i in range(0,len(ratios)):\n if i!=len(ratios)-1:\n res=res+f' {ratios.index[i]} ({np.round(ratios.country[i],2)}%),'\n else:\n res=res+f' and {ratios.index[i]} ({np.round(ratios.country[i],2)}%).'\n return res", "def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])", "def print_top_authors():\n\n output = get_query_results(\n '''SELECT authors.name, COUNT(*) AS views\n FROM authors\n JOIN(SELECT articles.title, articles.author FROM articles\n JOIN log ON log.path=CONCAT('/article/', articles.slug))\n AS popular\n ON authors.id=popular.author\n GROUP BY name\n ORDER BY views DESC;'''\n )\n print(\"\\nPopularity of Authors: \\n\")\n for author, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(author, views))", "def top_by_num_of_ratings(self, n):\n return top_movies", "def _get_new_article(pages):\n date = arrow.now().replace(days=-30).format('YYYY-MM-DD')\n pages = [p for p in pages if p.created > date]\n\n skips = [p for p in pages if 'scp' in p.tags and p.rating >= 40]\n tales = [p for p in pages if 'tale' in p.tags and p.rating >= 20]\n goi = [p for p in pages if 'goi-format' in p.tags and p.rating >= 20]\n pages = skips + tales + goi\n\n return random.choice(pages) if pages else None", "def get_next_articles(self):\n\t\tarticles = Blog.objects.all()\\\n\t\t\t.filter(publication_date__lte=datetime.date.today())\\\n\t\t\t.order_by('publication_date')\n\n\t\tif articles.count() <= 4:\n\t\t\treturn articles\n\n\t\ti, j, k, l = random.sample(range(0, articles.count()-1), 4)\n\t\treturn [articles[i], articles[j], articles[k], articles[l]]", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def get_instance_distribution(articles, instance):\n references = defaultdict(int)\n for article in articles:\n for mention in article.entity_mentions:\n form=mention.mention\n meaning=mention.gold_link\n if meaning==instance:\n references[form]+=1\n return sorted(references.items(), key=lambda x: x[1], reverse=True)", "def 
get_top_authors():\n query2 = \"\"\"select name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and log.path like '%' || articles.slug\n group by name\n order by views desc;\"\"\"\n results = execute_query(query2)\n for result in results:\n print(\"- %s — %s views\" % (result[0], result[1]))" ]
[ "0.79686993", "0.7780535", "0.7766619", "0.77628744", "0.77026826", "0.763548", "0.75076354", "0.7505313", "0.7494586", "0.74271536", "0.7414069", "0.71258545", "0.7118688", "0.71109426", "0.70809424", "0.70300716", "0.7005257", "0.6999698", "0.69860226", "0.69293576", "0.69207895", "0.68815297", "0.6821987", "0.67415", "0.6712863", "0.6610579", "0.6595861", "0.6519822", "0.650785", "0.64559275", "0.6432424", "0.642599", "0.6419749", "0.6364293", "0.6325073", "0.6297468", "0.6262455", "0.6190965", "0.6189548", "0.6179389", "0.61489755", "0.61278546", "0.6121388", "0.61165696", "0.6095394", "0.60524464", "0.60524464", "0.6038192", "0.6006129", "0.5969114", "0.59388053", "0.5933762", "0.5921516", "0.5918564", "0.591399", "0.59045583", "0.58623976", "0.5812903", "0.58081406", "0.5805926", "0.5794548", "0.5785871", "0.57809174", "0.5748889", "0.5746972", "0.5742005", "0.5729411", "0.57199377", "0.57147634", "0.5712349", "0.569326", "0.5668295", "0.56613183", "0.5657324", "0.56507546", "0.564738", "0.5620592", "0.5619572", "0.56014645", "0.55914736", "0.55580443", "0.554291", "0.55291337", "0.55205905", "0.5507499", "0.55037326", "0.54910314", "0.5490538", "0.5482488", "0.5475915", "0.54695684", "0.5458695", "0.54565364", "0.5450815", "0.5450248", "0.5449583", "0.54487276", "0.5432427", "0.5432427", "0.54308176", "0.5418464" ]
0.0
-1
Who are the most popular article authors of all time?
def print_top_authors():
    create_view_top_articles = (
        "CREATE VIEW top_articles AS " +
        "SELECT COUNT(path) AS num, path " +
        "FROM log GROUP BY path ORDER BY num DESC;")
    create_view_top_authors = (
        "CREATE VIEW top_authors as " +
        "SELECT sum(num) as views, author " +
        "FROM top_articles, articles " +
        "WHERE top_articles.path LIKE '%' || articles.slug GROUP BY author;")
    get_popular_artists = (
        "SELECT name, views " +
        "FROM authors, top_authors " +
        "WHERE top_authors.author = authors.id ORDER BY views DESC;")
    print("\nRunning Task: " + print_top_authors.__doc__ + "\n")
    conn, cur = connect()
    cur.execute(create_view_top_articles)
    cur.execute(create_view_top_authors)
    cur.execute(get_popular_artists)
    results = cur.fetchall()
    for title, views in results:
        print('\t\"{}\" - {} views'.format(title, views))
    disconnect(conn, cur)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DESC\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWho are the most popular article authors of all time?')\n for author, views in posts:\n print(author + \" - \" + str(views) + \" views\")", "def most_popular_authors():\n\n results = query_database(QUERIES[1])\n print('\\nWho are the most popular article authors of all time?\\n')\n for author, views in results:\n print(' * {} -- {} views'.format(author, views))", "def most_popular_authors():\n print '2. The most popular authors are...'\n return (\"\"\"SELECT authors.name, count(*) as num from\"\"\"\n \"\"\" authors, articles, log WHERE SUBSTRING (log.path FROM 10)\"\"\"\n \"\"\" = articles.slug and articles.author = authors.id and\"\"\"\n \"\"\" log.path != '/' Group By authors.name ORDER by num\"\"\"\n \"\"\" DESC LIMIT 20;\"\"\")", "def most_popular_article_authors():\n\n # To print information\n information_string = '2. The most popular article ' \\\n 'authors of all time are:\\n'\n\n # Query string\n query = \"\"\" select x.author , count(1) as qtd from (\n SELECT b.name as author\n FROM articles a join authors b on(a.author = b.id)\n join log c on(c.path = '/article/' ||a.slug)\n ) x group by x.author order by 2 desc;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t' + str(result[0]) + ' - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def popular_authors() :\n query = \"\"\"SELECT authors.name,count(*) AS total_views FROM authors,articles,log WHERE log.path like concat ('/article/',articles.slug)\n AND articles.author=authors.id group by authors.name order by total_views desc\"\"\"\n result = get_data(query)\n print(\" 2. 
The most popular articles authors of all time:\")\n print(\"\")\n for record in result :\n print(' ' +' ' + str(record[0]) + ' -' + ' ' + str(record[1]) + ' ' +'views')\n print(\" \")", "def top_authors():\n\n cur.execute(\"\"\"\n SELECT author, count(*) AS article_author\n FROM article_summary\n GROUP BY author\n ORDER BY article_author DESC;\n \"\"\")\n result = cur.fetchall()\n return result", "def get_most_popular_authors():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\" select t1.name,count(*) as total from authors as t1, articles as t2,log as t3 where t3.path=concat('/article/',t2.slug) and t1.id=t2.author group by t1.name order by total desc limit 3;\")\n\tdata = c.fetchall()\n\tdb.close()\n\n\treturn data", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors", "def popular_authors():\n query = \"\"\"select authors.name, sum(views)\n from authors, articles, article_path_views\n where authors.id = articles.author\n and '/article/' || articles.slug = article_path_views.path\n group by authors.name\n order by sum desc\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"Authors and Their Articles' Total Views:\\n\" + report", "def author_rank():\n db, c = connect(DBNAME)\n c.execute(\"select name, count(name) as views from \\\"authorpath\\\" \"\n \"group by name order by views desc\")\n author_table = c.fetchall()\n db.close()\n print \"\\nMost Popular Article Authors of All Time:\"\n for author in author_table:\n print str(author[0]) + \" - \" + str(author[1]) + \" views\"", "def most_viewed_authors():\n query = \"\"\"\n SELECT authors.name, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n JOIN authors\n ON authors.id = articles.author\n WHERE log.status ='200 OK'\n GROUP BY authors.name ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed authors:\")\n for result in results:\n print '{author} - {count} views'.format(\n author=result[0], count=result[1])", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select authors.name, count(*) as num \"\n \"from articles, authors, log \"\n \"where articles.author = authors.id \"\n \"and log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by authors.name order by num desc\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The most popular authors of all time are:\\n\\n\")\n # for loop to print each author\n for name, num in results:\n text_file.write(\"\\\"\" + name + \"\\\"\" + \" - \" + str(num) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def print_top_authors():\n\n output = get_query_results(\n '''SELECT authors.name, COUNT(*) AS views\n FROM authors\n JOIN(SELECT articles.title, articles.author FROM articles\n JOIN log ON log.path=CONCAT('/article/', articles.slug))\n AS popular\n ON 
authors.id=popular.author\n GROUP BY name\n ORDER BY views DESC;'''\n )\n print(\"\\nPopularity of Authors: \\n\")\n for author, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(author, views))", "def get_top_authors():\n query2 = \"\"\"select name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and log.path like '%' || articles.slug\n group by name\n order by views desc;\"\"\"\n results = execute_query(query2)\n for result in results:\n print(\"- %s — %s views\" % (result[0], result[1]))", "def print_popular_authors(authors):\n for (author, views) in authors:\n print \"%s - %d views\" % (author, views)", "def printTopAuthors():\n query = \"\"\"\n SELECT author_article_popularity_view.author,\n SUM(author_article_popularity_view.views) AS total_views\n FROM author_article_popularity_view\n GROUP BY author_article_popularity_view.author\n ORDER BY total_views DESC;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop authors of all time: \")\n for i, result in enumerate(results):\n print(\"{}. {} - {:,} views\".format(i + 1, result[0], result[1]))", "def topAuthors():\n c = db.cursor()\n c.execute(\"select name, sum(hits) as hits\\\n from authorhits group by name\\\n order by hits desc;\")\n results = c.fetchall()\n c.close()\n return results", "def most_popular_articles():\n print '1. The most popular articles are...'\n return (\"\"\"SELECT articles.title, COUNT(*) as num FROM articles, log\"\"\"\n \"\"\" WHERE SUBSTRING (log.path FROM 10) = articles.slug and\"\"\"\n \"\"\" log.path != '/' Group By articles.title ORDER By num\"\"\"\n \"\"\" DESC LIMIT 3;\"\"\")", "def most_popular_articles():\n\n results = query_database(QUERIES[0])\n print('\\nWhat are the most popular three articles of all time?\\n')\n for title, views in results:\n print(' * \"{}\" -- {} views'.format(title, views))", "def print_popular_authors():\n print(\"\\nAuthors listed by article views:\\n\")\n views_data = get_query_results(AUTHORS_VIEWS_QUERY)\n author_row_format = '{} - {} views'\n for author, views in views_data:\n print(author_row_format.format(author, views))", "def top_authors(cursor):\n top_auth = 'No authors found.'\n try:\n cursor.execute(\"\"\"\n select name, hits\n from authors, views_by_id as views_by_id\n where id = author\n group by name, hits\n order by hits desc\"\"\")\n\n authors = cursor.fetchall()\n # If no authors were found, return\n if len(authors) <= 0:\n return top_auth\n\n except psycopg2.Error as e:\n print('Fetching authors by popularity: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any authors, return the results.\n else:\n top_auth = 'Top authors by article views: \\r\\n'\n for auth in authors:\n top_auth += ' {} - {} views\\r\\n'.format(auth[0], auth[1])\n return top_auth", "def get_mostViewedArticles():\n\n query = \"\"\"\n SELECT articles.title,COUNT(*) as views\n FROM articles JOIN log\n ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY articles.title\n ORDER BY views DESC\n LIMIT 3\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWhat are the most popular three articles of all time?')\n for title, views in posts:\n print(title + \" - \" + str(views) + \" views\")", "def print_top_articles():\n\n output = get_query_results(\n '''SELECT articles.title, COUNT(path) AS views\n FROM articles\n JOIN log\n ON log.path=CONCAT('/article/', articles.slug)\n GROUP BY title\n ORDER BY views DESC\n LIMIT 3;'''\n )\n print(\"\\nMost Popular Articles: \\n\")\n for 
title, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(title, views))", "def get_popular_authors():\n query_command = \"SELECT * from popular authors LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def popular_articles():\n query = \"\"\"SELECT articles.title,count(*) AS total_views FROM articles,log WHERE log.path like concat('/article/',articles.slug)\n group by articles.title order by total_views desc limit 3\"\"\"\n result = get_data(query)\n print(\" 1. The most popular three articles of all time:\")\n print(\"\")\n for record in result :\n print(' ' + '\\\"' + str(record[0]) + '\\\"' + '-' + ' ' + str(record[1]) + ' '+ 'views')\n print(\" \")", "def get_most_popular_articles():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"select t2.title, count(*) as total from log as t1,articles as t2 where t1.path=concat('/article/',t2.slug) group by t2.title order by total desc limit 3 ;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data", "def most_viewed_articles():\n query = \"\"\"\n SELECT articles.title, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n WHERE log.status ='200 OK'\n GROUP BY articles.title ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed articles:\")\n for result in results:\n print '{article} - {count} views'.format(\n article=result[0], count=result[1])", "def count_by_author(all_articles):\n author_count = {}\n\n for title in all_articles:\n author = \", \".join(all_articles[title]['authors'])\n if author not in author_count:\n author_count[author] = 1\n else:\n author_count[author] = author_count[author] + 1\n \n print_all_items_in_dict(author_count)", "def getPopualrAuthors():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count(*) as views , authors.name from articles \"\n + \" inner join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \" inner join authors on articles.author = authors.id \"\n + \"group by name order by views desc; \")\n authors = c.fetchall()\n db.close()\n return authors", "def article_rank():\n db, c = connect(DBNAME)\n c.execute(\"select title, count(title) as views from \\\"pathslug\\\" \"\n \"group by title order by views desc limit 3\")\n article_table = c.fetchall()\n db.close()\n print \"\\nThree Most Popular Articles All Time:\"\n for article in article_table:\n print str(article[0]) + \" - \" + str(article[1]) + \" views\"", "def print_popular_articles(articles):\n for (title, views) in articles:\n print \"\\\"%s\\\" - %d views\" % (title, views)", "def print_authors(popular_authors):\n\n print('\\nThe list of authors being listed as per their popularity:\\n')\n for author in popular_authors:\n print(author[0] + '\\t-\\t' + str(author[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def popular_articles():\n db = psycopg2.connect(\"dbname=news\")\n \"\"\"Connect to news database.\"\"\"\n c = db.cursor()\n \"\"\"Open a cursor to perform database operation.\"\"\"\n query = \"\"\"select title, count(path) as view from articles, log \n where '/article/' || articles.slug = log.path group by title, path \n order by view desc limit 3;\"\"\"\n \"\"\"The cursor runs query and fetches result.\"\"\"\n c.execute(query)\n \"\"\"Execute query using cursor.\"\"\"\n rows = c.fetchall()\n print \"Most popular three articles of all time: \"\n print \"---------------------------------------- \"\n for row in 
rows:\n print row[0], \"--\", row[1], \" views\"\n db.close()", "def three_most_popular_articles():\n\n # To print information\n information_string = '1. The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def query_article():\r\n conn, cur = connect()\r\n query1 = (\"select * from article limit 3\")\r\n cur.execute(query1)\r\n res = cur.fetchall()\r\n conn.close()\r\n print (\"\\nThe most popular three articles of all time:\\n\")\r\n for i in range(0, len(res), 1):\r\n print (res[i][0] + \" --> \" + str(res[i][1]) + \" views\")", "def recommend_for_new_user(titles=False, n_max=10):\n return reader.UserList().get_most_popular_articles(titles=titles)[: n_max]", "def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles", "def print_popular_articles():\n print(\"3 most popular articles\\n\")\n popularity_data = get_query_results(POPULARITY_QUERY)\n article_row_format = '\"{}\" — {} views'\n for title, views in popularity_data:\n print(article_row_format.format(title, views))", "def top3_articles():\n\n cur.execute(\"\"\"\n SELECT title, COUNT(*) AS article_title\n FROM article_summary\n GROUP BY title\n ORDER BY article_title DESC\n LIMIT 3;\n \"\"\")\n result = cur.fetchall()\n return result", "def print_top_articles(popular_articles):\n\n print('\\nThe three top most articles viewed are:\\n')\n for article in popular_articles:\n print(article[0] + '\\t-\\t' + str(article[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def getPopularArticles():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count (*) as views, title from articles \"\n + \"left join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \"group by title order by views desc limit 3\")\n views = c.fetchall()\n db.close()\n return views", "def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]", "def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count", "def printTopThreeArticles():\n query = \"\"\"\n SELECT author_article_popularity_view.article,\n author_article_popularity_view.views\n FROM author_article_popularity_view\n LIMIT 3;\n \"\"\"\n cursor = 
connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop 3 articles of all time: \")\n for i, result in enumerate(results):\n print(\"{}. \\\"{}\\\" - {:,} views\".format(i + 1, result[0], result[1]))", "def get_popular_titles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select articles.title, count(*) as views \"\n \"from articles, log \"\n \"where log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by articles.title \"\n \"order by views desc limit 3\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The three most popular articles of all time are:\\n\\n\")\n # for loop to print each article\n for title, views in results:\n text_file.write(\"\\\"\" + title + \"\\\"\" + \" - \" + str(views) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results", "def more_popular(twitter_data, a, b):\r\n \r\n a_popularity = len(all_followers(twitter_data, a)) \r\n b_popularity = len(all_followers(twitter_data, b))\r\n if a_popularity > b_popularity:\r\n return -1\r\n if a_popularity < b_popularity:\r\n return 1\r\n return username_first(twitter_data, a, b)", "def _get_sorted_trend_setters(authors: list) -> dict:\n\n trend_setters = {author: authors.count(author) for author in authors}\n top_trend_setters = dict(sorted(\n trend_setters.items(), key=lambda item: item[1], reverse=True\n ))\n return top_trend_setters", "def get_n_authors(soup):\n n_authors = len(soup.find_all(attrs={\"name\":\"Author\"}))\n return(n_authors)", "def more_popular(twitter_data, a, b):\n\n a_popularity = len(all_followers(twitter_data, a))\n b_popularity = len(all_followers(twitter_data, b))\n if a_popularity > b_popularity:\n return -1\n if a_popularity < b_popularity:\n return 1\n return username_first(twitter_data, a, b)", "def author_entity_frequency_and_popularity(self, author_id):\n return self.db.execute(u'''\n SELECT e.entity, author_freq, SUM(e.frequency) AS entity_popularity, years, max_rho\n FROM entities AS e,\n (\n SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho\n FROM entity_occurrences\n WHERE author_id == ? AND rho > ?\n GROUP BY entity\n ) as d_e\n WHERE d_e.entity == e.entity GROUP BY e.entity\n ''', (author_id, DEFAULT_MIN_SCORE)).fetchall()", "def query2():\n\n print(\"2. Who are the most popular article authors of all time?\\n\")\n\n query = \"\"\"\n SELECT authors.name, subq_author.hits FROM authors\n LEFT JOIN\n (SELECT articles.author, CAST(SUM(subq_article.hits) AS INTEGER)\n AS hits FROM articles\n LEFT JOIN\n (SELECT COUNT(log.path) AS hits, log.path FROM log\n WHERE log.path LIKE '/article/%'\n AND log.status = '200 OK' AND log.method = 'GET'\n GROUP BY log.path) AS subq_article\n ON subq_article.path LIKE '/article/'||articles.slug\n GROUP BY articles.author) AS subq_author\n ON authors.id = subq_author.author\n ORDER BY subq_author.hits DESC;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"hits\" with comma\n # seperator. 
Print output.\n j = list(j)\n j[1] = str(format(j[1], ',d'))\n print(\" Author: '{}' - {} views\".format(*j))", "def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')", "def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists", "def nauthors(self):\n return self._nauthors", "def get_top_3_articles():\n query1 = \"\"\"select title, count(*) as views\n from articles, log\n where log.path like '%' || articles.slug\n group by title\n order by views desc\n limit 3;\"\"\"\n results = execute_query(query1)\n for result in results:\n print(\"- \\\"%s\\\" — %s views\" % (result[0], result[1]))", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def get_authors_query():\n\n query = '''select authors.name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and substr(log.path,10)=articles.slug\n group by authors.name order by views desc;'''\n\n return query", "def question_2():\n cursor.execute(mostPopAuthors)\n output = cursor.fetchall()\n return output", "def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists", "def ef_iaf_author(self, author_id):\n total_papers = self.total_papers()\n author_entity_frequency = self.author_entity_frequency_and_popularity(author_id)\n author_papers = self.author_papers_count(author_id)\n return sorted(((\n entity,\n entity_author_freq / float(author_papers),\n log(total_papers/float(entity_popularity)),\n entity_author_freq / float(author_papers) * log(total_papers/float(entity_popularity)),\n max_rho,\n [int(y) for y in years.split(\",\")],\n ) for entity, entity_author_freq, entity_popularity, years, max_rho in author_entity_frequency), key=lambda t: t[3], reverse=True)", "def top_three_articles():\n query = \"\"\"select articles.title,\n article_path_views.views\n from articles, article_path_views\n where '/article/' || articles.slug = article_path_views.path\n order by views desc\n limit 3\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"The Top Three of Most Viewed Articles:\\n\" + report", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "def get_popular_article():\n query_command = \"SELECT * from popular_posts LIMIT 3\"\n query_data = 
run_query(query_command)\n return query_data", "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def top_three_articles(cursor):\n top_articles = 'No articles found'\n try:\n cursor.execute(\"\"\"select title, count(*) as hits\n from articles, log\n where path = ('/article/' || slug)\n group by title\n order by hits desc\n limit 3\n \"\"\")\n article_views = cursor.fetchall()\n # If no articles were found, return\n if len(article_views) <= 0:\n return article_views\n\n except psycopg2.Error as e:\n print('Fetching top articles by views: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any articles, return the results.\n else:\n top_articles = 'Top articles by views: \\r\\n'\n for result in article_views:\n top_articles += ' \"{0} - {1} views\"\\r\\n'.format(result[0],\n result[1])\n return top_articles", "def get_movie_most_nominations(movies: list) -> str:\n pass", "def pubs_by_articles_published( data ) :\n # let's be Pythonic and use counter\n result = [ (k,v) for k,v in Counter([x['SO'] for x in data]).iteritems() ]\n # now sort\n result.sort( lambda a,b : cmp(b[1],a[1]) )\n return result", "def createAuthorArticlePopularityView():\n query = \"\"\"\n CREATE TEMPORARY VIEW author_article_popularity_view AS\n SELECT COUNT(log.path) AS views,\n author_article_view.title AS article,\n author_article_view.author AS author\n FROM author_article_view LEFT JOIN log\n ON log.path LIKE '%' || author_article_view.slug || '%'\n GROUP BY article, author\n ORDER BY views DESC;\n \"\"\"\n connection.cursor().execute(query)", "def get_article_author(self, article_webpage):\n pass", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n limit,\n date\n )\n\n return res", "def directorsOfMostMovies (movies, count):\n directorCounts = {}\n for movieInfo in movies.values():\n key = movieInfo[1]\n if key not in directorCounts:\n directorCounts[key] = 0\n directorCounts[key] += 1\n return sorted([ (v, k) for (k,v) in directorCounts.items() ], reverse=True)[:count]\n # OR:\n # directors = [ x[1] for x in movies.values() ]\n # directorSet = set(directors)\n # return sorted([ (directors.count(d), d) for d in directorSet ], reverse=True)[:count]", "def keywords(articles, top_n=25):\n\n # compute term idfs\n token_docs = [lemma_tokenize(clean(a.text)) for a in articles]\n local_term_idf = IDF(token_docs)\n\n token_docs, phrases = extract_phrases(token_docs, [a.text for a in articles], global_term_idf)\n\n titles = [a.title for a in articles]\n title_tokens = [lemma_tokenize(clean(t)) for t in titles]\n term_counts = defaultdict(int)\n for doc in token_docs:\n for t in set(doc):\n if t:\n 
term_counts[t] += 1\n\n title_terms = set()\n for title_tks in title_tokens:\n title_terms = title_terms | set(title_tks)\n for ph in phrases:\n if any(ph in title.lower() for title in titles):\n title_terms.add(ph)\n\n # Score terms\n term_scores = []\n for t, count in term_counts.items():\n # Ignore numbers, they are very specific to a particular event and\n # introduce noise\n try:\n float(t)\n continue\n except ValueError:\n # TODO This is a troublesome token, not sure why it's not filtered out by\n # IDF. needs more investigation\n if t == 'n\\'t':\n continue\n score = count * (global_term_idf[t] - local_term_idf[t])\n if t in title_terms:\n score *= 1.5\n term_scores.append((t, score))\n\n return sorted(term_scores, key=lambda t: t[1], reverse=True)[:top_n]", "def author_entity_frequency(self, author_id):\n return self.db.execute(u'''\n SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho\n FROM entity_occurrences\n WHERE author_id == ? AND rho > ?\n GROUP BY entity\n ''', (author_id, DEFAULT_MIN_SCORE)).fetchall()", "def AuthorsCount(self, default=None):\n return self.data.get('metadata', {}).get('author_count', default)", "def get_most_popular_posts():\n popular_posts_ids = [post.id for post in Post.objects.popular()]\n return Post.objects.filter(id__in=popular_posts_ids). \\\n add_comments_count(). \\\n prefetch_related('author'). \\\n prefetch_with_tags_and_likes(). \\\n order_by('likes_count')", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def top_controversial(self, n):\n return top_movies", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def most_popular(self, n):\n return popular_tags", "def top_articles_by_views(articles, top_x):\n p = PageviewsClient()\n\n # create date string based on previous month\n now = datetime.datetime.now()\n previous_month = str(now.month - 1).zfill(2)\n if previous_month == \"00\": previous_month = \"12\"\n start_date = str(now.year) + previous_month + \"0100\"\n end_date = str(now.year) + previous_month + \"2800\"\n\n # get views\n result = p.article_views('en.wikipedia', articles, \n granularity='monthly', start=start_date, end=end_date)\n # clean results (six is used for backwards compatibility with python 2\n result = six.next(six.itervalues(result))\n sorted_articles = sorted(result.items(), \n key=operator.itemgetter(1), reverse=True)\n return sorted_articles[:top_x]", "def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = 
self.limits)]\n\t\treturn self.authorList", "def latest_content(request):\n latest_articles = Article.published_articles()[:5]\n latest_comments = Comment.objects.all().order_by('-pub_date')[:5]\n tags = Tag.objects.annotate(num_articles=Count('article')).order_by(\n '-num_articles')\n contributors = Contributor.objects.annotate(\n num_articles=Count('article')).order_by('-num_articles')\n return {'latest_articles': latest_articles,\n 'latest_comments': latest_comments,\n 'tags': tags,\n 'contributors': contributors,\n }", "def get_most_popular_annotations(ambiguous_entity, k=2):\n freq = [(key, len(value)) for key, value in ambiguous_entity.annotated_corpus.items()]\n freq = sorted(freq, key=lambda x: x[1], reverse=True)\n return [x[0] for x in freq[:k]]", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def author_visualisation(self, spam_collection):\n\n spam_author_collection = dict.fromkeys(spam_collection)\n for data, author_set in spam_collection.iteritems():\n for author in author_set:\n spam_author_collection[author] = 1\n\n for data, author_set in spam_collection.iteritems():\n for author in author_set:\n spam_author_collection[author] += 1\n\n spam_list = sorted(spam_author_collection.items(), key=operator.itemgetter(1))\n\n group = []\n values = []\n iterator = 5\n for spam in reversed(spam_list):\n group.append(spam[0])\n values.append(spam[1])\n if iterator == 0:\n break\n iterator -= 1\n\n y_pos = np.arange(len(group))\n\n plt.barh(y_pos, values, align='center', alpha=0.5)\n plt.yticks(y_pos, group)\n plt.xlabel('Number of Spam Comments')\n plt.ylabel('YouTube Author')\n plt.title('Top 5 Spamming Authors \\nin YouTube Comment Corpus')\n\n plt.show()", "def get_word_counts(messages_by_author):\n counters_by_author = {}\n for author in messages_by_author.keys():\n author_counter = Counter()\n for message in messages_by_author[author]:\n author_counter += Counter(get_words(string_to_onlyalpha(message.content)))\n counters_by_author[author] = author_counter\n return counters_by_author", "def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None", "def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret", "def print_top_articles():\n\n create_view_top_articles = (\n \"CREATE VIEW top_articles AS \" +\n \"SELECT COUNT(path) AS num, path \" +\n \"FROM log GROUP BY path ORDER BY num DESC;\")\n get_popular_articles_names = (\n \"SELECT title, num \" +\n \"FROM top_articles, articles \" +\n \"WHERE top_articles.path = '/article/' || articles.slug limit 3;\")\n\n print(\"\\nRunning Task: \" + print_top_articles.__doc__ + \"\\n\")\n\n conn, cur = connect()\n cur.execute(create_view_top_articles)\n cur.execute(get_popular_articles_names)\n results = cur.fetchall()\n\n for title, views in results:\n print('\\t{} - {} views'.format(title, views))\n\n disconnect(conn, cur)", "def get_artists_most_played(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = 
session_.query(Artist).order_by(Artist.plays.desc()).all()\n return artists", "def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # necessary artificial delay when a new article has just been persisted to the datastore\n if update:\n time.sleep(2)\n\n articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')\n articles = list(articles)\n memcache.set(key, articles)\n\n logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))\n return articles", "def most_missed_creators(self, cache_max_age=0):\n expected_items = []\n query = u'CLAIM[195:%s] AND NOCLAIM[170]' % \\\n ',195:'.join(self.collections) # collection\n wd_queryset = wdquery.QuerySet(query)\n\n wd_query = wdquery.WikidataQuery(cacheMaxAge=cache_max_age)\n data = wd_query.query(wd_queryset)\n\n if data.get('status').get('error') == 'OK':\n expected_items = data.get('items')\n\n creator_dict = {}\n counter = 0\n for q_val in expected_items:\n q_item = self.wd.QtoItemPage(q_val)\n data = q_item.get()\n claims = data.get('claims')\n if u'P170' in claims:\n continue\n descr = data.get('descriptions').get('en')\n if descr and descr.startswith(u'painting by '):\n creator = descr[len(u'painting by '):]\n if '(' in creator: # to get rid of disambiguation addition\n creator = creator[:creator.find('(')].strip()\n if creator in creator_dict.keys():\n creator_dict[creator] += 1\n else:\n creator_dict[creator] = 1\n counter += 1\n pywikibot.output(u'Found %d mentions of %d creators' %\n (counter, len(creator_dict)))\n # output\n f = codecs.open(u'creatorHitlist.csv', 'w', 'utf-8')\n for k, v in creator_dict.iteritems():\n f.write(u'%d|%s\\n' % (v, k))\n f.close()", "def get_authors_count(self, institution):\n return self.db.execute(u'''SELECT COUNT(*) FROM authors WHERE institution==?''', (institution,)).fetchall()[0][0]", "def get_most_popular(self):\n\t\tpopular_rated = self.data_final[self.data_final['Rating'] == 10]\n\t\tpopular_jokes = popular_rated.groupby('JokeID').count().reset_index()\n\t\tpopular_jokes = popular_jokes[['JokeID','Rating']]\n\t\tpopular_jokes.columns = ['JokeID','Number_rated10']\n\t\ttop_joke = popular_jokes.sort_values(by=['Number_rated10'], ascending=False).head(1)\n\t\ttop_joke_val = top_joke['JokeID'].values[0]\n\t\tjokes_list = sorted(set(self.data_final['JokeID']))\n\t\tjoke_num = jokes_list.index(top_joke_val)\n\t\ttop_joke_desc = self.data_jokes[self.data_jokes['JokeID'] == top_joke_val].values[0][1]\n\n\t\treturn top_joke_desc, joke_num", "def count_word_usage(counters_by_author, word_list):\n specific_word_counter = {}\n for author in counters_by_author.keys():\n word_counter = Counter()\n for item in counters_by_author[author]:\n for word in word_list:\n if word in item:\n print(item)\n word_counter[word] += counters_by_author[author][item]\n specific_word_counter[author] = word_counter\n return specific_word_counter", "def get_ars(self, author):\n return self.divided[author][:-1]" ]
[ "0.84637415", "0.8395699", "0.8325273", "0.8321845", "0.8157658", "0.80273575", "0.7990549", "0.7792252", "0.77818847", "0.7686615", "0.7682264", "0.76261973", "0.75585544", "0.7522089", "0.7496455", "0.73912114", "0.7381132", "0.735024", "0.73232555", "0.7270634", "0.7169818", "0.7095853", "0.7026694", "0.70194745", "0.69743323", "0.6874296", "0.6849663", "0.6833659", "0.67938805", "0.6748801", "0.6726414", "0.67242295", "0.6662603", "0.6633716", "0.6553412", "0.6537264", "0.6521089", "0.6444699", "0.6391774", "0.63315666", "0.63079435", "0.62826186", "0.62631345", "0.61919844", "0.6156478", "0.61327577", "0.6074733", "0.6030435", "0.6030301", "0.6008949", "0.6004238", "0.5970723", "0.5959741", "0.59556615", "0.59487104", "0.5947171", "0.5936569", "0.5932458", "0.5917469", "0.59126264", "0.5858251", "0.58462816", "0.5837916", "0.580749", "0.57878405", "0.5779664", "0.57781", "0.5770056", "0.5768499", "0.57487106", "0.57287484", "0.57282513", "0.5705889", "0.5703658", "0.56998706", "0.5691021", "0.56820655", "0.5673636", "0.5672209", "0.56129634", "0.56022006", "0.55984503", "0.5589036", "0.5577613", "0.557558", "0.55661345", "0.5565078", "0.55430865", "0.5499661", "0.5481679", "0.5472018", "0.5471405", "0.54623276", "0.54582757", "0.5458091", "0.54483646", "0.54405403", "0.54369015", "0.5436589", "0.5434653" ]
0.61229473
46
On which days did more than 1% of requests lead to errors?
def print_errors():
    create_view_total_requests = (
        "CREATE VIEW total_requests AS " +
        "SELECT date(time), count(status) as count " +
        "FROM log GROUP BY date;")
    create_view_error_requests = (
        "CREATE VIEW error_requests AS " +
        "SELECT date(time), count(status) as count " +
        "FROM log WHERE status LIKE '404%' GROUP BY date;")
    calculate_error_percentage = (
        "SELECT total_requests.date, ROUND(" +
        "(CAST(error_requests.count as decimal)/" +
        "total_requests.count*100.00),2) as percent " +
        "FROM total_requests, error_requests " +
        "WHERE total_requests.date=error_requests.date AND " +
        "(CAST(error_requests.count as decimal)/" +
        "total_requests.count*100.00)>1 ORDER BY percent DESC;")

    print("\nRunning Task: " + print_errors.__doc__ + "\n")

    conn, cur = connect()
    cur.execute(create_view_total_requests)
    cur.execute(create_view_error_requests)
    cur.execute(calculate_error_percentage)
    results = cur.fetchall()

    for result in results:
        print('\t{0:%B %d, %Y} - {1}% errors'.format(result[0], result[1]))

    disconnect(conn, cur)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def days_with_request():\n\n # To print information\n information_string = '3. Days with more than ' \\\n '1% of request that lead to an error:\\n'\n\n # Query string\n query = \"\"\"select * from (select date(time),\n round(100.0*sum(case log.status\n when '200 OK' then 0 else 1 end)/count(log.status),3)\n as error from log group by date(time)\n order by error desc) as subq where error > 1;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t{0:%B %d, %Y} - {1}%'.format(result[0], result[1]))\n\n print(\"\\n\")", "def error_dates():\n\n results = query_database(QUERIES[2])\n print('\\nOn which days did more than 1% of requests lead to errors?\\n')\n for date, rate in results:\n print(' * {} -- {:.2%}'.format(date, rate))", "def find_error_days():\n query = \"\"\"\n SELECT all_requests.day,\n (ROUND((error_requests.bad * 1000)/all_requests.good)/10)\n AS percent\n FROM all_requests\n JOIN error_requests\n ON all_requests.day = error_requests.day\n WHERE ROUND((error_requests.bad * 1000)/all_requests.good) > 10\n \"\"\"\n results = psql_connection(query)\n\n print(\"Days with more than 1% errors\")\n for result in results:\n print '{date} - {errors} % errors'.format(\n date=result[0].strftime('%B %d, %Y'), errors=result[1])", "def one_percent_error_loads():\n print '3. The days where there are more than 1% load error are'\n return (\"\"\"SELECT gday, perc FROM (select date(time) as gday,\"\"\"\n \"\"\" ((count(CASE WHEN status = '404 NOT FOUND' THEN 1\"\"\"\n \"\"\" END)::decimal / count(status)::decimal) * 100.0) as perc\"\"\"\n \"\"\" FROM log GROUP BY gday) as errreq where perc >=1;\"\"\")", "def get_error_days():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # dividing views of bad requests and total request to get percentage\n c.execute(\"select bad_request.time, \"\n \"(bad_request.num * 1.0 / total_request.num) as errors \"\n \"from bad_request, total_request \"\n \"where bad_request.time = total_request.time \"\n \"and (bad_request.num * 1.0 / total_request.num) > 0.01\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"Day(s) where more than 1 percent of requests were errors:\"\n \"\\n\\n\")\n for time, errors in results:\n text_file.write(time.strftime('%B %d, %Y') + \" - \" +\n str(errors * 100)[:3] + \"% errors\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close()", "def get_errorMoreThan1Percent():\n\n query = \"\"\"SELECT * FROM error_Records WHERE percent > 1.00\"\"\"\n\n posts = execute_query(query)\n print('\\nOn which days did more than 1% of requests lead to errors?')\n for i in posts:\n print(str(i[0])+'-'+str(i[1])+'% errors')", "def get_high_error_days():\n query3 = \"\"\"select to_char(date, 'Mon dd, yyyy'),\n error_rate from\n (select a.date, errors, requests,\n round(100.0 * errors / requests, 1) as error_rate\n from\n (select date(time) as date,\n count(*) as errors\n from log\n where status not like '%200%'\n group by date) as a,\n (select date(time) as date,\n count(*) as requests\n from log\n group by date) as b\n where a.date = b.date) as error_rates\n where error_rate >= 1.00;\"\"\"\n results = execute_query(query3)\n for result in results:\n print(\"- %s — %s%% errors\" % (result[0], result[1]))", "def daily_error_gt_1pct(db):\n\n query = \"\"\"\n select day, round(error_pct,2) as error_pct\n from ( select day,\n ( ( sum(occurance) filter(where status != '200 OK')\n / sum(occurance) ) * 100 ) as error_pct\n from ( select to_char(time, 'Month 
DD, YYYY') as day,\n status,\n count(*) as occurance\n from log\n group by day, status\n order by day, occurance desc ) as subq1\n group by day ) as subq\n where subq.error_pct > 1;\n \"\"\"\n\n print('\\nOn which days did more than 1% of requests lead to errors?\\n')\n for row in do_query(db, query):\n print('\\t{} -- {}% errors'.format(row[0], row[1]))", "def error_report():\n db, c = connect(DBNAME)\n c.execute(\"select to_char(time,'FMMonth DD, YYYY') as date, \"\n \"round((sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100,2) \"\n \"as percent_error from log group by date \"\n \"having (sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100 > 1\")\n error_table = c.fetchall()\n db.close()\n print \"\\nDates on Which Over 1% of Requests Led to Errors:\"\n for error in error_table:\n if __name__ == '__main__':\n print str(error[0]) + \" - \" + str(error[1]) + \"%\"", "def query_errors():\r\n conn, cur = connect()\r\n query3 = (\"select * from errors where error >1\")\r\n cur.execute(query3)\r\n res3 = cur.fetchall()\r\n conn.close()\r\n print(\"\\nDays with more than 1% of requests lead to errors:\\n\")\r\n for i in range(0, len(res3), 1):\r\n print(str(res3[i][0]) + \" --> \" + str(round(res3[i][1], 2))+\" %errors\")", "def print_top_error_days():\n\n output = get_query_results(\n '''SELECT date, ROUND(fail*100.0/total, 2) AS percentage\n FROM errors WHERE (fail*100.0/total) > 1\n ORDER BY percentage DESC;'''\n )\n print(\"\\nDays With HTTP Error Rates Over 1%: \\n\")\n for date, rate in output:\n print(\"\\\"{0:%B %d, %Y}\\\" -- {1:}%\".format(date, rate))", "def high_errors():\n\n cur.execute(\"SELECT newdate, percentage FROM stats WHERE percentage > 1;\")\n result = cur.fetchall()\n return result", "def error_days():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n query = \"\"\"select log_dates.log_date as day, \n cast(errors.error_total as float) / cast(log_dates.total as float) \n as percent from log_dates, errors where\n log_dates.log_date = errors.error_date group by day, error_total, total\n having cast(errors.error_total as float) / cast(log_dates.total\n as float) >= .01 order by day asc;\"\"\"\n c.execute(query)\n errors = c.fetchall()\n print \" \"\n print \"Dates with more than 1% error rate:\"\n print \"-----------------------------------\"\n for error in errors:\n print error[0].strftime(\"%B %d , %Y\"), \"--\",\\\n \"{: .2%}\".format(error[1]), \"errors\"\n\n db.close()", "def error_rate():\n query = \"\"\"select to_char(date, 'FMMonth DD, YYYY') as date,\n round(error_req::numeric/total_req*100, 2) as error_rate\n from daily_errorreq_totalreq\n where round(error_req::numeric/total_req*100, 2) > 1.00\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report function\n report = table_to_report(result_table, '%')\n return \"Days Where Over 1% of Requests Leading to Errors:\\n\" + report", "def errDays():\n c = db.cursor()\n c.execute(\"select date, percent from avg_error\\\n where percent > 1.00;\")\n results = c.fetchall()\n c.close()\n return results", "def days_with_error() :\n query = \"\"\"SELECT errorlogs.date,round(100.0*errorcount/logcount,2) As Percent FROM logs,errorlogs\n WHERE logs.date=errorlogs.date AND errorcount>logcount/100\"\"\"\n result = get_data(query)\n print(\" 3. 
Days with more than 1% of error:\")\n print(\"\")\n for record in result :\n print(' ' + str(record[0]) + ' '+ '-' + \" \" + str(record[1]) + '%'+ ' '+ 'errors')\n print(\"\\t\")", "def problem_days(cursor):\n days = 'None found'\n try:\n logs = \"\"\"select daily.day,\n daily_total::integer/100,\n daily_errors::integer,\n daily_total\n from daily_logs as daily, error_logs as errors\n where (daily_total::integer/100.0) <\n daily_errors::integer\n and daily.day = errors.day\n order by daily.day\n \"\"\"\n\n cursor.execute(logs)\n report = cursor.fetchall()\n # If no days were found, return\n if len(report) <= 0:\n return days\n\n except psycopg2.Error as e:\n print('Fetching summary of days with >1% error statuses: \\r\\n{}'\n .format(e.pgerror))\n\n # If the query returns any days, return the results.\n else:\n day_str = ' {0} - {1}% of {2} were errors\\r\\n'\n days = 'Days when over 1% of requests lead to errors: \\r\\n'\n for date in report:\n percentage_error = round((date[2]/date[1]), 2)\n days += day_str.format(date[0].strftime('%d of %B %Y'),\n percentage_error,\n date[3])\n return days", "def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()", "def request_failed(self, ignored):\n self._errors += 1", "def printDaysWithErrors():\n cursor = connection.cursor()\n query = \"\"\"\n SELECT * FROM\n (SELECT daily_error_view.day,\n (daily_error_view.errors * 100.0)\n /\n (daily_traffic_view.views * 100.0)\n AS error_rate\n FROM daily_error_view JOIN daily_traffic_view\n ON daily_error_view.day = daily_traffic_view.day)\n AS daily_error_rate\n WHERE daily_error_rate.error_rate > 0.01;\n \"\"\"\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nDays with greater than 1 percent error rate:\")\n for result in results:\n print(\"{:%B %d, %Y} - {:.2%} errors\".format(result[0], result[1]))", "def test_too_many_requests(self):\n try:\n self._mock_time_series(error=fitbit_exceptions.HTTPTooManyRequests,\n error_attrs={'retry_after_secs': 35})\n except fitbit_exceptions.HTTPTooManyRequests:\n self.assertEqual(sys.exc_info()[1].retry_after_secs, 35)\n else:\n assert False, 'Should have thrown exception'", "def test_bad_period(self):\n self.period = 'bad'\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 104)", "def tracking_error(benchmark_returns_by_date, etf_returns_by_date):\n series_ret=(etf_returns_by_date)-(benchmark_returns_by_date)\n stdev=series_ret.std()\n Tracking_error=np.sqrt(252)*stdev\n assert benchmark_returns_by_date.index.equals(etf_returns_by_date.index)\n \n #TODO: Implement function\n\n return Tracking_error", "def query3():\n\n print(\"3. On which days did more than 1% of requests lead to errors?\\n\")\n\n query = \"\"\"\n SELECT view_daily_requests.date,\n CAST(view_daily_errors.daily_errors AS REAL) /\n CAST(view_daily_requests.daily_requests AS REAL) AS pc\n FROM view_daily_requests\n JOIN view_daily_errors\n ON view_daily_requests.date = view_daily_errors.date\n WHERE CAST(view_daily_errors.daily_errors AS REAL) /\n CAST(view_daily_requests.daily_requests AS REAL) >= 0.01\n ORDER BY pc DESC;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"pc\" as percentage,\n # format date '31 December 2018'. 
Print output.\n j = list(j)\n j[0] = j[0].strftime(\"%d %B %Y\")\n j[1] = str(format(j[1], '%'))\n print(\" Date: {} - {} errors\".format(*j))", "def Daysleftverification():\n pass", "def find_anomaly():\n query_command = \"SELECT day FROM error_log WHERE error_percent > 1.0\"\n query_data = run_query(query_command)\n return query_data", "def clientconnfailrate(self) :\n\t\ttry :\n\t\t\treturn self._clientconnfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def validate_single_request(requestgroup, max_duration_error=15,\n raise_error=True):\n\n is_modified = False\n\n response = requests.post(\n 'https://observe.lco.global/api/requestgroups/validate/',\n headers={'Authorization': 'Token {}'.format(token)},\n json=requestgroup\n )\n\n # Make sure the API call was successful\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as exc:\n print('API call failed: {}'.format(response.content))\n raise exc\n\n requestgroup_dict = response.json()\n\n # If you get an error because your incorrectly estimated the number of\n # exposures, correct it here.\n if len(requestgroup_dict['errors']) >= 1:\n\n if 'non_field_errors' in requestgroup_dict['errors']:\n\n print(42*'-')\n print('GOT ERROR: {}'.\n format(requestgroup_dict['errors']['non_field_errors']))\n print(42*'-')\n\n return np.nan, np.nan\n\n if 'requests' in requestgroup_dict['errors']:\n\n print(42*'-')\n print('GOT ERROR: {}'.\n format(requestgroup_dict['errors']['requests']))\n print(42*'-')\n\n try:\n errmsg = (\n requestgroup_dict['errors']['requests'][0]['non_field_errors'][0]\n )\n except:\n return np.nan, np.nan\n\n\n if 'the target is visible for a maximum of' in errmsg:\n\n # get the strings of durations, and decrement the requested number\n # of exposures by the right multiple!\n sr = search(\"According{}maximum of {} hours \"\n \"within{}your request {} hours. Consider{}\",\n errmsg)\n\n max_dur = float(sr[1])\n req_dur = float(sr[3])\n\n if req_dur == max_dur:\n # {:.1f} formatted strings. genius ._.\n req_dur += 0.01\n\n if not req_dur > max_dur:\n errmsg = (\n 'ERR! max dur: {}, req dur: {}'.format(max_dur, req_dur)\n )\n raise ValueError(errmsg)\n\n diff_dur_sec = (req_dur - max_dur)*60*60\n\n # previously, guessed\n #\n # expcount = np.floor(\n # (endtime-starttime).to(u.hr)\n # /\n # (exptime*u.second + read_time_per_exposure).to(u.hr)\n # )\n #\n # that produced the difference above...\n exptime_sec = (\n requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_time']\n )\n\n expcount = (\n requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']\n )\n\n read_time_per_exposure = 30*u.second # from Bayliss' completed runs\n n_exposures_diff = int(\n np.ceil(diff_dur_sec/\n (exptime_sec + read_time_per_exposure.value)\n )\n )\n\n new_expcount = expcount - n_exposures_diff\n\n print(42*'-')\n print('WRN!: max durn: {} hr, req durn: {} hr. 
had {} exposures, decrement to {}'.\n format(max_dur, req_dur, expcount, new_expcount))\n print(42*'-')\n requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count'] = new_expcount\n\n is_modified = True\n\n return requestgroup, is_modified\n\n else:\n if raise_error:\n raise NotImplementedError('got new API error: {}'.format(errmsg))\n else:\n print('WRN!: Got API error: {}'.format(errmsg))\n print(requestgroup)\n return np.nan, np.nan\n\n billed_durn = (\n requestgroup_dict['request_durations']['requests'][0]['duration']\n )\n\n start = Time(requestgroup['requests'][0]['windows'][0]['start'])\n end = Time(requestgroup['requests'][0]['windows'][0]['end'])\n window_durn = (end - start).value*24*60*60\n\n expcount = (\n requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']\n )\n\n if (window_durn - billed_durn)/60 > max_duration_error:\n\n errmsg = (\n 'ERROR! got a window of {:.2f} min; but tried to bill {:.2f} min.'.\n format(window_durn/60, billed_durn/60)\n )\n\n print(42*'-')\n print(errmsg)\n print(42*'-')\n #import IPython; IPython.embed()\n #raise AssertionError(errmsg) #FIXME\n return np.nan, np.nan\n\n else:\n\n print(42*'-')\n print('ACCEPTED! window durn: {:.2f} min, billed {:.2f} min. had {:d} exposures'.\n format(window_durn/60, billed_durn/60, expcount))\n print(42*'-')\n return requestgroup, is_modified", "def tick_bad_request_counter(self, request):\r\n self.cache_incr(self.get_cache_key(request))", "def get_errorData_query():\n\n query = '''select total_requests.days, errors*100/total_requests as percentage\n from error_requests, total_requests\n where error_requests.days = total_requests.days\n and (errors*100/total_requests > 1);'''\n\n return query", "def get_days_rate():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_days_rate = \"\"\"\n SELECT * FROM (SELECT TO_CHAR(time::date,'Mon DD, YYYY') AS date,\n ROUND((COUNT(status) FILTER (\n WHERE status='404 NOT FOUND'))*100/COUNT(status)::decimal, 2)::text\n ||'% errors' AS rate\n FROM log\n GROUP BY time::date) AS error_rate\n WHERE rate::text > 1::text;\"\"\"\n c.execute(query_days_rate)\n rates = from_db_cursor(c)\n db.close()\n return rates", "def auditnsballocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditnsballocfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def getWorstDays():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select c.* from\"\n + \"(select a.* , b.* , \"\n + \"(cast( b.total as decimal(16,4))/a.total)*100 as percent from\"\n + \" (select count(*) total , time::timestamp::date as timea \"\n + \"from log group by timea order by timea) as a, \"\n + \"(select count(*) total , time::timestamp::date as timea \"\n + \"from log where status <> '200 OK'\"\n + \"group by timea order by timea ) as b \"\n + \"where a.timea = b.timea) as c where c.percent > 1;\")\n days = c.fetchall()\n db.close()\n return days", "def test_latest_total_response_times_pruned(self):\n s = StatsEntry(self.stats, \"/\", \"GET\", use_response_times_cache=True)\n t = int(time.time())\n for i in reversed(range(2, 30)):\n s.response_times_cache[t - i] = CachedResponseTimes(\n response_times={}, num_requests=0\n )\n self.assertEqual(29, len(s.response_times_cache))\n s.log(17, 1337)\n s.last_request_timestamp -= 1\n s.log(1, 1)\n self.assertEqual(20, len(s.response_times_cache))\n self.assertEqual(\n CachedResponseTimes(response_times={17: 1}, num_requests=1),\n s.response_times_cache.popitem(last=True)[1],\n 
)", "def siterequestsrate(self) :\n\t\ttry :\n\t\t\treturn self._siterequestsrate\n\t\texcept Exception as e:\n\t\t\traise e", "async def _calculate_remaining_requests(self, request_id: int) -> int:\n raise NotImplementedError()", "def default_too_many_requests_handler(response: Response) -> float:\n util = Util()\n # the Retry-After value can be a str/int/float. as a string\n # it can be a full date (e.g., \"2022-03-26T00:00:00Z\")\n retry_after = response.headers.get('Retry-After', 0)\n try:\n # always convert value to seconds if possible\n seconds = util.any_to_datetime(retry_after).timestamp() - time.time()\n except RuntimeError:\n # retry_after must be in seconds\n seconds = retry_after\n\n # handle negative values\n if isinstance(seconds, float | int) and seconds < 0:\n seconds = retry_after\n\n return float(seconds)", "def get_failure_rate(self) -> float:\n return self.failurerate", "def model_error(self):\n return self.premium() / self.data['premium'] - 1", "def test_get_daily_change_log(self):\n msg = \"Response status is not 200\"\n response = self.api.get_daily_change_log(self.year, self.month, self.day)\n self.assertEqual(response.status_code, 200, msg)", "def test_error_noted_in_response_if_meter_has_overlapping_readings(self):\n dup_import_record = ImportRecord.objects.create(owner=self.user, last_modified_by=self.user, super_organization=self.org)\n dup_filename = \"example-pm-monthly-meter-usage-1-dup.xlsx\"\n dup_filepath = os.path.dirname(os.path.abspath(__file__)) + \"/../data_importer/tests/data/\" + dup_filename\n\n dup_file = ImportFile.objects.create(\n import_record=dup_import_record,\n source_type=SEED_DATA_SOURCES[PORTFOLIO_METER_USAGE][1],\n uploaded_filename=dup_filename,\n file=SimpleUploadedFile(\n name=dup_filename,\n content=pathlib.Path(dup_filepath).read_bytes()\n ),\n cycle=self.cycle\n )\n\n url = reverse(\"api:v3:import_files-start-save-data\", args=[dup_file.id])\n url += f'?organization_id={self.org.pk}'\n post_params = {\n 'cycle_id': self.cycle.pk,\n }\n response = self.client.post(url, post_params)\n\n total_meters_count = Meter.objects.count()\n\n result_summary = json.loads(response.content)\n\n expected_import_summary = [\n {\n \"property_id\": self.property_1.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766973\",\n \"source_id\": \"5766973-0\",\n \"type\": \"Electric - Grid\",\n \"incoming\": 2,\n \"successfully_imported\": 2,\n \"errors\": \"\",\n },\n {\n \"property_id\": self.property_1.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766973\",\n \"source_id\": \"5766973-1\",\n \"type\": \"Natural Gas\",\n \"incoming\": 2,\n \"successfully_imported\": 2,\n \"errors\": \"\",\n },\n {\n \"property_id\": self.property_2.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766975\",\n \"source_id\": \"5766975-0\",\n \"type\": \"Electric - Grid\",\n \"incoming\": 4,\n \"successfully_imported\": 0,\n \"errors\": \"Overlapping readings.\",\n },\n {\n \"property_id\": self.property_2.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766975\",\n \"source_id\": \"5766975-1\",\n \"type\": \"Natural Gas\",\n \"incoming\": 4,\n \"successfully_imported\": 0,\n \"errors\": \"Overlapping readings.\",\n },\n ]\n\n self.assertCountEqual(result_summary['message'], expected_import_summary)\n self.assertEqual(total_meters_count, 2)", "def _get_resends(self):\n if not self.has_error():\n return []\n\n errors = []\n i = 0\n for item in self.my_json['results']:\n if item.has_key('error') and item['error'] == 
'Unavailable':\n errors.append((i, item['error']))\n i += 1\n return errors", "def get_error_rates():\n return [ERR_RT * i for i in range(int((1 / ERR_RT) / 4))] # error up to 25%", "def is_rate_limit_exceeded(self, request):\r\n counts = self.get_counters(request)\r\n return sum(counts.values()) >= self.requests", "def test_bad_base_date(self):\n self.base_date = 'bad'\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 104)", "def auditportallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def systcpconnfailrate(self) :\n\t\ttry :\n\t\t\treturn self._systcpconnfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def n_subimissions_per_day( url, headers ):", "async def test_source_up_to_dateness(self):\n response = await self.collect(get_request_json_return_value={\"timestamp\": \"1565284457173\"})\n expected_age = days_ago(datetime_fromtimestamp(1565284457173 / 1000.0))\n self.assert_measurement(response, value=str(expected_age))", "def test_requests_are_throttled(self):\n request = self.factory.get('/')\n for dummy in range(4):\n response = MockView.as_view()(request)\n assert response.status_code == 429", "def auditcontextnotfoundrate(self) :\n\t\ttry :\n\t\t\treturn self._auditcontextnotfoundrate\n\t\texcept Exception as e:\n\t\t\traise e", "def remaining_requests(self):\n try:\n return self._get_limit('Remaining')\n except ValueError:\n logging.error(\n \"Unable to gather limit statistics until log() has been called. Returning -1\")\n return -1", "def suspected_per_hour(self):\r\n return (3600.*(self.circ_suspected+self.strm_suspected\r\n +self.circ_failed+self.strm_failed))/self.current_uptime()", "def onecall(method, url, results, **options):\n start = time.time()\n\n try:\n res = method(url, **options)\n except RequestException as exc:\n results.errors.append(exc)\n else:\n duration = time.time() - start\n results.all_res.append(duration)\n # results.status_code_counter[res.status_code].append(duration)", "def test_refetch_precomputed_error():\n ident1 = _id()\n ident2 = _id()\n result1 = proj.fetch('test', ident1)\n result2 = proj.fetch('test', ident2, args={'throw_error': True})\n assert result1.status == 'complete'\n assert result1.start_time <= result1.end_time\n assert result2.status == 'error'\n assert result2.start_time <= result2.end_time", "def check_active_requests():\n\n active_requests = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='waiting'\")\n for request in active_requests:\n\n\t# Check requested status \n\tif DownloaderSPAN512.check_request_done(request):\n\t dlm_cout.outs(\"Restore (GUID: %s) has succeeded. Will create file entries.\\n\" % request['guid'])\n\t create_file_entries(request)\n\n\telse:\n#\t dlm_cout.outs(\"Request (GUID: %s) has failed.\\n\" \\\n#\t \"\\tDatabase failed to report the data as restored.\" % request['guid'])\n#\t jobtracker.query(\"UPDATE requests SET status='failed', \" \\\n# \"details='Request failed. Why ?', \" \\\n# \"updated_at='%s' \" \\\n# \"WHERE guid='%s'\" % (jobtracker.nowstr(), request['guid']))\n\n query = \"SELECT (TO_SECONDS('%s')-TO_SECONDS(created_at)) \" \\\n \"AS deltaT_seconds \" \\\n \"FROM requests \" \\\n \"WHERE guid='%s'\" % \\\n (jobtracker.nowstr(), request['guid'])\n row = jobtracker.query(query, fetchone=True)\n #if row['deltaT_seconds']/3600. > config.download.request_timeout:\n if row/3600. 
> config.download.request_timeout:\n dlm_cout.outs(\"Restore (GUID: %s) is over %d hr old \" \\\n \"and still not ready. Marking \" \\\n \"it as failed.\" % \\\n (request['guid'], config.download.request_timeout))\n jobtracker.query(\"UPDATE requests \" \\\n \"SET status='failed', \" \\\n \"details='Request took too long (> %d hr)', \" \\\n \"updated_at='%s' \" \\\n \"WHERE guid='%s'\" % \\\n (config.download.request_timeout, jobtracker.nowstr(), \\\n request['guid']))", "def test_yearly_report_error(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n year = 'sdfg'\n res = self.client().get(f'/yearly_report?year={year}', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], f'The date {year} does not match the format YYYY')", "def min_failing_periods_to_alert(self) -> float:\n return pulumi.get(self, \"min_failing_periods_to_alert\")", "def check_api_use_rate():\n with open('api_use.csv', 'r') as api_use_file:\n csv_reader = csv.reader(api_use_file)\n last_date_used_unparsed, times_used_since_last_reset_unparsed = next(csv_reader)\n\n month, day, year, hour, minute = [int(item)\n for item in last_date_used_unparsed.split(\"/\")\n ]\n\n last_time_used = datetime.datetime(year, month, day, hour, minute)\n times_used_since_last_reset = int(times_used_since_last_reset_unparsed)\n\n current_time = datetime.datetime.now()\n\n time_since_last_use = current_time - last_time_used\n seconds_since_last_use = time_since_last_use.seconds\n\n # if it hasn't been ten minutes since the last time you used it\n if seconds_since_last_use < 460:\n # if it hasn't been used more than 8 times\n if times_used_since_last_reset < 9:\n # update last time use and times used\n times_used_since_last_reset += 1\n last_time_used = current_time\n print(\"You can use the api\")\n print(\"You have {} uses remaining and {} minutes before the reset\".format(\n 10 - times_used_since_last_reset, (460 - seconds_since_last_use) / 60.0\n ))\n update_tracker(last_time_used, times_used_since_last_reset)\n return True\n # if it has been used 8 times in the last ten minutes\n elif times_used_since_last_reset >= 9:\n print(\"Warning you have used the api {} times in 10 minutes.\".format(\n times_used_since_last_reset))\n return False\n # if it has been more than 9 minutes you are good to go\n elif seconds_since_last_use >= 460:\n # okay to use. reset current time and times used\n times_used_since_last_reset = 1\n last_time_used = current_time\n print(\"It's been more than 9 minutes since last use. 
You are good to go\")\n update_tracker(last_time_used, times_used_since_last_reset)\n return True", "def test_GET_startdate_error(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1122, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n date = '12sjfnj'\n resl = self.client().get(f'/expenses/?start_date={date}', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(resl.status_code, 400)\n results = json.loads(resl.data)\n self.assertEqual(results['message'], f'The date {date} does not match the format DD-MM-YYYY')", "def get_most_errors():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"create IF NOT EXISTS view totalviews as select time::date as date,count(*) as total from log group by date;\")\n\tc.execute(\"create IF NOT EXISTS view errorviews as select time::date as date,count(*) as total from log where status <> '200 OK' group by date;\")\n\tc.execute(\"select t1.date, round(t1.total::numeric/t2.total::numeric,3) as err from errorviews as t1,totalviews as t2 where t1.date=t2.date and round(t1.total::numeric/t2.total::numeric,3)>0.01 order by err desc;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data", "def files_errored_out(self) -> float:\n return pulumi.get(self, \"files_errored_out\")", "def _retry_occurred(self):", "def test_datetime_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError(None, None, None, None, None)\n self.assertEqual(datetime.datetime.min, self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def test_since(self):\n self.create_logs(self.user1, num=50, start=self.year2000)\n self.create_logs(self.user1, num=50, start=self.year2001)\n\n response = self.client.get(telemetry_url, {\n 'since': self.year2001.isoformat(),\n })\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n # since is non-inclusive, so the first entry is not included\n self.assertEqual(49, len(data))\n\n for entry in data:\n time = iso8601.parse_date(entry['timestamp'])\n self.assertGreater(time, self.year2001)", "def anomaly(self):\n return self._anomaly(result_count=1, failure_amount=1)", "def calc_error_dist(self):\n pass", "def auditmemallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def gc_requests(self):\r\n\t\tnow = time.time()\r\n\t\tif (now - self.last_gc) > self.gc_interval: \r\n\t\t\tprint('now: {0}'.format(now))\r\n\t\t\tfor req in self.requests:\r\n\t\t\t\tprint('req.timestamp: {0}'.format(req.timestamp))\r\n\t\t\t\tprint('diff: {0}'.format(now - req.timestamp))\r\n\t\t\tlive = [req for req in self.requests if (now - req.timestamp) < self.max_request_refresh_interval]\r\n\t\t\tdead = [req for req in self.requests if (now - req.timestamp) >= self.max_request_refresh_interval]\r\n\r\n\t\t\tif len(dead):\r\n\t\t\t\tself.requests = live\r\n\t\t\t\tself.last_modified = now\r\n\r\n\t\t\tself.last_gc = time.time()", "def _check_response_for_request_errors(self):\r\n if self.response.HighestSeverity == 
\"ERROR\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"ERROR\":\r\n raise FedexError(notification.Code,\r\n notification.Message)", "def sample_500_response():\n response = requests.get(\"https://google.com\")\n response.status_code = 500\n return response", "def _clean_outdated(self):\n now = _now()\n outdated = []\n for request_no, request_info in self._current_requests.items():\n if now - request_info.start_time > self._force_clean_after:\n outdated.append(request_no)\n if outdated:\n logging.error(\"There are {} requests which were started but haven't \"\n \"been finished in more than {}s.\"\n .format(len(outdated), self._force_clean_after))\n for request_no in outdated:\n del self._current_requests[request_no]\n self._last_autoclean_time = now", "def print_error_data(error_data):\n\n print('\\nDays when there were more than 1% errors in HTTP :\\n')\n for day in error_data:\n print(str(day[0]) + '\\t-\\t' + str(day[1]) + '% \\n')\n print('-------------------------------------------------------\\n')", "def find_error_dates(conn: sqlite3.Connection):\n\n curr = conn.cursor()\n\n # Daily\n querystr = '''\n CREATE TABLE error_days_d AS\n SELECT DISTINCT station, read_date\n FROM daily_raw dr \n WHERE flag1 IN ('I','P')\n '''\n curr.execute(querystr)\n\n querystr = '''\n CREATE INDEX edd_idx\n ON error_days_d\n (station, read_date)\n '''\n curr.execute(querystr)\n\n # Hourly\n\n # Based on simple flags\n\n querystr = '''\n CREATE TABLE error_days_h AS\n SELECT DISTINCT station, read_date\n FROM hourly_raw hr \n WHERE flag1 IN ('[',']','{','}')\n OR flag2 IN ('Q','q')\n '''\n curr.execute(querystr)\n\n querystr = '''\n CREATE INDEX edh_idx\n ON error_days_h\n (station, read_date)\n '''\n curr.execute(querystr)\n\n # Based on accumulation flags\n\n querystr = '''\n CREATE TABLE error_days_ha AS\n SELECT station, read_date, COUNT(1) AS count1\n FROM hourly_raw hr \n WHERE flag1 in ('a', 'A')\n GROUP BY station, read_date \n '''\n curr.execute(querystr)\n\n querystr = '''\n CREATE INDEX edha_idx\n ON error_days_ha\n (station, read_date)\n '''\n curr.execute(querystr)\n\n conn.commit()", "def gc_requests(self):\n\t\tnow = time.time()\n\t\tif (now - self.last_gc) > self.gc_interval: \n\t\t\tprint('now: {0}'.format(now))\n\t\t\tfor req in self.requests:\n\t\t\t\tprint('req.timestamp: {0}'.format(req.timestamp))\n\t\t\t\tprint('diff: {0}'.format(now - req.timestamp))\n\t\t\tlive = [req for req in self.requests if (now - req.timestamp) < self.max_request_refresh_interval]\n\t\t\tdead = [req for req in self.requests if (now - req.timestamp) >= self.max_request_refresh_interval]\n\n\t\t\tif len(dead):\n\t\t\t\tself.requests = live\n\t\t\t\tself.last_modified = now\n\n\t\t\tself.last_gc = time.time()", "def error_percentage():\n error_percentage = Statistics.error_percentage()\n error = [{'label': 'complete', 'y': \"{0:.1f}\".format(\n (1 - error_percentage) * 100)}]\n if error_percentage > 0:\n error.append(\n {'label': 'error', 'y': \"{0:.1f}\".format(error_percentage * 100)})\n return jsonify(result=error)", "def _badness(self, time):\n return (time - self.expected_time)**2", "def auditlog32errsyslogallocnsbfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditlog32errsyslogallocnsbfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def call(url):\n 
result = requests.get(url)\n if 300 <= result.status_code < 400:\n raise TemporaryException\n if result.status_code == 429:\n raise ApiCountZeroException\n if 400 <= result.status_code < 600:\n raise PermanentException\n return result", "def time_to_failure():\n return int(random.expovariate(BREAK_MEAN))\n #return MTBF", "def getRetryCount():\n return int(webapp2.get_request().headers.get('X-Appengine-TaskRetryCount', 0))", "def log_failure(self, request):\n self.log_file.write(self.TYPE_FAILURE + \",%f,,,%f,,\\n\" %\n (float(request.resources[0]['amount']),\n float(request.offer)))", "def req_foo(url):\n print(f'{url} - start request')\n st = datetime.now()\n try:\n r = requests.get(url)\n print(f'INFO: {url} - GET status {r.status_code} | time: {datetime.now() - st}')\n return r\n except requests.ConnectionError:\n print(f'ERROR: {url} - Connection error | time: {datetime.now() - st}')\n except Exception:\n print(f'ERROR: {url} - Unknown error | time: {datetime.now() - st}')\n return None", "def reliable_request(href):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\n\n done = False\n time_sleep = 5\n while not done:\n try:\n resp = requests.get( href, headers = headers )\n except Exception as e:\n print('connection error', e)\n time.sleep(time_sleep)\n time_sleep += 3 \n else:\n done = True\n print(resp)\n \n return resp", "def audit_failed(self):\n\n return self.__failed", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def failed_on(self):\n return self._failed_on", "def test_http_speed(self):\n log.msg(\"timing retrival time for %s\"\n %self.http_url)\n def got_response(body):\n self.report['http_response_time'] = (datetime.now() - self.http_request_start_time).total_seconds()\n self.report['http_success'] = True\n log.msg(\"Successful http request\")\n\n self.http_request_start_time = datetime.now()\n return self.doRequest(self.http_url, method=\"GET\", \n body_processor=got_response)", "def siterequestbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._siterequestbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def sitetotalrequests(self) :\n\t\ttry :\n\t\t\treturn self._sitetotalrequests\n\t\texcept Exception as e:\n\t\t\traise e", "def calculate_request_threshold(self, requests_per_second):\n request_threshold = 1.0 / float(requests_per_second)\n return request_threshold", "def calculate_reliability(data):\n\n\tsuccess_ratio = data['success'].sum() * 1.0 / len(data)\n\tprint \"Reliability: {}\".format(success_ratio)", "def test_sleep_request(self):\n date = datetime.now() - timedelta(minutes=14)\n RequestAPI.objects.create(total_request=450, date=date)\n start = time.time()\n ManagerRequestApiTwitter().handle_rate_limit()\n stop = time.time()\n total_time = stop - start\n self.assertGreater(total_time, 60)", "def test_05_user_progress(self):\r\n url = '/api/app/1/userprogress'\r\n self.check_limit(url, 'get', 'app')", "def check_missing_requests():\n logger.info(\"ETL Check for missing requests\")\n timestamp = int((datetime.datetime.now() - datetime.timedelta(hours=12)).timestamp()) * 1000\n\n job = Job(\n run=\"beagle_etl.jobs.lims_etl_jobs.fetch_new_requests_lims\",\n args={\"timestamp\": timestamp, \"redelivery\": False},\n 
status=JobStatus.CREATED,\n max_retry=3,\n children=[],\n )\n job.save()\n logger.info(format_log(\"ETL fetch_new_requests_lims job created\", obj=job))", "def test_01_stats_dates(self):\r\n today = unicode(datetime.date.today())\r\n with self.flask_app.test_request_context('/'):\r\n dates, dates_n_tasks, dates_anon, dates_auth = stats.stats_dates(1)\r\n err_msg = \"There should be 10 answers today\"\r\n assert dates[today] == 10, err_msg\r\n err_msg = \"There should be 100 answers per day\"\r\n assert dates_n_tasks[today] == 100, err_msg\r\n err_msg = \"The SUM of answers from anon and auth users should be 10\"\r\n assert (dates_anon[today] + dates_auth[today]) == 10, err_msg", "def test_get_daily_transfers(self):\n msg = \"Response status is not 200\"\n response = self.api.get_daily_transfers(self.year, self.month, self.day)\n self.assertEqual(response.status_code, 200, msg)", "def _first_good_date(self, day):\n count = 0\n while True:\n try:\n self.data.loc[day - timedelta(count)]\n return day - timedelta(count)\n except KeyError:\n count += 1" ]
[ "0.74424076", "0.7376572", "0.70266056", "0.7013269", "0.69252485", "0.685988", "0.6858518", "0.68238664", "0.6589236", "0.6509093", "0.6447439", "0.64048654", "0.6340471", "0.63340926", "0.6213541", "0.6193134", "0.61706835", "0.60453206", "0.596626", "0.5903541", "0.5840484", "0.58303916", "0.5780407", "0.57252777", "0.5689137", "0.5662226", "0.5651247", "0.56363535", "0.56287694", "0.5602209", "0.5556415", "0.55419064", "0.55391717", "0.55111414", "0.5496987", "0.5476225", "0.545161", "0.5450967", "0.5440533", "0.54292196", "0.5423734", "0.5423171", "0.54124975", "0.5404297", "0.5400381", "0.5396318", "0.53919995", "0.53881806", "0.53837657", "0.5380704", "0.53553385", "0.5341392", "0.5307539", "0.5292519", "0.52883196", "0.52847236", "0.52795535", "0.5276505", "0.5269736", "0.5265892", "0.5261553", "0.525212", "0.5232818", "0.5230995", "0.52277833", "0.52271473", "0.52261853", "0.52103126", "0.5206555", "0.52055734", "0.5197902", "0.51978195", "0.51928866", "0.5186533", "0.5184139", "0.51752526", "0.5175006", "0.5174598", "0.51414436", "0.51414436", "0.51414436", "0.51336724", "0.51211524", "0.511115", "0.5109802", "0.51026165", "0.51007926", "0.5095443", "0.5092811", "0.50924087", "0.5087729", "0.5084054", "0.5076367", "0.5075899", "0.50749177", "0.5060513", "0.50574905", "0.5056007", "0.5053274", "0.50518584", "0.5051832" ]
0.0
-1
Generates a series of reflections for an initial point p and first impact q
def reflect_gen(p,q):
    while True:
        yield q
        # n is a unit vector normal to the ellipse at point q
        n = unit([ci*qi for (ci,qi) in zip((-4,-1),q)])
        # d is a unit vector from p to q, e.g. the laser beams path
        d = unit([pi - qi for (pi,qi) in zip(p,q)])
        # r is a unit vector in the direction of the reflected laser beam
        r = reflect(d, n)
        (p,q) = (q, intersect(r, q))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutation(i,N_p,t,T,P,N_vars,F_min,F_const):\n\n #Adaptive scaling factor\n if N_vars >= 3:\n F=F_min*2**np.exp(1-(T/(T+1-t)))\n else:\n F = F_const\n #candidates are assigned without the i-th element\n candidates= np.delete(np.arange(N_p), np.where(np.arange(N_p)==i))\n #3 target vectors are picked out randomly for the donorvector generator\n cand_rand=np.random.choice(candidates,3,replace= False)\n X1=P[cand_rand[0],]\n X2=P[cand_rand[1],]\n X3=P[cand_rand[2],]\n \n\t#Donorvctor generator\n V= X1 + F*(X2-X3)\n return V", "def maker(N,n_vars,p):\n x = [] #an empty list to hold the data\n y = np.zeros(N) #an array to hold the dependent variable\n b = [] #an empty list to hold the true bs\n i = 1\n while i <= n_vars: #loop over the variables we want to create\n x_i = np.random.normal(loc = 0.0, scale = 1.0, size = N) #generate the data\n x.append(x_i) #add it to the list of data\n if np.random.uniform(0,1) < p: #if the variable matters...\n b_i = np.random.normal(loc = 0.0, scale = 1.0) #draw a random effect for this variable\n else:\n b_i = 0 #otherwise set it's true effect equal to 0.\n b.append(b_i) #add it to the list of effects\n y = y + b_i*x_i #add the variable effect to the dependent variable\n i += 1 #index up i\n \n b_i = np.random.normal(loc = 0.0, scale = 1.0) #draw a random intercept\n b.append(b_i) #append this intercept to the effects\n y = b_i + y + np.random.normal(loc = 0.0, scale = 1.0, size = N) #add the normally distributed error term and the intercept\n return [np.array(x),np.array(y),np.array(b)]", "def simulated_reflectivity(pressure, temperature, vapor_mixing_ratio, liquid_mixing_ratio, snow_mixing_ratio=None,\n graupel_mixing_ratio=None, use_varint=False, use_liqskin=False):\n # Set values for constants with variable intercept\n R1 = 1e-15\n RON = 8e6\n RON2 = 1e10\n SON = 2e7\n GON = 5e7\n RON_MIN = 8e6\n RON_QR0 = 0.00010\n RON_DELQR0 = 0.25*RON_QR0\n RON_CONST1R = (RON2-RON_MIN)*0.5\n RON_CONST2R = (RON2+RON_MIN)*0.5\n\n # set constant intercepts\n rno_l = 8e6\n rno_s = 2e7\n rno_g = 4e6\n\n qvapor = da.clip(vapor_mixing_ratio, 0., None)\n qliquid = da.clip(liquid_mixing_ratio, 0., None)\n\n # If qgraupel but not qsnow, set qgraupel = qsnow\n if snow_mixing_ratio is None:\n if graupel_mixing_ratio is None:\n qsnow = da.zeros_like(qliquid)\n qgraupel = da.zeros_like(qliquid)\n else:\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n qsnow = da.zeros_like(graupel_mixing_ratio)\n qsnow[temperature <= 273.15] = qgraupel[temperature <= 273.15]\n else:\n qsnow = da.clip(snow_mixing_ratio, 0., None)\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n\n # density for liquid, snow, and graupel (kg m-3)\n rho_l = 1000. # liquid\n rho_i = 100. # snow\n rho_g = 400. 
# graupel\n\n # constant evaluation of gamma distribution\n gamma = 720.\n\n # Alpha constant\n alpha = 0.224\n\n # constant multiplication factors\n factor_l = gamma * 1e18 * (1./(np.pi*rho_l))**1.75\n s = gamma * 1e18 * (1./(np.pi*rho_i))**1.75 * (rho_i/rho_l)**2 * alpha\n g = gamma * 1e18 * (1./(np.pi*rho_g))**1.75 * (rho_g/rho_l)**2 * alpha\n\n # calculate virtual temperature\n virtual_t = virtual_temperature(temperature, qvapor)\n\n # dry gas constant\n Rd = 287.\n rho_air = pressure/(Rd*virtual_t)\n\n # adjust for brightband if use_liqskin=True\n if use_liqskin:\n raise NotImplementedError('Liquid skin correction not implemented')\n # factor_s = da.full_like(temperature, s)\n # factor_g = da.full_like(temperature, g)\n # try:\n # factor_s[temperature >= 273.15] = factor_s[temperature >= 273.15] / da.array([alpha])\n # factor_g[temperature >= 273.15] = factor_g[temperature >= 273.15] / da.array([alpha])\n # except ValueError:\n # factor_s = s\n # factor_g = g\n else:\n factor_s = s\n factor_g = g\n\n # calculate variable intercept if use_varint=True\n if use_varint:\n raise NotImplementedError('Variable intercepts not yet implemented')\n # temp_c = da.clip(temperature-273.15, temperature.min(), -0.001)\n # sonv = MIN(2.0D8, 2.0D6*EXP(-0.12D0*temp_c))\n #\n # gonv = gon\n # IF (qgr(i,j,k) .GT. R1) THEN\n # gonv = 2.38D0 * (PI*RHO_G/(rhoair*qgr(i,j,k)))**0.92D0\n # gonv = MAX(1.D4, MIN(gonv,GON))\n # END IF\n #\n # ronv = RON2\n # IF (qra(i,j,k) .GT. R1) THEN\n # ronv = RON_CONST1R*TANH((RON_QR0 - qra(i,j,k))/RON_DELQR0) + RON_CONST2R\n # END IF\n else:\n ronv = rno_l\n sonv = rno_s\n gonv = rno_g\n\n # Total equivalent reflectivity factor (z_e, in mm^6 m^-3) is\n # the sum of z_e for each hydrometeor species:\n z_e = (((factor_l*(rho_air*qliquid)**1.75)/(ronv**.75)) +\n ((factor_s*(rho_air*qsnow)**1.75)/(sonv**.75)) +\n ((factor_g*(rho_air*qgraupel)**1.75)/(gonv**.75)))\n\n # Adjust small values of Z_e so that dBZ is no lower than -30\n z_e = da.clip(z_e, .001, None)\n\n # Convert to dBZ\n dbz = 10.*da.log10(z_e)\n return dbz", "def moment(self, p, q):\n\n def combin(n, r):\n # compute number of combinations of size r from set n\n def prod(values):\n try:\n return reduce(lambda x, y: x * y, values)\n except TypeError:\n return 1\n\n return prod(range(n - r + 1, n + 1)) / prod(range(1, r + 1))\n\n vertices = self.vertices(closed=True)\n x = vertices[0, :]\n y = vertices[1, :]\n\n m = 0.0\n n = len(x)\n for l in range(n):\n l1 = (l - 1) % n\n dxl = x[l] - x[l1]\n dyl = y[l] - y[l1]\n Al = x[l] * dyl - y[l] * dxl\n \n s = 0.0\n for i in range(p + 1):\n for j in range(q + 1):\n s += (-1)**(i + j) \\\n * combin(p, i) \\\n * combin(q, j) / ( i+ j + 1) \\\n * x[l]**(p - i) * y[l]**(q - j) \\\n * dxl**i * dyl**j\n m += Al * s\n\n return m / (p + q + 2)", "def first_class_tp(nvar=18, prowQ=9, mcon=4 ):\n n = nvar + prowQ + mcon\n p = prowQ + mcon\n m = mcon\n c = np.zeros(n)\n d = np.zeros(p)\n ucon = np.ones(m)*inf\n uvar = np.ones(n)*inf\n lvar = -np.ones(n)*inf\n #name = str(p)+'_'+str(n)+'_'+str(m)+'_First'+'.txt'\n name = str(p)+'_'+str(n)+'_'+str(m)+'_l1_tp'+'.txt'\n \n # Q randomly chosen such that Qij belong to the (-10,10)\n Q = 10 * np.random.rand(p, n)*(np.random.randint(3, size=(p,n))-1)\n \n # d=(di), di=sum dij for i= 1,...,p\n for i in range(p): \n d[i]= Q[i,:].sum()\n # B randomly chosen such that Bij belong to the (-3,3)\n B = 3 * np.random.rand(m, n)*(np.random.randint(3, size=(m,n))-1)\n \n b= np.zeros(m)\n b[0] = B[0,:].sum()\n for i in range(m):\n mu = np.random.rand()+ 
1e-10\n b[i] = B[i,:].sum()-m*mu\n lcon = b\n return Q,B,d,c,lcon,ucon,lvar,uvar,name", "def quaterion_product(q, p):\n q0 = q[3]\n p0 = p[3]\n\n return [q0*p[0:3] + p0*q[0:3] + mtimes(skew(q[0:3]), p[0:3]), q0*p0 - mtimes(q[0:3].T, p[0:3])]", "def classical_preprocessing(*args, **kwargs):\r\n qnode.construct(args, kwargs)\r\n return qml.math.stack(qnode.qtape.get_parameters())", "def create_normal_normal_goals():\n # Create the pattern/form of the prior normal distribution\n beta_name_lv = var('beta_name')\n beta_size_lv = var('beta_size')\n beta_rng_lv = var('beta_rng')\n a_lv = var('a')\n R_lv = var('R')\n beta_prior_mt = mt.MvNormalRV(a_lv, R_lv,\n size=beta_size_lv,\n rng=beta_rng_lv,\n name=beta_name_lv)\n # beta_type_lvars = mt_type_params(beta_prior_mt)\n\n y_name_lv = var('y_name')\n y_size_lv = var('y_size')\n y_rng_lv = var('y_rng')\n F_t_lv = var('f')\n V_lv = var('V')\n E_y_mt = mt.dot(F_t_lv, beta_prior_mt)\n Y_mt = mt.MvNormalRV(E_y_mt, V_lv,\n size=y_size_lv,\n rng=y_rng_lv,\n name=y_name_lv)\n\n Y_obs_mt = mt.observed(obs_sample_mt, Y_mt)\n\n # Create tuple-form expressions for the posterior\n e_expr = mt.sub(Y_obs_mt, mt.dot(F_t_lv, a_lv))\n F_expr = (mt.transpose, F_t_lv)\n R_F_expr = (mt.dot, R_lv, F_expr)\n Q_expr = (mt.add,\n V_lv,\n (mt.dot,\n F_t_lv,\n R_F_expr))\n A_expr = (mt.dot, R_F_expr, (mt.matrix_inverse, Q_expr))\n # m = C \\left(F V^{-1} y + R^{-1} a\\right)\n m_expr = (mt.add, a_lv, (mt.dot, A_expr, e_expr))\n # C = \\left(R^{-1} + F V^{-1} F^{\\top}\\right)^{-1}\n # TODO: We could use the naive posterior forms and apply identities, like\n # Woodbury's, in another set of \"simplification\" relations.\n # In some cases, this might make the patterns simpler and more broadly\n # applicable.\n C_expr = (mt.sub,\n R_lv,\n (mt.dot,\n (mt.dot, A_expr, Q_expr),\n (mt.transpose, A_expr)))\n\n norm_posterior_exprs = (mt.MvNormalRV,\n m_expr, C_expr,\n y_size_lv, y_rng_lv)\n\n fact(conjugate,\n # MvNormal likelihood, MvNormal prior mean\n Y_obs_mt, norm_posterior_exprs)\n\n return ((eq, prior_dist_mt, beta_prior_mt),\n # This should unify `Y_mt` and `obs_dist_mt`.\n (eq, obs_mt, Y_obs_mt))", "def Dynamics(Md,X,shockprime):\n assert X.shape[1] == 1\n\n Xp = X.copy()\n Xp[:Md.nX] = Md.StateTrans(X,shockprime)\n Xp[Md.nX:Md.nXY] = Md.F(Xp[Md.interpstates])\n Xp[Md.nXY:] = Md.Static(Xp)\n\n return Xp", "def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))", "def build_posterior(self, q):\n v = q\n if hasattr(q, '__iter__'):\n if hasattr(q[0], '__iter__'):\n if len(q[0]) is not self.N:\n raise ValueError('Specified coordinates have incorrect dimensionality')\n elif self.N is 1:\n v = [(k,) for k in q]\n else:\n raise ValueError('The number of specified points must be greater than 1')\n else:\n raise ValueError('The number of specified points must be greater than 1')\n\n\n lengths = self.s * self.scale_lengths\n K_qx = self.matrix(v, self.x, lengths)\n K_qq = self.matrix(v, v, lengths)\n self.mu = dot(K_qx, self.H)\n self.sigma = K_qq - dot( K_qx, solve( self.K_xx, K_qx.T ) )\n return self.mu, self.sigma", "def compute_demand(self, p):\n \n G, h = 
spdiag([-1.0]*self.n), matrix(0.0, (self.n, 1))\n \n if self.type == 'quad':\n Q, r = self.data\n return solvers.qp(-Q, p-r, G, h)['x']\n\n if self.type == 'sqrt':\n def F(x=None, z=None):\n if x is None: return 0, matrix(1.0, (self.n, 1))\n u, Du, H = self.utility(x)\n f, Df = p.T*x - u, p.T - Du\n if z is None: return f, Df\n return f, Df, -z[0]*H\n return solvers.cp(F, G, h)['x']", "def run_pmed(request_json, p=1):\n in_graph, lines, points = geojson_to_graph(request_json['features'])\n assi_graph, node_map, gt_g, ass_to_path, _, vertices, _ = make_assi_graph(in_graph)\n\n # Crete R objects and solve\n r_spatial_df_demand, r_spatial_df_candidates, r_cost_matrix, candidates = make_tb_objects(\n assi_graph,\n points,\n node_map\n )\n\n s_assi_df = solve_pmed(\n r_spatial_df_demand,\n r_spatial_df_candidates,\n metric=r_cost_matrix,\n p=p\n )\n\n # Make and write output\n o_graph = make_o_graph(\n s_assi_df,\n candidates,\n in_graph,\n points,\n lines,\n node_map,\n gt_g,\n vertices\n )\n\n footprint = assi_to_path(\n o_graph,\n node_map,\n in_graph,\n lines,\n points,\n ass_to_path\n )\n\n out_json = create_geojson(footprint)\n\n return out_json", "def pvector_pp(i, q):\n\tc0 = coords_cut[i]\n\tra, dec = c0.ra.value, c0.dec.value\n\tr = hp.rotator.Rotator([ra, dec, 0])\n\tsT = np.matmul(r.mat, np.matmul(s_tensor_cut[:,:,i], r.mat.transpose()))\n\tevals, evecs = np.linalg.eigh(sT[1:,1:])\n\tevecA, evecB = evecs[:,0], evecs[:,1]\n\tif evecB[0] < 0:\n\t\tevecB = -evecB\n\ttheta = np.arctan2(evecB[1], evecB[0])\n\tres = 180*theta.item()/np.pi, i\n\tq.put(res)\n\treturn res", "def __init__ (self, p, q):\n self.n = p * q\n self.n_sq = self.n * self.n\n self.g = self.n + 1", "def _core_calc_design(self,prof) :\n\t\tlp_list,ld_list = [],[]\n\t\tcp_list,cd_list = [],[]\n\t\t\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\t\n\t\t\tLp = np.ones(prof.n_sample)\n\t\t\tLd = np.ones(prof.n_sample)\n\t\t\n\t\t\t# Get regressor values\n\t\t\tX_p = [np.log(prof.var[:,reg-1]) for reg in reg_p ]\n\t\t\tX_d = [np.log(prof.var[:,reg-1]) for reg in reg_d ]\n\t\t\t\n\t\t\tLp = np.vstack((Lp,np.array(X_p))).T\n\t\t\tLd = np.vstack((Ld,np.array(X_d))).T\t\t\t\n\n\t\t\t# Calculate Cp\n\t\t\tCp = np.dot(LA.inv(np.dot(Lp.T,Lp)),Lp.T)\n\t\t\tCd = np.dot(LA.inv(np.dot(Ld.T,Ld)),Ld.T)\n\t\t\t# Append Lp,Ld,Cp and Cd to relevant lists\n\t\t\tlp_list.append(Lp)\n\t\t\tld_list.append(Ld)\n\t\t\tcp_list.append(Cp)\n\t\t\tcd_list.append(Cd)\t\t\t\n\t\treturn (lp_list,ld_list,cp_list,cd_list)", "def __init__(self, p, q):\n self.p = p\n self.q = q\n # biais des unités d’entrée) -> dim (1xp)\n self.a = np.zeros((1, self.p))\n # biais des unités de sortie -> dim (1xq)\n self.b = np.zeros((1, self.q))\n # initialisés aléatoirement suivant une loi normale centrée, de variance égale à 0.01\n self.W = np.random.normal(loc=0, scale=0.1, size=(self.p, self.q))", "def crp_to_dcm(q):\n s = q @ q\n return (1/(1 + s))*((1 - s)*np.identity(3) + 2*np.outer(q, q) - 2*ut.cross_product_operator(q))", "def reflection_about_average(circuit, q):\n\n circuit.h(q)\n circuit.x(q)\n circuit.h(q[1])\n circuit.cx(q[0], q[1])\n circuit.h(q[1])\n circuit.x(q)\n circuit.h(q)", "def info(self, qp: QP) -> Info:\n zero = P(jp.zeros((self.num_bodies, 3)), jp.zeros((self.num_bodies, 3)))\n\n dp_c = sum([c.apply(qp) for c in self.colliders], zero)\n dp_j = sum([j.apply(qp) for j in self.joints], zero)\n info = Info(dp_c, dp_j, zero)\n return info", "def 
params(dim):\r\n m = 3\r\n s = 1\r\n q = 2 ** (m - 1)\r\n while s < dim:\r\n m += 1\r\n s = m + math.factorial(m - 1) / (2 * math.factorial(m - 3))\r\n q = 2 ** (m - 1)\r\n\r\n return (\r\n m, q, s - dim)", "def algorithm_1_1(p, c, t, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (t[j + k] - x) / (t[j + k] - t[j]) * q[j] + (x - t[j]) / (\n t[j + k] - t[j]) * q[j + 1]\n return q[0]", "def cross(p, q):\n xyz = np.zeros(3)\n xyz[0] = p[1] * q[2] - p[2] * q[1]\n xyz[1] = p[2] * q[0] - p[0] * q[2]\n xyz[2] = p[0] * q[1] - p[1] * q[0]\n return xyz", "def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)", "def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def __init__(self, p, hyperpara, para, inst_name=\"\"):\n \n self.hyperpara = hyperpara\n self.para = para\n \n s = -np.log(-np.log(1.0 - p))\n \n k, cop = hyperpara\n \n # Sets 
colour\n colour = [\n None,\n None,\n {\n \"i\": 2,\n \"me\": 3},\n {\n \"i\": 0,\n \"me\": 1}][k][cop]\n \n if cop == \"i\":\n if k == 3:\n def f(X):\n q1, q2, q3 = X\n \n Y = np.array([q1, q2 - q1, q3 - q2])\n \n if np.any(Y <= 0.0):\n return None\n \n return -0.5 * np.sum(((Y - para[:, 0]) / para[:, 1]) ** 2)\n elif k == 2:\n def f(X):\n sigma, q1, q2 = X\n \n if sigma <= 0:\n return None\n \n Y = np.array([q1, q2 - q1])\n \n if np.any(Y <= 0.0):\n return None\n \n a = -0.5 * np.sum(((Y - para[:, 0]) / para[:, 1]) ** 2)\n \n return a - np.log(sigma)\n elif cop == \"me\":\n q_marg = [None for _ in range(k)]\n \n for i in range(k):\n dist = Normal(para[i, 0], para[i, 1])\n q_marg[i] = TruncatedDistribution(\n dist,\n 0.0,\n TruncatedDistribution.LOWER)\n \n ot = MaximumEntropyOrderStatisticsDistribution(q_marg)\n \n if k == 3:\n def f(X):\n q1, q2, q3 = X\n \n if np.any(np.array([q1, q2 - q1, q3 - q2]) <= 0.0):\n return None\n \n Y = ot.computePDF(X)\n \n if Y <= 0:\n return None\n \n return np.log(Y)\n elif k == 2:\n def f(X):\n sigma, q1, q2 = X\n \n if sigma <= 0 or q1 <= 0.0 or q2 <= q1:\n return None\n \n Y = ot.computePDF([q1, q2])\n \n if Y <= 0:\n return None\n \n return np.log(Y) - np.log(sigma)\n \n if k == 3:\n # Transformation (mu, theta, xi) -> (q1, q2, q3)\n def g(X):\n mu, sigma, xi = X\n \n if sigma <= 0:\n return None\n \n # When xi is close enough to 0, we consider it equal to 0\n if abs(xi) < 1e-300:\n q = mu + sigma * s\n else:\n q = mu + sigma * (np.exp(xi * s) - 1.0) / xi\n \n if q[0] < 0.0:\n return None\n return q\n \n \n # Log of determinant of g\n def g_det(X):\n mu, sigma, xi = X\n \n if abs(xi) < 1e-300:\n return np.log(sigma)\n \n e = np.exp(s * xi)\n \n sm = [\n s[i] * e[i] * (e[(i + 2) % 3] - e[(i + 1) % 3])\n for i in range(3)]\n \n return np.log(sigma) + np.log(sum(sm)) - np.log(xi ** 2.0)\n elif k == 2:\n # Transformation (mu, sigma, xi) -> (sigma, q1, q2)\n def g(X):\n mu, sigma, xi = X\n \n # When xi is close enough to 0, we consider it equal to 0\n if abs(xi) < 1e-300:\n q = mu + sigma * s\n else:\n q = mu + sigma * (np.exp(xi * s) - 1.0) / xi\n \n if q[0] < 0.0:\n return None\n \n return np.concatenate(([sigma], q))\n \n \n # Log of determinant of g\n def g_det(X):\n mu, sigma, xi = X\n \n if abs(xi) < 1e-300:\n return np.log(sigma)\n \n e = (s * xi - 1.0) * np.exp(s * xi)\n \n f = np.log(abs(e[0] - e[1]))\n \n return np.log(sigma) + f - np.log(xi ** 2.0)\n \n super().__init__(\n util.log_transform(f, g, g_det),\n colour=colour,\n inst_name=inst_name)\n \n if k == 2:\n self.prior[\"proper\"] = False", "def reflect_ghost(self, p0):\n # Instead of self.p1, one could take any point on the line p1--p2.\n dist = self.p1 - p0\n alpha = numpy.einsum(\"ij, ij->i\", dist, self.mirror_edge)\n # q is sits at the perpendicular intersection of the reflection\n q = dist - (alpha / self.beta)[:, None] * self.mirror_edge\n return p0 + 2 * q", "def _perturbation(self):\n if self.P > 1:\n scales = []\n for term_i in range(self.n_randEffs):\n _scales = sp.randn(self.diag[term_i].shape[0])\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate((_scales, sp.zeros(1)))\n scales.append(_scales)\n scales = sp.concatenate(scales)\n else:\n scales = sp.randn(self.vd.getNumberScales())\n return scales", "def calculate_cramer_von_mises(p, q, num_samples=100, _random_state=None, **kwargs): # pylint: disable=unused-argument\n logging.warning(\"This function is deprecated, please use `calculate_goodness_of_fit`\") # pragma: no cover", "def obtain_parametric_priors(resolution, 
num_actions):\n # maximum prior magnitude for any discritized state\n max_prior = 10\n \n priors = []\n \n for p in range(resolution):\n for v in range(resolution):\n for a in range(num_actions):\n priors.append(set_parametric_prior(resolution, p, v, a, max_prior))\n \n priors = np.array(priors).reshape(resolution,resolution,num_actions)\n #print(\"priors\", priors[5,5,0])\n return priors", "def prob4(d = 500): \n #import the plane data\n planeData = np.load(\"plane.npy\")\n \n tplane = planeData[:,0]\n alpha = np.deg2rad(planeData[:,1])\n beta = np.deg2rad(planeData[:,2])\n \n l = len(tplane)\n \n #define x and y functions\n def x(n):\n# Gives x position\n return d * np.tan(beta[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n def y(n):\n# Gives y position\n return d * np.tan(beta[n]) * np.tan(alpha[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n \n #define x and y prime as we will see them\n def xprime(n):\n# Gives the approximate derivative of x\n if n == 0:\n return fdq1(x, n, h = 1)\n elif n == l-1:\n return bdq1(x, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(x, n, h = 1)\n else:\n return 0\n \n def yprime(n):\n# Gives the approximate derivative of y\n if n == 0:\n return fdq1(y, n, h = 1)\n elif n == l-1:\n return bdq1(y, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(y, n, h = 1)\n else:\n return 0\n \n #define speed from x and y prime\n def speed(n):\n# print(\"speed(n) where n = \" + str(n))\n return np.sqrt((xprime(n))**2 + (yprime(n))**2)\n \n #Finally get the speed from the information we have\n spd = []\n X = []\n Y = []\n for i in range(0, l):\n spd.append(speed(i))\n X.append(x(i))\n Y.append(y(i))\n \n return spd\n \n raise NotImplementedError(\"Problem 4 Incomplete\")", "def q_greedify_policy(env, V, pi, s, gamma):\n ### START CODE HERE ###\n ##q(s,a)=sigma(P(ss')*(gamma*V(s')+R(s,a,s'))\n q = np.zeros((env.action_space.n))\n for idx, action in enumerate(range(env.action_space.n)):\n for prob_next_state, next_state, reward_next_state, done in env.P[s][action]:\n q[idx] += prob_next_state * ((gamma * V[next_state]) + reward_next_state)\n\n greedy_action = np.argmax(q)\n # print(greedy_action)\n for action, action_prob in enumerate(pi[s]):\n if action == greedy_action:\n print(action, greedy_action)\n pi[s][action] = 1\n else:\n pi[s][action] = 0", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude 
couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n 
self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def reflect(memory, get_q_values, policy, D_train, functions):\n\n N = np.min((BATCH_SIZE, len(memory)))\n batch_IDX = 
np.random.choice(np.arange(N), size=(N,), replace=False)\n recall = np.array(memory)[batch_IDX]\n states, actions, new_states, rewards, done = zip(*recall)\n # Prediction in original states\n\n rewards = np.array(rewards)\n # These are useful helper functions for use in diagnosing\n Q_ = get_q_values(np.array(states).astype('float32'))\n Q_dash = get_q_values(np.array(new_states).astype('float32'))\n predictions = Q_[:, actions]\n\n# pdb.set_trace()\n target = np.array(rewards,dtype='float32')\\\n +GAMMA*np.amax(Q_dash,axis=1)\n\n target[np.where(done)] = rewards[np.where(done)]\n\n\n inputs = zip(np.array(states, dtype='float32'), target)\n\n loss = 0\n for s_, t_ in inputs:\n loss += D_train(s_[None,:], np.array(t_).reshape(1))\n\n return loss", "def set_marginals(self, bw_method=None):\n \n # Log density\n def kde(sample):\n k = gaussian_kde(np.transpose(sample), bw_method=bw_method)\n return lambda X: k.logpdf(np.array(X))[0]\n \n for para in [\"theta\", \"q\"]:\n for typ in [\"prior\", \"post\"]:\n sample = getattr(self, typ)[para][\"sample\"]\n \n if sample is None:\n getattr(self, typ)[para][\"marginal\"] = [\n None\n for I in util.marg_1_2]\n continue\n \n getattr(self, typ)[para][\"marginal\"] = [\n kde(sample[:, I])\n for I in util.marg_1_2]\n \n if self.hyperpara[0] == 3:\n if self.hyperpara[1] == \"i\":\n qu_diff_dist = [\n TruncatedDistribution(\n Normal(self.para[i, 0], self.para[i, 1]),\n 0.0,\n TruncatedDistribution.LOWER)\n for i in range(3)]\n qu_dist = [\n qu_diff_dist[0],\n qu_diff_dist[0] + qu_diff_dist[1],\n qu_diff_dist[0] + qu_diff_dist[1] + qu_diff_dist[2]]\n \n self.prior[\"q\"][\"marginal\"][:3] = [\n qu_dist[i].computeLogPDF\n for i in range(3)]\n elif self.hyperpara[1] == \"me\":\n self.prior[\"q\"][\"marginal\"][:3] = [\n TruncatedDistribution(\n Normal(self.para[i, 0], self.para[i, 1]),\n 0.0,\n TruncatedDistribution.LOWER).computeLogPDF\n for i in range(3)]", "def __init__(self,\n p_xi: float, p_yi: float, p_zi: float,\n p_xx: float, p_yx: float, p_zx: float,\n p_xy: float, p_yy: float, p_zy: float,\n p_xz: float, p_yz: float, p_zz: float,\n p_ix: float, p_iy: float, p_iz: float) -> None:\n self._p_xi = value.validate_probability(p_xi, 'p_xi')\n self._p_yi = value.validate_probability(p_yi, 'p_yi')\n self._p_zi = value.validate_probability(p_zi, 'p_zi')\n self._p_xx = value.validate_probability(p_xx, 'p_xx')\n self._p_yx = value.validate_probability(p_yx, 'p_yx')\n self._p_zx = value.validate_probability(p_zx, 'p_zx')\n self._p_xy = value.validate_probability(p_xy, 'p_xy')\n self._p_yy = value.validate_probability(p_yy, 'p_yy')\n self._p_zy = value.validate_probability(p_zy, 'p_zy')\n self._p_xz = value.validate_probability(p_xz, 'p_xz')\n self._p_yz = value.validate_probability(p_yz, 'p_yz')\n self._p_zz = value.validate_probability(p_zz, 'p_zz')\n self._p_ix = value.validate_probability(p_ix, 'p_ix')\n self._p_iy = value.validate_probability(p_iy, 'p_iy')\n self._p_iz = value.validate_probability(p_iz, 'p_iz')\n self._p_ii = 1 - value.validate_probability(p_xi + p_yi + p_zi +\n p_xx + p_yx + p_zx +\n p_xy + p_yy + p_zy +\n p_xz + p_yz + p_zz +\n p_ix + p_iy + p_iz, 'p_ii')", "def P_init(X, perplexity):\n\n n, _ = X.shape\n x = np.sum(X ** 2, axis=1)\n y = np.sum(X ** 2, axis=1)[:, np.newaxis]\n z = np.matmul(X, X.T)\n D = x - 2 * z + y\n np.fill_diagonal(D, 0.)\n P = np.zeros((n, n))\n betas = np.ones((n, 1))\n H = np.log2(perplexity)\n\n return D, P, betas, H", "def MyBaseMoments(p,q,img,gauss_sigma,gauss_centroid=None, gauss_g1=0., gauss_g2=0.):\n weight 
= galsim.Image(np.zeros_like(img.array))\n gauss = galsim.Gaussian(sigma=gauss_sigma*pixel_scale).shear(g1=gauss_g1,g2=gauss_g2)\n if gauss_centroid is None:\n gauss_centroid = img.true_center\n weight = gauss.drawImage(image=weight, scale=pixel_scale, method='no_pixel', use_true_center=True, offset=(gauss_centroid-img.true_center)*(1))\n x = np.linspace(img.xmin-img.center.x*0-gauss_centroid.x*1, img.xmax-img.center.x*0-gauss_centroid.x*1, img.xmax-img.xmin+1)+0.*0.5\n y = np.linspace(img.ymin-img.center.y*0-gauss_centroid.y*1, img.ymax-img.center.y*0-gauss_centroid.y*1, img.ymax-img.ymin+1)+0.*0.5\n X, Y = np.meshgrid(x,y)\n\n Q00 = np.sum(weight.array*img.array)\n Q10 = gauss_centroid.x + np.sum(X*weight.array*img.array)/Q00\n Q01 = gauss_centroid.y + np.sum(Y*weight.array*img.array)/Q00\n Q20 = np.sum((X**2)*weight.array*img.array)\n Q02 = np.sum((Y**2)*weight.array*img.array)\n\n monomial = 1.\n for pp in xrange(p):\n monomial *= X\n for qq in xrange(q):\n monomial *= Y\n Qpq = np.sum(monomial*weight.array*img.array) #/Q00\n\n return Qpq", "def project_curve(q):\n n,T = q.shape\n if n==2:\n dt = 0.35\n if n==3:\n dt = 0.2\n epsilon = 1e-6\n\n iter = 1\n res = ones(n)\n J = zeros((n,n))\n\n s = linspace(0,1,T)\n\n qnew = q.copy()\n qnew = qnew / sqrt(innerprod_q2(qnew,qnew))\n\n qnorm = zeros(T)\n G = zeros(n)\n C = zeros(300)\n while (norm(res) > epsilon):\n if iter > 300:\n break\n\n # Jacobian\n for i in range(0,n):\n for j in range(0,n):\n J[i,j] = 3 * trapz(qnew[i,:]*qnew[j,:],s)\n \n J += eye(n)\n\n for i in range(0,T):\n qnorm[i] = norm(qnew[:,i])\n \n # Compute the residue\n for i in range(0,n):\n G[i] = trapz(qnew[i,:]*qnorm,s)\n \n res = -G\n\n if (norm(res) < epsilon):\n break\n\n x = solve(J,res)\n C[iter] = norm(res)\n\n delG = Basis_Normal_A(qnew)\n temp = zeros((n,T))\n for i in range(0,n):\n temp += x[i]*delG[i]*dt\n \n qnew += temp\n iter += 1\n \n qnew = qnew/sqrt(innerprod_q2(qnew,qnew))\n\n return qnew", "def second_class_tp(p,n):\n c = np.zeros(n)\n d = np.zeros(p)\n ucon = np.zeros(n)\n lcon = np.zeros(n)\n \n #uvar = np.ones(n)*1\n uvar = np.ones(n)*5\n lvar = np.ones(n)*0.5\n name = str(p)+'_'+str(n)+'_'+str(n)+'_l1_tp'+'.txt'\n #name = str(n)+'_'+str(p)+'_'+'_second_tp'+'.txt'\n Q = rog.hilb(p,n)\n # d=(di), di=sum qij for i= 1,...,p\n for i in range(p): \n d[i]= Q[i,:].sum()\n B = np.zeros((n,n))\n return Q,B,d,c,lcon,ucon,lvar,uvar,name", "def prop(q1,abcd,mode=[0,0],p1=1):\n\n A=abcd[0][0]\n B=abcd[0][1]\n C=abcd[1][0]\n D=abcd[1][1]\n \n n=mode[0]\n m=mode[1]\n \n q = (A*q1 + B)/(C*q1 + D)\n p = p1*np.exp(1j*np.angle(1/(A+B/q1)**(1+n+m)))\n \n return q,p", "def reflection ((x,y),(w,z)):\n twodot = 2*dotprod((x,y),(w,z))\n a, b = x - twodot* w, y - twodot*z\n return (a,b)", "def tf(xp, yp, zp, prisms, inc, dec, pmag=None):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n # Calculate the 3 components of the unit vector in the direction of the\n # regional field\n fx, fy, fz = giutils.dircos(inc, dec)\n res = 0\n for prism in prisms:\n if prism is None:\n continue\n if 'magnetization' not in prism.props and pmag is None:\n continue\n if pmag is None:\n mx, my, mz = prism.props['magnetization']\n else:\n mx, my, mz = pmag\n v1 = kernelxx(xp, yp, zp, prism)\n v2 = kernelxy(xp, yp, zp, prism)\n v3 = kernelxz(xp, yp, zp, prism)\n v4 = kernelyy(xp, yp, zp, prism)\n v5 = kernelyz(xp, yp, zp, prism)\n v6 = kernelzz(xp, yp, zp, prism)\n bx = v1*mx + v2*my + v3*mz\n by = v2*mx + v4*my + v5*mz\n bz = 
v3*mx + v5*my + v6*mz\n res += fx*bx + fy*by + fz*bz\n res *= CM * T2NT\n return res", "def complex(self, sentence):\r\n repetition = 6000\r\n warmup = 2500\r\n pos_mcmc_dict = {\"pos_\" + str(i): {} for i in range(len(sentence))}\r\n sequence = [\"noun\"] * len(sentence)\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n prob_first = self.posterior_first(sentence[i])\r\n sample_first = list(\r\n np.random.choice(\r\n [keys for keys in prob_first.keys()],\r\n repetition,\r\n p=[\r\n float(prob_first[keys]) / sum(prob_first.values())\r\n for keys in prob_first.keys()\r\n ],\r\n )\r\n )\r\n sample_first = sample_first[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_first.count(pos)) / len(sample_first))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n elif i == 1:\r\n prob_second = self.post_second(sentence[i], sequence[i - 1])\r\n sample_second = list(\r\n np.random.choice(\r\n [keys for keys in prob_second.keys()],\r\n repetition,\r\n p=[\r\n float(prob_second[keys]) / sum(prob_second.values())\r\n for keys in prob_second.keys()\r\n ],\r\n )\r\n )\r\n sample_second = sample_second[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_second.count(pos)) / len(sample_second))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n else:\r\n prob_other = self.posterior_else(\r\n sentence[i], sequence[i - 1], sequence[i - 2]\r\n )\r\n sample_other = list(\r\n np.random.choice(\r\n [keys for keys in prob_other.keys()],\r\n repetition,\r\n p=[\r\n float(prob_other[keys]) / sum(prob_other.values())\r\n for keys in prob_other.keys()\r\n ],\r\n )\r\n )\r\n sample_other = sample_other[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_other.count(pos)) / len(sample_other))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n return sequence", "def evaluate_design(self): # to update the pr object", "def get_random_throwing_parameters_pro(x, vx, vy, wx, wy):\n return np.random.normal(x, 1), np.random.normal(vx, 0.5), \\\n np.random.normal(vy, 5), np.random.normal(wx, 0.2), \\\n np.random.normal(wy, 0.2)", "def simulate(self):\n n_samples = self.n_samples\n n_features = self.n_features\n nb_active_features = self.nb_active_features\n K = self.K\n pi_0 = self.pi_0\n gap = self.gap\n p0 = self.p0\n p1 = self.p1\n r_c = self.r_c\n r_cf = self.r_cf\n rho = self.rho\n\n coeffs = np.zeros(n_features)\n coeffs[0:nb_active_features] = K\n\n features = features_normal_cov_toeplitz(n_samples, n_features, rho)\n\n # Add class relative information on the design matrix \n A = np.random.choice(range(n_samples), size=int((1 - pi_0) * n_samples),\n replace=False)\n A_ = np.delete(range(n_samples), A)\n\n index_plus_gap = nb_active_features + int(\n (n_features - nb_active_features) * r_cf)\n features[A, :index_plus_gap] += gap\n features[A_, :index_plus_gap] -= gap\n\n self.features = features\n xc = features.dot(coeffs)\n\n # Simulation of latent variables\n pi = self.logistic_grad(-xc)\n u = np.random.rand(n_samples)\n Z = (u <= 1 - pi)\n self.Z = Z\n\n # Simulation of true times\n n_samples_class_1 = np.sum(Z)\n n_samples_class_0 = n_samples - n_samples_class_1\n T = np.empty(n_samples)\n pi_0_est = 1 - Z.mean()\n T[Z == 0] = 
np.random.geometric(p0, size=n_samples_class_0)\n\n # Compute p_c to obtain censoring rate r_c\n r_c_ = 1 - r_c\n p0_ = 1 - p0\n p1_ = 1 - p1\n pi_0_ = 1 - pi_0_est\n a = r_c_ * p0_ * p1_\n b = p0 * pi_0_est * p1_ + p1 * pi_0_ * p0_ - r_c_ * (p1_ + p0_)\n c = r_c_ - p0 * pi_0_est - p1 * pi_0_\n res = self.poldeg2_solver(a=a, b=b, c=c)\n if isinstance(res, list):\n if res[0] > 0:\n pc = 1 - res[0]\n else:\n pc = 1 - res[1]\n else:\n pc = 1 - res\n T[Z == 1] = np.random.geometric(p1, size=n_samples_class_1)\n\n # Simulation of the censoring\n C = np.random.geometric(pc, size=n_samples)\n\n # Censoring indicator: 1 if it is a time of failure, 0 if it's \n # censoring.\n delta = (T <= C).astype(int)\n\n # Observed time\n Y = np.minimum(T, C).astype(int)\n if np.sum(Y == 0) > 0:\n Y += 1\n self.delta = delta\n self.Y = Y\n return features, Y, delta", "def generation(hid_pl, f_state, eps_z, eps_x, pd, fd):\n params_prior = fd['phi_prior'](hid_pl)\n z = sample(params_prior, eps_z, 'gauss')\n phi_z = fd['phi_z'](z)\n params_out = fd['phi_dec'](phi_z, hid_pl)\n x = sample(params_out, eps_x, pd['model'])\n\n phi_x = fd['phi_x'](x)\n f_in = tf.concat([phi_x, phi_z], axis=1, name='f_theta_joint_inputs')\n f_out, f_state = fd['f_theta'](f_in, f_state)\n return x, f_out, f_state", "def reflect(real_seqs):\n reflectX = np.random.choice([-1, 1])\n reflectY = np.random.choice([-1, 1])\n reflected = real_seqs * np.array([reflectX, reflectY, 1])\n return reflected", "def conditional(x, j, p, q, r):\n tmp = []\n d = 9\n for i in range(d):\n res = p[i][j] ** (x[i] * (x[i] + 1) * 0.5)\n res *= q[i][j] ** (1 - x[i]*x[i])\n res *= r[i][j] ** (x[i] * (x[i] - 1) * 0.5)\n tmp.append(res)\n return prod(tmp)", "def prediccion(self):\n # Project the state ahead\n self.X = self.F @ self.X + self.B @ self.M\n self.P = self.F @ self.P @ self.F.T + self.Q\n\n return self.X", "def universal_cir(q: List[QRegStorage], i: int, para: List[float]):\n\n RZ(para[0])(q[i])\n RY(para[1])(q[i])\n RZ(para[2])(q[i])\n\n RZ(para[3])(q[i + 1])\n RY(para[4])(q[i + 1])\n RZ(para[5])(q[i + 1])\n\n CX(q[i + 1], q[i])\n\n RZ(para[6])(q[i])\n RY(para[7])(q[i + 1])\n\n CX(q[i], q[i + 1])\n\n RY(para[8])(q[i + 1])\n\n CX(q[i + 1], q[i])\n\n RZ(para[9])(q[i])\n RY(para[10])(q[i])\n RZ(para[11])(q[i])\n\n RZ(para[12])(q[i + 1])\n RY(para[13])(q[i + 1])\n RZ(para[14])(q[i + 1])", "def tf(xp, yp, zp, spheres, inc, dec, pmag=None):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n tf = numpy.zeros_like(xp)\n # Calculate the 3 components of the unit vector in the direction of the\n # regional field\n fx, fy, fz = utils.dircos(inc, dec)\n if pmag is not None:\n if isinstance(pmag, float) or isinstance(pmag, int):\n pintensity = pmag\n pmx, pmy, pmz = fx, fy, fz\n else:\n pintensity = numpy.linalg.norm(pmag)\n pmx, pmy, pmz = numpy.array(pmag) / pintensity\n for sphere in spheres:\n if sphere is None or ('magnetization' not in sphere.props\n and pmag is None):\n continue\n radius = sphere.radius\n # Get the intensity and unit vector from the magnetization\n if pmag is None:\n mag = sphere.props['magnetization']\n if isinstance(mag, float) or isinstance(mag, int):\n intensity = mag\n mx, my, mz = fx, fy, fz\n else:\n intensity = numpy.linalg.norm(mag)\n mx, my, mz = numpy.array(mag) / intensity\n else:\n intensity = pintensity\n mx, my, mz = pmx, pmy, pmz\n # First thing to do is make the computation point P the origin of the\n # coordinate system\n x = sphere.x - xp\n y = sphere.y - yp\n z 
= sphere.z - zp\n # Calculate the 3 components of B\n dotprod = mx * x + my * y + mz * z\n r_sqr = x ** 2 + y ** 2 + z ** 2\n r5 = r_sqr ** (2.5)\n moment = intensity * (4. * numpy.pi * (radius ** 3) / 3.)\n bx = moment * (3 * dotprod * x - r_sqr * mx) / r5\n by = moment * (3 * dotprod * y - r_sqr * my) / r5\n bz = moment * (3 * dotprod * z - r_sqr * mz) / r5\n tf += (fx * bx + fy * by + fz * bz)\n tf *= CM * T2NT\n return tf", "def dynamics(self, p, s, action):\r\n #number of iteration used for the Euler integration method\r\n iterations = int(self.time_step/self.int_step)\r\n\r\n for i in range(iterations):\r\n #handle terminal state\r\n if abs(p) > 1 or abs(s) > 3:\r\n return p, s\r\n else:\r\n p_prime = p + self.int_step * s #p'=s\r\n s_prime = s + self.int_step * self.s_derivation(p, s, action)\r\n\r\n p = p_prime\r\n s = s_prime\r\n\r\n return p, s", "def pred(self):\n return [ self.simple_reflection(i) for i in self.descents() ]", "def test_conjecture():\n print(\"Executing test_conjecture:\")\n\n theory=[]\n\n print(language.program_string(theory))\n for i in range(10):\n theory=conjecture.vary([theory], 0, [], steps=1)\n print(f\"Theory after {i+1} stages of variation:\")\n print(language.program_string(theory))", "def _infer_pvalues(self, effect, perm, p=.05, mcp='maxstat'):\n assert all([isinstance(k, np.ndarray) for k in (effect, perm)])\n n_perm = perm.shape[0]\n # compute the minimum number of required permutations\n n_perm_req = int(10. / p)\n if n_perm < n_perm_req:\n logger.warning(f\"For inferences at p<{p}, it is recommended to per\"\n f\"form at least n_perm={n_perm_req} permutations\")\n\n # ---------------------------------------------------------------------\n logger.info(f\" infer p-values at (p={p}, mcp={mcp})\")\n # computes the pvalues\n if mcp is 'maxstat':\n max_p = perm.reshape(n_perm, -1).max(1)[np.newaxis, ...]\n nb_over = (effect[..., np.newaxis] <= max_p).sum(-1)\n pvalues = nb_over / n_perm\n # non-signi. p-values are set to 1. and min(pvalues) = 1 / n_perm\n pvalues[pvalues >= p] = 1.\n pvalues = np.maximum(1. / n_perm, pvalues)\n elif mcp in ['fdr', 'bonferroni']:\n from mne.stats import fdr_correction, bonferroni_correction\n fcn = fdr_correction if mcp is 'fdr' else bonferroni_correction\n # compute the p-values\n pvalues = (effect[np.newaxis, ...] <= perm).sum(0) / n_perm\n pvalues = np.maximum(1. 
/ n_perm, pvalues)\n # apply correction\n is_signi, pvalues = fcn(pvalues, alpha=p)\n pvalues[~is_signi] = 1.\n\n return pvalues", "def trans_prob(next_s, q, d):\n\n next_q, next_r, next_w = next_s\n\n A_actions = [0, 1, 2, 3, 4]\n\n prob = 0\n\n for a in A_actions:\n\n prob_r = attraction_h(next_r[0], a)\n\n q1 = attraction_g(next_q[0], q, d, a)\n q2 = attraction_g(1-next_q[0], q, d, a)\n prob_q = q1 / (q1 + q2)\n\n prob += a_given_s(a, q) * prob_r * prob_q\n\n return prob", "def algorithm_1_2(p, c, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (1 - x) * q[j] + x * q[j + 1]\n return q[0]", "def model(r, p0, n=1):\n# print \"oi\"\n Pt = zeros(n, float) # initialize the output vector\n P = p0\n for i in xrange(n):\n Pt[i] = r*P\n P = Pt[i]\n \n return Pt", "def computePPFwithAlpha(self, ddist = 5.0, dangle = 2*math.pi/12.0):\n\n # global model descriptor, gmd\n gmd = {}\n\n ntemppoint = self.temppnts.shape[0]\n for i in range(ntemppoint):\n print i, ntemppoint\n for j in range(ntemppoint):\n # for i in range(0,1):\n # for j in range(3,4):\n m_0 = np.asarray(self.temppnts[i])\n m_1 = np.asarray(self.temppnts[j])\n v_m0m1 = m_0-m_1\n v_m1m0 = m_1-m_0\n n_m0 = self.tempnormals[i]\n n_m1 = self.tempnormals[j]\n # f1, namely ||d||2\n f1 = np.linalg.norm(m_0-m_1)\n # f2, namely angle between n_m0 and v_m1m0\n f2 = rm.radian_between(n_m0, v_m1m0)\n # f3, namely angle between n_m1 and v_m0m1\n f3 = rm.radian_between(n_m1, v_m0m1)\n # f4, namely angle between n_m0 and n_m1\n f4 = rm.radian_between(n_m0, n_m1)\n # discretize the values\n f1d = math.floor(f1/ddist)*ddist+ddist\n f2d = math.floor(f2/dangle)*dangle+dangle\n f3d = math.floor(f3/dangle)*dangle+dangle\n f4d = math.floor(f4/dangle)*dangle+dangle\n key = (f1d, f2d, f3d, f4d)\n # angle between n_m0 and x+\n xplus = np.asarray([1,0,0])\n yplus = np.asarray([0,1,0])\n nm0xangle = math.degrees(rm.radian_between(n_m0, xplus))\n rotax = np.cross(xplus, n_m0)\n if np.isnan(rotax).any() or not rotax.any():\n continue\n rotmat = rm.rodrigues(rotax, nm0xangle)\n v_m1m0onxplus = np.dot(v_m1m0, rotmat)\n v_m1m0onxplusyzproj = np.asarray([0, v_m1m0onxplus[1], v_m1m0onxplus[2]])\n alpha_m0 = rm.radian_between(v_m1m0onxplusyzproj, yplus)\n if v_m1m0onxplus[2] < 0:\n alpha_m0 = 2*math.pi - alpha_m0\n # debug\n # before transform\n pg.plotArrow(base.render, spos = m_0, epos = m_1, rgba=Vec4(0,1,0,1))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+n_m0, rgba = Vec4(1,0,0,1))\n # after transform\n # print v_m1m0onxplus\n # print v_m1m0onxplusyzproj\n pg.plotArrow(base.render, spos = m_0, epos = v_m1m0onxplus+m_0, rgba=Vec4(0,.7,.7,1))\n pg.plotArrow(base.render, spos = m_0, epos = v_m1m0onxplusyzproj+m_0, rgba=Vec4(.70,.7,.7,1))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+xplus, rgba = Vec4(.7,0,.7,1))\n # alpha_m0\n # print np.degrees(alpha_m0)\n # plot aixs\n zplus = np.asarray([0,0,1])\n pg.plotArrow(base.render, spos = m_0, epos = m_0+xplus*10, rgba = Vec4(.3,0,0,.3))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+yplus*10, rgba = Vec4(0,.3,0,.3))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+zplus*10, rgba = Vec4(0,0,.3,.3))\n\n if key in gmd.keys():\n gmd[key].append([m_0, m_1, alpha_m0])\n else:\n gmd[key] = [[m_0, m_1, alpha_m0]]", "def proposal_rule(cov, mean, npix):\n npix = int(npix)\n \n params = np.random.multivariate_normal(mean, cov)\n # check if parameters are in right domain\n \n params[:npix] = test_params(params[:npix], mean[:npix],\\\n cov[:npix,:npix], 
crit='Rpp')\n params[npix:-1] = test_params(params[npix:-1], mean[npix:-1],\\\n cov[npix:-1,npix:-1], crit='Pb')\n params[-1] = test_params(params[-1], mean[-1], cov[-1,-1], crit='psib')\n \n #print(params) \n return(params)", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def get_clarifications_piqa(ex, nlp, comet_model):\n # Questions are usually like \"how would you do something?\"\n personx = \"you\"\n\n input_event = ex[\"goal\"].replace(\"?\", \"\")\n outputs = {category: comet_model.predict(input_event, category, num_beams=5) for category in comet_model.categories}\n\n # We only care about preconditions and postconditions for X\n relevant_categories = [\"xIntent\", \"xNeed\", \"xEffect\", \"xWant\"]\n curr_events = []\n for category in relevant_categories:\n prefix = CATEGORY_TO_PREFIX[category]\n for out_event in outputs[category]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", personx, out_event, flags=re.I)\n out_event = re.sub(\"person x\", personx, out_event, flags=re.I)\n out_event = re.sub(\"persony\", \"others\", out_event, flags=re.I)\n out_event = re.sub(\"person y\", \"others\", out_event, flags=re.I)\n\n question = CATEGORY_TO_QUESTION[category].replace(\"PersonX\", personx)\n curr_events.append((question, out_event))\n\n return curr_events", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def attraction_g(next_q, q, d, a):\n\n if a == 0:\n if next_q == 0:\n xi_D = 8\n else:\n xi_D = 1\n\n elif a == 1:\n xi_D = 1\n\n elif a == 2:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 3\n\n elif a == 3:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 2\n\n else:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 4\n\n dqq = 0\n if next_q == 1 and q == 0:\n if d[3] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 3:\n dqq = 1\n elif next_q == 0 and q == 1:\n if d[5] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 0:\n dqq = 1\n\n return xi_D + dqq", 
"def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals([email protected]_matrix)", "def jointUncertaintyMilp(mdp, oldPi, oldZC, unknownFeatStates, costOfQuery):\n m = Model()\n m.setParam('OutputFlag', False)\n\n # convert notation to previous implementation\n S = mdp.S\n A = mdp.A\n R = mdp.rFuncs\n psi = mdp.psi\n T = mdp.T\n alpha = mdp.alpha\n gamma = mdp.gamma\n\n # useful constants\n rLen = len(R)\n M = 10000 # a large number\n Sr = range(len(S))\n Ar = range(len(A))\n\n # decision variables\n x = m.addVars(len(S), len(A), lb=0, name='x')\n y = m.addVars(rLen, name='y')\n # y prime, a helper variable\n y0 = m.addVars(rLen, name='y0', lb=0)\n\n # oldPi is a mapping from state, action (in S x A) to occupancy\n # to be consistent with x, convert it to a mapping from (s, a) where s in Sr, a in Ar\n oldX = {(s, a): oldPi[S[s], A[a]] for s in Sr for a in Ar}\n\n # integer variables\n zR = m.addVars(rLen, vtype=GRB.BINARY, name='zR')\n zC = m.addVars(len(unknownFeatStates), vtype=GRB.BINARY, name='zC')\n # zCNew indicates the newly changed features by x. note that it does not need to be constrained as integers\n zCNew = m.addVars(len(unknownFeatStates), lb=0, name='zCNew')\n\n zSafe = m.addVar(vtype=GRB.BINARY, name='zSafe')\n\n V = lambda x_local, r: sum([x_local[s, a] * r(S[s], A[a]) for s in Sr for a in Ar])\n\n # (a) flow conservation constraint\n for sp in Sr:\n m.addConstr(sum(x[s, a] * ((s == sp) - gamma * T(S[s], A[a], S[sp])) for s in Sr for a in Ar) == alpha(S[sp]))\n\n # (b) is encoded in the transition function\n\n for consIdx in range(len(unknownFeatStates)):\n # (c) unknown features can be changed\n m.addConstr(M * zC[consIdx] >= sum(x[S.index(s), A.index(a)] for s in unknownFeatStates[consIdx] for a in A))\n # (d) constrain z^{new}_\\phi, note that lb of zCNew is 0\n m.addConstr(zCNew[consIdx] >= zC[consIdx] - oldZC[consIdx])\n\n # (e) constraints on y^0_r\n m.addConstr(sum(zC[idx] for idx in range(len(oldZC)) if oldZC[idx] == 1) <= sum(oldZC) - 1 + zSafe * M)\n for i in range(rLen):\n m.addConstr(y0[i] >= V(oldX, R[i]) - (1 - zSafe) * M)\n\n # (f) constraints on y_r\n for i in range(rLen):\n m.addConstr(y[i] <= V(x, R[i]) - y0[i] + (1 - zR[i]) * M)\n m.addConstr(y[i] <= 0 + zR[i] * M)\n\n # obj\n m.setObjective(sum([psi[i] * y[i] for i in xrange(rLen)])\n - sum(zC[idx] * costOfQuery for idx in range(len(unknownFeatStates))),\n GRB.MAXIMIZE)\n\n m.optimize()\n\n pi = {(S[s], A[a]): x[s, a].X for s in Sr for a in Ar}\n\n if config.VERBOSE:\n # print decision variables other than pi for debugging\n print 'oldZC', oldZC\n print 'zC', [zC[consIdx].X for consIdx in range(len(unknownFeatStates))]\n print 'y0 values', [y0[rIdx].X for rIdx in range(rLen)]\n print 'y values', [y[rIdx].X for rIdx in range(rLen)]\n\n if m.status == GRB.Status.OPTIMAL:\n # return feasible being true and the obj value, opt pi\n # .X attribute is to retrieve the value of the variable\n return pi\n else:\n # simply return infeasible\n raise Exception('milp problem optimal solution not found' + m.status)", "def op_fresnel_reflection(m, theta):\n rho_p = pypolar.fresnel.r_par_amplitude(m, theta)\n rho_s = pypolar.fresnel.r_per_amplitude(m, theta)\n a = abs(rho_s)**2 + abs(rho_p)**2\n b = abs(rho_s)**2 - abs(rho_p)**2\n c = 2 * rho_s * rho_p\n mat = np.array([[a, b, 0, 
0],\n [b, a, 0, 0],\n [0, 0, c, 0],\n [0, 0, 0, c]])\n return 0.5 * mat", "def generate_parameters(self):\n self.parameters = np.zeros(self.D)\n for l in range(self.D):\n if self.p_l[l] >= np.random.uniform(0,1):\n self.parameters[l] = 1", "def trimdynamic_pe(records1, records2, args):\n for rec1, rec2 in izip(records1, records2):\n cutpos1 = 0\n cutpos2 = 0\n tmp_qual1 = [0 if x < args.q else 1 for x in rec1.letter_annotations['phred_quality']]\n tmp_qual1.append(0)\n jumps1 = [i for i, x in enumerate(tmp_qual1[:len(tmp_qual1) - 1]) if [tmp_qual1[i], tmp_qual1[i + 1]] == [1, 0]]\n if len(jumps1) == 0:\n cutpos1 = 0\n if len(jumps1) != 0:\n cutpos1 = numpy.max(jumps1) + 1\n rec1 = rec1[:cutpos1]\n tmp_qual2 = [0 if x < args.q else 1 for x in rec2.letter_annotations['phred_quality']]\n tmp_qual2.append(0)\n jumps2 = [i for i, x in enumerate(tmp_qual2[:len(tmp_qual2) - 1]) if [tmp_qual2[i], tmp_qual2[i + 1]] == [1, 0]]\n if len(jumps2) == 0:\n cutpos2 = 0\n if len(jumps2) != 0:\n cutpos2 = numpy.max(jumps2) + 1\n rec2 = rec2[:cutpos2]\n if args.r:\n rec1 = rec1.reverse_complement(name=True,id=True,description=True)\n rec2 = rec2.reverse_complement(name=True,id=True,description=True)\n if args.d:\n rec1.name += '/1'\n rec1.id += '/1'\n rec1.description += '/1'\n rec2.name += '/2'\n rec2.id += '/2'\n rec2.description += '/2'\n y1 = False\n y2 = False\n if len(rec1) >= args.m and numpy.mean(rec1.letter_annotations['phred_quality']) >= args.a:\n y1 = True\n if len(rec2) >= args.m and numpy.mean(rec2.letter_annotations['phred_quality']) >= args.a:\n y2 = True\n if y1 and y2:\n yield rec1, None, rec2, None, 'pe'\n if y1 and not y2:\n yield None, rec1, None, None, 'se1'\n if not y1 and y2:\n yield None, None, None, rec2, 'se2'", "def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))", "def create_datastructures_for_structural_model(reflections, experiments, cif_file):\n\n # read model, compute Fc, square to F^2\n ic = intensity_array_from_cif_file(cif_file)\n exp = deepcopy(experiments[0])\n params = Mock()\n params.decay_correction.return_value = False\n exp.scaling_model = KBScalingModel.from_data(params, [], [])\n exp.scaling_model.set_scaling_model_as_scaled() # Set as scaled to fix scale.\n\n # Now put the calculated I's on roughly a common scale with the data.\n miller_indices = flex.miller_index([])\n intensities = flex.double([])\n\n for refl in reflections:\n miller_indices.extend(refl[\"miller_index\"])\n intensities.extend(refl[\"intensity.prf.value\"])\n miller_set = miller.set(\n crystal_symmetry=crystal.symmetry(\n space_group=experiments[0].crystal.get_space_group()\n ),\n indices=miller_indices,\n anomalous_flag=True,\n )\n idata = miller.array(miller_set, data=intensities)\n\n match = idata.match_indices(ic)\n pairs = match.pairs()\n\n icalc = flex.double()\n iobs = flex.double()\n miller_idx = flex.miller_index()\n for p in pairs:\n # Note : will create miller_idx duplicates in i_calc - problem?\n iobs.append(idata.data()[p[0]])\n icalc.append(ic.data()[p[1]])\n miller_idx.append(ic.indices()[p[1]])\n\n icalc *= flex.sum(iobs) / flex.sum(icalc)\n\n rt = flex.reflection_table()\n rt[\"intensity\"] = icalc\n rt[\"miller_index\"] = miller_idx\n\n exp.identifier = ersatz_uuid4()\n rt.experiment_identifiers()[len(experiments)] = exp.identifier\n rt[\"id\"] = flex.int(rt.size(), len(experiments))\n\n return exp, rt", "def all_priors(\n p,\n qu,\n var,\n name=\"\"):\n \n # Means of quantile differences\n qu_diff_m = 
np.array([qu[0], qu[1] - qu[0], qu[2] - qu[1]])\n \n # Means and variances of quantile differences\n qu_diff_mv = np.transpose(np.array([qu_diff_m, var]))\n \n # Parameters for quantile differences\n para_qu_diff = np.array([util.tn_para(*row) for row in qu_diff_mv])\n \n # Parameters for quantiles\n para_qu = util.para_for_quantiles(para_qu_diff)\n \n return np.array([\n PriorQ(p, [3, \"i\"], para_qu_diff, name),\n PriorQ(p[:2], [2, \"i\"], para_qu_diff[:2], name),\n PriorQ(p, [3, \"me\"], para_qu, name),\n PriorQ(p[:2], [2, \"me\"], para_qu[:2], name)])", "def make_matrix(p, q):\n M = [[ele[0] * ele[1] for ele in itertools.product([player, 1 - player], \n [opponent, 1 - opponent])]\n for opponent in q for player in p]\n return np.array(M)", "def clfqp(self,x,p):\n alp = self.alp_opt\n nu = self.nu_opt\n dt = self.dt\n n = self.n\n I = np.identity(n)\n M = self.ncm(x,p)\n nu = np.size(self.h_or_g(x,p),1)\n u = cp.Variable((nu,1))\n e = np.reshape(x,(n,1))\n fx = np.reshape(self.dynamicsf(x,p),(n,1))\n gx = self.h_or_g(x,p)\n dMdt = (nu*I-M)/dt\n constraints = [2*e.T@(fx+gx@u)+e.T@dMdt@e <= -2*alp*e.T@M@e]\n prob = cp.Problem(cp.Minimize(cp.sum_squares(u)),constraints)\n prob.solve()\n u = u.value\n u = np.ravel(u)\n return u", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def get_cnfs(method_fun, q0=np.deg2rad([0, 30, 0, -20, 0, 45, 0]), kwargs=dict(), start = [], finish = [], steps = 2):\n x = np.hstack([\n np.linspace(start[0], finish [0], steps),\n ])\n y = np.hstack([\n np.linspace(start[1], finish [1], steps),\n ])\n z = np.hstack([\n np.linspace(start[2], finish [2], steps),\n ])\n rob_cnfs = [] # will contain the result of each inverse kinematics\n start_time = time.time()\n for (i, j, k) in zip(x, y, z):\n pos = [i, j, k]\n q = method_fun(q0, pos, **kwargs)\n rob_cnfs.append(q)\n print(rob_cnfs)\n # q0 = q # Sets the new initial joint configurations to the previous\n end_time = time.time()\n\n print(f\"\\n{np.round(end_time - start_time, 1)} seconds : Total time using {method_fun.__name__} \\n\")\n if kwargs: print(f\"\\nParameters used: {kwargs}\")\n\n plot_robots(rob_cnfs, traj_x=x, traj_y=y, traj_z=z)", "def generate(self, t):\n t /= self.T\n q = self.a_3 * t**3 + self.a_2 * t**2 * (1 - t) + self.a_1 * t * (1 - t)**2 + self.a_0 * (1 - t)**3\n q_dot = (t**2)*(((-3)*self.a_0+3*self.a_1-3*self.a_2+3*self.a_3))+t*(6*self.a_0-4*self.a_1+2*self.a_2)+self.a_1\n q_ddot = 
t*(((-6)*self.a_0+6*self.a_1-6*self.a_2+6*self.a_3))+6*self.a_0-4*self.a_1+2*self.a_2\n q_dot = q_dot / self.T\n q_ddot = q_ddot / (self.T)**2\n return q, q_dot, q_ddot", "def Mach(h,Vc,p):\n return np.sqrt(2/(gamma-1)*((1+p0/p*((1+(gamma-1)/(2*gamma)*rho0/p0*Vc**2)**(gamma/(gamma-1))-1))**((gamma-1)/gamma)-1))", "def refmat(p, q):\n p = p.normalized()\n q = q.normalized()\n if (p - q).norm() < 1e-5:\n return numpy.identity(3)\n pq = p - q\n pq.normalize()\n b = pq.get_array()\n b.shape = (3, 1)\n i = numpy.identity(3)\n ref = i - 2 * numpy.dot(b, numpy.transpose(b))\n return ref", "def proba(c_pred,m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:2]))\n elif i <4:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[2:4]))\n if i >=4:\n if i <6:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[6:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[4]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[8:10]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:3]))\n elif i <5:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[3:5]))\n if i >=5:\n if i <8:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[5:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)", "def prob2():\n x, i, j = sy.symbols('x, i, j')\n expr = sy.product(sy.summation(j*(sy.sin(x) + sy.cos(x)), (j, i, 5)), (i, 1, 5))\n return sy.simplify(expr)", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # 
collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def q_posterior(self, x_start, x_t, t):\n\n x_t_shape = tf.shape(x_t)\n posterior_mean = (\n self._extract(self.posterior_mean_coef1, t, x_t_shape) * x_start\n + self._extract(self.posterior_mean_coef2, t, x_t_shape) * x_t)\n posterior_variance = self._extract(self.posterior_variance, t, x_t_shape)\n posterior_log_variance_clipped = self._extract(\n self.posterior_log_variance_clipped, t, x_t_shape)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def calP(self):\n N = len(self.listOfParticles)\n m = self.listOfParticles[0].m\n vsum = 0\n for particle in self.listOfParticles:\n vsum += particle.V.len()\n A = np.pi*self.R**2\n F = 0.5 * A * (2*self.R) * m * N * vsum**2\n return F", "def calcMisc(t, q, u, p):\n m=dict()\n # Split state into positions and speeds (qx, qxd), uaero states (qxa_ua), dynamic inflow states (qxa_di)\n m['qx'], m['qxd'], m['qxa_ua'], m['qxa_di'] = split_q(q, p['Iqxs'], p['Iqxsd'], p['Iqxa_ua'], p['Iqxa_di'])\n\n # Structural states (length 3, even if not all DOFs are actice)\n m['q_full'], m['x'], m['xd'] = inflate_q(q, Iq=p['Iq'])\n\n # Orientation of the section\n m['Ux'], m['Uy'], m['theta_p'] = inputsAtTime(t, u)\n th = m['x'][2]\n m['omega'] = m['xd'][2]\n m['theta'] = th + m['theta_p'] + p['beta'] \n m['rho_x'] = (-p['x_AG']* np.sin(m['theta']) + p['y_AG']*np.cos(m['theta']) )\n m['rho_y'] = (-p['x_AG']* np.sin(m['theta']) + p['y_AG']*np.cos(m['theta']) )\n return m", "def testFactorDGP(self):\n N1, N0 = 2, 100\n treated_units = [0, 1]\n T0, T1 = 20, 10\n K, R, F = 5, 5, 5\n (\n Cov_control,\n Cov_treated,\n Out_pre_control,\n Out_pre_treated,\n Out_post_control,\n Out_post_treated,\n ) = factor_dgp(N0, N1, T0, T1, K, R, F)\n\n Cov = np.vstack((Cov_treated, Cov_control))\n Out_pre = np.vstack((Out_pre_treated, Out_pre_control))\n Out_post = np.vstack((Out_post_treated, Out_post_control))\n\n 
SC.estimate_effects(\n Out_pre,\n Out_post,\n treated_units,\n Cov,\n # constrain=\"simplex\", -- handled by argparse now..\n **command_line_options,\n )\n\n # print(fit_res)\n # est_res = SC.estimate_effects(\n # Cov, Out_pre, Out_post, treated_units, V_penalty=0, W_penalty=0.001\n # )\n # print(est_res)", "def mutual_information(pi, pj, pij):\n p_i = 1 - pi\n p_j = 1 - pj\n p_ij = pj - pij\n pi_j = pi - pij\n p_i_j = 1 - pi - pj + pij\n \n log_pi = log(pi)\n log_pj = log(pj)\n log_p_i = log(p_i)\n log_p_j = log(p_j)\n \n mi = pij * (log(pij) - log_pi - log_pj) + \\\n pi_j * (log(pi_j) - log_pi - log_p_j) + \\\n p_i_j * (log(p_i_j) - log_p_i - log_p_j)\n if p_ij != 0: # For language groups and features, this is the only probability that could be zero, and lim_x->0[x*log(x)] = 0 \n mi += p_ij * (log(p_ij) - log_p_i - log_pj)\n \n return mi", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "def test_y_generate():\n a = Attractor()\n #say x, y, z = [0.1, 0.0, 0.0]\n\n dx = (10.0 * (0.0 - 0.1)) * (80.0-0.0)/10000 + 0.1\n dy = (0.1 * (28 - 0.0) - 0.0) * (80.0-0.0)/10000 + 0.0\n dz = ((0.1 * 0.0) - (8/3 * 0.0)) * (80.0-0.0)/10000 + 0.0\n ex_1 = np.array([dx, dy, dz])\n\n dx2 = (10.0 * (dy - dx)) * (80.0-0.0)/10000.0 + dx \n dy2 = (dx * (28.0 - dz) - dy) * (80.0-0.0)/10000.0 + dy\n dz2 = ((dx * dy) - (8/3 * dz)) * (80.0-0.0)/10000.0 + dz\n ex_2 = np.array([dx2, dy2, dz2])\n\n dx3 = (10.0 * (dy2 - dx2)) * (80.0-0.0)/10000.0 + dx2\n dy3 = (dx2 * (28.0 - dz2) - dy2) * (80.0-0.0)/10000.0 + dy2\n dz3 = ((dx2 * dy2) - (8/3 * dz2)) * (80.0-0.0)/10000.0 + dz2\n ex_3 = np.array([dx3, dy3, dz3])\n\n dx4 = (10.0 * (dy3 - dx3)) * (80.0-0.0)/10000.0 + dx3\n dy4 = (dx3 * (28 - dz3) - dy3) * (80.0-0.0)/10000.0 + dy3\n dz4 = ((dx3 * dy3) - (8/3 * dz3)) * (80.0-0.0)/10000.0 + dz3\n ex_4 = np.array([dx4, dy4, dz4])\n\n dx5 = (10.0 * (dy4 - dx4)) * (80.0-0.0)/10000.0 + dx4\n dy5 = (dx4 * (28 - dz4) - dy4) * (80.0-0.0)/10000.0 + dy4\n dz5 = ((dx4 * dy4) - (8/3 * dz4)) * (80.0-0.0)/10000.0 + dz4\n ex_5 = np.array([dx5, dy5, dz5])\n\n \n a.evolve(order = 4)\n y_list = a.solution['y'].tolist()\n \n for i in y_list[:6]:\n yy = round(i, 2)\n for j in [0.0, dy, dy2, dy3, dy4, dy5]:\n yyy = round(j, 2)\n \n print (\"Actual increments: \", yy)#str(a.solution()['x']).strip('[]'))\n print (\"Expected increments: \", yyy)\n assert yy == yyy", "def run(method = 'ParetoMTL', num = 10):\r\n \r\n pf = create_pf()\r\n f_value_list = []\r\n \r\n weights = circle_points([1], [num])[0]\r\n \r\n\r\n \r\n for i in range(num):\r\n \r\n print(i)\r\n \r\n if method == 'ParetoMTL':\r\n x, f = pareto_mtl_search(ref_vecs = weights,i = i)\r\n if method == 'MOOMTL':\r\n x, f = moo_mtl_search()\r\n if method == 'Linear':\r\n x, f = linear_scalarization_search()\r\n \r\n f_value_list.append(f)\r\n \r\n \r\n f_value = np.array(f_value_list)\r\n plt.plot(pf[:,0],pf[:,1])\r\n plt.scatter(f_value[:,0], f_value[:,1], c = 'r', s = 80)", "def test_step_constructors(ndraw=1000, burnin=200):\n\n cls = step\n for const_info, rand in product(zip([gaussian_instance,\n logistic_instance,\n poisson_instance],\n [cls.gaussian,\n cls.logistic,\n cls.poisson]),\n ['gaussian', 'logistic', 'laplace']):\n\n inst, const = const_info\n X, Y = inst()[:2]\n W = np.ones(X.shape[1])\n conv = const(X, Y, W)\n conv.fit()\n\n n, p = X.shape\n active = np.zeros(p, np.bool)\n active[:int(p/2)] = True\n\n candidate = ~active\n candidate[-int(p/4):] = False\n\n conv1 = const(X, Y, W, active=active)\n 
conv1.fit()\n\n conv2 = const(X, Y, W, candidate=candidate)\n conv2.fit()\n \n conv3 = const(X, Y, W, candidate=candidate, active=active)\n conv3.fit()\n \n selected_features = np.zeros(p, np.bool)\n selected_features[:3] = True\n\n conv3.summary(selected_features,\n ndraw=ndraw,\n burnin=burnin,\n compute_intervals=True)", "def get_features_critic(state):\n # reshape to make it a matrix with one row (so we can transpose it later)\n p, v = state\n p_v = np.array([p, v]).reshape((1, -1)).T\n X = np.array([p_v - c_entry.T for c_entry in C])\n inv_cov = np.linalg.inv(np.diag([0.04, 0.0004]))\n phi = np.array([np.exp(-(xi.T @ inv_cov @ xi) / 2) for xi in X])\n\n return np.squeeze(phi) # get rid of 2 unnecessary dimensions", "def compute_pg_vars(trajs, policy, baseline, discount, gae_lambda):\n for traj in trajs:\n # Include the last observation here, in case the trajectory is not finished\n baselines = baseline.predict(np.concatenate(\n [traj[\"observations\"], [traj[\"last_observation\"]]]))\n if traj['finished']:\n # If already finished, the future cumulative rewards starting from the final state is 0\n baselines[-1] = 0.\n # This is useful when fitting baselines. It uses the baseline prediction of the last state value to perform\n # Bellman backup if the trajectory is not finished.\n traj['returns'] = compute_cumulative_returns(\n traj['rewards'], baselines, discount)\n traj['advantages'] = compute_advantages(\n traj['rewards'], baselines, discount, gae_lambda)\n traj['baselines'] = baselines[:-1]\n\n # First, we compute a flattened list of observations, actions, and advantages\n all_obs = np.concatenate([traj['observations'] for traj in trajs], axis=0)\n all_acts = np.concatenate([traj['actions'] for traj in trajs], axis=0)\n all_advs = np.concatenate([traj['advantages'] for traj in trajs], axis=0)\n all_dists = {\n k: np.concatenate([traj['distributions'][k] for traj in trajs], axis=0)\n for k in trajs[0]['distributions'].keys()\n }\n\n # Normalizing the advantage values can make the algorithm more robust to reward scaling\n all_advs = (all_advs - np.mean(all_advs)) / (np.std(all_advs) + 1e-8)\n\n # Form chainer variables\n all_obs = Variable(all_obs)\n all_acts = Variable(all_acts)\n all_advs = Variable(all_advs.astype(np.float32, copy=False))\n all_dists = policy.distribution.from_dict(\n {k: Variable(v) for k, v in all_dists.items()})\n\n return all_obs, all_acts, all_advs, all_dists", "def sigmai_dep(ptem, psal, pref):\n zr4 = 4.8313e-4\n zd =-2.042967e-2\n zrau0 = 1000.e0\n \n sigmai_dep_out = zeros(psal.shape)\n \n # ?? for whatever reason sqrt(abs(psal)) seems to kick up a fuss when arrays\n # exceed a certain size...??? 
otherwise this could be vectorised\n # TODO: if pref is a number, broadcast it into a 2d field\n \n for jj in range(psal.shape[0]): # python indexing\n for ji in range(psal.shape[1]):\n \n ztem = ptem[jj, ji]\n zsal = psal[jj, ji]\n zws = sqrt( abs(psal[jj, ji]) )\n \n # Compute the volumic mass of pure water at atmospheric pressure.\n zr1 = ( ( ( ( (6.536332e-9 * ztem - 1.120083e-6) * ztem + 1.001685e-4 )\n * ztem - 9.095290e-3 ) * ztem + 6.793952e-2 ) * ztem + 999.842594e0\n )\n\n # Compute the seawater volumic mass at atmospheric pressure.\n zr2 = ( ( ( ( 5.3875e-9 * ztem - 8.2467e-7) * ztem + 7.6438e-5)\n * ztem - 4.0899e-3) * ztem + 0.824493e0\n )\n\n zr3 = (-1.6546e-6 * ztem + 1.0227e-4) * ztem - 5.72466e-3\n\n # Compute the potential volumic mass (referenced to the surface).\n zrhop = (zr4 * zsal + zr3 * zws + zr2) * zsal + zr1\n\n # Compute the compression terms.\n ze = (-3.508914e-8 * ztem - 1.248266e-8) * ztem - 2.595994e-6\n\n zbw = (1.296821e-6 * ztem - 5.782165e-9) * ztem + 1.045941e-4\n\n zb = zbw + ze * zsal\n\n zc = (-7.267926e-5 * ztem + 2.598241e-3) * ztem + 0.1571896e0\n\n zaw = ( ( (5.939910e-6 * ztem + 2.512549e-3) * ztem - 0.1028859e0 ) \n * ztem - 4.721788e0\n )\n\n za = (zd * zws + zc) * zsal + zaw\n\n zb1 = (-0.1909078e0 * ztem + 7.390729e0) * ztem - 55.87545e0\n\n za1 = ( ( (2.326469e-3 * ztem + 1.553190e0) * ztem - 65.00517e0)\n * ztem + 1044.077e0\n )\n\n zkw = ( ( ( (-1.361629e-4 * ztem - 1.852732e-2) * ztem - 30.41638e0)\n * ztem + 2098.925e0) * ztem + 190925.60\n )\n\n zk0 = (zb1 * zws + za1) * zsal + zkw\n\n # Compute the potential density anomaly.\n sigmai_dep_out[jj, ji] = ( zrhop / (1.0e0 - pref / \n ( zk0 - pref * (za - pref * zb) ) )\n - zrau0\n )\n \n return sigmai_dep_out", "def f(t,x,p,q):\n return p[1] + q[0]*x", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n 
# for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)" ]
[ "0.55361575", "0.5507782", "0.5372579", "0.5360624", "0.53593516", "0.53491974", "0.53188616", "0.53064805", "0.52135396", "0.519285", "0.51851183", "0.51435244", "0.51339656", "0.5130794", "0.51189184", "0.5111052", "0.5100512", "0.50761175", "0.5057054", "0.50560147", "0.5050513", "0.504839", "0.50396454", "0.503058", "0.503025", "0.5024763", "0.50176847", "0.50114125", "0.5008742", "0.50072265", "0.5001951", "0.49893436", "0.49745777", "0.4971626", "0.4966349", "0.49622506", "0.49593052", "0.4946813", "0.49411413", "0.4931317", "0.49214238", "0.49142665", "0.49125564", "0.49098802", "0.49079695", "0.49009863", "0.49004298", "0.4900121", "0.48996112", "0.48844838", "0.4884236", "0.48806545", "0.4872523", "0.48713017", "0.48665157", "0.4861542", "0.48607126", "0.48575878", "0.4854965", "0.48342878", "0.48288783", "0.48271418", "0.48243135", "0.48237297", "0.4816429", "0.48132837", "0.48016465", "0.47947517", "0.47924364", "0.47854334", "0.47848892", "0.4782463", "0.47814262", "0.4780905", "0.47800848", "0.47730735", "0.47680944", "0.47680637", "0.47644213", "0.4760562", "0.47468546", "0.47447935", "0.4743395", "0.47410178", "0.47408637", "0.47406", "0.47376612", "0.4734087", "0.47331104", "0.47294983", "0.47289678", "0.4724283", "0.4722682", "0.47221822", "0.472042", "0.47164062", "0.4715113", "0.47143015", "0.47136456", "0.4712519" ]
0.6714796
0
The intersection of vector v and cell 4 x^2 + y^2 = 100 starting from p
def intersect(v, p):
    # Solve for t: 100 = 4 * (px + t * vx)^2 + (py + t * vy)^2
    # Note: 4 * px^2 + py^2 - 100 = 0
    t = sum([c*vi*pi for (c,vi,pi) in zip((-8.0, -2.0),v,p)])
    t /= sum([c * vi**2 for (c,vi) in zip((4.0,1.0),v)])
    return [pi + vi * t for (pi, vi) in zip(p, v)]
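Substituting x = p + t*v into 4x^2 + y^2 = 100 and using the noted identity 4*px^2 + py^2 = 100 leaves t*((8*px*vx + 2*py*vy) + t*(4*vx^2 + vy^2)) = 0, whose nonzero root is exactly the t computed above. A minimal sketch spot-checking that root with hypothetical sample values (the point and direction below are illustrative, not taken from this record):

# Copy of the record's intersect(), plus a spot check with made-up inputs.
def intersect(v, p):
    t = sum([c * vi * pi for (c, vi, pi) in zip((-8.0, -2.0), v, p)])
    t /= sum([c * vi ** 2 for (c, vi) in zip((4.0, 1.0), v)])
    return [pi + vi * t for (pi, vi) in zip(p, v)]

p = (3.0, 8.0)          # on the curve: 4*3**2 + 8**2 == 100
v = (1.0, -2.0)         # arbitrary direction
x, y = intersect(v, p)  # expected (4.0, 6.0): t = (-24 + 32) / 8 = 1
print(4 * x**2 + y**2)  # ~100.0, so the returned point lies back on the curve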
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersection(x, y, f, p):", "def getIntersectPoint(p1, p2, p3, p4):\n points = p1, p2, p3, p4\n gradients = (\n CollisionUtility.calculate_gradient(p1, p2), CollisionUtility.calculate_gradient(p3, p4)\n )\n\n # See if the the lines are parallel\n if gradients[0] != gradients[1]:\n return CollisionUtility.calculate_not_parallel_intersection(points, gradients)\n else:\n return CollisionUtility.calculate_parallel_intersection(points, gradients)", "def an_intersection(v1, b1):\n try:\n return intersection(v1, b1, np.array([1,1]), 0)\n except np.linalg.linalg.LinAlgError:\n print v1\n return intersection(v1, b1, np.array([-1,1]), 0)", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def intersect_triangle(v1, v2, v3, pos):\r\n #calc normal from two edge vectors v2-v1 and v3-v1\r\n nVec = cross(subtract(v2, v1), subtract(v3, v1))\r\n #equation of plane: Ax + By + Cz = kVal where A,B,C are components of normal. x,y,z for point v1 to find kVal\r\n kVal = dot(nVec,v1)\r\n #return y val i.e. y = (kVal - Ax - Cz)/B\r\n return (kVal - nVec[0]*pos[0] - nVec[2]*pos[2])/nVec[1]", "def intersects(p1, p2):\n if p1^p2:\n return -(np.dot(p1.v, p2.w) * np.eye(3, 3) + \\\n p1.w.reshape((3,1)) @ p2.v.reshape((1,3)) - \n p2.w.reshape((3,1)) @ p1.v.reshape((1,3))) * sm.unitvec(np.cross(p1.w, p2.w))\n else:\n return None", "def calculate_intersect_point(p1, p2, p3, p4):\n p = CollisionUtility.getIntersectPoint(p1, p2, p3, p4)\n if p is not None:\n points_arr = ((p1, p2), (p3, p4))\n r1, r2 = list(map(CollisionUtility.create_rect, points_arr))\n for point in p:\n try:\n res1 = r1.collidepoint(point)\n res2 = r2.collidepoint(point)\n if res1 and res2:\n point = [int(pp) for pp in point]\n return point\n except:\n str = \"point was invalid \", point\n print(str)\n return None", "def intersect_triangle(v1, v2, v3, pos):\n #calc normal from two edge vectors v2-v1 and v3-v1\n nVec = cross(subtract(v2, v1), subtract(v3, v1))\n #equation of plane: Ax + By + Cz = kVal where A,B,C are components of normal. x,y,z for point v1 to find kVal\n kVal = dot(nVec,v1)\n #return y val i.e. 
y = (kVal - Ax - Cz)/B\n return (kVal - nVec[0]*pos[0] - nVec[2]*pos[2])/nVec[1]", "def get_intersection(self, l, max_y=None):\n\n # Get the points\n i, j = self.breakpoint\n\n # Initialize the resulting point\n result = Coordinate()\n p: Coordinate = i\n\n # First we replace some stuff to make it easier\n a = i.xd\n b = i.yd\n c = j.xd\n d = j.yd\n u = 2 * (b - l)\n v = 2 * (d - l)\n\n # Handle the case where the two points have the same y-coordinate (breakpoint is in the middle)\n if i.yd == j.yd:\n result.xd = (i.xd + j.xd) / 2\n\n if j.xd < i.xd:\n result.yd = max_y or float('inf')\n return result\n\n # Handle cases where one point's y-coordinate is the same as the sweep line\n elif i.yd == l:\n result.xd = i.xd\n p = j\n elif j.yd == l:\n result.xd = j.xd\n else:\n # We now need to solve for x\n # 1/u * (x**2 - 2*a*x + a**2 + b**2 - l**2) = 1/v * (x**2 - 2*c*x + c**2 + d**2 - l**2)\n # Then we let Wolfram alpha do the heavy work for us, and we put it here in the code :D\n x = -(Decimal.sqrt(\n v * (a ** 2 * u - 2 * a * c * u + b ** 2 * (u - v) + c ** 2 * u) + d ** 2 * u * (v - u) + l ** 2 * (\n u - v) ** 2) + a * v - c * u) / (u - v)\n result.xd = x\n\n # We have to re-evaluate this, since the point might have been changed\n a = p.xd\n b = p.yd\n x = result.xd\n u = 2 * (b - l)\n\n # Handle degenerate case where parabolas don't intersect\n if u == 0:\n result.yd = float(\"inf\")\n return result\n\n # And we put everything back in y\n result.yd = 1 / u * (x ** 2 - 2 * a * x + a ** 2 + b ** 2 - l ** 2)\n return result", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def compute_intersecting(voxel, R, kdt, max_segment): \n\tsubset = np.unique(si[kdt.query_radius(voxel, r=R+max_segment)[0]]).astype(np.int)\n\treturn subset[np.array([track_roi_intersection_check(s, voxel, sq_dist_thr=R**2) for s in tracks[subset]])]", "def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def intersection(self, pn1, pn2, h):\n #print \"intersectionection:\", pn1, pn2, h\n #print \"z: \", (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0]\n return (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0], h", "def intersection(boxes, box):\n ix = np.maximum(0, np.minimum(box[2], boxes[:,2]) - np.maximum(box[0], boxes[:,0]))\n iy = np.maximum(0, np.minimum(box[3], boxes[:,3]) - np.maximum(box[1], boxes[:,1]))\n return ix*iy", "def intersection(a, b):\n x = max(a[0],b[0])\n y = max(a[1],b[1])\n w = min(a[2],b[2]) - x\n h = min(a[3],b[3]) - y\n \n if h<0 or w<0 :\n return 0\n \n return h*w", "def checkintersection(p1,p2,p3,p4):\n def 
isonsegment(i,j,k):\n return ((i.x <= k.x or j.x <= k.x) and (k.x <= i.x or k.x <= j.x) and\n (i.y <= k.y or j.y <= k.y) and (k.y <= i.y or k.x <= j.y))\n\n def computedirection(i,j,k):\n a = (k.x - i.x) * (j.y - i.y);\n b = (j.x - i.x) * (k.y - i.y);\n if a < b:\n return -1\n elif a > b:\n return 1\n else:\n return 0\n\n # return no intersection if they\n if p1.x == p3.x and p1.y == p3.y:\n return False \n if p1.x == p4.x and p1.y == p4.y:\n return False\n if p2.x == p3.x and p2.y == p3.y:\n return False\n if p2.x == p4.x and p2.y == p4.y:\n return False\n\n\n d1 = computedirection(p3,p4,p1)\n d2 = computedirection(p3,p4,p2)\n d3 = computedirection(p1,p2,p3)\n d4 = computedirection(p1,p2,p4)\n return ((((d1 > 0 and d2 < 0) or (d1 < 0 and d2 > 0)) and\n ((d3 > 0 and d4 < 0) or (d3 < 0 and d4 > 0))) or\n (d1 == 0 and isonsegment(p3,p4,p1)) or\n (d2 == 0 and isonsegment(p3,p4,p2)) or\n (d3 == 0 and isonsegment(p1,p2,p3)) or\n (d4 == 0 and isonsegment(p1,p2,p4)))", "def line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def intersection(self, segment):\n p0, p1 = segment.p0, segment.p1\n\n # x = t*(p1 - p0) + p0\n # n'*(x - origin) = 0\n # combine to get\n # n'*(t*(p1-p0) + p0 - origin) = 0\n # solve for t\n\n v = p1 - p0\n w = p0 - self.origin\n t = -np.dot(self.normal, w)/np.dot(self.normal, v)\n\n if 0-epsilon <= t <= 1+epsilon:\n return t*(p1-p0) + p0\n else:\n return None", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def f_v(_a, _vs, _Ps, _Ps0): # _aはスカラ, _vsはベクトル, _Ps, _Ps0は3行2列の行列\n center_pos = _Ps[0]\n center_pos_0 = _Ps0[0]\n idx_iter = Index_iterator(1, 8)\n #中心点から各点へのベクトル\n x = []\n x0 = []\n for p in (_Ps):\n x.append(p - center_pos)\n for p in _Ps(_Ps0):\n x0.append(p - center_pos_0)\n\n x01 = (_Ps[1]-center_pos) \n x02 = (_Ps[2]-center_pos) \n x03 = (_Ps[3]-center_pos) \n x04 = (_Ps[4]-center_pos) \n x05 = 
(_Ps[5]-center_pos) \n x06 = (_Ps[6]-center_pos) \n x07 = (_Ps[7]-center_pos) \n x08 = (_Ps[8]-center_pos)\n print('p_id', center_pos, end='\\t')\n print('x01:', x01, end=\"\\t\")\n print('x03:', x03, end=\"\\t\")\n print('x05:', x05, end=\"\\t\")\n print('x07:', x07)\n x001 = (_Ps0[1]-_Ps0[0]) \n x002 = (_Ps0[2]-_Ps0[0]) \n x003 = (_Ps0[3]-_Ps0[0]) \n x004 = (_Ps0[4]-_Ps0[0]) \n x005 = (_Ps0[5]-_Ps0[0]) \n x006 = (_Ps0[6]-_Ps0[0]) \n x007 = (_Ps0[7]-_Ps0[0]) \n x008 = (_Ps0[8]-_Ps0[0]) \n \n #中心点周りの面の面積\n def calc_area(j,k,l):\n s = LA.norm(np.cross(x[j],x[k]))/2 \\\n + LA.norm(np.cross(x[k],x[l]))/2\n return s\n\n s = []\n s0 = []\n hen = [1,3,5,7]\n for i in range(4):\n j,k,l = [n for n in idx_iter.get_indexes(start_idx=hen[i], 3)]\n s[i] = calc_area(j,k,l)\n s0[i] = calc_area(j,k,l)\n\n # s0123 = LA.norm(np.cross(x[1],x[2]))/2\\\n # +LA.norm(np.cross(x[2],x[3]))/2\n # s4367 = LA.norm(np.cross(x[3],x[4]))/2\\\n # +LA.norm(np.cross(x[4],x[5]))/2\n # s4785 = LA.norm(np.cross(x[5],x[6]))/2\\\n # +LA.norm(np.cross(x[6],x[7]))/2\n # s4521 = LA.norm(np.cross(x[7],x[8]))/2\\\n # +LA.norm(np.cross(x[8],x[1]))/2\n # s04103 = LA.norm(np.cross(x0[1],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[3]))/2\n # s04367 = LA.norm(np.cross(x0[3],x0[4]))/2\\\n # +LA.norm(np.cross(x0[4],x0[7]))/2\n # s04785 = LA.norm(np.cross(x0[7],x0[8]))/2\\\n # +LA.norm(np.cross(x0[8],x0[5]))/2\n # s04521 = LA.norm(np.cross(x0[5],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[1]))/2\n \n #各方向への平均面積(ここだけ反時計回り順で設定してる)\n S_iminus = (s[1] + s[2]) / 2 #43方向\n S_Jminus = (s[1] + s[4]) / 2 #41方向\n S_iplus = (s[3] + s[4]) / 2 #45方向\n S_Jplus = (s[3] + s[2]) / 2 #47方向\n S_iminus0 = (s0[1] + s0[2]) / 2 #43方向\n S_Jminus0 = (s0[1] + s0[4]) / 2 #41方向\n S_iplus0 = (s0[3] + s0[4]) / 2 #45方向\n S_Jplus0 = (s0[3] + s0[2]) / 2 #47方向\n # 各方向への厚み\n h_iminus = h_0 / ((poisson/(1-poisson) * (S_iminus - S_iminus0) / S_iminus0) + 1) #43方向\n h_Jminus = h_0 / ((poisson/(1-poisson) * (S_Jminus - S_Jminus0) / S_Jminus0) + 1) #41方向\n h_iplus = h_0 / ((poisson/(1-poisson) * (S_iplus - S_iplus0) / S_iplus0) + 1) #45方向\n h_Jplus = h_0 / ((poisson/(1-poisson) * (S_Jplus - S_Jplus0) / S_Jplus0) + 1) #47方向\n # 各断片の重心\n g = []\n kado = [2,4,6,8]\n hen = [1,3,5,7]\n for i in range(len(kado)):\n _kado = kado[i]\n _hen1, _ = [idx for idx in idx_iter.get_indexes_reverse(_kado, 2)]\n _hen2, _ = [idx for idx in idx_iter.get_indexes(_kado, 2)]\n _hen = [_hen1, _hen2]\n _g1 = (center_pos + _Ps[_kado] + _Ps[_hen1])/3\n _g2 = (center_pos + _Ps[_kado] + _Ps[_hen2])/3\n g.append([_g1, _g2])\n\n g401 = (center_pos + _Ps[0] + _Ps[1]) / 3\n g430 = (center_pos + _Ps[3] + _Ps[0]) / 3\n g436 = (center_pos + _Ps[3] + _Ps[6]) / 3\n g467 = (center_pos + _Ps[6] + _Ps[7]) / 3\n g478 = (center_pos + _Ps[7] + _Ps[8]) / 3\n g485 = (center_pos + _Ps[8] + _Ps[5]) / 3\n g452 = (center_pos + _Ps[5] + _Ps[2]) / 3\n g421 = (center_pos + _Ps[2] + _Ps[1]) / 3\n g0401 = (_Ps0[4] + _Ps0[0] + _Ps0[1]) / 3\n g0430 = (_Ps0[4] + _Ps0[3] + _Ps0[0]) / 3\n g0436 = (_Ps0[4] + _Ps0[3] + _Ps0[6]) / 3\n g0467 = (_Ps0[4] + _Ps0[6] + _Ps0[7]) / 3\n g0478 = (_Ps0[4] + _Ps0[7] + _Ps0[8]) / 3\n g0485 = (_Ps0[4] + _Ps0[8] + _Ps0[5]) / 3\n g0452 = (_Ps0[4] + _Ps0[5] + _Ps0[2]) / 3\n g0421 = (_Ps0[4] + _Ps0[2] + _Ps0[1]) / 3\n \n # 各断片面積\n triangle_area = []\n kado = [2,4,6,8]\n for i in range(len(kado)):\n j, k = [idx for idx in idx_iter.get_indexes_reverse(kado[i], 1)]\n _s1 = LA.norm(np.cross(x[j],x[k]))/2\n j, k = [idx for idx in idx_iter.get_indexes(kado[i], 1)]\n _s2 = LA.norm(np.cross(x[j],x[k]))/2\n 
triangle_area.append([_s1, _s2])\n\n s410 = LA.norm(np.cross(x[1],x[2]))/2\n s403 = LA.norm(np.cross(x[2],x[3]))/2\n s436 = LA.norm(np.cross(x[3],x[4]))/2\n s467 = LA.norm(np.cross(x[4],x[5]))/2\n s478 = LA.norm(np.cross(x[5],x[6]))/2\n s485 = LA.norm(np.cross(x[6],x[7]))/2\n s452 = LA.norm(np.cross(x[7],x[8]))/2\n s421 = LA.norm(np.cross(x[8],x[1]))/2\n s0410 = LA.norm(np.cross(x0[1],x0[2]))/2\n s0403 = LA.norm(np.cross(x0[2],x0[3]))/2\n s0436 = LA.norm(np.cross(x0[3],x0[4]))/2\n s0467 = LA.norm(np.cross(x0[4],x0[5]))/2\n s0478 = LA.norm(np.cross(x0[5],x0[6]))/2\n s0485 = LA.norm(np.cross(x0[6],x0[7]))/2\n s0452 = LA.norm(np.cross(x0[7],x0[8]))/2\n s0421 = LA.norm(np.cross(x0[8],x0[1]))/2\n # 四角の重心\n\n center_g_square = []\n for i in range(len(g)):\n _g = (triangle_area[i][0]*g[i][0] + triangle_area[i][1]*g[i][1])/(triangle_area[i][0] + triangle_area[i][1])\n center_g.append(_g)\n g4103 = (s410*g401 + s403*g430) / (s410 + s403)\n g4367 = (s436*g436 + s467*g467) / (s436 + s467)\n g4785 = (s478*g478 + s485*g485) / (s478 + s485)\n g4521 = (s452*g452 + s421*g421) / (s452 + s421)\n g04103 = (s0410*g0401 + s0403*g0430) / (s0410 + s0403)\n g04367 = (s0436*g0436 + s0467*g0467) / (s0436 + s0467)\n g04785 = (s0478*g0478 + s0485*g0485) / (s0478 + s0485)\n g04521 = (s0452*g0452 + s0421*g0421) / (s0452 + s0421)\n # 各重心間の距離\n Lj82 = LA.norm(g4521 - g4103)\n Lj24 = LA.norm(g4103 - g4367)\n Lj46 = LA.norm(g4367 - g4785)\n Lj68 = LA.norm(g4785 - g4521)\n \n # ひずみ\n eps_i41 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J41 = (LA.norm(g4521 - g4103) - LA.norm(g04521 - g04103)) / LA.norm(g04521 - g04103)\n eps_i43 = (LA.norm(x03) - LA.norm(x043)) / LA.norm(x043)\n eps_J43 = (LA.norm(g4103 - g4367) - LA.norm(g04103 - g04367)) / LA.norm(g04103 - g04367)\n eps_i47 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J47 = (LA.norm(g4367 - g4785) - LA.norm(g04367 - g04785)) / LA.norm(g04367 - g04785)\n eps_i45 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J45 = (LA.norm(g4785 - g4521) - LA.norm(g04785 - g04521)) / LA.norm(g04785 - g04521)\n # 張力\n F_T1 = (young_modulus * h_Jminus * Lj82 * (eps_i41 + poisson * eps_J41) / (1 - poisson**2))*x01/LA.norm(x01)\n F_T3 = (young_modulus * h_iminus * Lj24 * (eps_i43 + poisson * eps_J43) / (1 - poisson**2))*x03/LA.norm(x03)\n F_T5 = (young_modulus * h_Jplus * Lj46 * (eps_i47 + poisson * eps_J47) / (1 - poisson**2))*x05/LA.norm(x05)\n F_T7 = (young_modulus * h_iplus * Lj68 * (eps_i45 + poisson * eps_J45) / (1 - poisson**2))*x07/LA.norm(x07)\n # せん断ひずみ\n gamma513 = (math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041)))\\\n + math.acos((np.dot(x03,x01))/(LA.norm(x03)*LA.norm(x01))) - math.acos((np.dot(x043,x041))/(LA.norm(x043)*LA.norm(x041))))/2\n gamma137 = (math.acos((np.dot(x01,x03))/(LA.norm(x01)*LA.norm(x03))) - math.acos((np.dot(x041,x043))/(LA.norm(x041)*LA.norm(x043)))\\\n + math.acos((np.dot(x03,x05))/(LA.norm(x03)*LA.norm(x05))) - math.acos((np.dot(x043,x047))/(LA.norm(x043)*LA.norm(x047))))/2\n gamma375 = (math.acos((np.dot(x05,x03))/(LA.norm(x05)*LA.norm(x03))) - math.acos((np.dot(x047,x043))/(LA.norm(x047)*LA.norm(x043)))\\\n + math.acos((np.dot(x07,x05))/(LA.norm(x07)*LA.norm(x05))) - math.acos((np.dot(x045,x047))/(LA.norm(x045)*LA.norm(x047))))/2\n gamma751 = (math.acos((np.dot(x05,x07))/(LA.norm(x05)*LA.norm(x07))) - math.acos((np.dot(x047,x045))/(LA.norm(x047)*LA.norm(x045)))\\\n + math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - 
math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041))))/2\n # せん断力\n F_S41 = ((young_modulus * h_Jminus * LA.norm(x01) * gamma513)/(2 * (1 + poisson)))*x01/LA.norm(x01)\n F_S43 = ((young_modulus * h_Jminus * LA.norm(x03) * gamma137)/(2 * (1 + poisson)))*x03/LA.norm(x03)\n F_S47 = ((young_modulus * h_Jminus * LA.norm(x05) * gamma375)/(2 * (1 + poisson)))*x05/LA.norm(x05)\n F_S45 = ((young_modulus * h_Jminus * LA.norm(x07) * gamma751)/(2 * (1 + poisson)))*x07/LA.norm(x07)\n \n # J方向の曲げ力\n n_j_cross = np.cross(x05, x01)\n if any(n_j_cross):\n n_J = n_j_cross/LA.norm(n_j_cross)\n else: \n\n l_Jalfa = LA.norm(_Ps[1] - _Ps[7])\n cos_Jalfa = (LA.norm(x01)**2 + LA.norm(x05)**2 - l_Jalfa**2) / (2 * LA.norm(x01) * LA.norm(x05))\n if cos_Jalfa > 1.0:\n cos_Jalfa = 1.0\n elif cos_Jalfa < -1.0:\n cos_Jalfa = -1.0\n sin_Jalfa = math.sqrt(1 - cos_Jalfa**2)\n CJa2 = math.sqrt((cos_Jalfa + 1)/2)\n SJa2 = math.sqrt((1 - cos_Jalfa)/2)\n zJC = (_Ps[7][2]-_Ps[1][2])/(_Ps[7][0]-_Ps[1][0]) * (center_pos[0]-_Ps[1][0]) + _Ps[1][2] #曲げ力の方向の場合わけに必要\n if center_pos[2] > zJC:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) + n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) - n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) - n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) + n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) + n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) - n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) - n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) + n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) + n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) - n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) - n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) + n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n d_etha_J = (2 * sin_Jalfa / l_Jalfa) - (2 * math.sqrt(1 - np.dot(x041,x047)**2/(LA.norm(x041)*LA.norm(x047))**2)/(LA.norm(x041 - x047)))\n\n n_i = np.cross(x07,x03)/LA.norm(np.cross(x03,x07)) \n cos_ialfa = np.dot(x03,x07) / (LA.norm(x03) * LA.norm(x07))\n sin_ialfa = math.sqrt(1 - cos_ialfa**2)\n Cia2 = math.sqrt((cos_ialfa + 1)/2)\n Sia2 = math.sqrt((1 - cos_ialfa)/2)\n ziC = (_Ps[5][2]-_Ps[3][2])/(_Ps[5][0]-_Ps[3][0]) * (center_pos[0]-_Ps[3][0]) + _Ps[3][2]\n if center_pos[2] > ziC:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) + n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) - n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) - n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) + n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) + n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) - n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) - n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) + n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) + n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) - n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) - n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) + n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[5] - center_pos)/LA.norm(_Ps[5] - center_pos))\n d_etha_i = (2 * sin_ialfa / LA.norm(x07 - x03)) - (2 * math.sqrt(1 - 
np.dot(x043,x045)**2/(LA.norm(x043)*LA.norm(x045))**2)/(LA.norm(x043 - x045)))\n\n\n l_J = (Lj20 + Lj06 + Lj68 + Lj82) / 4\n h = (h_iminus + h_iplus + h_Jminus + h_Jplus) / 4\n I = (l_J * h**3) / 12\n M_i = (young_modulus * I * (d_etha_i + poisson * d_etha_J)/(1 - poisson**2))\n M_J = (young_modulus * I * (d_etha_J + poisson * d_etha_i)/(1 - poisson**2))\n #曲げ力\n F_Bi = M_i / LA.norm(x03) + M_i / LA.norm(x07) * e_i\n F_BJ = M_J / LA.norm(x01) + M_J / LA.norm(x05) * e_j\n #空気力\n # S = (S_iminus + S_iplus + S_Jminus + S_Jplus) / 4\n # F_A = p * S\n F_A = np.array([0.0, 0.0, -0.1]) * _a\n\n # 運動方程式(支配方程式)\n S_0 = (S_iminus0 + S_iplus0 + S_Jminus0 + S_Jplus0) / 4\n F_T = F_T41 + F_T43 + F_T45 + F_T47\n F_S = F_S41 + F_S43 + F_S45 + F_S47\n F_B = F_Bi + F_BJ\n return (F_T + F_S + F_B + F_A) / (rho * h_0 * S_0) - c * _vs", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. \n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def intersection(self, L):\n if self.slope() == L.slope():\n return None\n intpt_xcood = (self.c * L.b - L.c * self.b)/(self.a * L.b - L.a * self.b)\n intpt_ycood = (self.c * L.a - L.c * self.a)/(self.b * L.a - L.b * self.a)\n\n return (intpt_xcood, intpt_ycood)", "def seg_int(a, v):\r\n a0 = a[:-1] # start points\r\n a1 = a[1:] # end points\r\n b0, b1 = v # start and end of the intersecting line\r\n b_ = b0[0]\r\n ox = a0[:, 0]\r\n dx = a1[:, 0]\r\n# f_t = np.array(list(zip(xs[:-1], xs[1:])))\r\n# f_t = np.sort(f_t, axis=1)\r\n idx0 = np.where((ox <= b_) & (b_ <= dx))[0] # incrementing x's\r\n idx1 = np.where((ox >= b_) & (b_ >= dx))[0] # decreasing x's\r\n idx_s = np.concatenate((idx0, idx1))\r\n # ---- alternate\r\n da = a1 - a0\r\n db = b1 - b0\r\n dp = a0 - b0\r\n dap = perp(da)\r\n denom = np.dot(dap, db) # or dap @ db\r\n # num = np.dot(dap, dp )\r\n db2 = db.reshape(1, 2)\r\n denom = np.einsum('ij,ij->i', dap, db2)\r\n num = np.einsum('ij,ij->i', dap, dp)\r\n int_pnts = (num/denom).reshape(num.shape[0], 1) * db + b0\r\n ft_int = np.hstack((a0, a1, int_pnts))\r\n return int_pnts, ft_int", "def intersect_point(self,m1,c1,m2,c2):\n\n x = (c2 - c1)/(m1 - m2)\n y = m1*x + c1\n return x, y", "def getVec(pos1, pos2):\n\n x1 = pos2[0] - pos1[0]\n y1 = pos2[1] - 
pos1[1]\n gcd1 = math.gcd(abs(x1), abs(y1))\n\n if gcd1 > 0:\n x = x1//gcd1\n else:\n x = x1\n if gcd1 > 0:\n y = y1//gcd1\n else:\n y = y1\n\n return x, y", "def intersectarea(p1,p2,size):\n x1, y1 = p1\n x2, y2 = p2\n ix1, iy1 = max(x1,x2), max(y1,y2)\n ix2, iy2 = min(x1+size,x2+size), min(y1+size,y2+size)\n iarea = abs(ix2-ix1)*abs(iy2-iy1)\n if iy2 < iy1 or ix2 < ix1: iarea = 0\n return iarea", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def intersect_ext(self, line):\n res, p, v = self.intersect(line)\n v0 = self.p0 - self.c\n v1 = p - self.c\n u = self.signed_angle(v0, v1) / self.da\n return res and u > 0 and v > 0 and u < 1 and v < 1, p, u, v", "def next_in_hull(p, v, L): \r\n N = normalize(p, L)\r\n if N != []:\r\n q = N[0]\r\n index = 0\r\n for k in range(1, len(N)):\r\n if (N[k] - q).dot(v) >= 0: # points on support line included\r\n q = N[k]\r\n index = k\r\n \r\n return index", "def vlinecomp(self):\n m_h, c_h = self.fitline(0,2) # Computes the equation for a line joining the points on the outside of the gear on opposites sides of the edm cut\n\n m_v_avg = self.average_grad() # Computes the average gradient of the constructed vertical line\n\n m_v_avg, c_v = self.line_through_point(m_v_avg,4) # Equation of line with average gradient though crack start point\n\n x_intersect,y_intersect = self.intersect_point(m_h, c_h, m_v_avg, c_v)\n\n coord_top = [x_intersect,y_intersect]\n coord_bot = [self.points[4, 0], self.points[4, 1]]\n\n distance = self.distance(coord_bot,coord_top)\n\n return coord_top, coord_bot, distance", "def intersect_ext(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0, 0\n dp = line.p - self.p\n c2 = self.cross_z\n u = c.dot(dp) / d\n v = c2.dot(dp) / d\n return u > 0 and v > 0 and u < 1 and v < 1, self.lerp(u), u, v", "def _intersect(A, B, C, D):\n d = (B[0] - A[0]) * (D[1] - C[1]) - (D[0] - C[0]) * (B[1] - A[1])\n x = ((B[0] * A[1] - A[0] * B[1]) * (D[0] - C[0]) - (D[0] * C[1] - C[0] * D[1]) * (B[0] - A[0])) / d\n y = ((B[0] * A[1] - A[0] * B[1]) * (D[1] - C[1]) - (D[0] * C[1] - C[0] * D[1]) * (B[1] - 
A[1])) / d\n return (np.round(x, 6), np.round(y, 6))", "def segmentsIntersect(self, other, allowProjInt = False):\n \n \"\"\"\n If we are not allowing projected intersection and the bounding boxes\n do not intersect then return -3, None.\n \"\"\"\n if(not(allowProjInt) and not(self.doBoundingBoxesIntersect(other))): return -3, None #return if bounding boxes do not intersect\n \"\"\" A special case for colinear lines. \"\"\" \n if(self.areColinear(other)):\n \"\"\"\n First place all four endpoint into a set. This will elliminate shared\n end points. Next, convert the set back into a list so it can\n finally be sorted.\n \"\"\"\n pointList = sorted(list(set([self.start, self.end, other.start, other.end])), key=self.calcT) \n if len(pointList) == 3:\n \"\"\"\n if there are only three points in the list then return 2, the\n middle point in the list since it is the shared point of the\n two lines.\n \"\"\"\n return 2, pointList[1] #if they are colinear and two ends have the same point return that point\n elif len(pointList) == 2:\n \"\"\" If the two lines have the same endpoints. \"\"\"\n return 2.5, self.getMidPoint()\n else:\n \"\"\"\n If the length was not three then we know it is length 4 in which case\n we turn the two middle points into a line and return 3, the line's\n midpoint.\n \"\"\"\n tempLine = Line(pointList[1], pointList[2])\n return 3, tempLine.getMidPoint() #If they are colinear return half way inbetween middle two points\n \"\"\"\n To calculate the intersection of two points we put the lines into the\n form P+tr and Q+us where P and Q are the starting points of the lines\n r and s are vectors form the starting point to the end point, and\n t and u are scalars. Set the two equations equal to each other and \n then solve for t and u. If t and u are in the range [0-1] then the\n intersection point lines on the lines, else it is a projected point.\n \"\"\"\n r = np.subtract(self.end.get2DPoint(), self.start.get2DPoint())\n s = np.subtract(other.end.get2DPoint(), other.start.get2DPoint())\n Q_Less_P = np.subtract(other.start.get2DPoint(), self.start.get2DPoint())\n denom = np.cross(r, s)*1.0\n t = np.cross(Q_Less_P, s)/denom\n u = np.cross(Q_Less_P, r)/denom \n point = p.Point(self.start.x + r[c.X]*t, self.start.y+r[c.Y]*t) \n #If t or u are not in the range 0-1 then the intersection is projected\n if(t > 1 or u > 1 or t < 0 or u < 0):\n \"\"\"\n Due to floating point problems sometimes if t or u is outside the 0-1\n range we end up inside this if statement but are actually at the end\n of one of the lines. I can't figure out how to properly add in a tolerance\n so we are taking the four end points putting them into a list,\n then comparing them to the calculated point. 
The Point module is\n properly handling tolerances so if the point == any of the end\n points then we should not return a projected point.\n \"\"\"\n if not any(point == lineEnd for lineEnd in (self.start, self.end,\n other.start, other.end)):\n return -1, point #return for projected intersection of non-colinear lines\n return 1, point #lines intersect at given point", "def intersect(self, box_p, box_t):\n x_left = torch.max(box_p[0], box_t[:,0])\n y_top = torch.max(box_p[1], box_t[:,1])\n x_right = torch.min(box_p[2], box_t[:,2])\n y_bottom = torch.min(box_p[3], box_t[:,3])\n\n width = torch.clamp(x_right - x_left, min=0)\n height = torch.clamp(y_bottom - y_top, min=0)\n\n intersect_area = width * height\n\n return intersect_area", "def circ_intersect(v0, v1, r0, r1):\n dist = pt_dist(v0, v1) #calculate distance between\n if dist > (r0 + r1): return [] #out of range\n if dist < abs(r0 - r1): return [] #circle contained\n if dist == 0: return [] #same origin\n \n a = (r0**2 - r1**2 + dist**2) / (2*dist)\n b = dist - a\n h = math.sqrt(r0**2 - a**2)\n \n v2x = v0[0] + a*(v1[0] - v0[0])/dist\n v2y = v0[1] + a*(v1[1] - v0[1])/dist\n \n x3p = v2x + h*(v1[1] - v0[1])/dist\n y3p = v2y - h*(v1[0] - v0[0])/dist\n x3n = v2x - h*(v1[1] - v0[1])/dist\n y3n = v2y + h*(v1[0] - v0[0])/dist\n \n return np.array([[x3p, y3p,0.], [x3n, y3n,0.]])", "def intersect(self, seg):\n nu, nv = self.normalv, seg.normalv\n u = numpy.array([[-self.c],[-seg.c]])\n doRotation = min(nu.min(),nv.min()) <1e-4\n if doRotation:\n # rotate to avoid numerical issues\n nu = numpy.array(rotMat.dot(nu))[0]\n nv = numpy.array(rotMat.dot(nv))[0]\n m = numpy.matrix( (nu, nv) ) \n\n i = (m**-1).dot(u) \n i=numpy.array( i).swapaxes(0,1)[0]\n debug(' intersection ' ,nu, nv, self.angle, seg.angle, ' --> ',i)\n if doRotation:\n i = unrotMat.dot(i).A1\n debug(' ' ,i)\n \n \n return i", "def intersection(self, axis2):", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def intersection( l1, l2):\n #coordonees de la lignes 1\n x1, y1, x2, y2 = l1.point\n #coordonees de la lignes 2\n x3, y3, x4, y4 = l2.point\n #\n a1 = y2 - y1\n b1 = x1 - x2\n a2 = y4 - y3\n b2 = x3 - x4\n #\n c1 = a1 * x1 + b1 * y1\n #\n c2 = a2 * x3 + b2 * y3\n #\n det = a1 * b2 - a2 * b1\n assert det, \"lines are parallel\"\n return (1. * (b2 * c1 - b1 * c2) / det, 1. 
* (a1 * c2 - a2 * c1) / det)", "def three_d_vector_plane_intersection(point_a, point_b, point_c, point_d, point_e):\n a = np.array(point_a)\n b = np.array(point_b)\n c = np.array(point_c)\n nv = plane_equation(point_c, point_d, point_e)\n t = (nv[0] * c[0] + nv[1] * c[1] + nv[2] * c[2] - nv[0] * a[0] - nv[1] * a[1] - nv[2] * a[2]) / \\\n (nv[0] * (b[0] - a[0]) + nv[1] * (b[1] - a[1]) + nv[2] * (b[2]-a[2]))\n x = a[0] + t * (b[0] - a[0])\n y = a[1] + t * (b[1] - a[1])\n z = a[2] + t * (b[2] - a[2])\n intersection = np.array([x, y, z])\n return intersection", "def algorithm_2_20_vector(p, t, c, x):\n\n mu = index(x, t)\n t = np.array(t, dtype=np.float64)\n c = np.array(c[mu - p:mu + 1], dtype=np.float64)\n\n for i in range(0, p):\n k = p - i\n t1 = t[mu - k + 1:mu + 1]\n t2 = t[mu + 1:mu + k + 1]\n omega = np.divide((x - t1), (t2 - t1))\n c = (1 - omega) * c[:-1] + omega * c[1:]\n return c", "def _isInside(self, v, select, progress):\n # Compute on non-masked sources :\n xyz = self.xyz\n N = xyz.shape[0]\n inside = np.ones((xyz.shape[0],), dtype=bool)\n v = v.reshape(v.shape[0] * 3, 3)\n\n # Loop over sources :\n progress.show()\n for k in range(N):\n # Get the euclidian distance :\n eucl = cdist(v, xyz[[k], :])\n # Get the closest vertex :\n eucl_argmin = eucl.argmin()\n # Get distance to zero :\n xyz_t0 = np.sqrt((xyz[k, :] ** 2).sum())\n v_t0 = np.sqrt((v[eucl_argmin, :] ** 2).sum())\n inside[k] = xyz_t0 <= v_t0\n progress.setValue(100 * k / N)\n self.data.mask = False\n self.data.mask = inside if select != 'inside' else np.invert(inside)\n # Finally update data sources and text :\n self.update()\n self.text_update()\n progress.hide()", "def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def get_intersect_lines(self, p10, p11, p20, p21):\n t = (p20 - p10) / (p11 - p10 - p21 + p20)\n return p10 + t * (p11 - p10)", "def side_points(p, v, L): \r\n u = np.array([-v[1], v[0]]) # positive normal of v:\r\n N = list() # list of points on one side of the line p,v:\r\n for k in range(len(L)):\r\n if (L[k] - p).dot(u) >= 0:\r\n N.append(L[k])\r\n \r\n return N", "def boxstuff(pts,vec):\n\treturn pts-(pts>vec)*vec+(pts<np.array([0.,0.,0.]))*vec", "def project_vector(u, v):\n u_np = np.array([u.get_x(), u.get_y()])\n v_np = np.array([v.get_x(), v.get_y()])\n proj = (np.dot(u_np, v_np) / np.dot(v_np, v_np)) * v_np\n return Point(proj[0], proj[1])", "def intersection(L1, L2):\n D = L1[0] * L2[1] - L1[1] * L2[0]\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x, y\n else:\n return False", "def intersect_segment(self, p1, p2):\n p1 = base.getvector(p1)\n if len(p1) == 2:\n p1 = np.r_[p1, 1]\n p2 = base.getvector(p2)\n if len(p2) == 2:\n p2 = np.r_[p2, 1]\n \n\n z1 = self.line * p1\n z2 = self.line * p2\n\n if np.sign(z1) != np.sign(z2):\n return True\n if self.contains(p1) or self.contains(p2):\n return True\n return False", "def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)", "def intersection(self, other):\n p0_other, p1_other = other.p0, other.p1\n\n # w = p1 - p0\n 
# v = p1_other - p0_other\n # s*w + p0 = t*v + p0_other\n\n w = self.p1 - self.p0\n v = p1_other - p0_other\n\n A = np.vstack((w,v)).T\n b = p0_other - self.p0\n\n if np.abs(np.linalg.det(A)) < epsilon:\n return None\n\n soln = np.linalg.solve(A, b)\n s, t = soln[0], -soln[1]\n\n intersection = s*w + self.p0\n\n if ((-epsilon <= s) and (s <= 1+epsilon) and (-epsilon <= t) and (t <= 1+epsilon)):\n return intersection\n else:\n return None", "def get_line_circle_intersections(A, B, C, r):\n Lx = B[0] - A[0]\n Ly = B[1] - A[1]\n Lz = B[2] - A[2]\n\n # stranger things\n D = Lx**2 + Ly**2\n E = 2 * ( Lx * (A[0] - C[0]) + Ly * (A[1] - C[1]) )\n F = (\n (A[0] - C[0])**2\n + (A[1] - C[1])**2\n - r**2\n )\n det = E**2 - 4 * D * F\n \n # declare null vectors\n P1 = [0, 0, 0]\n P2 = [0, 0, 0]\n t1 = t2 = None\n eps = .00001\n if ( not (D <= eps) or (det < 0) ):\n if det == 0:\n print \"tangential intersection found\",\n t1 = t2 = -E / (2*D)\n else:\n print \"pass-through intersection found\",\n t1 = ( (-E + math.sqrt(det)) / (2 * D) )\n t2 = ( (-E - math.sqrt(det)) / (2 * D) )\n P1[0] = A[0] + t1 * Lx\n P1[1] = A[1] + t1 * Ly\n P1[2] = A[2] + t1 * Lz\n P2[0] = A[0] + t2 * Lx\n P2[1] = A[1] + t2 * Ly\n P2[2] = A[2] + t2 * Lz\n else:\n print \"no intersections are available\",\n\n return P1, P2", "def vector(p0, p1):\n a = p1[0] - p0[0]\n b = p1[1] - p0[1]\n return (a, b)", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def vector_equal(v1,v2):\n if (v2.x - 0.001 <= v1.x <= v2.x + 0.001) and \\\n (v2.y - 0.001 <= v1.y <= v2.y + 0.001) and \\\n (v2.z - 0.001 <= v1.z <= v2.z + 0.001):\n return True", "def pvector_pp(i, q):\n\tc0 = coords_cut[i]\n\tra, dec = c0.ra.value, c0.dec.value\n\tr = hp.rotator.Rotator([ra, dec, 0])\n\tsT = np.matmul(r.mat, np.matmul(s_tensor_cut[:,:,i], r.mat.transpose()))\n\tevals, evecs = np.linalg.eigh(sT[1:,1:])\n\tevecA, evecB = evecs[:,0], evecs[:,1]\n\tif evecB[0] < 0:\n\t\tevecB = -evecB\n\ttheta = np.arctan2(evecB[1], evecB[0])\n\tres = 180*theta.item()/np.pi, i\n\tq.put(res)\n\treturn res", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def V_vect(self, points):\n return self.A_conf*norm(points)*self.isOutside(points)", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def test_union_intersection():\n X = np.random.randn(d, 100)\n assert np.array_equal(lincon.indicator_intersection(X), 1-lincon.indicator_union(X))", "def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return v1.colinear(v2, e)", "def intersect(self, x, P):\n\n my_id = self.asset2id[self.my_name]\n\n # Slice out overlapping states in main filter\n begin_ind = my_id*self.num_ownship_states\n end_ind = (my_id+1)*self.num_ownship_states\n x_prior = self.filter.x_hat[begin_ind:end_ind].reshape(-1,1)\n P_prior = self.filter.P[begin_ind:end_ind,begin_ind:end_ind]\n P_prior = P_prior.reshape(self.num_ownship_states, self.num_ownship_states)\n \n c_bar, Pcc = LedgerFilter.run_covariance_intersection(x, P, x_prior, P_prior)\n\n # Update main filter 
states\n if Pcc.shape != self.filter.P.shape:\n self.psci(x_prior, P_prior, c_bar, Pcc)\n # self.filter.x_hat[begin_ind:end_ind] = c_bar\n # self.filter.P[begin_ind:end_ind,begin_ind:end_ind] = Pcc\n else:\n self.filter.x_hat = c_bar\n self.filter.P = Pcc\n\n return c_bar, Pcc", "def collision_detection(p, poly):\r\n _eps = 0.00001\r\n _huge = sys.float_info.max\r\n _tiny = sys.float_info.min\r\n \r\n def rayintersectseg(p, edge):\r\n ''' takes a point p=Pt() and an edge of two endpoints a,b=Pt() of a line segment returns boolean\r\n '''\r\n a,b = edge\r\n if a.y > b.y:\r\n a,b = b,a\r\n if p.y == a.y or p.y == b.y:\r\n p = Pt(p.x, p.y + _eps)\r\n \r\n intersect = False\r\n \r\n if (p.y > b.y or p.y < a.y) or (\r\n p.x > max(a.x, b.x)):\r\n return False\r\n \r\n if p.x < min(a.x, b.x):\r\n intersect = True\r\n else:\r\n if abs(a.x - b.x) > _tiny:\r\n m_red = (b.y - a.y) / float(b.x - a.x)\r\n else:\r\n m_red = _huge\r\n if abs(a.x - p.x) > _tiny:\r\n m_blue = (p.y - a.y) / float(p.x - a.x)\r\n else:\r\n m_blue = _huge\r\n intersect = m_blue >= m_red\r\n return intersect\r\n \r\n def _odd(x): return x%2 == 1\r\n \r\n def ispointinside(p, poly):\r\n\r\n return _odd(sum(rayintersectseg(p, edge)\r\n for edge in poly.edges ))\r\n \r\n detection = ispointinside(p,poly)\r\n return detection", "def _collision_one_coord(a0, v0, p0, a1, v1, p1):\n # Special cases\n if a0 == a1:\n if v0 == v1:\n if p0 == p1:\n return ANY_TIME\n \n return []\n \n t = (p0 - p1) / (v1 - v0)\n if t >= 0 and int(t) == t:\n return [t]\n else:\n return []\n \n # General case\n # Let's solve a quadratic equation\n # Particle position is described by this formula:\n # p(t) = p0 + t*(v0 + a/2) + 0.5*a*t**2 \n # We solve p0(t) - p1(t) = 0\n d_sq = (v0 + a0/2.0 - v1 - a1/2.0) ** 2 - 2 * (a0 - a1) * (p0 - p1)\n \n if d_sq < 0:\n return []\n \n d = math.sqrt(d_sq)\n \n solutions = []\n \n t1 = (-v0 - a0/2.0 + v1 + a1/2.0 + d) / (a0 - a1)\n t2 = (-v0 - a0/2.0 + v1 + a1/2.0 - d) / (a0 - a1)\n \n if t1 >= 0 and int(t1) == t1:\n solutions.append(t1)\n \n if t2 >= 0 and int(t2) == t2:\n solutions.append(t2)\n \n return solutions", "def intersects(*args):\r\n if len(args) == 2:\r\n p0, p1, p2, p3 = *args[0], *args[1]\r\n elif len(args) == 4:\r\n p0, p1, p2, p3 = args\r\n else:\r\n raise AttributeError(\"Pass 2, 2-pnt lines or 4 points to the function\")\r\n #\r\n # ---- First check ---- np.cross(p1-p0, p3-p2 )\r\n p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y = *p0, *p1, *p2, *p3\r\n s10_x = p1_x - p0_x\r\n s10_y = p1_y - p0_y\r\n s32_x = p3_x - p2_x\r\n s32_y = p3_y - p2_y\r\n denom = s10_x * s32_y - s32_x * s10_y\r\n if denom == 0.0:\r\n return False\r\n #\r\n # ---- Second check ---- np.cross(p1-p0, p0-p2 )\r\n den_gt0 = denom > 0\r\n s02_x = p0_x - p2_x\r\n s02_y = p0_y - p2_y\r\n s_numer = s10_x * s02_y - s10_y * s02_x\r\n if (s_numer < 0) == den_gt0:\r\n return False\r\n #\r\n # ---- Third check ---- np.cross(p3-p2, p0-p2)\r\n t_numer = s32_x * s02_y - s32_y * s02_x\r\n if (t_numer < 0) == den_gt0:\r\n return False\r\n #\r\n if ((s_numer > denom) == den_gt0) or ((t_numer > denom) == den_gt0):\r\n return False\r\n #\r\n # ---- check to see if the intersection point is one of the input points\r\n t = t_numer / denom\r\n # substitute p0 in the equation\r\n x = p0_x + (t * s10_x)\r\n y = p0_y + (t * s10_y)\r\n # be careful that you are comparing tuples to tuples, lists to lists\r\n if sum([(x, y) == tuple(i) for i in [p0, p1, p2, p3]]) > 0:\r\n return False\r\n return True", "def line_intersect(line1, line2):\n b1 = (line1[1][1] - 
line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - (a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None", "def intersect_circles(a_pos, a_radius, b_pos, b_radius):\n # vector from A to B \n dp1p2 = b_pos - a_pos\n \n if a_radius + b_radius >= abs(dp1p2):\n return dp1p2.normalized()\n else:\n raise Exception(\"No intersection\")", "def isIntersection(self, v):\n return (any(inter.v == v for inter in self.inter1) or\n any(inter.v == v for inter in self.inter2))", "def pointPotential(x,y,q,posx,posy):\n k = 8.99e9\n V = (k * q) / (sqrt(x**2 + (y - sqrt((posx**2 + posy**2)))**2))\n return V", "def mid_point(self, vector):\n return self.eval_2pts(vector, 0.5)", "def get_point_in_segment(p1, p2, alpha):\n return ((1-alpha)*p1[0]+alpha*p2[0], (1-alpha)*p1[1]+alpha*p2[1])", "def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True", "def v_p(self, psi_l, ci):\n\t\treturn min((ci*self.VPMAX0)/(ci + self.KP), self.VPR)", "def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"", "def _intersection(x, y):\n a, b = x\n c, d = y\n return (d > a) and (c < b)", "def comp_vel(p1=database['K+'], p2=database['pi+'], p3=database['p+'], pmin=0, pmax=80):\r\n p_range = np.linspace(pmin, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n m3 = p3.mass\r\n v1, v2, v3 = [], [], []\r\n for p in p_range:\r\n v1.append(c*beta(p, m1))\r\n v2.append(c*beta(p, m2))\r\n v3.append(c*beta(p, m3))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n p1_name = r'K$^+$'\r\n p2_name = r'$\\pi^+$'\r\n p3_name = r'p$^+$'\r\n ax.plot(p_range, v1, 'r', label=p1_name)\r\n ax.plot(p_range, v2, 'b', label=p2_name)\r\n ax.plot(p_range, v3, 'g', label=p3_name)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'v / $ms^{-1}$', fontsize=20)\r\n ax.axvline(75, color='k', label='p = 75 GeV')\r\n ax.set_xticks(np.arange(pmin, pmax+1, 1))\r\n ax.set_xticklabels(np.arange(pmin, pmax+1, 1))\r\n ax.grid()\r\n ax.minorticks_on()\r\n ax.set_xlim(pmin, pmax)\r\n# ax.set_ylim(np.min(v1+v2))\r\n ax.legend(fontsize=20)\r\n plt.show\r\n return", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def polyclip(i, j, pol_x, pol_y, area=False):\n n = len(pol_x)\n nout = n + 4\n px_out, py_out = [0] * nout, [0] * nout\n clip_vals = [i, i + 1, j + 1, j]\n\n for ctype in range(4):\n cv = clip_vals[ctype]\n if ctype == 0:\n inside = [px > i for px in pol_x]\n elif ctype == 1:\n inside = [(px < i + 1) for px in pol_x]\n elif ctype == 2:\n inside = 
[(py < j + 1) for py in pol_y]\n else:\n inside = [py > j for py in pol_y]\n if all(inside):\n continue\n\n shiftp1 = inside.copy()\n shiftp1.insert(0, shiftp1.pop(-1))\n crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)]\n pind = 0\n for k in range(n):\n px, py = pol_x[k], pol_y[k]\n if crosses[k]: # out->in or in->out, add intersection\n ind = n - 1 if k == 0 else k - 1\n sx, sy = pol_x[ind], pol_y[ind]\n try:\n if ctype <= 1: # left or right\n px_out[pind] = cv\n py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx)\n else: # top or bottom\n px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy)\n py_out[pind] = cv\n except ZeroDivisionError: # pragma: no cover\n px_out[pind] = np.nan\n py_out[pind] = np.nan\n pind += 1\n\n if inside[k]: # out->in or in->in, add 2nd point\n px_out[pind] = px\n py_out[pind] = py\n pind += 1\n\n if pind >= nout - 2:\n nout *= 2\n px_out = px_out + [0] * nout\n py_out = py_out + [0] * nout\n nout *= 2\n\n if pind == 0: # polygon is entirely outside this line\n return None, None\n n = pind\n pol_x = px_out[:n].copy()\n pol_y = py_out[:n].copy()\n\n if area:\n if pol_x is None: # pragma: no cover\n return 0.0\n shiftx = pol_x.copy()\n shifty = pol_y.copy()\n shiftx.append(shiftx.pop(0))\n shifty.append(shifty.pop(0))\n a1 = [p[0] * p[1] for p in zip(pol_x, shifty)]\n a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)]\n a = [p[0] - p[1] for p in zip(a1, a2)]\n return abs(sum(a)) / 2\n\n return pol_x, pol_y", "def isOnInteriorSide(self, v):\n n = self.normalVect()\n return n.dotProduct(vector(self.vertices[0]) - vector(v)) > 0", "def dexpinv(self, u, v, _=None):\n A, a = np.split(u, 2)\n B, b = np.split(v, 2)\n alpha = np.linalg.norm(A)\n rho = np.inner(A, a)\n if np.isclose(alpha, 0):\n return v\n c1 = (\n B\n - 0.5 * np.cross(A, B)\n + self._dexpinv_helper_1(alpha) * np.cross(A, np.cross(A, B))\n )\n c2 = (\n b\n - 0.5 * (np.cross(a, B) + np.cross(A, b))\n + self._dexpinv_helper_2(alpha, rho) * np.cross(A, np.cross(A, B))\n + self._dexpinv_helper_1(alpha)\n * (\n np.cross(a, np.cross(A, B))\n + np.cross(A, np.cross(a, B))\n + np.cross(A, np.cross(A, b))\n )\n )\n return np.hstack((c1, c2))", "def _circle_intersection(self, circle, point):\n dist = euclidean_distance((circle[0], circle[1]), point) - circle[2]\n vun = vec2d((circle[0] - point[0]), (circle[1] - point[1]))\n v = vun.normalized()\n\n x, y = (point[0] + dist * v.x), (point[0] + dist * v.x)\n\n return dist, (x, y)", "def intersection(self, other): # -> BaseGeometry:\n ...", "def intersection(st, ave):\n\treturn (st+ave)*(st+ave+1)//2 + ave", "def make_q(v0, v2):\n return (v0.y - v2.y)/(v0.x - v2.x)", "def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave", "def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave", "def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave", "def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave", "def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack([a1, a2, b1, b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return float('inf'), float('inf')\n return x / z, y / z", "def general_plane_intersection(n_a, da, n_b, db):\n \n # https://en.wikipedia.org/wiki/Intersection_curve\n \n n_a = np.array(n_a)\n n_b = np.array(n_b)\n da = np.array(da)\n db = 
np.array(db)\n \n l_v = np.cross(n_a, n_b)\n norm_l = sqrt(np.dot(l_v, l_v))\n if norm_l == 0:\n return None\n else:\n l_v /= norm_l\n aa = np.dot(n_a, n_a)\n bb = np.dot(n_b, n_b)\n ab = np.dot(n_a, n_b)\n d_ = 1./(aa*bb - ab*ab)\n l_0 = (da*bb - db*ab)*d_*n_a + (db*aa - da*ab)*d_*n_b\n \n return l_v, l_0", "def get_intersect(a1, a2, b1, b2):\r\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\r\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\r\n l1 = np.cross(h[0], h[1]) # get first line\r\n l2 = np.cross(h[2], h[3]) # get second line\r\n x, y, z = np.cross(l1, l2) # point of intersection\r\n if z == 0: # lines are parallel\r\n return (float('inf'), float('inf'))\r\n return (x/z, y/z)", "def intersectConics(E1, E2):\n\n P = np.array([])\n r1 = matrix_rank(E1)\n r2 = matrix_rank(E2)\n \n if(r1==3 and r2==3):\n P = completeIntersection(E1,E2) \n else:\n if (r2 < 3): #E2 is degenerate\n defE = E2\n fullE = E1\n else:\n defE = E1 #E1 is degenerate\n fullE = E2\n m, l = decomposeDegenerateConic(defE)\n P1 = intersectConicLine(fullE,m)\n P2 = intersectConicLine(fullE,l)\n P = np.array([P1, P2])\n points_x = []\n points_y = []\n for i in range(2):\n P1 = P[i]\n if(P1.size!=0):\n for j in range(P1.shape[0]):\n points_x.append(P1[j,0]/P1[j,2])\n points_y.append(P1[j,1]/P1[j,2])\n return points_x, points_y", "def collpi2(Te,nev,v):\n return vcrit(Te)/2./v**3*collnu(Te,nev)" ]
[ "0.724931", "0.65389293", "0.64866877", "0.64301056", "0.6412559", "0.6410918", "0.64012647", "0.6370324", "0.6325234", "0.6305374", "0.62554836", "0.6230082", "0.620606", "0.6137867", "0.61155015", "0.6092252", "0.6087884", "0.60685384", "0.6036107", "0.60347986", "0.6028091", "0.60271066", "0.5983176", "0.5964442", "0.59315604", "0.5911336", "0.59065336", "0.588444", "0.58758074", "0.5872534", "0.58694595", "0.5868766", "0.58583075", "0.58461964", "0.58443993", "0.58297646", "0.5828295", "0.5817709", "0.577575", "0.5768893", "0.57554114", "0.5753526", "0.5749105", "0.57437307", "0.57259816", "0.57221663", "0.57061803", "0.5689943", "0.56825477", "0.56812876", "0.5676333", "0.5674121", "0.5672615", "0.56395787", "0.5637174", "0.56366545", "0.5630167", "0.561867", "0.5610522", "0.5606135", "0.55853516", "0.55786175", "0.5576663", "0.55735254", "0.5566659", "0.5555778", "0.5555695", "0.55540204", "0.5549207", "0.5543696", "0.5537604", "0.5523691", "0.5521024", "0.55189675", "0.5518566", "0.5484896", "0.54822403", "0.547886", "0.547482", "0.5474224", "0.5474208", "0.5471592", "0.54675746", "0.54659283", "0.54623586", "0.5459471", "0.5452037", "0.5444726", "0.5444112", "0.5444064", "0.5443276", "0.5443276", "0.5443276", "0.5443276", "0.5443276", "0.54401463", "0.5439215", "0.5435923", "0.54333264", "0.543293" ]
0.8056194
0
vector d reflection about unit vector normal n r = D - (2dot(D,N) / abs(N)^2) n
def reflect(d,n):
    # coefficient c, because easier
    c = 2 * dot(d,n)
    return [di - c * ni for (di, ni) in zip(d,n)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def magni(vector):\n return(np.linalg.norm(vector))", "def distance_from_plane(n,p,r,nnorm=None):\n #return np.abs(np.dot(n,(p-r)))/np.linalg.norm(n)\n #return np.abs(np.dot(n,(p-r)))/nnorm\n # the normal vector is already a unit vector!\n return np.abs(np.dot(n,(p-r)))", "def normalVect(self, n=2):\n L = len(self.vertices)\n normals = []\n while len(normals) < n:\n j = randrange(L)\n v0 = vector(self.vertices[j].coords())\n v1 = vector(self.vertices[int(j + L / 3) % L].coords())\n v2 = vector(self.vertices[int(j + 2 * L / 3) % L].coords())\n try:\n normals.append(((v1 - v0) * (v2 - v0)).normalize())\n except ValueError:\n pass\n return (1 / len(normals)) * sum(normals, vector(0, 0, 0))", "def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))", "def vector(self) -> Vector:\n return self._normal * self._distance_from_origin", "def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. \n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz", "def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector", "def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)", "def normal(self, u, v):\n result = np.cross(self.du(u, v), self.dv(u, v))\n result = result / np.sqrt(vectordot(result, result))[:, None]\n return result", "def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()", "def test_reflection_vector(self):\n\n # A ray approaching at 45 degrees\n v = vectors.Vector(1, -1, 0)\n n = vectors.Vector(0, 1, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 1, 0))\n\n # Ray along an axis hits a surface at an angle\n v = vectors.Vector(0, -1, 0)\n n = vectors.Vector(math.sqrt(2)/2, math.sqrt(2)/2, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 0, 0))", "def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)", "def normal(vx,vy,n):\n if vx==0:\n if vy==0: \n return (0,0)\n else:\n return (0,n)\n elif vy==0:\n return (n,0)\n else:\n return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))", "def Normal(self):\n return Vector(self.normal)", "def vec_nor(x):\n nVec = np.zeros(len(x));\t\t # Initializate derivate vector\n nVec = np.divide(x, max(x))\n nVec = nVec-np.mean(nVec);\n nVec = np.divide(nVec,np.max(nVec));\n \n return nVec", "def normal(self) -> Vector:\n return self._normal", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def _denormalizeState(self, Z : vector) -> vector:\n return Z / self.D", "def twoDNormal(self):\n return vector((-1) * self.y, self.x, 
0)", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def dirVector(self,p1,p2):\n v=p2-p1\n l=v.Length\n return self.toMatrix(v)/l", "def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))", "def createNormalVectors(D, Ns):\n normals = np.random.randn(D, Ns)\n normals = (np.matmul(normals, np.diag(np.sign(normals[0, :])))) / (\n np.sqrt(np.diag(np.matmul(np.transpose(normals), normals))))\n b = np.argsort(-normals[0, :])\n normals = normals[:, b]\n return normals", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def reflection(normal, origin=(0, 0, 0)):\n # Normalize the normal vector first.\n n = np.array(normal, dtype=float) / np.linalg.norm(normal)\n\n u, v, w = n\n\n translation = np.eye(4)\n translation[0:3, 3] = -np.array(origin)\n\n xx = 1 - 2 * u ** 2\n yy = 1 - 2 * v ** 2\n zz = 1 - 2 * w ** 2\n xy = -2 * u * v\n xz = -2 * u * w\n yz = -2 * v * w\n mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0],\n [0, 0, 0, 1]]\n\n if np.linalg.norm(origin) > 1e-6:\n mirror_mat = np.dot(np.linalg.inv(translation),\n np.dot(mirror_mat, translation))\n return SymmOp(mirror_mat)", "def testNorm(self):\n assert(Vector(0, 3, 4).norm() == 5)\n assert(Vector(3, 4).norm() == 5)\n assert Vector(0, 3, 0, 0, 4, 0, size=10).norm() == 5", "def normal_vector(self, facet):\n assert len(facet) == 3\n pos = self.cluster.get_positions()\n v1 = pos[facet[1], :] - pos[facet[0], :]\n v2 = pos[facet[2], :] - pos[facet[0], :]\n n = np.cross(v1, v2)\n length = np.sqrt(np.sum(n**2))\n return n / length", "def norm(vec):\n vel = numpy.sqrt(numpy.dot(vec,vec))\n return vel", "def uVectNorm(x1,y1,z1, # P\n x2,y2,z2, # Q\n x3,y3,z3): # R\n p1 = np.array([x1,y1,z1])\n p2 = np.array([x2,y2,z2])\n p3 = np.array([x3,y3,z3])\n\n v1 = p3-p1\n v2 = p2-p1\n\n cp = np.cross(v1,v2)\n a,b,c = cp\n\n d = np.dot(cp, p3)\n\n print(a,b,c)", "def test_norm_vector():\n random_state = np.random.RandomState(0)\n for n in range(1, 6):\n v = pr.random_vector(random_state, n)\n u = pr.norm_vector(v)\n assert_almost_equal(np.linalg.norm(u), 1)", "def normal_f_v(self, v):\n\n gv = self.grad_f_v(v)\n gvn = np.linalg.norm(gv, axis=1)\n\n return (gv / gvn[:, np.newaxis])", "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", "def unit_vector(self,vector):\n return vector / np.linalg.norm(vector)", "def compute_normalvect(self):\n normvect = np.zeros((len(self.tri_pnts),3,3))\n zvec = np.array([0, 0, 1])\n for itri, tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n tri0, tri1, tri2 = tri\n x1,y1 = self.points[tri1]-self.points[tri0]\n v1 = np.array([x1,y1,0])\n x2,y2 = self.points[tri2]-self.points[tri1]\n v2 = np.array([x2,y2,0])\n x3,y3 = self.points[tri0]-self.points[tri2]\n v3 = np.array([x3,y3,0])\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n v3 = v3/np.linalg.norm(v3)\n #import pdb; pdb.set_trace()\n normvect[itri,:,:] = np.cross(v1,zvec), np.cross(v2,zvec), np.cross(v3,zvec)\n #import pdb; pdb.set_trace()\n return normvect", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def 
unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)", "def get_perpendicular2d(vector):\n if vector[1] == 0:\n return np.asarray([0.,1.])\n v2_0 = 1.0\n v2_1 = -(vector[0]/vector[1])\n v2 = np.asarray([v2_0, v2_1])\n return v2 / np.linalg.norm(v2)", "def getNormDetXYZ(self):\n\t\t(node0,node1) = self.getTwoNodes()\n\t\tbeta_in = node0.getBeta()\n\t\tbeta_out = node1.getBeta()\n\t\tgamma_in = node0.getGamma()\n\t\tgamma_out = node1.getGamma()\n\t\tgb_in = beta_in*gamma_in\n\t\tgb_out = beta_out*gamma_out\n\t\t(det_x,det_y,det_z) = self.getDetXYZ(self.trMtrx)\n\t\treturn ((gb_out/gb_in)*det_x,(gb_out/gb_in)*det_y,(beta_in/beta_out)*det_z)", "def normal_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = cos(u) * sin(v)\n y = sin(u) * sin(v)\n z = cos(v)\n normal = Vector(x, y, z)\n if world:\n normal.transform(self.transformation)\n return normal", "def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def get_normal_fluctuation(hover,target,normal,vec):\n\tvector = hover - target\n\tvector = vector - vec*(vector>(vec/2.)) + vec*(vector<(-1*vec/2.))\n\tprojected = planeproject(vector,normal)\n\t#---get the sign of the projection\n\tplane_point = vector+projected\n\tsign = 1.0-2.0*(np.arccos(np.dot(vecnorm(normal),vecnorm(vector)))>np.pi/2.)\n\treturn sign*np.linalg.norm(plane_point)", "def test_perpendicular_to_vector():\n assert_almost_equal(pr.angle_between_vectors(\n pr.unitx, pr.perpendicular_to_vector(pr.unitx)), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(\n pr.unity, pr.perpendicular_to_vector(pr.unity)), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(\n pr.unitz, pr.perpendicular_to_vector(pr.unitz)), np.pi / 2.0)\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.norm_vector(pr.random_vector(random_state))\n assert_almost_equal(pr.angle_between_vectors(\n a, pr.perpendicular_to_vector(a)), np.pi / 2.0)\n b = a - np.array([a[0], 0.0, 0.0])\n assert_almost_equal(pr.angle_between_vectors(\n b, pr.perpendicular_to_vector(b)), np.pi / 2.0)\n c = a - np.array([0.0, a[1], 0.0])\n assert_almost_equal(pr.angle_between_vectors(\n c, pr.perpendicular_to_vector(c)), np.pi / 2.0)\n d = a - np.array([0.0, 0.0, a[2]])\n assert_almost_equal(pr.angle_between_vectors(\n d, pr.perpendicular_to_vector(d)), np.pi / 2.0)", "def normal(n):\n m=np.zeros((n,n))\n for i,j in itertools.product(range(n), range(n)):\n m[i][j]=normalvariate(0,1)\n return m", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def vec_normal(vec):\r\n n = sqrt(sum(x ** 2 for x in vec)) or 1\r\n return [x / n for x in vec]", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def 
cal_unit_vec(vector):\n return vector / np.linalg.norm(vector)", "def d(u, v):\r\n\tdiff = u-v\r\n\treturn diff.dot(diff)", "def reflected(self, normal):\n return self - (2 * normal * self) * normal", "def norm(self):", "def norm(v: Vec2) -> Vec2:\n t = length(v)\n return Vec2(v.x / t, v.y / t)", "def getNormalizedVector(self):\n return self.scalarMultiplication(self.norm() ** -1.0)", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def normal(self,points):\n ez=np.array([[0,0,1]])\n v=((points-self.pos()*ez)*self.C-ez)\n return (v/np.linalg.norm(v,axis=1)[:,np.newaxis])#*np.sign(self.C)", "def get_normal(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n u = np.array([1, 0])\n return np.dot(r, u)", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def norm_vect(vect):\n mag = (vect[0] ** 2 + vect[1] ** 2 + vect[2] ** 2) ** .5\n return np.divide(vect, mag)", "def unit_vector(self, vector):\n return vector / np.linalg.norm(vector)", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] = 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def unit_vector(vector):\n assert(vector != [0,0])\n return vector / np.linalg.norm(vector)", "def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n", "def _unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def dVdx(self, sys):\n dx2 = sys.positions * sys.positions - self.x0 * self.x0\n return 4 * self.A * sys.positions * dx2", "def unit(vector):\r\n result = [[0] for row in range(len(vector))]\r\n # creates the initial value for result of this function, which is a vector full of 0s with the same lenght of a given vector 
\r\n for z in range(len(vector)):\r\n # for loop which continues as long as there are more elements in the vector \r\n result[z] = vector[z]/norm(vector)\r\n # the new result being each element in the list being divided by the norm \r\n return result", "def vincdecnorm(arr):\n tmp = convert(arr, GEOCENTRIC_CARTESIAN, GEOCENTRIC_SPHERICAL)\n return -tmp[..., 0], tmp[..., 1], tmp[..., 2]", "def norm(self):\n self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))\n self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))", "def V_vect(self, distances):\n distances_norm2 = norm2(distances)\n distances_norm = np.sqrt(distances_norm2)\n isColliding = self.isColliding(distances_norm)\n\n # Collision term proportional to d**2 (cutoff)\n v_colliding = -distances_norm2/self.d_coll**2 + 1.5+0.5 * \\\n (self.d_attr/self.d_coll)**(2*self.n) - (self.d_attr/self.d_coll)**self.n\n v_colliding *= isColliding\n\n # Interaction potential: d - ln d\n v_interact = 0.5*self.d_attr**(2*self.n)/(np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**self.n - self.d_attr**self.n/(\n np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**(self.n/2) + 0.5\n v_interact *= (1 - isColliding)\n\n v = v_colliding + v_interact\n\n # A particle does not interact with itself\n for i in range(len(v)):\n np.fill_diagonal(v[i], 0)\n return v", "def blauNormal(blau, N):\n nom = blau - (1/N)\n den = 1 - (1/N)\n\n return nom / den", "def test_antinormal_reflection(self):\n n1 = 1.0\n n2 = 1.5\n normal = (0.0, 0.0, -1.0)\n angle = 0.0\n ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None)\n fresnel = FresnelReflection()\n assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04)\n new_ray = fresnel.transform(ray, {\"normal\": normal})\n assert np.allclose(flip(ray.direction), new_ray.direction)", "def norm(vector):\r\n result = 0\r\n # initial value for the result of this function\r\n for z in range(len(vector)):\r\n # this loop will continue as long as there are more values in the list \r\n result += vector[z]**2\r\n result = result**.5\r\n # The two equations above find the sum of the squares and then the square root of the squares\r\n return result", "def get_perpendicular(n: np.ndarray) -> np.ndarray:\n # find smallest component\n i = np.argmin(n)\n\n # get the other two indices\n a = (i + 1) % 3\n b = (i + 2) % 3\n\n result = np.zeros(3)\n result[i] = 0.0\n result[a] = n[b]\n result[b] = -n[a]\n return result", "def normal(self) -> 'MultiVector':\n\n return self / np.sqrt(abs(self.mag2()))", "def renorm(self):\n self.U /= (np.sum(np.abs(self.U)**2)*self.dx)**0.5", "def test_vic_dcor_nonlinear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0],\n [0.2, 0.5, 0.0],\n [0.2, 0.5, 1.0],\n [0.4, 1.0, 0.0],\n [0.4, 1.0, 1.0],\n [0.6, 1.0, 0.0],\n [0.6, 1.0, 1.0],\n [0.8, 0.5, 0.0],\n [0.8, 0.5, 1.0],\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 1.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"VIC\", \"dCor\")\n expected_w_vector = np.array(\n [0.22633480, 0.27052183, 0.50314336],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def normal(self, position):\n return self._normal", "def unit_vector(vector):\n vector = np.array(vector)\n if np.linalg.norm(vector) <= 0.00010:\n normv = 1.0\n else:\n normv = np.linalg.norm(vector)\n return vector / normv", "def svd_spd_decomposition(P):\n\t# Assert Matrix P 
is symetric\n\tassert check_symmetric(P)\n\n\t# singular value decomposition\n\tU, D, V = np.linalg.svd(P, full_matrices=False)\n\n\t# Create matrix W = Vtsqrt(diagnol(D)) #why Vt?\n\tM = np.dot(np.transpose(V), np.sqrt(np.diag(D)))\n\n\t#print(np.transpose(V))\n\n\treturn M", "def __abs__(self):\n return Vector.createFromPoint(self).norm", "def V_magNeptune(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 7.00 + 7.944e-3*alpha + 9.617e-5*alpha**2.\n return V", "def my_mvn(mu_x, mu_y, r):\n return multivariate_normal([mu_x, mu_y], [[r, 0], [0, r]])", "def testNormalize(self):\n v1 = Vector.ones(4)\n n = v1.norm()\n assert n == 2\n assert v1.normalize() == [ 0.5, 0.5, 0.5, 0.5 ]", "def normalized(self):\n try:\n m = abs(self)\n return self / m\n except ZeroDivisionError as e:\n raise Exception(\"Attempted to normalize a zero vector, return a unit vector at zero degrees\") from e\n # return Vector(1, 0)", "def perp_2d_vec(vec):\n return normalized(np.dot(R90, vec))", "def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()", "def get_vector_norm(v1):\n #sets the sum of all coordinates of vectors to 0\n sum = 0\n \n #updates the sum by multiplying the coordinate by itself and adding it into the variable sum\n for key in v1:\n sum += (v1[key])**2\n #find the square root of the sum \n vector_length = math.sqrt(sum)\n \n return vector_length" ]
[ "0.6807638", "0.6755607", "0.6614936", "0.6611211", "0.6596045", "0.65917015", "0.657798", "0.6512491", "0.64908576", "0.6453393", "0.6406519", "0.6351979", "0.6341675", "0.63390315", "0.6328692", "0.6297034", "0.6287553", "0.62832576", "0.62556225", "0.62344724", "0.6223208", "0.62113804", "0.61998045", "0.6184179", "0.61820966", "0.61820966", "0.61820966", "0.6177331", "0.6159381", "0.61578083", "0.6152654", "0.6150968", "0.61472887", "0.61332595", "0.61308825", "0.6095688", "0.6072689", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6071033", "0.6069664", "0.6058841", "0.6056928", "0.602863", "0.60122126", "0.60018814", "0.59893477", "0.5988693", "0.5986712", "0.5977926", "0.59569484", "0.5925864", "0.59140676", "0.59082395", "0.5906092", "0.59014577", "0.58954096", "0.58872426", "0.588467", "0.58841634", "0.5875876", "0.58746046", "0.5864596", "0.58627796", "0.58627254", "0.58533466", "0.58446497", "0.5844556", "0.5842186", "0.5835044", "0.5834464", "0.5833914", "0.58326286", "0.5825279", "0.5820449", "0.5818516", "0.58180207", "0.5817575", "0.58132", "0.5812923", "0.581228", "0.5810533", "0.58087534", "0.578735", "0.57838637", "0.577744", "0.5773343", "0.5771886", "0.5771124", "0.5758343", "0.574828", "0.574045" ]
0.6571784
7
Dot product of vectors x and y
def dot(x,y): return sum([xi*yi for (xi,yi) in zip(x,y)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def vector_dot(x, y):\n\n if(len(x) != len(y)):\n raise ValueError(\"vector lengths differ\")\n else:\n # return x1*y1+x2*y2+...xn*yn\n return sum([x[i] * y[i] for i in range(len(x))])", "def dot(x, y):\n res = x[0] * y[0]\n for a, b in zip(x, y):\n res += a * b\n return res", "def vector_dot(v1,v2):\n return (v1.x * v2.x) + (v1.y * v2.y) + (v1.z * v2.z)", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))", "def dot(a, b):\n return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]", "def dot(a, b):\n\n if len(a) != len(b):\n raise Exception(\"Input vectors must be of same length, not %d and %d\" % (len(a), len(b)))\n\n return float(sum([a[i] * b[i] for i in range(len(a))]))", "def vec_dot(v1,v2):\r\n \r\n return np.dot(v1,v2)", "def vecDot(a, b):\n ret=0.0\n for i in range(len(a)):\n ret+=a[i]*b[i]\n return ret", "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def dot(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def dot(xs: List[float], ys: List[float]) -> float:\n return sum(x * y for x, y in zip(xs, ys))", "def dotProduct(v1, v2):\n n1 = normalize(v1)\n n2 = normalize(v2)\n return n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2]", "def dot(vector1, vector2):\n return sum(a1 * a2 for a1, a2 in zip(vector1, vector2))", "def dotproduct(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return (first.x*other.x + first.y*other.y + first.z*other.z)", "def dot_product(vec_1:tuple, vec_2:tuple)->float:\n return vec_1[0] * vec_2[0] + vec_1[1] * vec_2[1]", "def dot(a, b):\n return np.vdot(a.arr,b.arr)", "def dot_product(v1, v2):\n return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]", "def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))", "def dot(p1, p2):\n return p1[0] * p2[0] + p1[1] * p2[1]", "def vdot(a, b):\n return np.vdot(a.ravel(), b.ravel())", "def dot_product(v1, v2):\n return v1[0] * v2[0] + v1[1] * v2[1]", "def dot_vectors(u, v):\n return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]", "def dot(vector_1: List, vector_2: List) -> float:\n if len(vector_1) != len(vector_2):\n raise InvalidInput(error_code_messages[\"InvalidLength\"])\n\n return sum(x * y for x, y in zip(vector_1, vector_2))", "def dot(a, b):\n return sum([a[i]*b[i] for i in range(2)])", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def dot(vector01,vector02):\r\n result = 0\r\n # creates the initial value for the result of the dot product\r\n for z in range(len(vector01)):\r\n # for loop which continues as long as there are more values left in the vector \r\n result += vector01[z]*vector02[z]\r\n # the new result is found to be the corresponding values in each vector multiplied and then added together \r\n return result", "def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))", "def _dot(a, b):\n return np.einsum('ijk,ikl->ijl', a, b)", "def dot(self, other):\n return self.x0 * other.x0 + self.x1 * other.x1 + self.x2 * other.x2", "def dotProduct(vectorA, vectorB):\r\n product =0\r\n for i in range(len(vectorA)):\r\n product += eval(vectorA[i])*eval(vectorB[i])\r\n return product", "def dot( v1, v2 ):\n return sum( x*y for 
x,y in izip(v1,v2) )", "def dot_product(vector1, vector2):\n out = None\n ### YOUR CODE HERE\n out=np.dot(vector1,vector2)\n ### END YOUR CODE\n\n return out", "def _layerwise_dot_product(x_s, y_s):\n return [torch.sum(x * y).item() for x, y in zip(x_s, y_s)]", "def dot(self, vec):\n pass", "def dotProduct(self, v):\n return self.x * v.x + self.y * v.y + self.z * v.z", "def dot(self, other):\n return self.x * other.x + self.y * other.y", "def dot(self, other):\n return self.x * other.x + self.y * other.y", "def dot4(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]", "def dot_product(vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def dot(a, b):\r\n a, b = as_tensor_variable(a), as_tensor_variable(b)\r\n\r\n if a.ndim == 0 or b.ndim == 0:\r\n return a * b\r\n elif a.ndim > 2 or b.ndim > 2:\r\n return tensordot(a, b, [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]])\r\n else:\r\n return _dot(a, b)", "def vector_dot(xyz, vector):\n if len(vector) != 3:\n raise Exception(\n \"vector should be length 3, the provided length is {}\".format(\n len(vector)\n )\n )\n return vector[0]*xyz[:, 0] + vector[1]*xyz[:, 1] + vector[2]*xyz[:, 2]", "def dot(self, other):\n\t\treturn self.x * other.x + self.y * other.y + self.z * other.z", "def test_dot_different_sizes():\n dot(Vector(1.0), Vector(2.0, 3.0))", "def dot(self,vect):\n return self.x*vect.x + self.y*vect.y + self.z*vect.z", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def dot(self, other):\n return float(self.x*other[0] + self.y*other[1])", "def dot(self, x):\n pass", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def dot(self, other):\n return self.x * other.x + self.y * other.y + self.z * other.z", "def dotproduct(vec1, vec2, sum=sum, map=map, mul=mul):\n return sum(map(mul, vec1, vec2))", "def vdot(x, v, pub):\n x_flatten = x.flatten()\n v_flatten = v.flatten()\n mul_res = paillier_gpu.mul_impl(v_flatten, x_flatten)\n\n return paillier_gpu.sum_impl(mul_res)", "def test_dot():\n assert_equal(dot(Vector(3.0, 2.0), Vector(2.0, -1.0)), 4.0)", "def dot(self, vec):\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a vector')\n return np.dot(self, vec)", "def dot_product(vector_1, vector_2):\n result = 0\n for idx_1, value_1 in vector_1.items():\n if idx_1 in vector_2:\n result += value_1 * vector_2[idx_1]\n return result", "def dot_product(v,w):\n return v[0] * w[0] + v[1] * w[1]", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), \"vectors must be same length\"\n\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v,w):\r\n return sum(v_i * w_i\r\n for v_i, w_i in zip(v, w))", "def dot(self, other):\n\n Vector = sympy.vector.Vector\n if isinstance(other, BasisDependentZero):\n return Vector.zero\n elif isinstance(other, Vector):\n outvec = Vector.zero\n for k, v in self.components.items():\n vect_dot = 
k.args[1].dot(other)\n outvec += vect_dot * v * k.args[0]\n return outvec\n elif isinstance(other, Dyadic):\n outdyad = Dyadic.zero\n for k1, v1 in self.components.items():\n for k2, v2 in other.components.items():\n vect_dot = k1.args[1].dot(k2.args[0])\n outer_product = k1.args[0].outer(k2.args[1])\n outdyad += vect_dot * v1 * v2 * outer_product\n return outdyad\n else:\n raise TypeError(\"Inner product is not defined for \" +\n str(type(other)) + \" and Dyadics.\")", "def dot_product(u, v):\n ret = 0.0\n for i in range(len(u)):\n ret += float(float(u[i]) * float(v[i]))\n return ret", "def dot(p, q):\n return p[0] * q[0] + p[1] * q[1] + p[2] * q[2]", "def dot_product(first_vector, second_vector):\n first_unpacker = VectorUnpacker(first_vector)\n second_unpacker = VectorUnpacker(second_vector)\n if first_unpacker.unpacked_vector_length != second_unpacker.unpacked_vector_length:\n raise ApplicationError(\"Unpacked vector sizes are unequal\")\n\n # looks better than a 'map' one-liner to me\n value = 0\n for piece in zip(first_unpacker(), second_unpacker()):\n value += piece[0] * piece[1]\n\n return value", "def dot(self, vec):\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a VectorArray')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Dot product operands must have the same '\n 'number of elements.')\n return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1)", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), 'vectors must be the same length'\n\n return sum(v_item * w_item for v_item, w_item in zip(v, w))", "def dot(a, b):\n raise NotImplementedError", "def dot_product(a, b):\n dp = 0.0\n for i, j in zip(a, b):\n dp += i * j\n return dp", "def vec_dot_star(v1,v2):\r\n \r\n dot_star = v1[0]*(v2[1])-v1[1]*v2[0]\r\n return dot_star", "def dot(self, other):\n checkVector(self, other)\n dots = self.client.map(_call_dot, self.vecDask, other.vecDask, pure=False)\n # Adding all the results together\n dot = 0.0\n for future, result in daskD.as_completed(dots, with_results=True):\n dot += np.float64(result)\n return dot", "def dot(v,w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v,w))", "def dot(v,w):\n return sum(v_i * w_i for v_i,w_i in zip(v,w))", "def _listdot(d1, d2):\n return [np.dot(x[0].T, x[1]) for x in zip(d1, d2)]", "def _parameter_dot_product(x: JaxComplexArray, y: JaxComplexArray, n_axes: int) -> JaxRealArray:\n axes = tuple(range(-n_axes, 0))\n return jnp.sum(x * y, axis=axes).real", "def vector_dot(v, w):\n return np.dot(v, w)", "def dot(self, other):\n ox, oy = other\n return self[0] * ox + self[1] * oy", "def dot(self, other):\n return F.Dot.apply(self, other)", "def dot(v, w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v, w))", "def pairwise_dot_product_similarity(x, y):\n return torch.mm(x, torch.transpose(y, 1, 0))", "def get_dot_product(v1,v2):\n #sets default dot product\n dot_product = 0\n \n for key in v2:\n if key in v1:\n # updates the dot product if key is present in both vectors\n dot_product += v1[key]*v2[key]\n #returns final dot product\n return dot_product", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or 
len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def test_vector_dot_product(self):\n\n # Example 1.2\n vector_p = np.array([0.5, 0.0, 0.5])\n vector_q = np.array([0.5, 0.5, 0.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/16.0\n\n vector_d = vector_p - vector_q\n magnitude_nm = vector.dot_product(crystal, vector_d, vector_d)\n\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/4.0\n\n magnitude_nm = vector.dot_product(crystal, vector_p, vector_q)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n magnitude_nm = vector.dot_product(crystal, vector_q, vector_p)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n #self.fail(\"Test if the testcase is working.\")", "def dot(v, w):\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot_product(v1, v2):\n #print(v1, v2)\n sum = 0\n\n for i in range(len(v1)):\n #print(v1[i], v2[i])\n sum += v1[i] * v2[i]\n return sum", "def dot(self,n_,x,y): # 3\n if x is None: raise TypeError(\"Invalid type for argument x\")\n if x is None:\n x_ = None\n else:\n try:\n x_ = memoryview(x)\n except TypeError:\n try:\n _tmparr_x = array.array(\"d\",x)\n except TypeError:\n raise TypeError(\"Argument x has wrong type\")\n else:\n x_ = memoryview(_tmparr_x)\n \n else:\n if x_.format != \"d\":\n x_ = memoryview(array.array(\"d\",x))\n \n if x_ is not None and len(x_) != (n_):\n raise ValueError(\"Array argument x has wrong length\")\n if y is None: raise TypeError(\"Invalid type for argument y\")\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n \n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n \n if y_ is not None and len(y_) != (n_):\n raise ValueError(\"Array argument y has wrong length\")\n res,resargs = self.__obj.dot(n_,x_,y_)\n if res != 0:\n raise Error(rescode(res),\"\")\n _xty_return_value = resargs\n return _xty_return_value", "def trace_dot(a, b):\r\n return np.sum(a * b)", "def dot_product(a, b):\n a1, a2, a3 = a\n b1, b2, b3 = b\n return a1 * b1 + a2 * b2 + a3 * b3", "def dot(x, y):\r\n\r\n if hasattr(x, 'getnnz'):\r\n x = as_sparse_variable(x)\r\n if hasattr(y, 'getnnz'):\r\n y = as_sparse_variable(y)\r\n\r\n x_is_sparse_variable = _is_sparse_variable(x)\r\n y_is_sparse_variable = _is_sparse_variable(y)\r\n\r\n if not x_is_sparse_variable and not y_is_sparse_variable:\r\n raise TypeError()\r\n\r\n return _dot(x, y)", "def dot(self, v):\n if (len(self) != len(v)):\n raise IndexError('Vectors to be dotted must be the same size.')\n return sum([x[0]*x[1] for x in zip(self, v)])", "def dot(x, y):\n if isinstance(x, tf.SparseTensor) and isinstance(y, tf.SparseTensor):\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x,y)\n return res", "def dot_product(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(self,v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(lhs: typing.Iterable, rhs: typing.Iterable) -> float:\n 
return sum(ls * rs for ls, rs in zip(lhs, rhs))", "def dot(u, v, w, a, b):\n u_1, u_2 = u\n v_1, v_2 = v\n return (w*u_1 + b*u_2)*(w*v_1 + b*v_2) + abs(a)*u_1*v_1" ]
[ "0.8719347", "0.85587317", "0.81081426", "0.79403627", "0.7918046", "0.78332645", "0.7820983", "0.78134316", "0.7771925", "0.77611864", "0.7745346", "0.7739358", "0.76962656", "0.7687426", "0.7682485", "0.7668282", "0.7629572", "0.76284784", "0.7628374", "0.7611728", "0.758857", "0.757651", "0.75639135", "0.7554643", "0.75245726", "0.75147325", "0.7441661", "0.7440388", "0.7424878", "0.73675054", "0.7352836", "0.7330638", "0.73283863", "0.73238814", "0.72730654", "0.7272068", "0.7269076", "0.7269012", "0.7260172", "0.7260172", "0.7228797", "0.7226879", "0.7220317", "0.7196141", "0.71798193", "0.7177341", "0.7152528", "0.71488404", "0.71004117", "0.7072288", "0.7072012", "0.7071587", "0.7068759", "0.70655763", "0.70528054", "0.7039964", "0.70325434", "0.70046854", "0.7001159", "0.69945", "0.6992246", "0.69800353", "0.6977182", "0.6971917", "0.69690824", "0.69673514", "0.69598734", "0.69447297", "0.69396883", "0.69335943", "0.6916554", "0.691343", "0.6905085", "0.69031924", "0.68981683", "0.6897988", "0.6895584", "0.68861127", "0.68852156", "0.68763345", "0.6876276", "0.68725175", "0.68725175", "0.68725175", "0.68725175", "0.68725175", "0.6872107", "0.685542", "0.6850103", "0.68291366", "0.681904", "0.6816007", "0.6815771", "0.6802763", "0.6799435", "0.6799331", "0.679083", "0.67775685", "0.6773548", "0.67418563" ]
0.83901334
2
Returns a unit vector in the same direction as x
def unit(x):
    l = sum([i**2 for i in x])**0.5
    return [xi/l for xi in x]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit(self):\r\n return Vector(self.x/self.length(), self.y/self.length())", "def unit():\n return Vec2d(0, 1)", "def getUnitVector(self):\n return Vector.createFromPolar(1, self.angle)", "def unit_vector(self,vector):\n return vector / np.linalg.norm(vector)", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector", "def unit_vector(self, vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def get_unit_vector(self, vector):\n return vector / la.norm(vector)", "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n assert(vector != [0,0])\n return vector / np.linalg.norm(vector)", "def _unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def cal_unit_vec(vector):\n return vector / np.linalg.norm(vector)", "def unit_vectors(x):\n xnew = x.copy()\n for v in range(x.shape[-1]):\n xnew[:, v] = x[:, v] / np.linalg.norm(x[:, v])\n return xnew", "def unit(direction):\r\n return Vector(0, -1).rotate(direction)", "def vector(self) -> Vector:\n return self._normal * self._distance_from_origin", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def tangeant_unit_vector(self, t):\n a = self.a0 + t * self.da\n ca = cos(a)\n sa = sin(a)\n v = Vector((sa, -ca))\n if self.da > 0:\n v = -v\n return v", "def unit_vector(vector):\n return 0 if vector[0] == 0 else vector[0]/abs(vector[0]), 0 if vector[1] == 0 else vector[1]/abs(vector[1])", "def vec_unit( vec ):\r\n return np.divide( vec , np.linalg.norm( vec ) )", "def unit_vector(vector):\n return vector / max(np.linalg.norm(vector), 1e-10)", "def unit_x(cls):\n return cls(1, 0, 0)", "def unit_vec(v):\n vlen = np.linalg.norm(v)\n if np.isclose(vlen, 0):\n raise ValueError('Cannot make unit vector from zero vector.')\n else:\n return v / vlen", "def as_unit(self):\n new_vec = self.copy()\n new_vec.normalize()\n return new_vec", "def vec_x(self):\t\r\n if self.ox != 0:\r\n ov = self.ox\r\n lv = self.self.lx + self.ox\r\n else:\r\n ov = self.dx / 2\r\n lv = self.lx\r\n\r\n xv = \"\"\r\n for num in np.arange(ov, lv, self.dx):\r\n xv += str(num) + \" \"\r\n\r\n return xv", "def unit_vector(v):\n h = 
((v[0]**2)+(v[1]**2))**0.5\n if h == 0:\n h = 0.000000000000001\n ua = v[0] / h\n ub = v[1] / h\n return (ua, ub)", "def unit_x(cls):\n return cls(1, 0)", "def unit_vector(vector):\n if not np.all((vector == 0)):\n return vector / np.linalg.norm(vector)\n else:\n return vector", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def unit(vector: np.array) -> np.array:\n return np.array([*vector]) / np.sqrt((vector * vector).sum(axis=0))", "def _unit_vector(pt0, pt1):\n dis_0_to_1 = sqrt((pt0[0] - pt1[0])**2 + (pt0[1] - pt1[1])**2)\n return (pt1[0] - pt0[0]) / dis_0_to_1, \\\n (pt1[1] - pt0[1]) / dis_0_to_1", "def getNormalizedVector(self):\n return self.scalarMultiplication(self.norm() ** -1.0)", "def unit_vector(vector):\n unit_vector = np.zeros((len(vector), vector.shape[1]))\n norm = np.linalg.norm(vector, axis=1)\n ndim = vector.ndim\n\n if ndim == 1: # Handling of 1-dimensional array\n unit_vector = vector / norm\n elif ndim == 2: # Handling of 2-dimensional array\n for i in range(0, vector.shape[1]):\n unit_vector[:, i] = vector[:, i] / norm\n else:\n log.fatal(f\"Dimension of vector should be either 1- or 2-dimensional and not {ndim}-dimensional.\")\n\n return unit_vector", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def unit_vector(vector):\n vector = np.array(vector)\n if np.linalg.norm(vector) <= 0.00010:\n normv = 1.0\n else:\n normv = np.linalg.norm(vector)\n return vector / normv", "def unit_vector(i, j):\n magnitude = np.sqrt(i ** 2 + j ** 2)\n unit_i = i / magnitude\n unit_j = j / magnitude\n\n return unit_i, unit_j", "def vec(self):\r\n\r\n xv = np.arange(self.dx / 2, self.lx, self.dx)\r\n yv = np.arange(-self.ly / 2 + self.dy / 2, self.ly / 2, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n if self.ox != 0:\r\n xv = np.arange(self.ox, self.lx + self.ox, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n return xv, yv, zv", "def direction(self):\n len = self.length()\n if len == 0.0:\n uvec = pos.Pos(np.transpose(np.array([0, 0, 0])))\n else:\n uvec = pos.Pos(np.transpose(np.array([(self.end.x - self.start.x) / len,\n (self.end.y - self.start.y) / len,\n (self.end.z - self.start.z) / len])))\n return uvec", "def vector(x, y, z):\n return point_or_vector(x,y,z,0.0)", "def vector_to(self, location):\n return (self.X - location) * numpy.array([1.0, 1.0, 0.0])", "def vector_normalize(x):\n mag = math.sqrt(vector_dot(x, x))\n return [float(i) / mag for i in x]", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def vec_nor(x):\n nVec = np.zeros(len(x));\t\t # Initializate derivate vector\n nVec = np.divide(x, max(x))\n nVec = nVec-np.mean(nVec);\n nVec = np.divide(nVec,np.max(nVec));\n \n return nVec", "def x(self) -> float:\n return self.A[1] if self.scalar_vector else self.A[0]", "def normalized(self):\n try:\n m = abs(self)\n return self / m\n except ZeroDivisionError as e:\n raise Exception(\"Attempted to normalize a zero vector, return a unit vector at zero degrees\") from e\n # return Vector(1, 0)", "def cyl_x_unit_vector(gravity: sc.Variable, incident_beam: sc.Variable) -> sc.Variable:\n v_x = sc.cross(incident_beam, gravity)\n return v_x / sc.norm(v_x)", "def get_orientation_vector(self, xyz):\n if 
self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))", "def v(self):\n return Vector2(self.position)", "def uw(self):\n return sm.unitvec(self.w)", "def Normal(self):\n return Vector(self.normal)", "def unit_vector(vec_in):\n if vec_in.ndim == 1:\n out = _unit_vector_single(vec_in)\n elif vec_in.ndim == 2:\n out = _unit_vector_multi(vec_in)\n else:\n raise ValueError(\n \"incorrect arg shape; must be 1-d or 2-d, yours is %d-d\"\n % (vec_in.ndim)\n )\n return out", "def normal_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = cos(u) * sin(v)\n y = sin(u) * sin(v)\n z = cos(v)\n normal = Vector(x, y, z)\n if world:\n normal.transform(self.transformation)\n return normal", "def unit_vector(data, axis=None, out=None):\r\n if out is None:\r\n data = numpy.array(data, dtype=numpy.float64, copy=True)\r\n if data.ndim == 1:\r\n data /= math.sqrt(numpy.dot(data, data))\r\n return data\r\n else:\r\n if out is not data:\r\n out[:] = numpy.array(data, copy=False)\r\n data = out\r\n length = numpy.atleast_1d(numpy.sum(data*data, axis))\r\n numpy.sqrt(length, length)\r\n if axis is not None:\r\n length = numpy.expand_dims(length, axis)\r\n data /= length\r\n if out is None:\r\n return data", "def unit_vector(data, axis=None, out=None):\r\n if out is None:\r\n data = np.array(data, dtype=np.float64, copy=True)\r\n if data.ndim == 1:\r\n data /= math.sqrt(np.dot(data, data))\r\n return data\r\n else:\r\n if out is not data:\r\n out[:] = np.array(data, copy=False)\r\n data = out\r\n length = np.atleast_1d(np.sum(data*data, axis))\r\n np.sqrt(length, length)\r\n if axis is not None:\r\n length = np.expand_dims(length, axis)\r\n data /= length\r\n if out is None:\r\n return data", "def __call__(self, x):\n v = vector(RDF,x)\n if v.is_zero():\n raise ValueError, \"The origin must not be a vertex.\"\n v = v/norm(v) # normalize vertices to unit sphere\n v = self.house*v # reflect so self.projection_dir is at \"north pole\"\n denom = self.height-v[self.dim-1]\n if denom.is_zero():\n raise ValueError, 'Point cannot coincide with ' \\\n 'coordinate singularity at ' + repr(x)\n return vector(RDF, [ v[i]/denom for i in range(self.dim-1) ])", "def unit(vector):\r\n result = [[0] for row in range(len(vector))]\r\n # creates the initial value for result of this function, which is a vector full of 0s with the same lenght of a given vector \r\n for z in range(len(vector)):\r\n # for loop which continues as long as there are more elements in the vector \r\n result[z] = vector[z]/norm(vector)\r\n # the new result being each element in the list being divided by the norm \r\n return result", "def vel_inicial(x): #Velocidad inicial como un vector de ceros\r\n return np.zeros_like(x)", "def unit_vector(data, axis=None, out=None):\n if out is None:\n data = np.array(data, dtype=np.float64, copy=True)\n if data.ndim == 1:\n data /= math.sqrt(np.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = np.array(data, copy=False)\n data = out\n length = np.atleast_1d(np.sum(data*data, axis))\n np.sqrt(length, length)\n if axis is not None:\n length = np.expand_dims(length, axis)\n data /= length\n if out is None:\n return data", "def x_to_u(self, x):\n return stats.norm.ppf(self.CDF(x))", "def xvec(self):\n return np.array([self.x, self.y])", "def xy2vec(self, x, y=None, direct=False):\n pass", "def normalized(first):\n if isinstance(first,FreeCAD.Vector):\n 
l=length(first)\n return FreeCAD.Vector(first.x/l, first.y/l, first.z/l)", "def unit_sun_r(sun_pos):\n return sun_pos / vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2])", "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n return x", "def unit_vector(self,vector):\n\t\tunit_vector_query=0;\n\t\tfor word in vector:\n\t\t\tunit_vector_query += vector[word]*vector[word];\n\t\tunit_vector_query = math.sqrt(unit_vector_query);\n\t\treturn unit_vector_query", "def to_unit_hypercube(self, x):\n x_out = x.copy()\n for n in self.names:\n x_out[n] = (x[n] - self.bounds[n][0]) / (\n self.bounds[n][1] - self.bounds[n][0]\n )\n return x_out", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def _unitVector(self, data: numpy.array, axis: Optional[int] = None, out: Optional[numpy.array] = None) -> numpy.array:\n if out is None:\n data = numpy.array(data, dtype = numpy.float64, copy = True)\n if data.ndim == 1:\n data /= math.sqrt(numpy.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = numpy.array(data, copy = False)\n data = out\n length = numpy.atleast_1d(numpy.sum(data*data, axis))\n numpy.sqrt(length, length)\n if axis is not None:\n length = numpy.expand_dims(length, axis)\n data /= length\n if out is None:\n return data", "def velocity_at_pos(self, x, y):\n return scipy.array([float(self._interp_u(x, y)),\n float(self._interp_v(x, y))])", "def x(self):\n return self._translation[0, 0]", "def normalize(self):\n\n if not self.magnitude():\n return Vector(0, 0)\n\n l = 1 / self.magnitude()\n return self.scale(l)", "def __abs__(self):\n return Vector.createFromPoint(self).norm", "def forward_vector(self):\n return pm.datatypes.Vector(0, 0, 1).rotateBy(self.rotation)", "def _orthogonal_vector(vector):\n return -1 * vector[1], vector[0]", "def vector(self):\n return self.__vector", "def normalized(self):\n length = self.length\n if length != 0:\n return self/length\n return Vec2d(self)", "def vector_potential(self, xyz):\n r = self.distance(xyz)\n a = (\n (self.current * self.length) / (4*np.pi*r) *\n np.exp(-i*self.wavenumber*r)\n )\n a = np.kron(np.ones(1, 3), np.atleast_2d(a).T)\n return self.dot_orientation(a)", "def vector(self, x):\n if isinstance(x, tuple):\n index = self.tuple_to_index[x]\n elif isinstance(x, str):\n index = self.string_to_index[x]\n else:\n index = x\n\n return self.vectors[index]", "def _vector_direction(self,\n desired_pos : np.array,\n actual_pos : np.array) -> np.array:\n return desired_pos - actual_pos", "def normalize(self): # Function is fucked TODO\n l = self.length()\n for i in range(0, len(self.coords)):\n self.coords[i] /= l\n return self\n # return Vector(list([0 for i in range(len(v.coords))]))\n\n # if round(self.length() == 0):\n # s = 1 / self.length()\n # return self * s\n # else:\n # return Vector(list([0 for i in range(len(v.coords))]))", "def vecnorm(X) :\n\tXtemp = X - np.min(X)\n\tXnorm = Xtemp * 2 / np.max(Xtemp) - 1\n\treturn Xnorm", "def 
vec_node(self):\r\n\r\n xv = np.arange(self.ox, self.lx + self.ox + self.dx, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy + self.dy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz + self.dz, self.dz)\r\n\r\n return xv, yv, zv", "def xyz(self) -> np.ndarray:\n return self._vector[0:3]" ]
[ "0.78788394", "0.7592913", "0.7465866", "0.7433125", "0.7410085", "0.7399616", "0.73799765", "0.73799765", "0.73799765", "0.73718464", "0.7347905", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7326668", "0.7304995", "0.7253441", "0.724526", "0.72306746", "0.7142431", "0.7123682", "0.70919037", "0.70246816", "0.6961308", "0.6928419", "0.6924768", "0.69175875", "0.68712825", "0.68361527", "0.67989206", "0.67978895", "0.6793216", "0.67806375", "0.67692196", "0.6761085", "0.6749377", "0.67468166", "0.6744784", "0.6721709", "0.67185843", "0.67106986", "0.65890986", "0.65634346", "0.6551626", "0.65054864", "0.6496581", "0.6487658", "0.6463961", "0.6460287", "0.6450805", "0.6436534", "0.6435501", "0.64354223", "0.64074445", "0.6399476", "0.6384018", "0.635621", "0.635449", "0.6343336", "0.6300882", "0.6279319", "0.6258929", "0.6237642", "0.6234174", "0.62215513", "0.6221533", "0.62051755", "0.6203018", "0.61845845", "0.6176514", "0.6161042", "0.6138931", "0.61357176", "0.61261755", "0.61199385", "0.61095816", "0.6102483", "0.6102", "0.6069168", "0.60679", "0.60597974", "0.6054049", "0.6053837", "0.6051283", "0.60467625", "0.603714", "0.60263026", "0.59996825", "0.59842944", "0.59819853", "0.598163", "0.5971506", "0.59702885" ]
0.6536953
51
Generate Brauer states on 4 qubits.
def test_brauer_2_dim_2_pval(): expected_res = np.array( [ [1, 1, 1], [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 1], ] ) res = brauer(2, 2) bool_mat = np.isclose(res, expected_res) np.testing.assert_equal(np.all(bool_mat), True)
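A minimal sketch of how a `brauer(dim, p_val)` generator could produce the matrix the record's test checks. This is an assumption, not the implementation behind the record: the function name `brauer_sketch`, the helper `perfect_matchings`, and the column ordering of matchings are all hypothetical, chosen so that `brauer_sketch(2, 2)` reproduces the 16x3 expected matrix above (columns 0, 1, 2 correspond to the pairings (0,1)(2,3), (0,2)(1,3), (0,3)(1,2)).

```python
# Hypothetical sketch (assumed construction, not the library code from the record):
# each Brauer state is a product of unnormalized maximally entangled pairs over one
# perfect matching of the 2*p_val parties, so a basis index contributes amplitude 1
# exactly when its paired base-`dim` digits agree.
import numpy as np


def perfect_matchings(items):
    """Yield all perfect matchings of an even-sized list of party indices."""
    if not items:
        yield []
        return
    first, rest = items[0], items[1:]
    for i, partner in enumerate(rest):
        remaining = rest[:i] + rest[i + 1:]
        for rest_matching in perfect_matchings(remaining):
            yield [(first, partner)] + rest_matching


def brauer_sketch(dim, p_val):
    """Return a (dim**(2*p_val)) x (#matchings) matrix; one Brauer state per column."""
    n_parties = 2 * p_val
    matchings = list(perfect_matchings(list(range(n_parties))))
    res = np.zeros((dim ** n_parties, len(matchings)))
    for col, matching in enumerate(matchings):
        for idx in range(dim ** n_parties):
            # Base-`dim` digits of idx, most-significant party first.
            digits = [(idx // dim ** (n_parties - 1 - k)) % dim for k in range(n_parties)]
            if all(digits[a] == digits[b] for a, b in matching):
                res[idx, col] = 1
    return res


# Usage: for dim=2, p_val=2 (4 qubits) this yields the 16x3 matrix in the test above.
# print(brauer_sketch(2, 2))
```

The design choice here is to mark basis states directly rather than tensoring explicit pair states, which keeps the sketch short; the actual library the record tests may construct the states differently and normalize or order the columns another way.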
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_qubits(self):\n return cq.LineQubit.range(4)", "def make_QN(Jmin, Jmax, I1, I2):\n QN = [UncoupledBasisState(J,mJ,I1,m1,I2,m2)\n for J in np.arange(Jmin, Jmax+1)\n for mJ in np.arange(-J,J+1)\n for m1 in np.arange(-I1,I1+1)\n for m2 in np.arange(-I2,I2+1)\n ]\n \n return QN", "def state_prepare(q: List[QRegStorage], i: int):\n\n RX(0.1)(q[i])\n RZ(0.4)(q[i + 1])\n CX(q[i], q[i + 1])\n RY(0.8)(q[i])\n RZ(1.2)(q[i])", "def test_rb(self):\n\n # Load simulator\n backend = qiskit.Aer.get_backend('qasm_simulator')\n\n # Test up to 2 qubits\n nq_list = [1, 2]\n\n for nq in nq_list:\n\n print(\"Testing %d qubit RB\" % nq)\n\n for pattern_type in range(2):\n for multiplier_type in range(2):\n # See documentation of choose_pattern for the meaning of\n # the different pattern types\n\n rb_opts = {}\n rb_opts['nseeds'] = 3\n rb_opts['length_vector'] = [1, 3, 4, 7]\n rb_opts['rb_pattern'] = self.choose_pattern(\n pattern_type, nq)\n # if the pattern type is not relevant for nq\n if rb_opts['rb_pattern'] is None:\n continue\n rb_opts['length_multiplier'] = self.choose_multiplier(\n multiplier_type, len(rb_opts['rb_pattern']))\n\n # Generate the sequences\n try:\n rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)\n except OSError:\n skip_msg = ('Skipping tests for %s qubits because '\n 'tables are missing' % str(nq))\n print(skip_msg)\n continue\n\n # Perform an ideal execution on the generated sequences\n # basis_gates = ['u1','u2','u3','cx'] # use U, CX for now\n # Shelly: changed format to fit qiskit current version\n basis_gates = 'u1, u2, u3, cx'\n shots = 100\n result = []\n for seed in range(rb_opts['nseeds']):\n result.append(\n qiskit.execute(rb_circs[seed], backend=backend,\n basis_gates=basis_gates,\n shots=shots).result())\n\n # Verify the generated sequences\n for seed in range(rb_opts['nseeds']):\n length_vec = rb_opts['length_vector']\n for circ_index, vec_len in enumerate(length_vec):\n\n self.assertEqual(\n rb_circs[seed][circ_index].name,\n 'rb_seed_%s_length_%s' % (\n str(seed), str(vec_len)),\n 'Error: incorrect circuit name')\n self.verify_circuit(rb_circs[seed][circ_index],\n nq, rb_opts,\n vec_len, result[seed], shots)\n\n self.assertEqual(circ_index, len(rb_circs),\n \"Error: additional circuits exist\")", "def prepare_states(self):\n # Prepare random states\n multiple_quarter_pi = [(i * pi) / 4 for i in range(8)]\n zero_or_pi = [0, pi]\n states = []\n for pos in self.__client_knowledge.keys():\n rand_ang = random.choice(multiple_quarter_pi).item()\n rand_flip = random.choice(zero_or_pi).item()\n self.__client_knowledge[pos].set_rotation_encryption_angle(rand_ang)\n self.__client_knowledge[pos].set_flipping_encryption_angle(rand_flip)\n states.append(matmul(rotation_gate('z', rand_ang), plus_state()))\n\n return states", "def qtc2state(self, qtc):\n \n state_rep = []\n for idx, element in enumerate(qtc):\n# val_qtc = validateQtcSequences(element)\n d = element.shape[1]\n mult = 3**np.arange(d-1, -1, -1)\n state_num = np.append(\n 0,\n ((element + 1)*np.tile(mult, (element.shape[0], 1))).sum(axis=1) + 1\n )\n state_num = np.append(state_num, 82)\n state_char = ''\n for n in state_num:\n state_char += chr(int(n)+32)\n state_rep.append(state_num.tolist())\n \n return state_rep", "def test_reconstruct_bell_state(self):\n wires = range(2)\n\n dev = qml.device(\"default.qubit\", wires=wires, shots=10000)\n\n @qml.qnode(dev)\n def qnode(n_wires):\n qml.Hadamard(0)\n qml.CNOT(wires=[0, 1])\n return qml.classical_shadow(wires=range(n_wires))\n\n # should prepare the 
bell state\n bits, recipes = qnode(2)\n shadow = ClassicalShadow(bits, recipes)\n global_snapshots = shadow.global_snapshots()\n\n state = np.sum(global_snapshots, axis=0) / shadow.snapshots\n bell_state = np.array([[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]])\n assert qml.math.allclose(state, bell_state, atol=1e-1)\n\n # reduced state should yield maximally mixed state\n local_snapshots = shadow.local_snapshots(wires=[0])\n assert qml.math.allclose(np.mean(local_snapshots, axis=0)[0], 0.5 * np.eye(2), atol=1e-1)\n\n # alternative computation\n bits, recipes = qnode(1)\n shadow = ClassicalShadow(bits, recipes)\n global_snapshots = shadow.global_snapshots()\n local_snapshots = shadow.local_snapshots(wires=[0])\n\n state = np.sum(global_snapshots, axis=0) / shadow.snapshots\n assert qml.math.allclose(state, 0.5 * np.eye(2), atol=1e-1)\n assert np.all(local_snapshots[:, 0] == global_snapshots)", "def test_statevector(self):\n \n qubits = QubitPlaceholder.register(3)\n program = Program()\n program += H(qubits[0])\n program += X(qubits[2])\n program += CNOT(qubits[0], qubits[1])\n\n measurement = program.declare(\"ro\", \"BIT\", 3)\n for i in range(0, 3):\n program += MEASURE(qubits[i], measurement[i])\n\n assigned_program = address_qubits(program) \n simulator = WavefunctionSimulator()\n statevector = simulator.wavefunction(assigned_program)\n print(statevector.amplitudes)", "def _generate_qubits(self) -> Sequence[cirq.Qid]:\n return cirq.LineQubit.range(openfermion.count_qubits(self.hamiltonian))", "def xpotts_states(n, k):\n\n assert n>0, \"n cannot be <0\"\n assert k>=2, \"k cannot be <2\"\n \n for i in range(k**n):\n state = base_repr(i, k)\n yield ['0']*(n-len(state)) + state", "def construct_qcbm(circuit, n_qubits, depth):\n\n for d in range(depth):\n for i in range(n_qubits):\n circuit.append_gate(Gate('X', target = i, angle = np.random.random()*np.pi*2))\n circuit.append_gate(Gate('Z', target = i, angle = np.random.random()*np.pi*2))\n if n_qubits != 1:\n for i in range(n_qubits):\n circuit.append_gate(Gate('CNOT', control = i, target = (i+1)%n_qubits))\n return circuit", "def make_state_appliable_4ch(state):\n size = len(state)\n st_appl = np.zeros((size,)*4, dtype=complex)\n for p1 in range(size):\n for p2 in range(size):\n for p3 in range(size):\n for p4 in range(size):\n st_appl[p1, p2, p3, p4] = state[p1, p2, p3, p4] * sqrt(factorial(p1) * factorial(p2) * factorial(p3) * factorial(p4))\n return st_appl", "def next_state(self):\n \n self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])", "def noisy_encode_zero(self, qubits: List[Union[QubitPlaceholder, int]]) -> Program:\n n, r_1, r_2 = self.n, self.r_1, self.r_2\n\n # We are starting with all qubits in the |0> state, meaning they are stabilised by\n # Z_1, Z_2, ..., Z_n. We want to do a unitary transformation to a state stabilised by the\n # code stabilisers along with the stabilisers for the logical 0 state. In general, if a\n # state |ᴪ> is stabilised by S, then U|ᴪ> is stabilised by USU†. We can perform Clifford\n # gate operations to transform the stabiliser set. For details see Nielsen & Chuang\n # section 10.5.2 and Problem 10.3. 
Also, see Appendix A of \"Fault-tolerant Preparation of\n # Stabilizer States for Quantum CSS Codes by ClassicalError-Correcting Codes.\"\n #\n # The idea is that we want to transform the parity check matrix from\n #\n # [[ 0 0 0 | I1 0 0 ], [[ I1 A1 A2 | 0 0 0 ],\n # [ 0 0 0 | 0 I2 0 ], => [ 0 0 0 | D I2 E ],\n # [ 0 0 0 | 0 0 I3 ]] [ 0 0 0 | A2T 0 I3 ]]\n #\n # Transformations to manipulate the parity check are derived from Figure 10.7 in\n # Nielsen & Chuang which shows how Pauli operators behave under conjugation by Clifford\n # operators.\n\n # The program accumulates the actual operations on the qubits.\n prog = Program()\n\n # Step 0: Copy Z's from I3 to E. Post-state:\n #\n # [[ I1 0 0 | 0 0 0 ],\n # [ 0 0 0 | 0 I2 E ],\n # [ 0 0 0 | 0 0 I3 ]]\n #\n # This is just a multiplication of stabilisers and does not require any instructions.\n\n # Step 1: Apply Hadamards to move I1 to the X side. Post-state:\n #\n # [[ I1 0 0 | 0 0 0 ],\n # [ 0 0 0 | 0 I2 E ],\n # [ 0 0 0 | 0 0 I3 ]]\n for i in range(r_1):\n prog += gates.H(qubits[i])\n\n # Step 2: Copy X's from I1 to A1 and A2. This has the side effect of constructing D and A2T.\n # Post-state:\n #\n # [[ I1 A1 A2 | 0 0 0 ],\n # [ 0 0 0 | D I2 E ],\n # [ 0 0 0 | A2T 0 I3 ]]\n for i in range(r_1):\n for j in range(r_1, n):\n if self.parity_check_c1[i, j] == 1:\n prog += gates.CNOT(qubits[i], qubits[j])\n\n return prog", "def test_swap_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_2q_clifford.swap_gate_circuits_deterministic(final_measure=True)\n targets = ref_2q_clifford.swap_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def quantum_net(self, q_input_features, q_weights_flat):\n\n # Reshape weights\n q_weights = q_weights_flat.reshape(self.args.q_depth, self.args.n_qubits, 3)\n\n # Start from state |+> , unbiased w.r.t. 
|0> and |1>\n # Amplitude encoding\n qml.QubitStateVector(q_input_features, wires=list(range(self.args.n_qubits)))\n \n # Sequence of trainable variational layers\n for k in range(self.args.q_depth):\n self.entangling_layer(self.args.n_qubits)\n self.Rot_layer(q_weights[k])\n\n # Expectation values in the Z basis\n exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(self.args.target_class)]\n return tuple(exp_vals)", "def _build_gains(self):\n flip_nibbles = True\n\n p = (100, 4, flip_nibbles)\n d = (1000, 4, flip_nibbles)\n i = (0, 4, flip_nibbles)\n il = (0, 4, flip_nibbles)\n ol = (255, 2)\n cl = (0, 2)\n el = (4000, 4, flip_nibbles)\n sr = (1, 2)\n db = (1, 2)\n sm = (1, 2)\n gains = self._build_hexstr(p, d, i, il, ol, cl, el, sr, db, sm)\n return \"F6{}\".format(gains)", "def split_state(U):\n return U[0], U[1], U[2], U[3], U[4], U[5], r(U)", "def __init__(self, states, symbols, stacksymbols, acceptableStates, q0, z0, transitions):\n super(PushdownAutomaton, self).__init__(states, symbols, acceptableStates, q0, transitions)\n self.stacksymbols = stacksymbols\n self.z0 = z0\n self.stack = []\n self.simulationLog = \"\"\n self.writer = Writer(self)", "def plus_state(n_qubits):\n return np.array([1]*(2**n_qubits))/np.sqrt(2**n_qubits)", "def initial_state() -> Board:\n board = (\"rnbqkbnr\", \"pppppppp\", \"........\", \"........\", \"........\",\n \"........\", \"PPPPPPPP\", \"RNBQKBNR\")\n\n return board", "def generate_state():\n\n\t\tprobs = calc_probs(env)\n\t\tn_options = len(probs)\n\n\t\t# feedback for agent\n\t\tr_mag = np.zeros(n_options) + rmag\n\t\tl_mag = np.zeros(n_options) + lmag\n\n\t\tnew_state = Bogacz(n_trials, n_options, probs, r_mag, l_mag, V0=V0)\n\t\treturn new_state", "def test_four_qubit_random_circuit(self, device, tol):\n n_wires = 4\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n gates = [\n qml.PauliX(wires=0),\n qml.PauliY(wires=1),\n qml.PauliZ(wires=2),\n qml.S(wires=3),\n qml.T(wires=0),\n qml.RX(2.3, wires=1),\n qml.RY(1.3, wires=2),\n qml.RZ(3.3, wires=3),\n qml.Hadamard(wires=0),\n qml.Rot(0.1, 0.2, 0.3, wires=1),\n qml.CRot(0.1, 0.2, 0.3, wires=[2, 3]),\n qml.Toffoli(wires=[0, 1, 2]),\n qml.SWAP(wires=[1, 2]),\n qml.CSWAP(wires=[1, 2, 3]),\n qml.U1(1.0, wires=0),\n qml.U2(1.0, 2.0, wires=2),\n qml.U3(1.0, 2.0, 3.0, wires=3),\n qml.CRX(0.1, wires=[1, 2]),\n qml.CRY(0.2, wires=[2, 3]),\n qml.CRZ(0.3, wires=[3, 1]),\n ]\n\n layers = 3\n np.random.seed(1967)\n gates_per_layers = [np.random.permutation(gates).numpy() for _ in range(layers)]\n\n def circuit():\n \"\"\"4-qubit circuit with layers of randomly selected gates and random connections for\n multi-qubit gates.\"\"\"\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n assert np.allclose(qnode(), qnode_def(), atol=tol(dev.shots))", "def gen_14BQ_OH():\r\n q_smiles_base = {}\r\n q_smiles_mid = {}\r\n q_smiles_base['1,4-BQ,2-OH'] = '[H]OC1=C([H])C(=O)C([H])=C([H])C1=O'\r\n q_smiles_base['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O'\r\n q_smiles_base['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'\r\n\r\n q_smiles_mid['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'\r\n q_smiles_mid['1,4-BQ,2-OH'] = 'OC1=CC(=O)C=CC1=O'\r\n q_smiles_mid['1,4-BQ,2,3-OH'] = 
'OC1=C(O)C(=O)C=CC1=O'\r\n q_smiles_mid['1,4-BQ,2,3,5-OH'] = 'OC1=CC(=O)C(O)=C(O)C1=O'\r\n q_smiles_mid['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O' \r\n\r\n return q_smiles_base, q_smiles_mid", "def generate_cubes(num_states, distance, startstate='RRRRRRRRRBBBBBBBBBOOOOOOOOOGGGGGGGGGWWWWWWWWWYYYYYYYYY'):\n terminal_states = {'RRRRRRRRRYYYYYYYYYOOOOOOOOOWWWWWWWWWBBBBBBBBBGGGGGGGGG',\n 'BBBBBBBBBYYYYYYYYYGGGGGGGGGWWWWWWWWWOOOOOOOOORRRRRRRRR',\n 'YYYYYYYYYBBBBBBBBBWWWWWWWWWGGGGGGGGGRRRRRRRRROOOOOOOOO',\n 'BBBBBBBBBWWWWWWWWWGGGGGGGGGYYYYYYYYYRRRRRRRRROOOOOOOOO',\n 'OOOOOOOOOGGGGGGGGGRRRRRRRRRBBBBBBBBBWWWWWWWWWYYYYYYYYY',\n 'GGGGGGGGGOOOOOOOOOBBBBBBBBBRRRRRRRRRYYYYYYYYYWWWWWWWWW',\n 'GGGGGGGGGRRRRRRRRRBBBBBBBBBOOOOOOOOOWWWWWWWWWYYYYYYYYY',\n 'GGGGGGGGGYYYYYYYYYBBBBBBBBBWWWWWWWWWRRRRRRRRROOOOOOOOO',\n 'RRRRRRRRRWWWWWWWWWOOOOOOOOOYYYYYYYYYGGGGGGGGGBBBBBBBBB',\n 'OOOOOOOOOWWWWWWWWWRRRRRRRRRYYYYYYYYYBBBBBBBBBGGGGGGGGG',\n 'WWWWWWWWWRRRRRRRRRYYYYYYYYYOOOOOOOOOBBBBBBBBBGGGGGGGGG',\n 'BBBBBBBBBOOOOOOOOOGGGGGGGGGRRRRRRRRRWWWWWWWWWYYYYYYYYY',\n 'BBBBBBBBBRRRRRRRRRGGGGGGGGGOOOOOOOOOYYYYYYYYYWWWWWWWWW',\n 'RRRRRRRRRGGGGGGGGGOOOOOOOOOBBBBBBBBBYYYYYYYYYWWWWWWWWW',\n 'YYYYYYYYYRRRRRRRRRWWWWWWWWWOOOOOOOOOGGGGGGGGGBBBBBBBBB',\n 'YYYYYYYYYOOOOOOOOOWWWWWWWWWRRRRRRRRRBBBBBBBBBGGGGGGGGG',\n 'GGGGGGGGGWWWWWWWWWBBBBBBBBBYYYYYYYYYOOOOOOOOORRRRRRRRR',\n 'WWWWWWWWWGGGGGGGGGYYYYYYYYYBBBBBBBBBRRRRRRRRROOOOOOOOO',\n 'OOOOOOOOOYYYYYYYYYRRRRRRRRRWWWWWWWWWGGGGGGGGGBBBBBBBBB',\n 'RRRRRRRRRBBBBBBBBBOOOOOOOOOGGGGGGGGGWWWWWWWWWYYYYYYYYY',\n 'WWWWWWWWWBBBBBBBBBYYYYYYYYYGGGGGGGGGOOOOOOOOORRRRRRRRR',\n 'WWWWWWWWWOOOOOOOOOYYYYYYYYYRRRRRRRRRGGGGGGGGGBBBBBBBBB',\n 'OOOOOOOOOBBBBBBBBBRRRRRRRRRGGGGGGGGGYYYYYYYYYWWWWWWWWW',\n 'YYYYYYYYYGGGGGGGGGWWWWWWWWWBBBBBBBBBOOOOOOOOORRRRRRRRR'}\n states = []\n while len(states) < num_states:\n x = RubiksCubeOld(startstate)\n for j in range(distance):\n x.apply_move(np.random.randint(0,18))\n newstate = x.get_state()\n if newstate not in terminal_states: states.append(newstate)\n states = list(set(states))\n\n return states", "def buildB(self, debug=False):\n B = np.zeros([len(self.sta), len(self.inp)])\n i = 0\n for ikey, state in self.sta.items():\n j = 0\n c = state.c\n for jkey, input in self.inp.items():\n if debug:\n print(i, ' ', j)\n if isinstance(input, InputT) and input in self.rc.adj[state]:\n # input is temperature and connected to this state\n\n B[i, j] = self.rc.adj[state][input]['H'] / c\n elif isinstance(input, InputQ) and input in self.rc.adj[\n state]: # input is heat flow and connected to state\n B[i, j] = self.rc.adj[state][input]['gain'] / c\n j += 1\n i += 1\n\n return B, list(self.sta.keys()), list(self.inp.keys())", "def test_swap_gate_nondeterministic_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_2q_clifford.swap_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_2q_clifford.swap_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def initQ(self,states,actions):\r\n \r\n Q = {}\r\n \r\n for a in actions:\r\n for s in states:\r\n Q[(s,a)] = random.randrange(100)\r\n \r\n return(Q)", "def sampleBracketsPowerModel(nSamples):\n\tbrackets = []\n\tfor sampleIndex in range(nSamples):\n\t\tbracket = []\n\t\tregionWinners = np.zeros(4)\n\t\tfor regionIndex in range(4):\n\t\t\tregionVector, regionWinners[regionIndex] = sampleRegionPowerModel()\n\t\t\tbracket += 
regionVector\n\t\t# 2. Select outcomes of F4/NCG games (Rounds 5, 6)\n\t\tteam0 = {'seed': regionWinners[0], 'region': 0}\n\t\tteam1 = {'seed': regionWinners[1], 'region': 1}\n\t\tteam2 = {'seed': regionWinners[2], 'region': 2}\n\t\tteam3 = {'seed': regionWinners[3], 'region': 3}\n\t\twinProb1 = getWinProbability(team0, team1, r=5)\n\t\twinProb2 = getWinProbability(team2, team3, r=5)\n\t\tf4Result1 = 1 if random.random() < winProb1 else 0\n\t\tf4Result2 = 1 if random.random() < winProb2 else 0\n\t\tbracket.append(f4Result1)\n\t\tbracket.append(f4Result2)\n\t\tncgSeeds = applyRoundResults(regionWinners, [f4Result1, f4Result2])\n\n\t\t# NCG\n\t\tncgTeam1 = {'seed': ncgSeeds[0], 'region': -1}\n\t\tncgTeam2 = {'seed': ncgSeeds[1], 'region': -1}\n\t\twinProb = getWinProbability(ncgTeam1, ncgTeam2, r=6)\n\t\tncgResult = 1 if random.random() < winProb else 0\n\t\tbracket.append(ncgResult)\n\t\tbrackets.append(bracket)\n\treturn brackets", "def magic_sample(self, ys):\n\n #for each non-zero element in y\n #we want to multiply the initial state by HGate(i) SGate(i) HGate(i)\n #this turns out to be equivalent to multiplying the whole final state by\n #U H_k S_k H_k U^\\dagger\n #but H_k S_k H_k = e^{i\\pi/4} \\frac{1}{\\sqrt{2}} (I -i X_k)\n #so now we evolve identity forward by U (trivial)\n #and evolve X_k forward by U (using the AGState)\n #then we have to send the resulting Pauli through UC and UH\n #giving a third Pauli\n #then the state is of the form (we^{i\\pi/4}) UC UH (I + i^d P)/sqrt(2) |s>\n #then we apply Bravyi et al's prop. 4 to turn this into a new ch form\n \n\n chCopy = deepcopy(self.chState) #we update this copy as we go\n\n for i, y in enumerate(ys):\n if y:\n #we want to know what U_c^\\dagger U X_i U^\\dagger U_c is\n #firstly we use the A-G info\n # U X_i U^\\dagger is the i'th destabiliser\n x = self.agState.x[self.n+i]\n z = self.agState.z[self.n+i]\n r = self.agState.r[self.n+i]\n\n #print(x,z,r)\n x_col = np.array([x]).T\n z_col = np.array([z]).T\n \n #now we apply U_c to this using the CH-form info\n x_mat = chCopy.F * x_col\n z_mat = (chCopy.M * x_col + chCopy.G*z_col) % np.uint8(2)\n r = (r + util.sort_pauli_string(x_mat, z_mat)) % np.uint8(2)\n\n u = (x @ chCopy.F) % np.uint8(2)\n h = (x @ chCopy.M + z @ chCopy.G) % np.uint8(2)\n\n g = (x @ (z + chCopy.g)) % np.uint8(4)\n\n #now U_c^dag U X_i U^dag U_C = (-1)^r i^g prod_j Z_j^{h_j} X_j^{u_j}\n #we want to conjugate this by U_H\n #everywhere chCopy.v == 1 we flip a z to an x and an x to a z\n #everywhere chCopy.v == 1 and u == 1 and h == 1 we need to swap the order of our x and z so we get a minus sign\n\n u2 = u*(np.uint8(1) ^ chCopy.v) ^ (h*chCopy.v)\n h2 = (u*chCopy.v) ^ (h*(np.uint8(1) ^ chCopy.v))\n\n r = (r + (u*h*chCopy.v).sum()) % np.uint8(2)\n \n \n #now U_H^dag U_c^dag U X_i U^dag U_C U_H = (-1)^r i^g prod_j Z_j^{h2_j} X_j^{u2_j}\n\n t = u2 ^ chCopy.s\n r = (r + h2 @ t) % np.uint8(2)\n\n #now we have w UC UH |s> = w (-1)^r (i)^g UC UH |t>\n\n if all(t == chCopy.s):\n chCopy.w *= np.exp(1j*np.pi/4) * (1 + (1j)**(g+2*r -1) )/ np.sqrt(2)\n else:\n phase, VCList, v, s = util.desuperpositionise(chCopy.s, t, (g+2*r -1)%np.uint8(4), chCopy.v)\n\n chCopy.w *= phase*np.exp(1j*np.pi/4)/np.sqrt(2)\n chCopy.v = v\n chCopy.s = s\n\n for gate in VCList:\n gate.rightMultiplyC(chCopy)\n \n return chCopy", "def solution(self) -> State:", "def test_x_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.x_gate_circuits_deterministic(final_measure=True)\n targets = 
ref_1q_clifford.x_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def make_queues(self):\n for age in range(self.MIN_AGE, self.MAX_AGE+self.BIN_SIZE, self.BIN_SIZE):\n self.make_n_queues(self.SEXES)", "def _one_q_state_prep(oneq_state: _OneQState):\n label = oneq_state.label\n if label == 'SIC':\n return _one_q_sic_prep(oneq_state.index, oneq_state.qubit)\n elif label in ['X', 'Y', 'Z']:\n return _one_q_pauli_prep(label, oneq_state.index, oneq_state.qubit)\n else:\n raise ValueError(f\"Bad state label: {label}\")", "def call(self, states):\n # TODO: implement this ~\n l1 = tf.nn.relu(self.Q_1(states))\n l2 = tf.nn.relu(self.Q_2(l1))\n qVals = self.Q_3(l2)\n return qVals\n # return tf.argmax(qVals, 1)", "def create_iterables(self):\n iterables = [[0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1]]\n self.states = []\n for t in itertools.product(*iterables):\n self.states.append(t)", "def unitary_builder(qubit_register, circuit): \n \n no_of_qubits = math.log(next(x for x in qubit_register.shape if x != 1), 2)\n qubit_ordering = []\n operations_in_slice = []\n operation_list = None\n for slice in circuit:\n for step in slice[\"operations\"]:\n qubit_ordering.extend(step[1])\n operations_in_slice.extend([step[0]])\n identity_operation_count = int(no_of_qubits - len(qubit_ordering))\n operations_in_slice.extend([qeye(2)] * identity_operation_count)\n qubit_ordering.extend([x for x in range(int(no_of_qubits)) if x not in qubit_ordering])\n operation_slice = tensor(operations_in_slice).permute(qubit_ordering)\n if operation_list is None:\n operation_list = [operation_slice]\n else:\n operation_list.extend([operation_slice])\n qubit_ordering = []\n operations_in_slice = [] \n \n circuit_unitary = reduce((lambda x, y: x * y), operation_list)\n \n return circuit_unitary", "def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # define rotations for circuit in each layer, 0: identity, 1:X, 2:Y 3:Z\n ini_pauli = np.zeros([depth, n_qubits], dtype=int)\n\n # set first and second layer, rest comes later\n ini_pauli[0, :] = 2 # y rotation\n if depth > 1:\n ini_pauli[1, :] = 3 # z rotation\n\n # construct natural parameterized circuit\n # gives which type of entangling gates at each layer -- first entry is\n # first qubit index, second is second qubit index, third entry is type\n # of entangling gate\n entangling_gate_index_list = [[] for i in range(depth)]\n orderList = []\n for i in range(n_qubits//2):\n if i % 2 == 0:\n orderList.append(i//2)\n else:\n orderList.append((n_qubits-i)//2)\n\n if n_qubits > 1:\n shiftList = [orderList[0]]\n else:\n shiftList = []\n for i in range(1, n_qubits//2):\n shiftList.append(orderList[i])\n shiftList += shiftList[:-1]\n\n # this list gives which entangling gates are applied in which layer\n if type_circuit == 0:\n # deep natural PQC, includes non-nearest neighbor gates\n for j in range(min(len(shiftList), int(np.ceil(depth/2))-1)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif type_circuit == 1:\n # only do 2 entangling layers at max, and only do gates with\n # nearest neighbor and no ring\n 
for j in range(min(len(shiftList), 3)):\n if j == 0:\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif (j == 1 or j == 2):\n # exclude ring gate and gate 0,1 on third entangling layer\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(1, n_qubits//2)\n ]\n\n elif type_circuit == 2:\n # only do 3 regular entangling layers in a ring topology, then two\n # more phase gates with next-nearst neighbor, which requires one\n # swap. This adds 4 more parameters\n for j in range(min(len(shiftList), 3)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n entangling_gate_index_list[1+2*3] = [\n [n_qubits-1, 1, 0],\n [0, n_qubits-2, 0]\n ]\n\n for i in range(len(entangling_gate_index_list)-1):\n if len(entangling_gate_index_list[i]) > 0:\n for j in range(len(entangling_gate_index_list[i])):\n qubit_index = entangling_gate_index_list[i][j][0]\n ini_pauli[i+1, qubit_index] = 2\n if i+2 < depth:\n ini_pauli[i+2, qubit_index] = 3\n\n elif type_circuit == 3:\n\n ini_pauli = np.ones([depth, n_qubits], dtype=int)*2\n\n for i in range(1, depth, 2):\n ini_pauli[i, :] = 3\n\n if n_qubits % 2 == 0:\n # even qubits ALT circuit needs to get rid of boundary rotations at\n # even entangling layers\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n ini_pauli[i+1, -1] = 0\n else:\n # for odd qubits, get rid of boundary either on top or bottom qubit\n for i in range(2, depth, 4):\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, -1] = 0\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n\n # CNOT entangling gates\n entangling_gate_index_list = [[] for i in range(depth)]\n counter = 0\n # third index indicates type of entangling gate\n for k in range(1, depth-1, 2):\n\n # place entangler every second layer, do not place any at last\n if counter % 2 == 0:\n # even layer\n entangling_gate_index_list[k] = [\n [2*j, 2*j+1, 1] for j in range(n_qubits//2)\n ]\n else:\n # odd layer\n entangling_gate_index_list[k] = [\n [2*j+1, 2*j+2, 1] for j in range((n_qubits-1)//2)\n ]\n counter += 1\n\n else:\n raise ValueError('type_circuit='+f'{type_circuit}'+' not recognised.')\n\n return ini_pauli, entangling_gate_index_list", "def generatePossibleBrackets(M):\n\t# TODO: I bet it's actually more likely that some 8 vs 9 games flip \n\t# than the F4 seeds change. (Confirmed by region vector experiments)\n\n\t# Strings for regions, indexed by seed winning region. 
\n\t# All other games are pick favorite\n\tregionStrings = [None, \n\t\t'111111111000101', # seed 1 wins (over seed 2)\n\t\t'111111111000100', # 2 beats 1\n\t\t'111111111000110', # 3 beats 1\n\t\t'111111111000001'] # 4 beats 2\n\n\t# Final Four seed options to test\n\tfinalFourPossibilities = ['1111', \n\t\t'1112', '1121', '1211', '2111', \n\t\t'1113', '1131', '1311', '3111', \n\t\t'1122', '1212', '1221', '2112', '2121', '2211', \n\t\t'1114', '1141', '1411', '4111', \n\t\t'1123', '1132', '1312', '1213', \n\t\t'1231', '1321', '2113', '3112', \n\t\t'2131', '3121', '2311', '3211'\n\t\t]\n\n\tbrackets = []\n\tfor finalFourSeeds in finalFourPossibilities:\n\t\tf4seeds = [int(finalFourSeeds[i]) for i in range(4)]\n\t\tbracket = ''\n\t\tfor region in range(4):\n\t\t\tbracket = bracket + regionStrings[f4seeds[region]]\n\n\t\t# Assume favorites win in rounds 5 and 6\n\t\t# f4Game1 = '1' if f4seeds[0] <= f4seeds[1] else '0'\n\t\t# bracket.append(f4Game1)\n\t\t# f4Game2 = '1' if f4seeds[2] <= f4seeds[3] else '0'\n\t\t# bracket.append(f4Game2)\n\t\t# ncg = '1' if np.min([f4seeds[0], f4seeds[1]]) <= np.min([f4seeds[2], f4seeds[3]]) else '0'\n\t\t# bracket.append(ncg)\n\n\t\t# Try all possible final 3 bits\n\t\tfor i in range(8):\n\t\t\tlast3String = '{0:03b}'.format(i)\n\t\t\tfullBracket = bracket + last3String\n\t\t\tbrackets.append(fullBracket)\n\n\treturn brackets", "def babble(self):\n stop = self.num_states - 1\n path = []\n state = np.argmax(np.random.multinomial(1, self.chain[:,0]))\n\n # Begin at the start state and end at the stop state.\n while state != stop:\n path.append(self.states[state])\n state = np.argmax(np.random.multinomial(1, self.chain[:,state]))\n\n return \" \".join(path)", "def get_reward_states(self):\n state1 = State(7, 7)\n return [state1]", "def operations(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:\n # TODO implement asymmetric ansatz\n\n param_set = set(self.params())\n\n # Change to the basis in which the one-body term is diagonal\n yield cirq.inverse(\n bogoliubov_transform(qubits, self.basis_change_matrix))\n\n for i in range(self.iterations):\n\n # Simulate one-body terms\n for p in range(len(qubits)):\n u_symbol = LetterWithSubscripts('U', p, i)\n if u_symbol in param_set:\n yield cirq.ZPowGate(exponent=u_symbol).on(qubits[p])\n\n # Rotate to the computational basis\n yield bogoliubov_transform(qubits, self.basis_change_matrix)\n\n # Simulate the two-body terms\n def two_body_interaction(p, q, a, b) -> cirq.OP_TREE:\n v_symbol = LetterWithSubscripts('V', p, q, i)\n if v_symbol in param_set:\n yield cirq.CZPowGate(exponent=v_symbol).on(a, b)\n yield swap_network(qubits, two_body_interaction)\n qubits = qubits[::-1]\n\n # Rotate back to the basis in which the one-body term is diagonal\n yield cirq.inverse(\n bogoliubov_transform(qubits, self.basis_change_matrix))\n\n # Simulate one-body terms again\n for p in range(len(qubits)):\n u_symbol = LetterWithSubscripts('U', p, i)\n if u_symbol in param_set:\n yield cirq.ZPowGate(exponent=u_symbol).on(qubits[p])\n\n # Rotate to the computational basis\n yield bogoliubov_transform(qubits, self.basis_change_matrix)", "def run_batch(bag):\n\n states = []\n\n for i in bag:\n a1 = i[0]\n a2 = i[1]\n a3 = i[2]\n a4 = i[3]\n a5 = i[4]\n prob = i[5]\n sheets = (a1 + a2 + a3 + a4 + a5)\n if sheets > 0:\n a1_prob = (float(a1) / sheets)\n a2_prob = (float(a2) / sheets)\n a3_prob = (float(a3) / sheets)\n a4_prob = (float(a4) / sheets)\n a5_prob = (float(a5) / sheets)\n if a1 > 0:\n out = a1_func(i)\n out[5] = (out[5] * a1_prob)\n 
states.append(out)\n if a2 > 0:\n out = a2_func(i)\n out[5] = (out[5] * a2_prob)\n states.append(out)\n if a3 > 0:\n out = a3_func(i)\n out[5] = (out[5] * a3_prob)\n states.append(out)\n if a4 > 0:\n out = a4_func(i)\n out[5] = (out[5] * a4_prob)\n states.append(out)\n if a5 > 0:\n out = a5_func(i)\n out[5] = (out[5] * a5_prob)\n states.append(out)\n if sheets == 1:\n global zero_probs\n zero_probs.append(prob)\n if sheets == 0:\n states.append([0, 1, 1, 1, 1, prob])\n return states", "def __init__(self, alpha, gamma, epsilon, num_states, actions):\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon \n self.num_states = num_states\n self.actions = actions\n #Q is a dictionary mapping (state, action) pairs to Q values\n self.Q = dict([((s, a), 0.0) for s in xrange(num_states) for a in actions])", "def to_z_basis_ops(self) -> Iterator[raw_types.Operation]:\n for qubit, pauli in self.items():\n yield clifford_gate.SingleQubitCliffordGate.from_single_map(\n {pauli: (pauli_gates.Z, False)})(qubit)", "def test_z_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.z_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.z_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def createState(self):\n return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(30))", "def gen_states(basenm,sites,is_prefix):\n n=len(sites)\n\n # calculate max \n max=1\n for i in range(len(sites)):\n max=max*len(sites[i])\n\n vec_of_states=[]\n names=[]\n for i in range(max):\n j=i\n a=[]\n divisor=max/len(sites[0])\n for k in range(len(sites)):\n a.append(int(j/divisor))\n j=j%divisor\n try: \n divisor=divisor/len(sites[k+1])\n except IndexError: # on the last trip through the loop\n divisor=divisor/1 # yes I know, but this is formally what I'm doing \n vec_of_states.append(a)\n nm=basenm\n for i in range(n):\n if is_prefix[i]:\n nm=\"%s_%s\" % (sites[i][a[i]],nm) # prepend mUb0_ or whatever - note a[i] is not printed directly but used as a lookup \n else:\n nm=\"%s_%s\" % (nm, sites[i][a[i]]) # append _mUb0 or whatever - note a[i] is not printed directly but used as a lookup \n names.append(nm) \n return vec_of_states, names", "def _QConv(self, step, target, qubits):\n yield cirq.CZPowGate(exponent=self._get_new_param())(qubits[target], qubits[target+step])\n yield cirq.CXPowGate(exponent=self._get_new_param())(qubits[target], qubits[target+step])", "def test_set_up(self):\n s = State(substance=\"water\")\n s.up = Q_(1013250.0, \"J/kg\"), Q_(101325.0, \"Pa\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.up[0], Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.up[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.up = Q_(3013250.0, \"J/kg\"), Q_(101325.0, \"Pa\")\n assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: 
ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.up[0], Q_(3013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.up[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def run_two_qubit_zi_experiment():\n\n # Best achieved result. Goal is to get as close to +1 as possible.\n max_expect = 0.0\n\n # Perform experiments with randomly selected angles.\n for experiment in range(flags.FLAGS.experiments):\n # Pick random angles.\n for i in range(10):\n angles[i] = random.random() * 2.0 * math.pi\n\n # Construct and run the circuit.\n qc = circuit.qc('vqe')\n full_ansatz(qc)\n qc.z(0)\n\n # Measure the probablities as computed from the amplitudes.\n # We only do this once per experiment.\n p0, _ = qc.measure_bit(0, collapse=True)\n p1, _ = qc.measure_bit(1, collapse=True)\n\n # Simulate multiple measurements by sampling over the probabilities\n # to obtain a distribution of sampled states. The measurements above\n # are the probablities that a state would be found in the |0> state.\n # For each bit, we compare this probability against another random value r.\n # If the measured probability is < r, we pretend we've actually measured an\n # |0> state, else a |1> state. We do this via sample_state() on both qubits.\n #\n num_shots = flags.FLAGS.shots\n counts = [0] * 4\n for _ in range(num_shots):\n bit0 = qc.sample_state(p0)\n bit1 = qc.sample_state(p1)\n counts[bit1 * 2 + bit0] += 1\n\n # Compute the expectation value from samples measurements. Again,\n # |00> and |01> map to Eigenvalue +1\n # |10> and |11> map to Eigenvalue -1\n #\n # This is a bit of cheating. In this example we _know_ the\n # Eigenvalues and can therefore properly construct the expectation\n # value. 
I'd think in the general case it has to actually be\n # computed with <psi|H|psi>, which is still O(n^2).\n #\n expect = (counts[0] + counts[1] - counts[2] - counts[3]) / num_shots\n\n # Update and print currently best result.\n #\n if expect > max_expect:\n max_expect = expect\n print('Max expecation of H for experiment {:5d}: {:.4f} (target: 1.0)'.\n format(experiment, max_expect))\n print(' |00>: {}, |01>: {}, |10>: {}, |11>: {}'.format(\n counts[0], counts[1], counts[2], counts[3]))\n print(' ', end='')\n for i in range(10):\n print('{:.1f} '.format(angles[i] / 2 / math.pi * 360), end='')\n print()", "def test_t_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_non_clifford.t_gate_circuits_deterministic(final_measure=True)\n targets = ref_non_clifford.t_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def initialize_states(templates, number_of_templates, number_of_states=5):\n number_of_frames_in_each_state_for_each_template = []\n for i in xrange(number_of_templates):\n # get number_of_frames_in_each_state_for_each_template\n length = len(templates[i])\n small_number_of_elements_in_current_state = length / number_of_states # if length is 12,\n # then there are 3 states have 2 frames and 2 states have 3 frames,we call 2 small number and 3 big number\n number_of_big_number = length % number_of_states\n number_of_frames_in_each_state = [small_number_of_elements_in_current_state for j in \\\n xrange(number_of_states - number_of_big_number)]\n number_of_frames_in_each_state.extend \\\n ([small_number_of_elements_in_current_state + 1 for j in xrange(number_of_big_number)])\n number_of_frames_in_each_state_for_each_template.append(number_of_frames_in_each_state)\n # print number_of_frames_in_each_state_for_each_template\n return number_of_frames_in_each_state_for_each_template", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def test_state(\n size: Union[int, tuple],\n num_berries: int,\n number_steps: int,\n state_sizes: List[int] = [3, 5],\n) -> None:\n for state_size in state_sizes:\n game = Game(\n size,\n [0, 0],\n -1,\n 5,\n -5,\n 10,\n num_berries,\n berry_movement_probabilities=[0.5] * num_berries,\n state_size=state_size,\n )\n done = False\n i = 1\n print(f\"Beginning full board\\n{game.get_state(full=True)}\")\n print(f\"And the state\\n{game.get_state(state_size)}\")\n while not done and i < number_steps:\n action = random.choice(MOVEMENTS)\n print(f\"Action taken {action}\")\n state, reward, done = game.step(action)\n print(f\"Full board\\n{game.get_state(full=True)}\")\n print(f\"The state\\n{game.get_state(state_size)}\")\n i += 1", "def _generate_raw_environments(self, num, seed):", "def __call__(self, inputs, state):\n with tf.variable_scope(\"BayesLSTMCell\"):\n if self.w is None:\n\n# size = inputs.get_shape()[-1].value\n \n print ([\"------- Size input LSTM: \", inputs.shape])\n print ([\"------- Dim input specified \", self.X_dim])\n# print ([\"num units LSTM: \", self.num_units])\n \n self.w = VI.sample_posterior((self.X_dim + self.num_units, 4 * self.num_units),\n name=self.n + \"_weights\",\n prior=self.prior,\n is_training=self.is_training)\n \n self.b = VI.sample_posterior((4 * self.num_units, 1),\n name=self.n + \"_biases\",\n prior=self.prior,\n 
is_training=self.is_training)\n\n # Get the cell and hidden state from the previous cell [C_t-1, h_t-1]\n C_t_prev , h_t_prev = state\n #Vector concatenation of previous hidden state and embedded inputs\n concat_inputs_hidden = tf.concat([inputs, h_t_prev], 1)\n # Compute the Z = Wx + b for each of the 4 networks at once !\n gate_inputs = tf.nn.bias_add(tf.matmul(concat_inputs_hidden, self.w), tf.squeeze(self.b))\n \n # Split data up for the 4 gates\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = tf.split(value=gate_inputs, num_or_size_splits=4, axis=1)\n\n # Compute the new cell \n C_t = (C_t_prev * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i)*self._activation(j))\n h_t = self._activation(C_t) * tf.sigmoid(o)\n \n #Create tuple of the new state\n State_t = LSTMStateTuple(C_t, h_t)\n\n return h_t, State_t", "def UCB1(self, T, naive=False):\n # Initialization\n number_draws, rewards = self.initialize()\n rew = []\n draw = []\n\n for t in range(T):\n print(\"len = {}\".format(t))\n if naive:\n opt_func = rewards / number_draws\n else:\n opt_func = rewards / number_draws + np.sqrt(np.log(t + 1) / (2. * number_draws))\n print(\"optimization function from which we get the argmax: {}\".format(opt_func))\n\n # Get the argmax from the optimization function\n next_action = np.argmax(opt_func)\n print(\"Next Arm to draw: {}\".format(next_action + 1))\n\n next_arm = self.MAB[next_action]\n r = next_arm.sample()\n print(\"Reward of the next arm drawn: {}\".format(r))\n\n # Updating the N(t) and S(t)\n number_draws[next_action] += 1\n print(\"N vector updated: {}\".format(number_draws))\n\n rewards[next_action] += r\n print(\"S vector updated: {}\".format(rewards))\n\n # Lists of rewards and actions(arms drawn)\n draw.append(next_action)\n rew.append(r)\n\n return rew, draw", "def constructRBFStates(L1, L2, W1, W2, sigma):\n N_states = (L1+1)*(W1+W2+1)+L2*(W2+1)\n x_coords = torch.zeros(N_states, dtype=torch.float32)\n y_coords = torch.zeros(N_states, dtype=torch.float32)\n state_to_basis = {}\n ind = 0\n for x in range(L1+L2+1):\n for y in range(W1+W2+1):\n if (0<=x<=L1 and 0<=y<=W1+W2) or (0<=x<=L1+L2 and W1<=y<=W1+W2):\n x_coords[ind] = x\n y_coords[ind] = y\n ind += 1\n\n for x in range(L1 + L2 + 1):\n for y in range(W1 + W2 + 1):\n if (0 <= x <= L1 and 0 <= y <= W1 + W2) or (0 <= x <= L1 + L2 and W1 <= y <= W1 + W2):\n basis = torch.exp(-((x_coords-x)**2 + (y_coords-y)**2)/(2*sigma**2))\n state_to_basis[(x,y)] = basis.view(1, -1).to(device)\n\n return state_to_basis", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def hbnb():\n states = storage.all(State).values()\n states = sorted(states, key=lambda k: k.name)\n st_ct = []\n\n # Sort cities inside each states\n for state in states:\n st_ct.append([state, sorted(state.cities, key=lambda k: k.name)])\n\n amenities = storage.all(Amenity).values()\n amenities = sorted(amenities, key=lambda k: k.name)\n\n places = storage.all(Place).values()\n places = sorted(places, key=lambda k: k.name)\n\n values = {\"states\": states, \"amenities\": 
amenities,\n \"places\": places, \"cache_id\": uuid4()}\n\n return render_template('0-hbnb.html', **values)", "def make_instructions(self):\n #de, aux, vers = self.rods\n de, aux, vers = 0, 1, 2\n n = self.num_rings\n\n self.recur(n, de, aux, vers)\n\n ### Add dummy tuple at end so I can look one move ahead on states\n self.instructions.append((0, 0, 0))", "def _gen_qiskit_gateset(q_circ):\n return {\n 'H': q_circ.h,\n 'X': q_circ.x,\n 'Y': q_circ.y,\n 'Z': q_circ.z,\n 'SWAP': q_circ.swap,\n 'I': q_circ.iden,\n 'S': q_circ.s,\n 'D-S': q_circ.sdg,\n 'T': q_circ.t,\n 'D-T': q_circ.tdg,\n 'RX': q_circ.rx,\n 'RY': q_circ.ry,\n 'RZ': q_circ.rz,\n 'C-H': q_circ.ch,\n 'CNOT': q_circ.cx,\n 'C-Y': q_circ.cy,\n 'CSIGN': q_circ.cz,\n 'C-RZ': q_circ.crz,\n 'CCNOT': q_circ.ccx,\n 'C-SWAP': q_circ.cswap,\n 'U': q_circ.u3,\n 'U3': q_circ.u3,\n 'U2': q_circ.u2,\n 'U1': q_circ.u1,\n 'U0': q_circ.iden,\n 'PH': q_circ.rz,\n 'RXX': q_circ.rxx,\n 'RZZ': q_circ.rzz,\n 'R': q_circ.r,\n 'MS': q_circ.ms\n }", "def make_bprod(self):\n rhs1 = random.choice(self.nonterminals)\n rhs2 = random.choice(self.nonterminals)\n lhs = random.choice(self.nonterminals)\n return (lhs, (rhs1, rhs2))", "def _samples(self):\n finite_types = \\\n [QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5],\n ['C', 3], ['C', 5], ['D', 2], ['D', 5],\n [\"E\", 6], [\"E\", 7], [\"E\", 8], [\"F\", 4],\n [\"G\", 2]]]\n affine_types = \\\n [QuiverMutationType(t) for t in [['A', [1,1], 1], ['A', [4,5], 1], ['D', 4, 1], ['BB', 5, 1]]]\n elliptic_types = \\\n [QuiverMutationType(t) for t in [['E', 6, [1,1]], ['E', 7, [1,1]]]]\n mutation_finite_types = \\\n [QuiverMutationType(t) for t in [['R2',(1,5)], ['R2',(3,5)]]]\n mutation_infinite_types = \\\n [QuiverMutationType(t) for t in [['E',10], ['BE',5], ['GR',(3,10)], ['T',(3,3,4)]]]\n\n return finite_types + affine_types + elliptic_types + mutation_finite_types + mutation_infinite_types", "def _paired_states(state: _qutip.Qobj, cur_n: int, out_of: str, order: int):\n if out_of is \"g\":\n return _state.element(state, [f\"g{cur_n}\", f\"e{cur_n + order}\"])\n else:\n return _state.element(state, [f\"g{cur_n - order}\", f\"e{cur_n}\"])", "def test_t_gate_nondeterministic_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.t_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.t_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def calculate_state(self):\n\t\tif self.state_type == 'Queues':\n\t\t\t#self.queue_state =\\\n\t\t\t#[0. if movement.AttValue('QLen(Current, Last)') is None else movement.AttValue('QLen(Current, Last)') for movement in self.lanes_movement]\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state)[np.newaxis,:]\n\n\t\tif self.state_type == \"QueuesSig\":\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. 
if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state+[self.next_action_key])[np.newaxis,:]\n\t\n\t\treturn(state)", "def next_state_generation(state, target, operators):\n current_swap_state = ''\n current_swap_res = sys.maxsize\n current_change_state = ''\n current_change_res = sys.maxsize\n # for swapping\n for i in range(0, len(state), 2):\n for j in range(2, len(state), 2):\n new_swap_state, swap_res = swap(i, j, state, target)\n if current_swap_res > swap_res:\n current_swap_res = swap_res\n current_swap_state = new_swap_state\n # for changing\n for i in range(1, len(state) - 1, 2):\n new_change_state, change_res = change(i, random.choice(operators), state, target)\n if current_change_res > change_res:\n current_change_res = change_res\n current_change_state = new_change_state\n # return the lowest of the 2 value and state amongst swapping and changing operations\n print(\"Swap res:\", current_swap_res)\n print(\"Change res:\", current_change_res)\n if current_swap_res < current_change_res:\n print(\"Best State \", current_swap_state)\n print(\"Distance from target: \", current_swap_res)\n print()\n return current_swap_state, current_swap_res\n else:\n print(\"Best State \", current_change_state)\n print(\"Distance from target: \", current_change_res)\n print()\n return current_change_state, current_change_res", "def random_state(numStates, numLetters):\n next_state = random.randrange(0, numStates)\n replace_letter = random.randrange(0, numLetters)\n movement = random.randrange(0, 2)\n return { \"next_state\": next_state, \"replace_letter\": replace_letter, \"movement\": movement }", "def test_s_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.s_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def pre_defined_circuit(env: 'QEnv', q: List['QRegStorage'], gate_list: List[operatorType]) -> 'QEnv':\n if gate_list:\n for gate in gate_list:\n if gate.bits == 1:\n gate(q[0])\n elif gate.bits == 2:\n gate(q[0], q[1])\n return env", "def test_case_generate(self):\n\n # initialization\n state = np.random.choice(self.init_states)\n model = rm.randint(0, self.model_num - 1)\n duration = np.random.choice(self.step_values)\n temp = rm.randint(self.min_temp, self.max_temp)\n\n self.states = [[model, duration, temp]]\n self.time = duration\n\n while self.time < self.max_time:\n if state == \"inc_tmp\":\n change = np.random.choice(\n self.transitionName[0], p=self.transitionMatrix[0]\n ) # choose the next state\n if change == \"S1S1\": # stay in the same state\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n diff = (\n self.max_time - self.time\n ) # this is for ensuring the maximum duration is not exceeded\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n elif change == \"S1S2\": # change from increase to decrease\n temp = 
self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"dec_tmp\"\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n else:\n print(\"Error\")\n\n elif state == \"dec_tmp\":\n change = np.random.choice(\n self.transitionName[1], p=self.transitionMatrix[1]\n )\n if change == \"S2S1\":\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"inc_tmp\"\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n\n self.time += duration\n self.states.append([model, duration, temp])\n\n elif change == \"S2S2\":\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n else:\n print(\"Error\")\n pass\n else:\n print(\"Error\")\n\n return self.states_to_dict()", "def new_state():\n return ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))", "def _prepare(self):\n for n in range(4):\n self._code += str(random.randint(1, 9))", "def test_h_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.h_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.h_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_gan_qiskit(n, Database):\n mini = np.min(Database)\n maxi = np.max(Database)\n h = (maxi - mini) / (2 ** n)\n bins = [[k for d in Database if mini + h * k < d < mini + h * (k + 1)] for k in range(2 ** n)]\n interv = [mini + h * k for k in range(2 ** n)]\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = 10598\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n gan_test = QGAN(Database, num_qubits=[n], snapshot_dir=None,\n quantum_instance=quantum_instance, batch_size=int(len(Database) / 20), num_epochs=300)\n gan_test.train()\n samp, bins_var = gan_test.generator.get_output(gan_test.quantum_instance, shots=4096)\n\n compar = [len(b) / len(Database) for b in bins]\n if len(interv) == len(compar):\n plt.plot(interv, compar)\n\n plt.plot(interv, bins_var)\n\n plt.show()", "def bra(i, dims):\n if not isinstance(i, list):\n i=[i]\n #Single qubit\n if len(i)==1:\n val = np.zeros((dims,1))\n val[i] = 1\n return val.reshape(1,dims)\n #multiple qubits. 
we need to tensor them together\n val = np.ones((1,1)) #initialize variable, so we have something to tensor with, the first time\n for x in i:\n val = np.tensordot(val,ket([x],dims), axes=0).transpose(0,2,1,3)\n val = val.reshape(val.shape[0]*val.shape[1],val.shape[2]*val.shape[3])\n return val.reshape(1,val.shape[0])", "def __str__(self):\n state = ''\n state += ' '.join([str(x) for x in self.pos]) + ' '\n state += ''.join([str(x) + ' ' + str(y) + ' ' for x,\n y in zip(self.BU, self.BD)])\n for e in self.BF:\n state += ' '.join([str(x) for x in e])\n state += ' '\n state += ' '.join([str(x) for x in self.LU]) + ' '\n state += ' '.join([str(x) for x in self.LD]) + ' '\n\n return state", "def test_simulate_state_output_padding(self, all_n_qubits):\n circuit_batch = []\n for n_qubits in all_n_qubits:\n qubits = cirq.GridQubit.rect(1, n_qubits)\n circuit_batch += util.random_circuit_resolver_batch(qubits, 1)[0]\n\n tfq_results = tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), [],\n [[]] * len(circuit_batch))\n\n # Don't use batch_util here to enforce consistent padding everywhere\n # without extra tests.\n sim = cirq.Simulator()\n manual_padded_results = []\n for circuit in circuit_batch:\n result = sim.simulate(circuit)\n wf = result.final_state_vector\n blank_state = np.ones(\n (2**max(all_n_qubits)), dtype=np.complex64) * -2\n blank_state[:wf.shape[0]] = wf\n manual_padded_results.append(blank_state)\n\n self.assertAllClose(tfq_results, manual_padded_results, atol=1e-5)", "def test_error_num_qubits(self, basis_state, wires):\n\n with pytest.raises(ValueError, match=\"'basis_state' must be of shape\"):\n BasisStatePreparation(basis_state, wires)", "def make(max_states=10):\n states = range(max_states)\n keys = range(max_states)\n shuffle(keys)\n return {\n 'states': states,\n 'transitions': {\n keys[index]: DummyProgramGenerator._transition(state)\n for index, state in enumerate(states)}}", "def gru_cell_decoder(self, Xt, h_t_minus_1,context_vector):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z_decoder) + tf.matmul(h_t_minus_1,self.U_z_decoder) +tf.matmul(context_vector,self.C_z_decoder)+self.b_z_decoder) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r_decoder) + tf.matmul(h_t_minus_1,self.U_r_decoder) +tf.matmul(context_vector,self.C_r_decoder)+self.b_r_decoder) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h_decoder) +r_t * (tf.matmul(h_t_minus_1, self.U_h_decoder)) +tf.matmul(context_vector, self.C_h_decoder)+ self.b_h_decoder) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t,h_t", "def test_ccx_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_non_clifford.ccx_gate_circuits_deterministic(final_measure=True)\n targets = ref_non_clifford.ccx_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def generateBinItems(quantity=500, bpp1=True):\n if bpp1:\n return [i for i in range(1, 
quantity+1)]\n return [(i**2)/2 for i in range(1,quantity+1)]", "def __init__(self, policy , nstates , initial_stock , incomingOrdersQueue, outgoingOrdersQueue, incomingDeliveriesQueue, outgoingDeliveriesQueue):\r\n self.currentStock = initial_stock\r\n self.currentOrders = 0\r\n self.costsIncurred = 0\r\n \r\n self.incomingOrdersQueue = incomingOrdersQueue\r\n self.outgoingOrdersQueue = outgoingOrdersQueue\r\n self.incomingDeliveriesQueue = incomingDeliveriesQueue\r\n self.outgoingDeliveriesQueue = outgoingDeliveriesQueue\r\n \r\n self.lastOrderQuantity = 0\r\n\r\n\r\n self.policy = policy\r\n self.nstates = nstates\r\n self.states = []\r\n\r\n\r\n return", "def train_braille(self, number_of_digits=4):\n print number_of_digits\n if number_of_digits>4 or number_of_digits<0 or type(number_of_digits) != int:\n raise ValueError ('number_of_digits should be 1, 2, 3 or 4')\n \n self.performace_logfile()\n n_trials = 0\n n_correct = 0\n try:\n while True:\n n_trials +=1\n number = random.randint(0, int('9'*number_of_digits))\n self.set_braille(number)\n while True:\n guess = raw_input('Guess the current number: ')\n try:\n guess = int(guess)\n break\n except ValueError as e:\n print 'Invalid input'\n if guess == number:\n print 'Correct!\\n\\n'\n n_correct += 1\n else:\n print 'Wrong, the correct answer is: {}\\n\\n'.format(number)\n time.sleep(3)\n self.set_empty()\n time.sleep(1)\n\n except KeyboardInterrupt:\n self.performance_logger('Braille', n_trials, n_correct)\n print 'Training stopped'\n print 'Out of {} trials, {} were correct.'.format(n_trials, n_correct)", "def setUp(self):\n self.f1 = uniutil.polynomial(enumerate([3, 6, 81, 1]), Z)\n self.f2 = uniutil.polynomial(enumerate([1, 81, 6, 3]), Z)\n self.f3 = uniutil.polynomial(enumerate([37, 6, 18, 1]), Z)\n self.f4 = uniutil.polynomial(enumerate([91, 7, 14, 1]), Z)\n # f5 = (x - 6)(x - 5)...x(x + 1)(x + 2) - 1\n self.f5 = uniutil.polynomial(enumerate([1439, -1368, -1324,\n 1638, -231, -252,\n 114, -18, 1]), Z)", "def test_state_preparation(self, tol, qubit_device_3_wires, basis_state, wires, target_state):\n\n @qml.qnode(qubit_device_3_wires)\n def circuit():\n BasisStatePreparation(basis_state, wires)\n\n # Pauli Z gates identify the basis state\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2))\n\n # Convert from Pauli Z eigenvalues to basis state\n output_state = [0 if x == 1.0 else 1 for x in circuit()]\n\n assert np.allclose(output_state, target_state, atol=tol, rtol=0)", "def __init__(self, state_size, num_actions):\n super(DQN, self).__init__()\n self.num_actions = num_actions\n self.batch_size = 128\n self.epsilon = 0.7\n self.min_epsilon = 0.05\n self.epsilon_update = 0.995\n \n\n # TODO: Define network parameters and optimizer\n \n self.buffer = ReplayMemory(10000)\n\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(0.01, 9000, 0.1)\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\n \n hidden_sz1 = 256 \n hidden_sz2 = 128\n \n self.Q_1 = tf.keras.layers.Dense(hidden_sz1)\n self.Q_2 = tf.keras.layers.Dense(hidden_sz2)\n self.Q_3 = tf.keras.layers.Dense(self.num_actions)", "def BBS(initial_state, check_dict):\r\n \r\n print(\"Implementing BBS...\")\r\n \r\n q = []\r\n heapq.heappush(q, (initial_state[0][2], initial_state))\r\n accomplished = False\r\n \r\n while len(q) != 0:\r\n path = heapq.heappop(q)[1]\r\n \r\n if is_goal(path[-1][0]):\r\n goal = path\r\n accomplished = True\r\n break\r\n \r\n state_container = next_possible_states(path, check_dict, 
False)\r\n for i in state_container:\r\n if len(path) <= 1:\r\n temp = list(path)\r\n temp.append(i)\r\n heapq.heappush(q, (i[2], temp))\r\n else:\r\n if i[0] != path[-2][0]:\r\n temp = list(path)\r\n temp.append(i)\r\n heapq.heappush(q, (i[2], temp))\r\n \r\n if accomplished:\r\n print(\"Solved! Number of moves:\", len(goal) - 1)\r\n return goal, True\r\n else:\r\n print(\"Cannot be solved. Number of moves:\", len(path) - 1)\r\n return path, False", "def diracNotation(self):\n diracNotation=\"\"\n for i in range(len(self.reversedStatevector)):\n if self.reversedStatevector[i]==0:\n continue\n diracNotation+=self.numberFormat(self.reversedStatevector[i].real)\n diracNotation+=self.numberFormat(self.reversedStatevector[i].imag,True)\n #the next line generates the state .. ex circuit with 3 wires -> i=2 => state:010\n diracNotation+=\"|\"+str((\"{0:0\"+str(self.num_qubits).replace('.0000','')+\"b}\").format(i))+\"⟩ \" \n return diracNotation.lstrip(\"+\")", "def knights_bt(dimensions):\n\n height, width = dimensions\n # Exit cases for recurive call\n # Odd boards\n if height == 5 and width == 5:\n return _5x5, (5,5)\n elif height == 7 and width == 7:\n return _7x7, (7,7)\n elif height == 9 and width == 9:\n return _9x9, (9,9)\n # Even boards\n elif height == 6:\n if width == 6:\n return _6x6, (6,6)\n elif width == 8:\n return _6x8, (6,8)\n elif height == 8:\n if width == 6:\n return _8x6, (8,6)\n elif width == 8:\n return _8x8, (8,8)\n elif width == 10:\n return _8x10, (8,10)\n elif height == 10:\n if width == 8:\n return _10x8, (10,8)\n elif width == 10:\n return _10x10, (10,10)\n elif width == 12:\n return _10x12, (10,12)\n elif height == 12:\n if width == 10:\n return _12x10, (12,10)\n\n\n # Determine if the quadrants must be odd\n isOdd = (width >= 10 and width % 2 == 0 and width % 4 != 0)\n if isOdd:\n print(\"odd board\")\n\n # Find the position to cut the board into quadrants.\n row_cut, column_cut = find_cuts(width, height, isOdd)\n\n # Divide the board at the cut points and recurse until we have a fixed solution.\n ul, ul_dim = knights_bt((row_cut, column_cut))\n ur, ur_dim = knights_bt((row_cut, width-column_cut))\n bl, bl_dim = knights_bt((height-row_cut, column_cut))\n br, br_dim = knights_bt((height-row_cut, width-column_cut))\n\n # Rotate the quadrants\n if isOdd:\n ul = rotate_flip(route=ul, b_size=ul_dim[0])\n ur = rotate_counter_clockwise(route=ur, b_size=ur_dim[0])\n bl = rotate_clockwise(route=bl, b_size=bl_dim[0])\n # br already has the corner hole in the correct position\n elif width == height and width >= 12 and width % 4 == 0:\n ur = rotate_clockwise(route=ur, b_size=bl_dim[0])\n br = rotate_flip(route=br, b_size=br_dim[0])\n bl = rotate_counter_clockwise(route=bl, b_size=bl_dim[0])\n\n\n # Merge the quadrants together.\n board = merge(route_to_board(ul, ul_dim),\n route_to_board(ur, ur_dim),\n route_to_board(bl, bl_dim),\n route_to_board(br, br_dim),\n isOdd)\n\n return board_to_route(board), (height, width)", "def initializeStates(n):\n states = []\n for i in range(n):\n states.append(0)\n return states", "def __init__(self, states_actions, rewards, discount=0.5, explore=0.0, learning_rate=0.5):\n self.discount = discount\n self.explore = explore\n self.learning_rate = learning_rate\n self.R = rewards.get if isinstance(rewards, dict) else rewards\n\n # previous (state, action)\n self.prev = (None, None)\n\n # initialize Q\n self.Q = {}\n for state, actions in states_actions.items():\n self.Q[state] = {a:0 for a in actions}", "def step4(self):\n\t\tif self.b[self.k - 
1] == 'a':\n\t\t\tif self.ends(\"al\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'c':\n\t\t\tif self.ends(\"ance\"): pass\n\t\t\telif self.ends(\"ence\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'e':\n\t\t\tif self.ends(\"er\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'i':\n\t\t\tif self.ends(\"ic\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'l':\n\t\t\tif self.ends(\"able\"): pass\n\t\t\telif self.ends(\"ible\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'n':\n\t\t\tif self.ends(\"ant\"): pass\n\t\t\telif self.ends(\"ement\"): pass\n\t\t\telif self.ends(\"ment\"): pass\n\t\t\telif self.ends(\"ent\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'o':\n\t\t\tif self.ends(\"ion\") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass\n\t\t\telif self.ends(\"ou\"): pass\n\t\t\t# takes care of -ous\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 's':\n\t\t\tif self.ends(\"ism\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 't':\n\t\t\tif self.ends(\"ate\"): pass\n\t\t\telif self.ends(\"iti\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'u':\n\t\t\tif self.ends(\"ous\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'v':\n\t\t\tif self.ends(\"ive\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'z':\n\t\t\tif self.ends(\"ize\"): pass\n\t\t\telse: return\n\t\telse:\n\t\t\treturn\n\t\tif self.m() > 1:\n\t\t\tself.k = self.j", "def state_rep (self):\n\n # Computing dealer_card\n dealer_card = self.dealer[0]\n\n # Compute player_max\n player_max = self.max_safe_sum()\n\n # State should not be bust\n assert (1 <= dealer_card <= 10)\n assert (0 <= player_max <= 31)\n\n # Compute table number\n possibilities = get_full_state (self.me)\n # possibilities = [p for p in possibilities if 0 <= p <= 31]\n\n table_no = 0\n for idx, p in enumerate(possibilities):\n if 0 <= p <= 31:\n table_no = idx\n assert 0 <= table_no <= 3\n\n # print (possibilities)\n return (table_no, dealer_card, player_max)", "def _reset_registers(self, num_state_qubits: int) -> None:\n raise NotImplementedError", "def test_get_qasm_all_gates(self):\n q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)\n qc = q_program.get_circuit()\n qr = q_program.get_quantum_register()\n cr = q_program.get_classical_register()\n qc.u1(0.3, qr[0])\n qc.u2(0.2, 0.1, qr[1])\n qc.u3(0.3, 0.2, 0.1, qr[2])\n qc.s(qr[1])\n qc.s(qr[2]).inverse()\n qc.cx(qr[1], qr[2])\n qc.barrier()\n qc.cx(qr[0], qr[1])\n qc.h(qr[0])\n qc.x(qr[2]).c_if(cr, 0)\n qc.y(qr[2]).c_if(cr, 1)\n qc.z(qr[2]).c_if(cr, 2)\n qc.barrier(qr)\n qc.measure(qr[0], cr[0])\n qc.measure(qr[1], cr[1])\n qc.measure(qr[2], cr[2])\n result = q_program.get_qasm()\n self.assertEqual(len(result), (len(qr.name) * 23 +\n len(cr.name) * 7 +\n 385))", "def test_swap_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_2q_clifford.swap_gate_circuits_deterministic(final_measure=True)\n targets = ref_2q_clifford.swap_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def generate(bits):\r\n raise NotImplementedError()" ]
[ "0.6086551", "0.60675687", "0.60443336", "0.5823746", "0.57302165", "0.5619075", "0.55097914", "0.54892236", "0.54565513", "0.5449651", "0.54224604", "0.54169273", "0.5399316", "0.5391886", "0.5355335", "0.53444993", "0.5337477", "0.5316138", "0.5313683", "0.5300682", "0.5290912", "0.5272027", "0.52718186", "0.52642906", "0.5233642", "0.52305734", "0.522616", "0.5223235", "0.52190703", "0.51943094", "0.5163474", "0.5157999", "0.5150731", "0.51491785", "0.51404834", "0.5135361", "0.5133144", "0.5132567", "0.5120456", "0.51196736", "0.51150167", "0.51120603", "0.51113135", "0.51106447", "0.5107107", "0.51043415", "0.5087348", "0.50845027", "0.508167", "0.50788546", "0.5078428", "0.5071722", "0.5065097", "0.5063235", "0.5035005", "0.50295085", "0.5019946", "0.5019428", "0.50174254", "0.5005748", "0.500076", "0.49983504", "0.49971792", "0.49922952", "0.499093", "0.4987036", "0.4986544", "0.4984474", "0.4971462", "0.49679756", "0.4967715", "0.49662855", "0.49661055", "0.4964172", "0.49641386", "0.49596843", "0.49581382", "0.49548367", "0.49493387", "0.493636", "0.49354357", "0.49342662", "0.49309066", "0.49280336", "0.49196237", "0.49165717", "0.49160776", "0.49104172", "0.49075118", "0.49062324", "0.49057698", "0.4902172", "0.49012956", "0.49006462", "0.48994452", "0.48976076", "0.4893795", "0.48926875", "0.4890785", "0.4886479", "0.48861644" ]
0.0
-1
Set the power of all motors in the system
def set_motors(self,power):
    for i in range(len(self.motors)):
        self.motors[i].power = power
        #print "Motor %i: %d" % (i, self.motors[i].power)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_power(self, power):\n print('Setting santec power to %.4f mW' % power)\n self.santec1.write(\"LP %.2f\" % power)\n self.santec2.write(\"LP %.2f\" % power)\n self.santec3.write(\"LP %.2f\" % power)\n self.santec4.write(\"LP %.2f\" % power)", "def set_powers(self, power_1, power_2):\n pass", "def get_setPower(self):\n self.read(\":POW?\")", "def set_power(self, value):\n self.write(\":POW {}W\".format(value))", "def set_power(self, dbm=-30):\r\n _debug('simq03b_api.set_power')\r\n \r\n self.write(\"POWer \"+str(dbm))", "def set_power(self, dbm=-30):\r\n self.write(\"SOURce1:POWer:POWer \"+str(dbm))", "def set_power(self, dbm=-30):\r\n self.write(\"SOURce1:POWer:POWer \"+str(dbm))", "def set_power(self, power):\n x = 0\n if power > 100:\n power = 100\n elif power < 0:\n power = 0\n if power != 0:\n while (self.__rpm < 100) and x < 3:\n time.sleep(1)\n x += 1\n if x > 3:\n print(\"Fan doesn't spinn!\")\n return\n self.__pow = power", "def set_power(self, dbm=-30):\r\n self.write(\"POW \"+str(dbm))", "def power_on(self):\n pass", "def power(self, power):\n\n self._power = power", "def power():\n request_command(tv_command=TVCommand.power)", "def set_power_state(self, node, power_state):", "def power_on(self):\n raise NotImplementedError", "def _set_power(self, power: any) -> None:\n\n self.set_power(power, inplace=True)", "def set_power(self, dbm=-30):\r\n self.p = dbm", "def power(self, power: int, matrix_power: bool = False) -> QuantumCircuit:\n raise NotImplementedError", "def set_power_management(value: int) -> None:", "def set_power(self, dbm=-30):\r\n return self._api.set_power(dbm)", "def power(self, value: int):\n self._power = value", "def setPower(self, power):\n if self.shooterLPID.enabled:\n self.shooterLPID.disable()\n if self.shooterRPID.enabled:\n self.shooterRPID.disable()\n\n self.tShooterL.set(power)\n self.tShooterR.set(power)", "async def power_on(self):\n ...", "def set_power_dbm(self, power=None):\n if power is None:\n power = self.def_power\n self.instr.write('L1 ' + str(power + ' DM'))\n time.sleep(self.sleep_time)", "def poweron(self):\n raise NotImplementedError()", "def set_power(self, power: bool):\r\n if not self.backlight:\r\n return\r\n\r\n self.backlight.power = power", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def setPowerFromDensity(self):\n self.p.power = self.p.powerDensity * self.getHMMass()", "def set_power(self, power, set=True):\n assert power in [self.POWER_UP, self.POWER_DOWN], \"Power must be POWER_UP or POWER_DOWN.\"\n self.power = power\n if set:\n self._set_function()", "def _set_power(self, value: str):\n if value == STATE_ON:\n self.state[1] = self.state[1][:2] + '1' + self.state[1][3:]\n\n if value == STATE_OFF:\n self.state[1] = self.state[1][:2] + '0' + self.state[1][3:]", "def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()", "def __init__(self, power):\r\n self.power = power", "def _number_dbm_changed(self, *a):\r\n self.api.set_power(self.number_dbm.get_value())", "def set_laser_power(self, power: int):\n self.logger.info(f'Setting laser power to {power}')\n power = int(power)\n if power <= 1:\n self.electronics.servo = 0\n else:\n self.electronics.servo = 1\n\n self.electronics.laser_power = power\n self.config['laser']['power'] = power", "def set_all_pwm(self, on, off):\n self.i2cBus.write_byte_data(self.address, ALL_LED_ON_L, on & 0xFF)\n self.i2cBus.write_byte_data(self.address, ALL_LED_ON_H, on >> 8)\n 
self.i2cBus.write_byte_data(self.address, ALL_LED_OFF_L, off & 0xFF)\n self.i2cBus.write_byte_data(self.address, ALL_LED_OFF_H, off >> 8)", "def poweron(self) -> None:\n self.servo_reset()", "def resetSim(self):\n self.powers = []", "def set_power(self, power: any, *, inplace: bool = False) -> PowerGate:\n\n # Make a copy if needed\n if inplace:\n _g = self\n else:\n _g = deepcopy(self)\n\n # Assign qubits\n _g.__power = 1 if power is None else power\n\n return _g", "def poweroff(self) -> None:\n pass", "def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )", "def Incrpower(self, increment):\n self.power += increment", "def power_list():", "def set_wheel_power(front_left, front_right, back_left, back_right):\n message = \"WHEELS:\" + str(front_left) + ',' + str(front_right) + ',' + str(back_left) + ',' \\\n + str(back_right) + '\\n';\n sock.sendall(message)\n return", "def test_api_ucs_power(self):\n # first power off all servers\n self.set_all_server_power_state(\"off\")\n # verify power state is down\n self.check_all_server_power_state(\"down\")\n # now power on the servers\n self.set_all_server_power_state(\"on\")\n # verify power state is up\n self.check_all_server_power_state(\"up\")", "def change_power(self, pnew):\n import math\n import time\n\n if 100 < abs(pnew):\n return\n\n STEP_SIZE = 25.0\n pcur = self.status()[self.STATUS_POWER]\n # delta < 0: slow down; 0 < delta: accelerate\n delta = pnew - pcur\n steps = math.ceil(abs(delta)/STEP_SIZE)\n\n if steps == 0:\n return\n\n inc = delta/float(steps)\n\n for _ in range(steps):\n pcur += inc\n self._bp.set_motor_power(self._port, pcur)\n time.sleep(0.25)\n self._bp.set_motor_power(self._port, pnew)", "def set_power(self, power):\n\n return self._service.exposed_set_power(power)", "def powerIP(self,power):\n np.power(self.t, power, out=self.t)\n return self", "def set_pwr_act(self, pwr):\n self.pwr_act = pwr[:]", "def poweroff(self):\n raise NotImplementedError()", "def stop():\n set_power(0)", "def set_power(power_W):\n if power_W > 0.120 : power_W = 0.120\n elif power_W < 0. 
: power_W = 0.\n if not (\"OK\" in cmd(\"cp\")):\n print(answer)\n shutdown()\n exit()\n if not (\"OK\" in cmd(\"p {:.4f}\".format(float(power_W)))):\n print(answer)\n shutdown()\n exit()", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)", "def turn_on(self) -> None:\n self._monoprice.set_power(self._zone_id, True)", "def configure_power_sweep(\n self, freq, start_power, stop_power, *, points=None, ifbw=None\n ):\n self.sweep.type = Sweep.POWER\n self.freq_cw = freq\n self.SOURce.POWer[1].STARt.w(\n start_power\n ) # The port number suffix on POWer is ignored by the instrument\n self.SOURce.POWer[1].STOP.w(stop_power)\n if points:\n self.sweep.points = points\n if ifbw:\n self.ifbw = ifbw", "def setPowerIfNecessary(self):\n if self.p.power == 0 and self.p.powerDensity > 0:\n self.setPowerFromDensity()", "def pow(self, power):\n daskD.wait(self.client.map(_call_pow, self.vecDask, power=power, pure=False))\n return self", "def power_up(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(5)\n self.light_led(6)", "def set_host_power(self, power):\n power = power.upper()\n if (power is not None) and (power not in POWER_STATE):\n msg = (\"Invalid input '%(pow)s'. \"\n \"The expected input is ON or OFF.\" %\n {'pow': power})\n raise exception.IloInvalidInputError(msg)\n\n # Check current power status, do not act if it's in requested state.\n cur_status = self.get_host_power_status()\n\n if cur_status == power:\n LOG.debug(self._(\"Node is already in '%(power)s' power state.\"),\n {'power': power})\n return\n\n self._perform_power_op(POWER_STATE[power])", "def reqSetPower(self, ID_list, s_l):\n while self.status != Modem.Status.IDLE :\n sleep(0.1)\n if self.status != Modem.Status.IDLE:\n raise ValueError(\"Modem setPower unexpected status: \\\n \" + str(self.status))\n self.status = Modem.Status.BUSY2REQ\n self.send(self.interpreter.buildSetPower(ID_list, s_l))\n while self.status != Modem.Status.IDLE and self.status != Modem.Status.KILL:\n sleep(self.m_to)\n # self.recvCommand()\n if self.status == Modem.Status.KILL:\n return self.close()\n return self.errorCheck()", "def set_PU(self, powerups):\n self._powerups=powerups", "def __pow__(self,*args):\r\n pass", "def reset(self, power=None):\n if power is None: power = not self.power_reset\n self.ser.write(bytearray([config(power=power)]))", "def s(self, s: ComType):\n self._pwr = s", "def _doPowerState(self, state=False):\n if state:\n self._cmdPowerOn()\n else:\n self._cmdPowerOff()", "def antennaIFpresetPower(ants=0, subarray=DEFAULT) :\n antlist = helpers.makeList(ants)\n multiSubarray('antennaIFpresetPower', subarray, antlist)", "def setvoltages(self):\n pass", "def reactive_power(self, params=None):\n if self.inv is None:\n raise der.DERError('DER not initialized')\n\n try:\n if params is not None:\n ena = params.get('Ena')\n if ena is not None:\n if ena is True:\n self.inv.volt_var.ModEna = 1\n else:\n self.inv.volt_var.ModEna = 0\n\n q = params.get('Q')\n if q is not None:\n self.inv.volt_var.ActCrv = 1 # use curve 1\n n_pt = int(self.inv.volt_var.NPt)\n from numpy import linspace\n v = linspace(90, 110, n_pt)\n q = [q]*n_pt\n # Meaning of dependent variable: 1=%WMax 2=%VArMax 3=%VArAval.\n curve_params = {'DeptRef': 2, 'RmpTms': 0, 'RmpDecTmm': 0, 'RmpIncTmm': 
0,\n 'v': v, 'var': q}\n if params.get('RmpTms') is not None:\n curve_params['RmpTms'] = params.get('RmpTms')\n if params.get('RmpTms') is not None:\n curve_params['RmpDecTmm'] = params.get('RmpTms')\n curve_params['RmpIncTmm'] = params.get('RmpTms')\n self.volt_var_curve(id=self.inv.volt_var.ActCrv, params=curve_params)\n\n win_tms = params.get('WinTms')\n if win_tms is not None:\n self.inv.volt_var.WinTms = win_tms\n rmp_tms = params.get('RmpTms')\n if rmp_tms is not None:\n self.inv.volt_var.RmpTms = rmp_tms\n rvrt_tms = params.get('RvrtTms')\n if rvrt_tms is not None:\n self.inv.volt_var.RvrtTms = rvrt_tms\n\n self.inv.volt_var.write()\n\n else:\n params = {}\n self.inv.volt_var.read()\n if self.inv.volt_var.ModEna == 0:\n params['Ena'] = False\n else:\n params['Ena'] = True\n params['WinTms'] = self.inv.volt_var.WinTms\n params['RmpTms'] = self.inv.volt_var.RmpTms\n params['RvrtTms'] = self.inv.volt_var.RvrtTms\n if self.inv.volt_var.ActCrv != 0:\n params['curve'] = self.volt_var_curve(id=self.inv.volt_var.ActCrv)\n params['Q'] = self.inv.volt_var_curve.var[0]\n\n except Exception, e:\n raise der.DERError(str(e))\n\n return params", "def setSpeedEngines(leftSpeed: int, rightSpeed: int):\n pass", "def parallelControl(state, powerControl):\n return kickAt(state, state.ball_pos + state.attacking_vector, powerControl)", "def turnOffMotors(self) -> None:\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "def i2c_set_gain_all_sensors(pi, i2c_multiplexer_handle, i2c_sensor_handle, channel_numbers, i2c_sensor_gain):\n for channel_number in channel_numbers:\n i2c_multiplexer_select_channel(pi,\n i2c_multiplexer_handle, channel_number)\n i2c_sensor_handle.ambient_light_gain = i2c_sensor_gain", "def set_pwm(self, channel, on, off):\n self.i2cBus.write_byte_data(self.address, LED0_ON_L + 4 * channel, on & 0xFF)\n self.i2cBus.write_byte_data(self.address, LED0_ON_H + 4 * channel, on >> 8)\n self.i2cBus.write_byte_data(self.address, LED0_OFF_L + 4 * channel, int(off) & 0xFF)\n self.i2cBus.write_byte_data(self.address, LED0_OFF_H + 4 * channel, int(off) >> 8)", "def mv_all(self):\n # def mv_step(self):\n self.device_reg_data &= ~(0x1 << 2)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def kill_all(self):\n self.settings['lights_on'] = 12\n self.settings['lights_off'] = 12\n self.settings['overhead_level'] = 0\n self.settings['soil_1'] = 0\n self.settings['soil_2'] = 0\n self.settings['soil_3'] = 0\n self.settings['soil_4'] = 0\n self.scale_overhead_level.set(self.settings['overhead_level'])\n self.scale_smc1.set(self.settings['soil_1'])\n self.scale_smc2.set(self.settings['soil_2'])\n self.scale_smc3.set(self.settings['soil_3'])\n self.scale_smc4.set(self.settings['soil_4'])\n self.active_changes = True # (flag) Once changes are retrieved, we assume that they will be sent to the controller", "def init_servos():\n for i in range(0, 7):\n kit.servo[i].actuation_range = 180\n kit.servo[i].set_pulse_width_range(450, 2550)", "def power(self, on_off, bulb_index=-1):\n\n # TODO: Throw an error if value not in range\n\n on_off = int(on_off)\n if bulb_index == -1:\n for bulb in 
self.bulbs:\n if on_off == PowerStates.ON:\n bulb.turn_on(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.OFF:\n bulb.turn_off(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.SWITCH:\n bulb.toggle(effect=\"smooth\", duration=1000)\n elif bulb_index <= len(self.bulbs):\n if on_off == PowerStates.ON:\n self.bulbs[bulb_index].turn_on(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.OFF:\n self.bulbs[bulb_index].turn_off(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.SWITCH:\n self.bulbs[bulb_index].toggle(effect=\"smooth\", duration=1000)", "def set_speed(self, speed, ports='ABCD'):\n\n speed += self.avg_speed\n if self.inverted:\n speed = -speed\n\n if speed > self.margin:\n speed = self.margin\n elif speed < -self.margin:\n speed = self.margin\n\n for p in ports:\n if self.motors[p].connected:\n self.motors[p].run_forever(speed_sp=speed, speed_regulation=True)\n else:\n print(\"Cant run motor on\", p, \"- not connected\")", "def get_power(self):\n #GPIO.setmode(GPIO.BOARD)\n #GPIO.setup(self.input_pin, GPIO.IN)\n return 0", "def power_pumps(self):\n return self._link_reg.power_pumps", "def enable_relays(self):\n #ensure clock and data are low\n self.e.clear_bit(7)\n self.e.clear_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)", "def setAllZero(self):\n self.robot.set_joint([0,0,0,0,0])\n self.robot.save_config()", "def set_powerobject(self, boolean):\n if boolean == True:\n self.powerobject = 'P'", "def power(self):\n return self._power", "def togglepow(self,channel):\n if self.rf is not None:\n newstatus = bool(self.pow[channel-1].get())\n finalstatus = self.rf.setrempow(channel-1,newstatus)\n self.messages.log('Remote power channel %d set to '%(channel)+str(finalstatus))\n else:\n self.messages.log('Not connected to a focuser.')", "def setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.Motor_A_EN, GPIO.OUT)\n GPIO.setup(self.Motor_B_EN, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin2, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin2, GPIO.OUT)\n self.motorStop() # Avoids automatic motor rotation after initialization\n try: # Try is used here to avoid errors due to repeated setting of PWM\n self.pwm_A = GPIO.PWM(self.Motor_A_EN, 1000)\n self.pwm_B = GPIO.PWM(self.Motor_B_EN, 1000)\n except:\n pass", "def power_off(self):\n raise NotImplementedError", "def power(self):\r\n return self.model * self.percent / 100", "def set_all_pwm(self, fans, pct):\n Logger.debug(\"Set all pwm to %d\" % (pct))\n for key, _value in list(fans.items()):\n self.set_pwm(fans[key], pct)", "def stop(self):\n self.change_power(0)", "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)", "def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def power(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"power\")", "def simulate_power(self):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.norm_distribution()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n power = (1 - beta) if self.p_treatment > 
self.p_control else beta\n self.power = power\n\n return power", "def power_on(self):\n return self.inst.write(':OUTP ON')", "def calibrate_power_ADC(self):\n self.send_packet('\\x61')", "def power_off(fast: bool = True, restart: bool = False) -> None:", "def __pow__(self, power: Union[float, Simpy]) -> Simpy:\n result: list[float] = []\n if isinstance(power, float):\n for item in self.values:\n result.append(item ** power)\n else:\n assert len(self.values) == len(power.values)\n for i in range(len(self.values)):\n result.append(self.values[i] ** power.values[i])\n return Simpy(result)", "def turn_on(self):\n self._remote.power(1)" ]
[ "0.7091687", "0.70202076", "0.6936355", "0.68218803", "0.6769513", "0.6765122", "0.6765122", "0.67307574", "0.67279464", "0.6608434", "0.6575763", "0.65288085", "0.65074164", "0.6494266", "0.64406693", "0.64244634", "0.6414524", "0.6405676", "0.6385484", "0.63689655", "0.63555163", "0.63368106", "0.63335365", "0.63291806", "0.6235931", "0.6225724", "0.6185047", "0.61785597", "0.6139087", "0.6116311", "0.61111224", "0.61056507", "0.61046493", "0.60690516", "0.6057014", "0.6046274", "0.6032458", "0.6009308", "0.5989357", "0.5952055", "0.5925129", "0.59090114", "0.58727515", "0.5854415", "0.5849432", "0.58330876", "0.58181393", "0.5794665", "0.57800204", "0.57570904", "0.5754251", "0.5726951", "0.5720865", "0.5692311", "0.568502", "0.56577146", "0.5650763", "0.5648652", "0.56416744", "0.5636101", "0.56294143", "0.5613872", "0.5612088", "0.5609576", "0.5596134", "0.5567022", "0.55585414", "0.55521816", "0.55495113", "0.55433345", "0.5534809", "0.5529479", "0.5520063", "0.5512699", "0.55126077", "0.5509371", "0.5497767", "0.54927766", "0.5491111", "0.54743576", "0.547358", "0.5472278", "0.54600143", "0.54527825", "0.5451578", "0.54504794", "0.5449496", "0.543017", "0.542851", "0.541928", "0.54112214", "0.53867245", "0.5383809", "0.53830975", "0.5379854", "0.53795713", "0.5376046", "0.5366785", "0.5363727", "0.53608626" ]
0.7770252
0
Updates the thrust, climb rate and height of the QuadCopter model.
def update_all(self,delta_t):
    self.update_thrust()
    self.update_climb_rate()
    self.update_height(delta_t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_vehicle_state(self):\n #vel = self.v + self.commands['throttle']/self.m/self.simulation_rate\n\n vel = self.commands['speed']\n steer = self.commands['steering_angle']\n\n if steer > 0.5:\n steer_cmd = 25\n elif steer < -0.5:\n steer_cmd = 185\n else:\n steer_cmd = 100 - 160*steer ##linear\n #steer_cmd = 100 - 640*steer**3 ##cubic\n\n #rospy.logwarn('Velocity command is '+ str(vel))\n # 130 is the lowest vel_cmd that makes the truck move.\n if vel > 12:\n vel_cmd = 161\n elif vel < 0:\n vel_cmd = 0\n else:\n vel_cmd = 3.77*vel + 117\n # rospy.logerr('throttle: ' + str(throttle))\n hw_port.set_command(vel_cmd,steer_cmd,self.vehicle_id)", "def __init__(self,b,u,v,hbls_old,hbbl_old,Kv_old,Kt_old,srflx,sustr,svstr,f,grid_dict,tstep_mode,dt):\n \n # INPUTS FROM TTTW SYSTEM\n self.b = b #buoyancy field: [Ly,N]\n self.u = u # x-component of velocity [Ly,N]\n self.v = v # y-component of velocity [Ly+1,N]\n self.hbls_old = hbls_old #boundary layer depth from previous time step [Ly]\n self.hbbl_old = hbbl_old # bottom boundary layer depth from previous time step [Ly]\n self.Kv_old = Kv_old # momentum mixing coefficeint from previous time step [Ly,N+1]\n self.Kt_old = Kt_old # tracer mixing coefficient from previous time step [Ly,N+1]\n self.srflx = srflx #solar heat flux [Ly] (degC * (m/s))\n self.sustr = sustr # x-component surface wind stress [Ly] (N/m^2) \n self.svstr = svstr # y-component surface wind stress [Ly+1] (N/m^2)\n self.grid_dict = grid_dict #gridded data\n self.f = f #coriolis parameter\n # KPP-SPECIFIC VARIABLES \n self.hbls = np.zeros([self.b.shape[0]])\n self.hbbl = np.zeros([self.b.shape[0]])\n self.ustar = []\n self.bvf = [] \n self.kmo = []\n self.C_h_MO = []\n self.kbl = []\n self.Cr = [] \n self.Fc = []\n self.ghat = [] #NONLOCAL TERM: TO BE USED IN TIME STEPPING\n self.tstep_mode = tstep_mode# if in time steppign mode, turn on HBL_RATE_LIMIT\n self.dt = dt", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q #calling constructor\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n\n Q_Value[(state,action)] = ((1-learning_rate) * temporary_QValue) + (learning_rate * (reward + discount_factor * nextState_QValue)) #for formula go to README_Reinforcement.txt at line 8\n\n #util.raiseNotDefined()", "def update(self, x_train_single, updated_h):\n # x_row = cp.array(x_train_single.toarray())\n # cp.cuda.Stream.null.synchronize()\n updater(x_train_single,updated_h,self.weights,self.num_features,self.num_models,self.learning_rate)\n # self.biases += updated_h * self.learning_rate", "def update(self, t=1):\n\n\t\t# Update acceleration based on available fuel & throttle\n\t\tthrottle = self.throttle if self.fuel >= 1 else self.throttle * self.fuel\n\t\tself.acceleration = self.thrust * throttle / self.total_mass - GRAVITY\n\n\t\tself.velocity += self.acceleration / t\n\t\tself.altitude += self.velocity / t\n\t\t\n\t\t# Collide with the ground\n\t\tif self.altitude <= 0:\n\t\t\tself.reset_motion()\n\n\t\t# Update remaining fuel\n\t\tif self.fuel * t >= self.throttle:\n\t\t\tself.fuel -= self.throttle * self.fuel_consumption / t\n\t\t\tif self.fuel < 0:\n\t\t\t\tself.fuel = 0", "def update(self):\n # Update the weight matrix: \n self.W -= 
self.lr * self.grad_W \n \n # Update the bias matrices:\n self.b -= self.lr * np.array(self.grad_b) \n self.c -= self.lr * np.array(self.grad_c)", "def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n features = self.featExtractor.getFeatures(state,action)\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state,action pair\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n weight = self.weights\n\n Q_Value = 0\n\n difference = (reward + discount_factor * nextState_QValue ) - (temporary_QValue) #refer to README_Reinforcement.txt for the formula\n\n for each_feature in features:\n\n #refer to README_Reinforcement.txt for the formula at line 20\n weight[each_feature] = weight[each_feature] + learning_rate * difference * features[each_feature]\n\n #util.raiseNotDefined()", "def update (self):\n\t\tidx = self.idx\n\t\tC = self.C[idx]\t\t# choice\n\t\tPE = self.PE[idx]\t# choice PE\n\t\talpha = self.alpha\t# learning rate\n\n\t\t# don't need to update anything for UCB\n\t\tif self.UCB_samplemean:\n\t\t\treturn\n\n\t\tif not self.gamble:\n\t\t\t# carry over values for the unselected options\n\t\t\tself.Q[idx+1,:] = self.Q[idx,:]\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[1]*PE\n\n\t\telse:\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\t# PE = 0 if gamble isn't chosen\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[1]*PE", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def update_H(self):", "def update(self) -> None:\n self.data.update()\n self._state = round(self.data.rate[\"rates\"][self._target], 3)", "def run_optimization(self):\n # Get batch\n (obs, action, old_logp, old_value, return_, advantage) = self.buffer.eject()\n\n # Train pi\n print(\"-\" * 20 + \"\\nPi Update\" + \"\\n\" + \"-\" * 20)\n (policy_loss, entropy,\n kl_divergence, clipping_fraction, steps) = self.update_actor(obs, action, old_logp, advantage)\n\n # Train value function\n print(\"-\" * 20 + \"\\nValue Function Update\" + \"\\n\" + \"-\" * 20)\n (value_loss,\n explained_variance) = self.update_critic(obs, old_value, return_)\n\n # Logging\n self.update_counter += 1\n self.log_update(policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps)\n\n # Update learning rate\n self.decay_lr()\n\n # Save current weights (overwrites previous weights)\n 
self.save_weights()\n\n # Empty scenario counter\n self.scenario_counter = dict.fromkeys(self.scenario_counter, 0)", "def update(self, sim, dt):\n #growth kinetics\n self.division_timer += dt\n #you can grow unless you are in the A state meaning apoptosis\n if(self.division_timer >= self.division_time and self._division):\n #now you can divide\n if(self.state == \"T1\"):\n #change the current sytate to D\n self.state = \"NSC\"\n self._division = False\n self.division_time = 36\n #progenitor time is faster with concentration factor\n\n #add the concentration\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n self.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## norm_mn = float(mn_count) / float(tot)\n## if(norm_mn < self._p2):\n## self.division_time = 36*(norm_mn) # in hours\n## self.division_time = max(self.division_time, 1) \n## else:\n## \n## print(norm_mn, self.division_time)\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n if(self.state == \"T2\"):\n #change the current sytate to D\n self.state = \"MN\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n self.set_gradient_source_sink_coeff(\"EGF\", 50.0*source, 1.0*consump_rate)\n if(self.state == \"T3\"):\n #change the current sytate to D\n self.state = \"G\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n #get the location\n #pick a random point on a sphere\n location = RandomPointOnSphere()*self.radius/2.0 + self.location\n #get the radius\n radius = self.radius\n #get the ID\n ID = sim.get_ID()\n #make the object\n sc = NueronalStemCell(location, radius, ID, self.state,\n division_time = self.division_time,\n params = [self._p1, self._p2,\n self._p3, self._p4, self._p5,\n self._p6, self.p7])\n #copy secretion to NSC progeny\n if(self.state == \"NSC\"):\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n sc.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n sc._division = False\n #set its soluble count\n## sc.sol_count = self.sol_count / 2.\n## self.sol_count = self.sol_count / 2.\n #copy over all of the coefficients to the new cells\n## prod_cons = self.get_gradient_source_sink_coeff(\"O2\")\n## sc.set_gradient_source_sink_coeff(\"O2\", prod_cons[0], prod_cons[1])\n prod_cons = self.get_gradient_source_sink_coeff(\"EGF\")\n sc.set_gradient_source_sink_coeff(\"EGF\", prod_cons[0], prod_cons[1]) \n #add it to the imsulation\n sim.add_object_to_addition_queue(sc)\n #reset the division time\n self.division_timer = 0\n \n if(self.state == \"U\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x = rand.random()\n prob = self._p1 #probability of turning into a NSC\n #longer before the differentiation starts\n if(x < prob):\n #differentiation occurs\n self.state = \"T1\"\n #also add a proabability to differentiate directly to a mn\n n1 = self._p4\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = 
len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n #get the value fo the gradient and make differntiation inversly\n #inversly correlated with the proportion present\n norm_mn = self.get_gradient_value(\"EGF\")\n #probability of turning into a motor nueron\n n1 = self._p4\n## #normalize the result\n## if(tot != 0):\n## norm_mn = float(mn_count) / float(tot)\n## else:\n## norm_mn = 0\n #calculate the probability\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p2**n1 + norm_mn**n1)\n x1 = rand.random()\n if(x1 <= self._p1*prob_MN):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n \n if(self.state == \"NSC\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x1 = rand.random()\n x2 = rand.random()\n #Find all the motor nuerons\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## #normalize the result\n## norm_mn = float(mn_count) / float(tot)\n #Make differerntiationd ependant on the gradient value\n norm_mn = self.get_gradient_value(\"EGF\")\n #set the paramaters\n n1 = self._p4\n #update the division time\n## self.division_time = norm_mn * 38 #in hours takes care of the feedback\n #depends on other motor nuerons\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p3**n1 + norm_mn**n1) #probability of turning into a motor nueron\n## prob_G = (1.*norm_mn**n2)/(self._p3**n1 + norm_mn**n2) #of turning into a glial cell\n prob_G = self._p5\n #longer before the differentiation starts\n if(x1 <= prob_MN and x2 > prob_G):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n if(x1 > prob_MN and x2 <= prob_G):\n #differentiation occurs towards a glial cell\n self.state = \"T3\"\n #check to see if division enabled\n if(self._division == False):\n #check for mitotic speed up\n a = self._p6\n b = self._p7\n norm_nsc = self.get_gradient_value(\"TNF\")\n prob_divide = (1.*norm_nsc**b)/(a**b + norm_nsc**b)\n r = rand.random()\n if(r <= x):\n self._division = True", "def handle_set_speed_kph(self, req):\n self.cruising_speed = req.speed * (5. 
/ self.traffic_level) / 3.6\n msg = \"Speed of vehicle #%i successfully set.\" % self.vehicle_id\n return srvs.SetSpeedResponse(True, msg)", "def _update_model(self, new_model):\n super()._update_model(new_model)\n\n if 'e' in self.tr_params:\n if self.state_no_train_de is None:\n for i in range(self.n_emissions - self.nr_no_train_de):\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n for i in range(self.n_d_emissions):\n if i < self.n_d_emissions - self.nr_no_train_de:\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n self.B[i][: -self.state_no_train_de, :] = (\n (1 - self.learning_rate)\n * new_model['B'][i][: -self.state_no_train_de, :]\n + self.learning_rate *\n self.B[i][: -self.state_no_train_de, :]\n )\n\n for i in range(self.n_emissions):\n normalise(new_model['B'][i], axis=1)", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def update(self, state_value, current_time):\r\n\r\n\t\t# Calculate Error - if SetPoint > 0.0, then normalize error with respect to setpoint\r\n\t\tif self.SetPoint==0.0:\r\n\t\t\terror = state_value - self.SetPoint\r\n\t\telse:\r\n\t\t\terror = (state_value - self.SetPoint)/self.SetPoint \r\n\t\t\r\n\t\tself.current_time = current_time/1000.0 \t\t# Converting from msec to sec\r\n\t\tdelta_time = self.Ts\r\n\t\tdelta_error = error - self.last_error\r\n\r\n\t\tself.ITerm += error * delta_time\r\n\t\t\r\n\t\tself.DTerm = 0.0\r\n\t\tif delta_time > 0:\r\n\t\t\tself.DTerm = delta_error / delta_time\r\n\r\n\t\t# Remember last time and last error for next calculation\r\n\t\tself.last_time = self.current_time\r\n\t\tself.last_error = error\r\n\t\t\r\n\t\t# Calculate u(t) - catch potential division by zero error\r\n\t\ttry:\r\n\t\t\tu = self.Kp * (error + ((1.0/self.Ti) * self.ITerm) + (self.Td * self.DTerm))\r\n\t\texcept ZeroDivisionError:\r\n\t\t\tu = self.Kp * (error + (0.0 * self.ITerm) + (self.Td * self.DTerm))\r\n\t\t\t\t\r\n\t\t# Bound the controller output if necessary (between MinValue - MaxValue) \r\n\t\tif u > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the integral error\r\n\t\telif u < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the 
integral error\r\n\t\telse:\r\n\t\t\tself.OutputValue = u\r\n\t\t\r\n\t\t# Update the last output value\r\n\t\tself.last_OutputValue = self.OutputValue\r\n\t\t\r\n\t\t# Record state, error, y(t), and sample time values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t# Convert from msec to sec\r\n\t\t\r\n\t\t# Return controller output\r\n\t\treturn self.OutputValue", "def _init_critic_update(self):\n Q_target = tf.stop_gradient(self._get_Q_target())\n\n assert Q_target.shape.as_list() == [None, 1]\n\n Q_values = self._Q_values = tuple(\n Q([self._observations_ph, self._actions_ph])\n for Q in self._Qs)\n\n Q_losses = self._Q_losses = tuple(\n tf.losses.mean_squared_error(\n labels=Q_target, predictions=Q_value, weights=0.5)\n for Q_value in Q_values)\n\n self._Q_optimizers = tuple(\n tf.train.AdamOptimizer(\n learning_rate=self._Q_lr,\n name='{}_{}_optimizer'.format(Q._name, i)\n ) for i, Q in enumerate(self._Qs))\n Q_training_ops = tuple(\n tf.contrib.layers.optimize_loss(\n Q_loss,\n self.global_step,\n learning_rate=self._Q_lr,\n optimizer=Q_optimizer,\n variables=Q.trainable_variables,\n increment_global_step=False,\n summaries=((\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\n ) if self._tf_summaries else ()))\n for i, (Q, Q_loss, Q_optimizer)\n in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))\n\n self._training_ops.update({'Q': tf.group(Q_training_ops)})", "def handle_set_speed_kph(self, req):\n self.cruising_speed += req.speed\n msg = \"Speed of vehicle #%i successfully set.\" % self.vehicle_id\n return srvs.SetSpeedResponse(True, msg)", "def update(self):\n\n SolidSolver.update(self)\n\n self.__nextStep()", "def update_H(self):\n gamma = self.get_gamma()\n delta = self.get_delta()\n summand2 = ((1 + (gamma.transpose().dot(self.H).dot(gamma) /\n delta.transpose().dot(gamma))) *\n delta.dot(delta.transpose()) / delta.transpose().dot(gamma)\n )\n summand3 = - ((delta.dot(gamma.transpose()).dot(self.H) +\n self.H.dot(gamma).dot(delta.transpose())) /\n delta.transpose().dot(gamma))\n self.H = self.H + summand2 + summand3", "def update_speed_weights_step(self):\n \n weights_list = [self.W_speed_east, self.W_speed_west,self.W_speed_north,self.W_speed_south]\n speed_input_list = [self.speed_inputs_east,self.speed_inputs_west,\n self.speed_inputs_north,self.speed_inputs_south]\n \n if self.use_eight_directions is True:\n weights_list+=[self.W_speed_north_east,\n self.W_speed_north_west,self.W_speed_south_east,self.W_speed_south_west]\n \n speed_input_list+=[self.speed_inputs_north_east,self.speed_inputs_north_west, \n self.speed_inputs_south_east,self.speed_inputs_south_west]\n\n \n for weights,speed_input in zip(weights_list,speed_input_list):\n \n \n weight_update=speed_input*(self.rr[:self.N_e]-self.input_mean)*(self.rr_e_trace.T-self.input_mean)\n weights+=self.learn_rate_speed_weights*weight_update\n\n\n # normalize to fixed mean of incoming and outgoing weights\n weights-=(weights.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n weights-=(weights.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n np.clip(weights,0,self.W_max_e,out=weights)", "def _scalar_update(self, d_t, **kwargs):\n for key, val in kwargs.items():\n if isinstance(val, GPUArray):\n kwargs[key] = val.get()\n self.solver(d_t, **kwargs)\n self.post()", "def updateQTable( self, reward, current_state ):", "def update(self, state, 
action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def set_height(self,c, h):\r\n self.h = h\r\n self.T1 = [[-self.R * np.sqrt(3) / (2*self.h), self.R / (2*self.h), 1],[0,-self.R/(self.h),1],[self.R * np.sqrt(3) / (2*self.h), self.R / (2*self.h), 1]]\r\n return self.h", "def update(self, state, action, nextState, reward):\n # print \"Update\"\n difference = (reward + self.discount*self.compValFromState(nextState)) - self.getQValue(state, action)\n features = self.featExtractor.getFeatures(state, self.index)\n #print \"features\", features, \"difference\", difference, \"weights\", self.weights\n for key in self.weights:\n self.weights[key] = self.alpha * difference * features[key]", "def update(self, temperature, humidity, co2, voc, voltage,\n baseline=False, fullupdate=False):\n self._add_borders()\n self._add_temperature(temperature)\n self._add_humidity(humidity)\n self._add_co2(co2)\n self._add_voc(voc)\n self._add_voltage(voltage)\n self._add_baseline_indicator(baseline)\n self._update_screen(fullupdate)", "def update(self, s, a, r, t, s_prime):\n # Q-learning update:\n #Q(s,a) = Q(s,a) + \\alpha * [r + \\gamma * \\max_a Q(s',a) - Q(s,a)]\n\n #raise NotImplementedError(\"Implement Q-learning update\")\n\n # TODO Q-learning update\n # Two parts:\n target = 0\n if t == True:\n target = r\n else:\n # 1) Compute the target value reward + DISCOUNT * \\max_a Q(s', a)\n target = r + DISCOUNT_FACTOR * np.max(self.q[s_prime])\n # 2) Update Q-values with Q(s, a) += LEARNING_RATE * (target - Q(s, a))\n self.q[s,a] += LEARNING_RATE * (target - self.q[s, a])\n\n # 1) Compute target\n # Note: If s_prime (s') is a terminal state (t), then target is only \"target = reward\"\n # (You will need an if-else struct)\n \n\n # 2) Update Q-values", "def model_state_update(model, time_t, state_controller, input_f16):\n pass", "def update_H(self):\n self.grid.H[self.loc] -= (\n self.grid.courant_number\n * self.grid.inverse_permeability[self.loc]\n * self.phi_H\n )", "def update_total_speed_input_step(self,curr_v):\n \n tot_speed_input_east=np.dot(self.W_speed_east,self.speed_inputs_east)/self.N_e\n tot_speed_input_west=np.dot(self.W_speed_west,self.speed_inputs_west)/self.N_e\n tot_speed_input_north=np.dot(self.W_speed_north,self.speed_inputs_north)/self.N_e\n tot_speed_input_south=np.dot(self.W_speed_south,self.speed_inputs_south)/self.N_e\n\n self.tot_speed_input_all_padded[:self.N_e,0]=\\\n tot_speed_input_east+tot_speed_input_west+\\\n tot_speed_input_north+tot_speed_input_south\n \n if self.use_eight_directions is True:\n tot_speed_input_north_east=np.dot(self.W_speed_north_east,\n self.speed_inputs_north_east)/self.N_e\n tot_speed_input_north_west=np.dot(self.W_speed_north_west,\n self.speed_inputs_north_west)/self.N_e\n 
tot_speed_input_south_east=np.dot(self.W_speed_south_east,\n self.speed_inputs_south_east)/self.N_e\n tot_speed_input_south_west=np.dot(self.W_speed_south_west,\n self.speed_inputs_south_west)/self.N_e\n \n self.tot_speed_input_all_padded[:self.N_e,0]+=\\\n tot_speed_input_north_east+tot_speed_input_north_west+\\\n tot_speed_input_south_east+tot_speed_input_south_west\n \n else:\n \n # diagonal move with four directions\n if abs(curr_v[0])>0 and abs(curr_v[1])>0:\n self.tot_speed_input_all_padded[:self.N_e,0]*=.5", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )", "def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )", "def update(self):\n self.setVector(0.15, 0.0)", "def update(self, current_time, Tb=24., Tr=0., **kwds):\n P_ = self._cell_values['rainfall__daily_depth']\n self._PET = \\\n self._cell_values['surface__potential_evapotranspiration_rate']\n self._SO = \\\n self._cell_values['soil_moisture__initial_saturation_fraction']\n self._vegcover = self._cell_values['vegetation__cover_fraction']\n self._water_stress = self._cell_values['vegetation__water_stress']\n self._S = self._cell_values['soil_moisture__saturation_fraction']\n self._D = self._cell_values['soil_moisture__root_zone_leakage']\n self._ETA = self._cell_values['surface__evapotranspiration']\n self._fr = (self._cell_values['vegetation__live_leaf_area_index'] /\n self._LAIR_max)\n self._runoff = self._cell_values['surface__runoff']\n # LAIl = self._cell_values['vegetation__live_leaf_area_index']\n # LAIt = LAIl+self._cell_values['DeadLeafAreaIndex']\n # if LAIt.all() == 0.:\n # self._fr = np.zeros(self.grid.number_of_cells)\n # else:\n # self._fr = (self._vegcover[0]*LAIl/LAIt)\n self._fr[self._fr > 1.] = 1.\n self._Sini = np.zeros(self._SO.shape)\n self._ETmax = np.zeros(self._SO.shape)\n\n for cell in range(0, self.grid.number_of_cells):\n P = P_[cell]\n # print cell\n s = self._SO[cell]\n fbare = self._fbare\n ZR = self._zr[cell]\n pc = self._soil_pc[cell]\n fc = self._soil_fc[cell]\n scc = self._soil_sc[cell]\n wp = self._soil_wp[cell]\n hgw = self._soil_hgw[cell]\n beta = self._soil_beta[cell]\n if self._vegtype[cell] == 0: # 0 - GRASS\n sc = scc*self._fr[cell]+(1-self._fr[cell])*fc\n else:\n sc = scc\n\n Inf_cap = (self._soil_Ib[cell]*(1-self._vegcover[cell]) +\n self._soil_Iv[cell]*self._vegcover[cell])\n # Infiltration capacity\n Int_cap = min(self._vegcover[cell]*self._interception_cap[cell],\n P)\n # Interception capacity\n Peff = max(P-Int_cap, 0.) 
# Effective precipitation depth\n mu = (Inf_cap/1000.0)/(pc*ZR*(np.exp(beta*(1.-fc))-1.))\n Ep = max((self._PET[cell]*self._fr[cell] +\n fbare*self._PET[cell]*(1.-self._fr[cell])) -\n Int_cap, 0.0001) # mm/d\n self._ETmax[cell] = Ep\n nu = ((Ep / 24.) / 1000.) / (pc*ZR) # Loss function parameter\n nuw = ((self._soil_Ew/24.)/1000.)/(pc*ZR)\n # Loss function parameter\n sini = self._SO[cell] + ((Peff+self._runon)/(pc*ZR*1000.))\n\n if sini > 1.:\n self._runoff[cell] = (sini-1.)*pc*ZR*1000.\n # print 'Runoff =', self._runoff\n sini = 1.\n else:\n self._runoff[cell] = 0.\n\n if sini >= fc:\n tfc = (1./(beta*(mu-nu)))*(beta*(fc-sini) + np.log((\n nu-mu+mu*np.exp(beta*(sini-fc)))/nu))\n tsc = ((fc-sc)/nu)+tfc\n twp = ((sc-wp)/(nu-nuw))*np.log(nu/nuw)+tsc\n\n if Tb < tfc:\n s = abs(sini-(1./beta)*np.log(((nu-mu+mu *\n np.exp(beta*(sini-fc)))*np.exp(beta*(nu-mu)*Tb) -\n mu*np.exp(beta*(sini-fc)))/(nu-mu)))\n\n self._D[cell] = ((pc*ZR*1000.)*(sini-s))-(Tb*(Ep/24.))\n self._ETA[cell] = (Tb*(Ep/24.))\n\n elif Tb >= tfc and Tb < tsc:\n s = fc-(nu*(Tb-tfc))\n self._D[cell] = ((pc*ZR*1000.)*(sini-fc))-((tfc)*(Ep/24.))\n self._ETA[cell] = (Tb*(Ep/24.))\n\n elif Tb >= tsc and Tb < twp:\n s = (wp+(sc-wp)*((nu/(nu-nuw))*np.exp((-1)*((nu-nuw) /\n (sc-wp))*(Tb-tsc))-(nuw/(nu-nuw))))\n self._D[cell] = ((pc*ZR*1000.)*(sini-fc))-(tfc*Ep/24.)\n self._ETA[cell] = (1000.*ZR*pc*(sini-s))-self._D[cell]\n\n else:\n s = (hgw+(wp-hgw)*np.exp((-1)*(nuw/(wp-hgw)) *\n max(Tb-twp, 0.)))\n self._D[cell] = ((pc*ZR*1000.)*(sini-fc))-(tfc*Ep/24.)\n self._ETA[cell] = (1000.*ZR*pc*(sini-s))-self._D[cell]\n\n elif sini < fc and sini >= sc:\n tfc = 0.\n tsc = (sini-sc)/nu\n twp = ((sc-wp)/(nu-nuw))*np.log(nu/nuw)+tsc\n\n if Tb < tsc:\n s = sini - nu*Tb\n self._D[cell] = 0.\n self._ETA[cell] = 1000.*ZR*pc*(sini-s)\n\n elif Tb >= tsc and Tb < twp:\n s = (wp+(sc-wp)*((nu/(nu-nuw))*np.exp((-1) *\n ((nu-nuw)/(sc-wp))*(Tb-tsc))-(nuw/(nu-nuw))))\n self._D[cell] = 0\n self._ETA[cell] = (1000.*ZR*pc*(sini-s))\n\n else:\n s = hgw+(wp-hgw)*np.exp((-1)*(nuw/(wp-hgw))*(Tb-twp))\n self._D[cell] = 0.\n self._ETA[cell] = (1000.*ZR*pc*(sini-s))\n\n elif sini < sc and sini >= wp:\n tfc = 0\n tsc = 0\n twp = (((sc-wp)/(nu-nuw))*np.log(1+(nu-nuw)*(sini-wp) /\n (nuw*(sc-wp))))\n\n if Tb < twp:\n s = (wp+((sc-wp)/(nu-nuw))*((np.exp((-1)*((nu-nuw) /\n (sc-wp))*Tb))*(nuw+((nu-nuw)/(sc-wp))*(sini-wp))-nuw))\n self._D[cell] = 0.\n self._ETA[cell] = (1000.*ZR*pc*(sini-s))\n\n else:\n s = hgw+(wp-hgw)*np.exp((-1)*(nuw/(wp-hgw))*(Tb-twp))\n self._D[cell] = 0.\n self._ETA[cell] = (1000.*ZR*pc*(sini-s))\n\n else:\n tfc = 0.\n tsc = 0.\n twp = 0.\n\n s = hgw+(sini-hgw)*np.exp((-1)*(nuw/(wp-hgw))*Tb)\n self._D[cell] = 0.\n self._ETA[cell] = (1000.*ZR*pc*(sini-s))\n\n self._water_stress[cell] = min(((max(((sc - (s+sini)/2.) 
/\n (sc - wp)), 0.))**4.), 1.0)\n self._S[cell] = s\n self._SO[cell] = s\n self._Sini[cell] = sini\n\n current_time += (Tb+Tr)/(24.*365.25)\n return current_time", "def update_weights(self):\n\t\tpass", "def _update_stabilised_speed(self):\n if self._auv_motion == \"forward\":\n direction_to_compensate, error = self._compute_forward_movement_error()\n self._thrusters_stabilised_speed[\"1\"] = self._compute_stabilised_speed(\"1\", error,\n direction_to_compensate)\n self._thrusters_stabilised_speed[\"2\"] = self._compute_stabilised_speed(\"2\", error,\n direction_to_compensate)\n self._thrusters_stabilised_speed[\"3\"] = self._thrusters_actual_speed[\"3\"]\n self._thrusters_stabilised_speed[\"4\"] = self._thrusters_actual_speed[\"4\"]\n self._thrusters_stabilised_speed[\"5\"] = self._thrusters_actual_speed[\"5\"]\n self._thrusters_stabilised_speed[\"6\"] = self._thrusters_actual_speed[\"6\"]\n self._thrusters_stabilised_speed[\"7\"] = self._thrusters_actual_speed[\"7\"]\n self._thrusters_stabilised_speed[\"8\"] = self._thrusters_actual_speed[\"8\"]", "def update_throughput(self, thr):\n\n self.throughput = thr", "def update_inhibition(self) -> None:\n if self.spec.inhibition_type == \"fffb\":\n self.calc_fffb_inhibition()\n else:\n self.calc_kwta_inhibition()\n\n self.units.update_inhibition(torch.Tensor(self.size).fill_(self.gc_i))", "def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. 
Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def update_param(self, lr):\n\n\n self.W=self.W-lr*self.W_grad\n self.b = self.b - lr*self.b_grad", "def update(self, timestep, action, reward, best_action):\r\n\t\t# Performs updates based on the Player superclass update method.\r\n\t\tsuper().update(timestep, action, reward, best_action)\r\n\r\n\t\t# Updates the Q value estimate based on the nature of the step size\r\n\t\t# parameter.\r\n\t\tif self.step_size_parameter is None:\r\n\t\t\t# Update Q value estimate using a sample average step size parameter\r\n\t\t\tself.player_Q[action] += 1./self.player_selected_actions[action] * (reward - self.player_Q[action])\r\n\r\n\t\telse:\r\n\t\t\t# Update Q value estimate using a constant step size parameter\r\n\t\t\tself.player_Q[action] += self.step_size_parameter * (reward - self.player_Q[action])", "def update(self, timestep, action, reward, best_action):\r\n\t\t# Performs updates based on the Player superclass update method.\r\n\t\tsuper().update(timestep, action, reward, best_action)\r\n\r\n\t\t# Updates the Q value estimate based on the nature of the step size\r\n\t\t# parameter.\r\n\t\tif self.step_size_parameter is None:\r\n\t\t\t# Update Q value estimate using a sample average step size parameter\r\n\t\t\tself.player_Q[action] += 1./self.player_selected_actions[action] * (reward - self.player_Q[action])\r\n\r\n\t\telse:\r\n\t\t\t# Update Q value estimate using a constant step size parameter\r\n\t\t\tself.player_Q[action] += self.step_size_parameter * (reward - self.player_Q[action])", "def update(self):\n # GPS data\n self.model.GPS_latitude.set(self._kernel.data.lat)\n self.model.GPS_longitude.set(self._kernel.data.lon)\n \n self.model.GPS_heading.set(self._kernel.data.gps_heading)\n self.model.GPS_speed.set(self._kernel.data.speed)\n self.model.GPS_altitude.set(self._kernel.data.altitude)\n \n self.model.GPS_fix.set(self._kernel.data.fix)\n self.model.GPS_satellite_count.set(self._kernel.data.num_sat)\n \n # compass data\n self.model.compass_heading.set(self._kernel.data.compass_heading)\n \n # time data\n self.model.time.set(self._kernel.data.timestamp.isoformat())\n self.model.date.set(self._kernel.data.datestamp.isoformat())\n \n # other data\n self.model.temperature.set(self._kernel.data.temperature)", "def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. 
(2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. (2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Löwe et al. (2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT", "def temp_update(self):\n a_w = self.k / self.dx\n a_e = self.k / self.dx\n a_n = self.k / self.dy\n a_s = self.k / self.dy\n a_p = a_w + a_e + a_n + a_s + self.rho * self.cp * self.dx / self.dt\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.T[i,\n j] = (a_w * self.T[i - 1, j] + a_e * self.T[i + 1, j] +\n a_s * self.T[i, j - 1] + a_n * self.T[i, j + 1]) / a_p", "def measurement_update(self, H, Z, R):\n S = R + H*self.P*H.T\n iS = S**-1\n K = self.P*H.T*iS\n \n self.X = self.X + K*Z\n self.P = self.P - K*H*self.P", "def update(self):\n\n self._pre_calc_mb()", "def update_critics(self, s, a, r, t, s_):\n q1_loss, q2_loss = self.cal_critic_loss(s, a, r, t, s_)\n\n self.critic1_optimiser.zero_grad()\n q1_loss.backward()\n self.critic1_optimiser.step()\n\n self.critic2_optimiser.zero_grad()\n q2_loss.backward()\n self.critic2_optimiser.step()", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)", "def soft_update_critic(self):\n local_weights = np.array(self.critic_local.model.get_weights())\n target_weights = np.array(self.critic_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.critic_target.model.set_weights(new_weights)", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def time_update(self, U):\n self.X = self.runge_kutta(self.process_model, self.X, U, self.dt)\n \n J = self.F.subs({'d_t': self.dt, 'v': U[0], '\\Theta': self.X[2]})\n \n self.P = J*self.P*J.T + self.Q", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, 
action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def SCEC_LOH_3():\n\n #Initialize CrustModel\n model = CrustModel(2)\n\n #Slow layer\n vp=4.000\n vs=2.000\n rho=2.600\n Qa=54.65\n Qb=137.95\n thickness = 1.\n\n model.add_layer(thickness, vp, vs, rho, Qa, Qb)\n\n #Halfspace\n vp=6.000\n vs=3.464\n rho=2.700\n Qa=69.3\n Qb=120.\n thickness = 0 #Infinite thickness!\n model.add_layer(thickness, vp, vs, rho, Qa, Qb)\n\n return model", "def update_params(self, v_0, h_0, v_k, h_k):\n pos = np.dot(np.transpose(v_0), h_0)\n pos_vb = np.sum(v_0, axis=0)\n pos_hb = np.sum(h_0, axis=0)\n neg = np.dot(np.transpose(v_k), h_k)\n neg_vb = np.sum(v_k, axis=0)\n neg_hb = np.sum(h_k, axis=0)\n self.delta_bias_v = self.momentum*self.delta_bias_v + (self.learning_rate/self.batch_size)*(pos_vb - neg_vb)\n self.bias_v += self.delta_bias_v\n self.delta_bias_h = self.momentum*self.delta_bias_h + (self.learning_rate/self.batch_size)*(pos_hb-neg_hb) \n self.bias_h += self.delta_bias_h\n self.delta_weight_vh = self.momentum*self.delta_weight_vh + self.learning_rate*((pos - neg)/self.batch_size - self.decay*self.weight_vh)\n self.weight_vh += self.delta_weight_vh \n return", "def updateSpeed(self,accel,dangle,brake):\n self.dangle += dangle\n self.dangle = self.dangle * self.drag*(1-brake/3)*0.6\n self.angle += self.dangle\n self.accel = accel\n self.vx += accel * np.cos(self.angle)\n self.vy += accel * np.sin(self.angle)\n # flat cap on speed\n if(self.vx > self.maxSpeed): self.vx = self.maxSpeed\n if(self.vy > self.maxSpeed): self.vy = self.maxSpeed\n if(self.vx < -1*self.maxSpeed): self.vx = -1*self.maxSpeed\n if(self.vy < -1*self.maxSpeed): self.vy = -1*self.maxSpeed\n # apply drag and braking to slow down\n self.vx = self.vx * self.drag*(1-brake/3)\n self.vy = self.vy * self.drag*(1-brake/3)", "def update_kinematics(self, z_gl, head_gl, tail_gl, cm_gl, iT_gl):\n self.z_gl = z_gl\n self.cm_gl = cm_gl\n self.r_hc = head_gl - cm_gl\n self.r_ht = head_gl - tail_gl\n self.r_tc = tail_gl - cm_gl\n self.iT_gl = iT_gl\n self.head_gl = head_gl\n self.tail_gl = tail_gl", "def update(self):\n self.brain.update()", "def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n if not bandit_logs['actions']:\n estimate = 0 # if not taken till now then 0 is assigned\n actions = 0\n else:\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])", "def update(self):\n if not self.metamodel.surrogate.is_built():\n # Do not adjust until we have a surrogate\n return\n\n surr_rate = 1 - self.metamodel.history.get_model_usage_rate()\n up_bound = self.desired_rate + self.acceptable_offset\n low_bound = self.desired_rate + self.acceptable_offset\n\n if low_bound <= surr_rate <= up_bound:\n # Usage rate is acceptable.\n return\n\n T = self.value\n # Adjust step size if close to border of [0, 1]\n step_size = min(self.step, T/2, (1 - T)/2)\n\n # Check if critical (Needs adjustement fast)\n # !!! 
This is all very hacky and needs to be improved !!!\n if surr_rate > 1 - (1 - up_bound)/2 or surr_rate < low_bound/2:\n step_size = min(self.step * self.big_step_mult, T/1.5, (1 - T)/1.5)\n\n # Adjust\n if surr_rate > up_bound:\n self.value = max(0, min(1, self.value - step_size))\n elif surr_rate < low_bound:\n self.value = max(0, min(1, self.value + step_size))\n\n return", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]\n\n #if self.epsilon > self.epsilon_min:\n # self.epsilon *= self.epsilon_decay", "def update_params(self, learning_rate=0.1):\n\n self.params['W'] = self.params['W'] - learning_rate * self.dW # update weights\n self.params['b'] = self.params['b'] - learning_rate * self.db # update bias(es)", "def update_carried(self, data):\n self.use()\n gpu_data = np.array(data, dtype=np.float32)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data, gl.GL_DYNAMIC_DRAW)", "def update_network(self, tr_d, lr, relz=\"\", lmbda=0.0, mu=0.0):\n trlen = float(len(tr_d))\n delta_b = [np.zeros(b.shape) for b in self.biases]\n delta_w = [np.zeros(w.shape) for w in self.weights]\n for x,y in tr_d:\n delta_b_single, delta_w_single = self.backppg_ce(x,y)\n delta_b = [db+dbs for db,dbs in zip(delta_b, delta_b_single)]\n delta_w = [dw+dws for dw,dws in zip(delta_w, delta_w_single)]\n #update the parameters in network\n if(relz==\"\"):\n mu=0.0\n elif(relz[0:2] == \"MO\"):\n relz = relz[2:]\n self.velw = [mu*vw-(lr/trlen)*dw for vw,dw in zip(self.velw, delta_w)]\n self.velb = [mu*vb-(lr/trlen)*db for vb,db in zip(self.velb, delta_b)]\n self.biases = [b + vb for b,vb in zip(self.biases, self.velb)]\n if(relz == \"L2\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*w for w,vw in zip(self.weights, self.velw)]\n elif(relz == \"L1\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*np.sign(w) for w,vw in zip(self.weights, self.velw)]\n else:\n self.weights = [w + vw for w,vw in zip(self.weights, self.velw)]", "def set_up_and_parameterise_experiment(self):\n # Update experiment using capacity\n capacity = self._parameter_values[\"Nominal cell capacity [A.h]\"]\n for op_conds in self.experiment.operating_conditions_steps:\n if op_conds.type == \"C-rate\":\n op_conds.type = \"current\"\n op_conds.value = op_conds.value * capacity\n\n # Update terminations\n termination = op_conds.termination\n for term in termination:\n term_type = term[\"type\"]\n if term_type == \"C-rate\":\n # Change type to current\n term[\"type\"] = \"current\"\n # Scale C-rate with capacity to obtain current\n term[\"value\"] = term[\"value\"] * capacity\n\n # Add time to the experiment times\n dt = op_conds.duration\n if dt is None:\n if op_conds.type == \"current\":\n # Current control: max simulation time: 3h / C-rate\n Crate = op_conds.value / capacity\n dt = 3 / abs(Crate) * 3600 # seconds\n else:\n # max simulation time: 1 day\n dt = 24 * 3600 # seconds\n op_conds.duration = dt\n\n # Set up model for experiment\n self.set_up_and_parameterise_model_for_experiment()", "def qUpdate(self,state,action,reward,next_state):\r\n #get delta\r\n \r\n #delta = reward + self.gamma * self.Q(next_state,next_action) \\\r\n # - 
self.Q(state,action)\r\n \r\n #get e update\r\n #self.e = self.gamma *self.lam * self.e - self.grad(state,action)\r\n \r\n \r\n #do update to w\r\n \r\n #self.w = self.alpha * delta * self.e\r\n #get difference between current q and new q\r\n \r\n delta = reward + self.gamma * self.maxQ(next_state)[0] - \\\r\n self.Q(state,action) \r\n #update w\r\n self.w = self.w + self.alpha * delta * self.grad(state,action)", "def updateWeights(self,weightUpdate):\n\t\n\t\tbranches = self.collectAllBranches()\n\n\t\tfor i in range(self.nBranches):\n\n\t\t\tbranches[i].weight -= weightUpdate[i]", "def set_costs(self) -> None:\n self[\"glider cost\"] = (\n self[\"glider base mass\"] * self[\"glider cost slope\"]\n + self[\"glider cost intercept\"]\n )\n self[\"lightweighting cost\"] = (\n self[\"glider base mass\"]\n * self[\"lightweighting\"]\n * self[\"glider lightweighting cost per kg\"]\n )\n self[\"electric powertrain cost\"] = (\n self[\"electric powertrain cost per kW\"] * self[\"electric power\"]\n )\n self[\"combustion powertrain cost\"] = (\n self[\"combustion power\"] * self[\"combustion powertrain cost per kW\"]\n )\n self[\"fuel cell cost\"] = self[\"fuel cell power\"] * self[\"fuel cell cost per kW\"]\n self[\"power battery cost\"] = (\n self[\"battery power\"] * self[\"power battery cost per kW\"]\n )\n self[\"energy battery cost\"] = (\n self[\"energy battery cost per kWh\"] * self[\"electric energy stored\"]\n )\n self[\"fuel tank cost\"] = self[\"fuel tank cost per kg\"] * self[\"fuel mass\"]\n # Per km\n self[\"energy cost\"] = self[\"energy cost per kWh\"] * self[\"TtW energy\"] / 3600\n\n # For battery, need to divide cost of electricity\n # at battery by efficiency of charging\n # to get costs at the \"wall socket\".\n\n _ = lambda x: np.where(x == 0, 1, x)\n self[\"energy cost\"] /= _(self[\"battery charge efficiency\"])\n\n self[\"component replacement cost\"] = (\n self[\"energy battery cost\"] * self[\"battery lifetime replacements\"]\n + self[\"fuel cell cost\"] * self[\"fuel cell lifetime replacements\"]\n )\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n to_markup = yaml.safe_load(stream)[\"markup\"]\n\n self[to_markup] *= self[\"markup factor\"]\n\n # calculate costs per km:\n self[\"lifetime\"] = self[\"lifetime kilometers\"] / self[\"kilometers per year\"]\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n purchase_cost_params = yaml.safe_load(stream)[\"purchase\"]\n\n self[\"purchase cost\"] = self[purchase_cost_params].sum(axis=2)\n # per km\n amortisation_factor = self[\"interest rate\"] + (\n self[\"interest rate\"]\n / (\n (np.array(1) + self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n - np.array(1)\n )\n )\n self[\"amortised purchase cost\"] = (\n self[\"purchase cost\"] * amortisation_factor / self[\"kilometers per year\"]\n )\n\n # per km\n self[\"maintenance cost\"] = (\n self[\"maintenance cost per glider cost\"]\n * self[\"glider cost\"]\n / self[\"kilometers per year\"]\n )\n\n # simple assumption that component replacement\n # occurs at half of life.\n self[\"amortised component replacement cost\"] = (\n (\n self[\"component replacement cost\"]\n * (\n (np.array(1) - self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n / 2\n )\n )\n * amortisation_factor\n / self[\"kilometers per year\"]\n )\n\n self[\"total cost per km\"] = (\n self[\"energy cost\"]\n + self[\"amortised purchase cost\"]\n + self[\"maintenance cost\"]\n + self[\"amortised component replacement cost\"]\n )", "def 
_update_parameters(self, curr_state, reward, next_state):\n phi = self._features.vector(curr_state)\n phi_dash = self._features.vector(next_state)\n\n self._A += np.outer(phi, (phi - self._gamma * phi_dash))\n self._b += reward * phi", "def update(Q, target_Q, opt, samples, gamma=0.99, target_type='double_dqn'):\n xp = Q.xp\n obs = xp.asarray([sample[0] for sample in samples], dtype=np.float32)\n action = xp.asarray([sample[1] for sample in samples], dtype=np.int32)\n reward = xp.asarray([sample[2] for sample in samples], dtype=np.float32)\n done = xp.asarray([sample[3] for sample in samples], dtype=np.float32)\n obs_next = xp.asarray([sample[4] for sample in samples], dtype=np.float32)\n # Predicted values: Q(s,a)\n y = F.select_item(Q(obs), action)\n # Target values: r + gamma * max_b Q(s',b)\n with chainer.no_backprop_mode():\n if target_type == 'dqn':\n next_q = F.max(target_Q(obs_next), axis=1)\n elif target_type == 'double_dqn':\n next_q = F.select_item(target_Q(obs_next),\n F.argmax(Q(obs_next), axis=1))\n else:\n raise ValueError('Unsupported target_type: {}'.format(target_type))\n target = reward + gamma * (1 - done) * next_q\n loss = mean_clipped_loss(y, target)\n Q.cleargrads()\n loss.backward()\n opt.update()", "def update_network_parameters(self, tau=None):\n\n #Is used during the first iteration such that the target networks get the same parameters of the normal networks (hard update)\n if tau is None:\n tau = self.tau\n\n #Update the target_actor weights\n weights = []\n targets = self.target_actor.weights\n for i, weight in enumerate(self.actor.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_actor.set_weights(weights)\n\n #Update the target_critic_1 weights\n weights = []\n targets = self.target_critic_1.weights\n for i, weight in enumerate(self.critic_1.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_1.set_weights(weights)\n\n #Update the target_critic_2 weights\n weights = []\n targets = self.target_critic_2.weights\n for i, weight in enumerate(self.critic_2.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_2.set_weights(weights)", "def update(self, z):\n y = z - np.dot(self.measurement_matrix, self.x)\n\n innovation_covariance = np.dot(\n self.measurement_matrix, np.dot(self.prediction_covariance, self.measurement_matrix.T)\n ) + self.measurement_covariance\n\n optimal_kalman_gain = np.dot(\n np.dot(self.prediction_covariance, self.measurement_matrix.T),\n np.linalg.inv(innovation_covariance)\n )\n\n self.x = self.x + np.dot(optimal_kalman_gain, y)\n eye = np.eye(self.state_size)\n _t1 = eye - np.dot(optimal_kalman_gain, self.measurement_matrix)\n t1 = np.dot(np.dot(_t1, self.prediction_covariance), _t1.T)\n t2 = np.dot(np.dot(optimal_kalman_gain, self.measurement_covariance), optimal_kalman_gain.T)\n self.prediction_covariance = t1 + t2", "def update(self, state_value, current_time):\r\n\t\t\r\n\t\t# Check how to update controller value and calculate error with respect to upper/lower threshold\r\n\t\tif state_value > self.UpperThreshold:\t\t\t# Increase if above upper threshold\r\n\t\t\terror = (state_value - self.UpperThreshold)/self.UpperThreshold\r\n\t\t\tincrement = self.OutputValueIncrement\r\n\t\telif state_value < self.LowerThreshold:\t\t\t# Decrease if below lower threshold\r\n\t\t\terror = (state_value - self.LowerThreshold)/self.LowerThreshold\r\n\t\t\tincrement = -self.OutputValueIncrement\r\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t# Do nothing when within upper and 
lower thresholds\r\n\t\t\terror = 0\r\n\t\t\tincrement = 0\t\t\t\t\t\t\t\t\r\n\t\t\r\n\t\t# Bound the controller output (between MinValue - MaxValue)\r\n\t\tif self.LastOutputValue+increment > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\telif self.LastOutputValue+increment < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\telse:\r\n\t\t\tself.OutputValue = self.LastOutputValue+increment\r\n\t\t\t\r\n\t\t# Record state, error and sample time values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t# Convert from msec to sec\r\n\t\t\r\n\t\tself.LastOutputValue = self.OutputValue\r\n\t\t\r\n\t\treturn self.OutputValue", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]", "def update_nodes(self):\n\n L = self.level\n P = L.prob\n\n # only if the level has been touched before\n assert L.status.unlocked\n\n # get number of collocation nodes for easier access\n M = self.coll.num_nodes\n\n # gather all terms which are known already (e.g. from the previous iteration)\n # this corresponds to u0 + QF(u^k) - QdF(u^k) + tau\n\n # get QF(u^k)\n integral = self.integrate()\n for m in range(M):\n # get -QdF(u^k)_m\n for j in range(1, M + 1):\n integral[m] -= L.dt * self.QI[m + 1, j] * L.f[j]\n\n # add initial value\n integral[m] += L.u[0]\n # add tau if associated\n if L.tau[m] is not None:\n integral[m] += L.tau[m]\n\n # do the sweep\n for m in range(0, M):\n # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)\n rhs = P.dtype_u(integral[m])\n for j in range(1, m + 1):\n rhs += L.dt * self.QI[m + 1, j] * L.f[j]\n\n # implicit solve with prefactor stemming from the diagonal of Qd\n L.u[m + 1] = P.solve_system(\n rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]\n )\n # update function values\n L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])\n\n # indicate presence of new values at this level\n L.status.updated = True\n\n return None", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()", "def update_tlm(self):", "def update(self, state, action, nextState, reward):\n candidateQ = reward + self.discount * \\\n self.computeValueFromQValues(nextState)\n currentQ = self.getQValue(state, action)\n difference = candidateQ - currentQ\n features = self.featExtractor.getFeatures(state, action)\n for feat in features:\n self.weights[feat] += self.alpha * difference * features[feat]", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n self.qValues[(state, action)] = ((1 - self.alpha) * self.getQValue(state, action)) + self.alpha \\\n * (reward + self.discount * self.computeValueFromQValues(nextState))", "def update(self, state_value, current_time):\r\n\t\t\r\n\t\t# Calculate Error - if SetPoint > 0.0, then normalize error with respect to setpoint\r\n\t\tif self.SetPoint==0.0:\r\n\t\t\terror = state_value - self.SetPoint\r\n\t\telse:\r\n\t\t\terror = (state_value - self.SetPoint)/self.SetPoint \r\n\t\t\r\n\t\t# Bound the 
controller output (between MinValue - MaxValue)\r\n\t\tif self.ConstantValue > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\telif self.ConstantValue < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\telse:\r\n\t\t\tself.OutputValue = self.ConstantValue\r\n\t\t\r\n\t\t# Record state, error and sample time values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t\t# Convert from msec to sec\r\n\t\t\r\n\t\treturn self.OutputValue", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use Q-Learning algoritm in slide 58 of MDP\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n maxQns = self.getValue(nextState) # get max q-value of next state\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action) #self.qValues[(state, action)]\n difference = reward + self.discountRate * maxQns - Qsa\n self.qValues[(state, action)] += self.alpha * difference\n \n self.vitCount[(state, action)] += 1\n \"\"\" END CODE \"\"\"", "def update(self):\n if not self.metamodel.surrogate.is_built():\n # Do not adjust until we have a surrogate\n return\n\n surr_rate = 1 - self.metamodel.history.get_model_usage_rate()\n surr_rate_err = abs(self.desired_rate - surr_rate)\n\n if surr_rate_err <= self.acceptable_offset:\n # Usage rate is acceptable.\n return\n\n T = self.value\n edge_adjustment = 1 - ((2*T - 1) ** self.alpha)\n err_adjustment = min(self.beta, 1 / ((1 - surr_rate_err) ** self.beta))\n step_size = self.step * edge_adjustment * err_adjustment\n # Adjust\n if surr_rate > self.desired_rate:\n self.value = max(T/self.beta, T - step_size)\n elif surr_rate < self.desired_rate:\n self.value = min(1 - ((1-T)/self.beta), T + step_size)\n\n return", "def update_velocity(self):\n # Set thruster (up/down) movement\n if self.thrusters:\n self.velocity_y -= self.gravity\n else:\n self.velocity_y += self.velocity_slowing\n\n # Set left movement\n if self.moving_left:\n self.velocity_x -= self.gravity\n else:\n if self.velocity_x < 0:\n self.velocity_x += self.velocity_slowing\n \n # Set right movement\n if self.moving_right:\n self.velocity_x += self.gravity\n else:\n if self.velocity_x > 0:\n self.velocity_x -= self.velocity_slowing", "def update_optimizer(self, context, optimizer, host):\n pass", "def update_r(self):\n self.gamma_r = self.gamma_s - self.gamma_q\n self.Sigma_r = self.Sigma_s - self.Sigma_q", "def update_pressure(self):\n m_multipliers = np.ones(self.mesh.get_number_of_cells())\n\n\n rhs_current = np.zeros(self.mfd.get_number_of_dof()) \n rhs_current += self.rhs_mfd\n\n\n for cell_index in range(self.mesh.get_number_of_cells()):\n density = -self.ref_pressure\n density += self.current_pressure[cell_index]\n density *= self.compressibility\n density += 1.\n density *= self.ref_density\n\n # We multiply by the inverse of \\frac{\\rho}{\\mu}\n m_multipliers[cell_index] = self.viscosity/density\n\n c_entry = self.compressibility\n c_entry *= self.porosities[cell_index]\n c_entry /= self.delta_t\n c_entry *= 
self.mesh.get_cell_volume(cell_index)\n\n rhs_current[self.mesh.get_number_of_faces()+\n cell_index] += c_entry*self.current_pressure[cell_index]\n\n self.lhs_coo.data[self.c_start+cell_index] = c_entry\n\n for [index, cell_index] in enumerate(self.rate_wells):\n rhs_current[self.mesh.get_number_of_faces()+cell_index] += \\\n self.rate_wells_rate[index]\n\n self.mfd.update_m(self.lhs_coo.data[:self.m_x_coo_length], m_multipliers)\n\n solution = dsolve.spsolve(self.lhs_coo.tocsr(), rhs_current)\n self.prev_pressure = self.current_pressure\n self.current_pressure = solution[self.mesh.get_number_of_faces():]\n self.current_velocity = solution[:self.mesh.get_number_of_faces()]", "def update_forces(self):\n\n pass", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())" ]
[ "0.5559533", "0.5498132", "0.54623824", "0.54417586", "0.5429667", "0.54125965", "0.5399066", "0.5371565", "0.53581613", "0.5342093", "0.5301803", "0.5297535", "0.52953154", "0.5255505", "0.52489734", "0.52468264", "0.5231706", "0.5230685", "0.51868993", "0.517444", "0.51712936", "0.5169751", "0.5167011", "0.5163602", "0.51634187", "0.51415837", "0.51191884", "0.51190674", "0.51180226", "0.51130193", "0.51003295", "0.5097932", "0.5094322", "0.50767964", "0.50482005", "0.5043563", "0.5043563", "0.5041045", "0.5039963", "0.50391054", "0.5019186", "0.5010449", "0.5007364", "0.50027126", "0.49989867", "0.49947184", "0.49573418", "0.49573418", "0.4955745", "0.49532646", "0.49529448", "0.4947284", "0.4941408", "0.49383926", "0.4933472", "0.49321645", "0.49313575", "0.49313062", "0.4928732", "0.4928732", "0.49259478", "0.49259478", "0.49215114", "0.4900942", "0.49009004", "0.48960766", "0.48883706", "0.48756582", "0.48732534", "0.48706108", "0.48678276", "0.48612648", "0.4855102", "0.48502976", "0.4847864", "0.4841427", "0.48398486", "0.48353207", "0.48306012", "0.48273137", "0.48172405", "0.48149824", "0.48129103", "0.48112625", "0.48080748", "0.48059148", "0.48001733", "0.4797266", "0.47952706", "0.479409", "0.479409", "0.47929406", "0.4788549", "0.4779953", "0.4773848", "0.47725934", "0.47712308", "0.476928", "0.4762172", "0.47615877" ]
0.6069336
0
Print Hall Account statements for specified hall_ID
def print_statement(Hall):
    pdf = FPDF('P', 'mm', 'A4')
    pdf.add_page('P')
    pdf.set_font('Times', 'B', 14)

    pdf.multi_cell(0, 5, ('Hall Account Statement for Hall: %s' % Hall.name))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Mess Account: %s' % Hall.mess_account))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Salary Account: %s' % Hall.salary_account))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Repair Account: %s' % Hall.repair_account))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Rent Account: %s' % Hall.rent_account))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Others Account: %s' % Hall.others_account))
    pdf.ln()

    # Write generated output file to PDF
    pdf.output(('hall_statement_%s.pdf' % Hall.hall_ID), 'F')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_out_account_balances(list_of_all_accounts_known):\n for account in list_of_all_accounts_known:\n print('{0} {1}'.format(account.account_id, account.balance))", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Challenge Definition ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-challenge type:\", self.challenge_type, sep='')\n\n print(indent, \"|-challenge code ID:\", self.challenge_code_ID, sep='')\n\n print(indent, \"|-associated recipient ID:\", self.recipient_ID, sep='')\n recipient = get_indexed_item_from_list(self.recipient_ID, AutoResilGlobal.recipient_list)\n if recipient != None:\n recipient.printout_all(indent_level+1)\n\n print(indent, \"|-info about cloud virtual impacted resource(s):\", self.impacted_cloud_resources_info, sep='')\n\n if self.impacted_cloud_resource_ID_list != None:\n if len(self.impacted_cloud_resource_ID_list) >0:\n print(indent, \"|-associated cloud virtual impacted resource(s):\", sep='')\n for cloud_resource_ID in self.impacted_cloud_resource_ID_list:\n cloud_resource_item = get_indexed_item_from_list(cloud_resource_ID, AutoResilGlobal.cloud_virtual_resource_list)\n if cloud_resource_item != None:\n cloud_resource_item.printout_all(indent_level+1)\n\n print(indent, \"|-info about physical virtual impacted resource(s):\", self.impacted_phys_resources_info, sep='')\n\n if self.impacted_phys_resource_ID_list != None:\n if len(self.impacted_phys_resource_ID_list) >0:\n print(indent, \"|-associated physical impacted resource(s):\", sep='')\n for phys_resource_ID in self.impacted_phys_resource_ID_list:\n phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)\n if phys_resource_item != None:\n phys_resource_item.printout_all(indent_level+1)\n\n print(indent, \"|-CLI command to start challenge:\", self.start_challenge_CLI_command_sent, sep='')\n\n print(indent, \"|-CLI command to stop challenge:\", self.stop_challenge_CLI_command_sent, sep='')\n\n # TODO: self.start_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)\n # TODO: self.stop_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)", "def show_all_accounts(self, account_name=None, account_id=None, search=False,\n print_table=True):\n pt = PrettyTable(['ACCOUNT_NAME', 'ACCOUNT_ID'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_accounts(account_name=account_name,\n account_id=account_id,\n search=search)\n for account in list:\n pt.add_row([account['account_name'], account['account_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt", "def show_accounts(conn, userid):\n print('\\n\\nAccount statment for user', (userid))\n with conn.cursor() as curs:\n curs.execute('SELECT id, type, balance FROM accounts WHERE owner_id=%s', (userid,))\n rows = curs.fetchall()\n print('Number of results:', curs.rowcount)\n for row in rows:\n print(row)", "def print_table(ledger):\n\n table = PrettyTable() # defines a PrettyTable object\n\n table.field_names = [\n \"hospital\",\n \"patient\",\n \"status\",\n \"nonce\",\n \"prev_hash\",\n \"a\",\n \"b\",\n \"c\",\n \"current_hash\",\n ] # define field names for table\n\n for block in ledger:\n table.add_row(\n [\n block[\"hospital\"],\n block[\"patient\"],\n block[\"status\"],\n block[\"nonce\"],\n block[\"prev_hash\"],\n block[\"a\"],\n block[\"b\"],\n block[\"c\"],\n 
block[\"current_hash\"],\n ]\n ) # add data to table\n\n print(\"\\n\\n\" + color.BOLD + \"Printing Your Ledger:\" + color.END)\n print(table) # print prettytable of patient info", "def generate_salary_list(Hall):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, ('Hall Salary List: Hall %s' % Hall.hall_ID))\n pdf.ln()\n\n worker_list = dbr.rebuild(\"worker\")\n title = \"Role\"\n wage = 0\n for key in worker_list:\n if worker_list[key].hall_ID == Hall.hall_ID:\n if isinstance(worker_list[key], mess_manager.MessManager):\n title = \"Mess Manager\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], clerk.Clerk):\n title = \"Clerk\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], attendant.Attendant):\n title = \"Attendant\"\n wage = worker_list[key].daily_wage\n\n pdf.multi_cell(0, 5, ('%s: %s (%s) - Rs. %s' % (worker_list[key].worker_ID,\n worker_list[key].name, title, wage)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('hall_salary_%s.pdf' % Hall.hall_ID), 'F')", "def print_account(account):\r\n markets_output = \"\"\r\n for market in account.get_market_segments():\r\n markets_output += market.name.strip(\"\\'\") + \", \"\r\n markets_output = markets_output.strip(\"\\'\")\r\n print(f'{account.name} ({markets_output[:-2]}): {account.get_sales_rep()}')", "def display_hall_of_fame(self) -> None:\n print(\"Hall of fame\")\n for env, dico in self.score_dic.items():\n print(\"Environment :\", env)\n for team, score in sorted(dico.items()):\n print(\"team: \", team, \"mean: \", score[0], \"std: \", score[1])", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Physical Resource ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-MAC address:\", self.MAC_address, sep='')", "def display_accounts_details():\n return Records.display_records()", "def PrintSummary(self, dollarsPerKiloWattHour = 0.1149, dollarsPerDTH = 6.53535):\n\t\tprint()\n\t\tprint(\" RESULTS \")\n\t\tprint()\n\t\tprint(\"The Number of times the furnace turns on: \" + str(self.building_hvac.NumberOfTimesHeatingTurnedOn))\n\t\tprint(\"The Number of times the AC turns on: \" + str(self.building_hvac.NumberOfTimesCoolingTurnedOn))\n\t\tprint(\"The Current Temperature: \" + str(self.current_temperature) + \"C\")\n\t\tprint(\"The total Electrical power used: \" + str(self.building_hvac.GetElectricKilowattHours()) + \"KWH\")\n\t\tprint(\"The total Time: \" + str(self.building_hvac.TotalTimeInSeconds))\n\t\tprint(\"The total Time Heating was on: \" + str(self.building_hvac.TotalDurationHeatingOn))\n\t\tprint(\"The total Time Cooling was on: \" + str(self.building_hvac.TotalDurationCoolingOn))\n\t\tprint(\"The Total Gas Energy Used: \" + str(self.building_hvac.GetGasDTH()) + \" DTH\")\n\t\tprint(\"Electrical Cost: $\" + str(self.CalculateElectricEneregyCost()))\n\t\tprint(\"Gas Cost: $\" + str(self.CalculateGasEneregyCost()))", "def report_printing(cls):\n while True:\n print('Donor Name' + ' ' * 16 + '| Total Given | Num Gifts | Average Gift')\n print('-' * 66)\n print(donor_db.create_report())\n print('Returning to main menu...\\n')\n return", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n 
return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"\\nTest Definition ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-associated test case ID:\", self.test_case_ID, sep='')\n test_case = get_indexed_item_from_list(self.test_case_ID, AutoResilGlobal.test_case_list)\n if test_case != None:\n test_case.printout_all(indent_level+1)\n\n print(indent, \"|-test code ID:\", self.test_code_ID, sep='')\n\n print(indent, \"|-associated challenge def ID:\", self.challenge_def_ID, sep='')\n challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)\n if challenge_def != None:\n challenge_def.printout_all(indent_level+1)\n\n if self.VNF_ID_list != None:\n if len(self.VNF_ID_list) >0:\n print(indent, \"|-associated VNFs:\", sep='')\n for VNF_ID in self.VNF_ID_list:\n VNF_item = get_indexed_item_from_list(VNF_ID, AutoResilGlobal.VNF_Service_list)\n if VNF_item != None:\n VNF_item.printout_all(indent_level+1)\n\n if self.associated_metrics_ID_list != None:\n if len(self.associated_metrics_ID_list) >0:\n print(indent, \"|-associated metrics:\", sep='')\n for Metric_ID in self.associated_metrics_ID_list:\n Metric_item = get_indexed_item_from_list(Metric_ID, AutoResilGlobal.metric_definition_list)\n if Metric_item != None:\n Metric_item.printout_all(indent_level+1)\n\n if self.recipient_ID_list != None:\n if len(self.recipient_ID_list) >0:\n print(indent, \"|-associated recipients:\", sep='')\n for recipient_ID in self.recipient_ID_list:\n recipient_item = get_indexed_item_from_list(recipient_ID, AutoResilGlobal.recipient_list)\n if recipient_item != None:\n recipient_item.printout_all(indent_level+1)\n\n if self.test_CLI_command_sent_list != None:\n if len(self.test_CLI_command_sent_list) >0:\n print(indent, \"|-associated CLI commands:\", sep='')\n for CLI_command in self.test_CLI_command_sent_list:\n print(\" \"*INDENTATION_MULTIPLIER, \"|- \", CLI_command, sep='')\n\n # TODO: self.test_API_command_sent_list (depends how API commands are stored: likely a list of strings)", "def show_all_users(self, account_name=None, account_id=None, path=None, user_name=None,\n user_id=None, search=False, print_table=True ):\n pt = PrettyTable(['ACCOUNT:', 'USERNAME:', 'USER_ID', 'ACCT_ID'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_users(account_name=account_name, account_id=account_id, path=path,\n user_name=user_name, user_id=user_id, search=search)\n for user in list:\n pt.add_row([user['account_name'], user['user_name'],\n user['user_id'], user['account_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Test Case ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-JIRA URL:\", self.JIRA_URL, sep='')", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Recipient ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-version info:\", self.version_info, sep='')\n print(indent, \"|-IP address:\", self.access_IP_address, sep='')\n 
print(indent, \"|-URL:\", self.access_URL, sep='')\n print(indent, \"|-username for user/pwd credentials:\", self.username_creds, sep='')\n print(indent, \"|-password for user/pwd credentials:\", self.password_creds, sep='')\n print(indent, \"|-key credentials:\", self.key_creds, sep='')\n print(indent, \"|-info about network:\", self.network_info, sep='')", "def write_out_account_numbers_and_balances(list_of_all_accounts_known):\n with open('./practise_accounts.txt', mode='wt') as accounts_and_balances_to_write_out:\n for accounts in list_of_all_accounts_known:\n accounts_and_balances_to_write_out.writelines('{0} {1}\\n'.format(accounts.account_id, accounts.balance));\n # end of withblock, close open file writing", "def view_bank_account_details(self) -> None:\n Menu.prompt_view_bank_account_details()\n print(\"Bank Account Details:\")\n print(self.user.account)\n\n for tx_num, tx_details in \\\n self.user.tx_manager.transaction_records.items():\n print(f\"\\nTransaction #{tx_num}:\\n\"\n f\"{tx_details}\")\n\n print(f\"\\nSpending Summary:\")\n print(f\" Starting Bank Balance: \"\n f\"{'{:.2f}'.format(self.user.account.starting_balance)}\")\n print(f\" Total Transactions Amount: \"\n f\"{'{:.2f}'.format(self.user.tx_manager.calc_total_spent())}\")\n print(f\" Closing Bank Account Balance: \"\n f\"{'{:.2f}'.format(self.user.account.current_balance)}\")", "def print_private(self):\n print('Account Number : ', self.__Account)\n return \"\"", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def show_all_habits(self):\n # Clean up the console\n self.clear_console()\n # Prints the name of the application and instructions to the main menu\n self.back_to_menu_info()\n print(\"\"\"\n ALL HABITS REGISTERED\n ________________________________________________\n \"\"\")\n \n # Gets the habits table of the DB\n habits_table = self.analytics.habits_table()\n # IDs of habits without trackings\n ids_without_trackings = self.analytics.ids_without_trackings(\n habits_table,\n # Join of the habits table and the trackings table from the DB\n self.analytics.habits_trackings_table())\n # IDs of habits that have trackings\n ids_with_trackings = self.analytics.ids_with_trackings(habits_table, \n ids_without_trackings)\n if len(ids_with_trackings) != 0:\n # Displays information of tracked habits contained in the habits table in table format\n self.analytics.display_table(\n ('ID', 'HABIT', 'PERIODICITY', 'MOTIVATION', 'DESCRIPTION', 'CREATION DAY'),\n # Gets all the tracked habits\n self.analytics.tracked_habits(\n habits_table,\n # Join of the habits table and the trackings table from the DB\n self.analytics.habits_trackings_table()),\n 'TRACKED HABITS'\n )\n print('')\n\n if len(ids_without_trackings) != 0:\n # Habits without trackings in tabular form\n self.table_untracked_habits(habits_table,\n ids_without_trackings)\n print('')\n # Return to the main menu by selecting the number zero\n self.return_menu()", "def printall():\n print listAll()", "def printPassbook(self) :\n for expense in self.__passbook:\n print(expense.toString())", "def print_aldb_to_log(aldb):\n _LOGGER.info(\"ALDB load status is %s\", aldb.status.name)\n if aldb.status not in [ALDBStatus.LOADED, ALDBStatus.PARTIAL]:\n _LOGGER.warning(\"Device All-Link database not loaded\")\n _LOGGER.warning(\"Use service insteon.load_aldb first\")\n return\n\n _LOGGER.info(\"RecID In Use Mode HWM Group Address Data 1 Data 2 Data 3\")\n _LOGGER.info(\"----- ------ ---- --- ----- -------- ------ ------ 
------\")\n for mem_addr in aldb:\n rec = aldb[mem_addr]\n # For now we write this to the log\n # Roadmap is to create a configuration panel\n in_use = \"Y\" if rec.control_flags.is_in_use else \"N\"\n mode = \"C\" if rec.control_flags.is_controller else \"R\"\n hwm = \"Y\" if rec.control_flags.is_high_water_mark else \"N\"\n log_msg = (\n f\" {rec.mem_addr:04x} {in_use:s} {mode:s} {hwm:s} \"\n f\"{rec.group:3d} {rec.address.human:s} {rec.data1:3d} \"\n f\"{rec.data2:3d} {rec.data3:3d}\"\n )\n _LOGGER.info(log_msg)", "def _print_field(h_lines: List[Line]):\n\n print(H_LINE)\n\n for line in h_lines:\n line.print()\n\n print(H_LINE)", "def display_menu(self):\n # Gets the number of habits that exist in the habits table\n number_of_habits = len(self.analytics.habits_table())\n # Gets the number of trackings that exist in the trackings table\n number_of_trackings = len(self.analytics.trackings_table())\n\n print(\n \"\"\"\n ________________________________________\n\n WELCOME TO YOUR HABITSBOX\n ________________________________________\n\n Everything can be achieved\n with perseverance and commitment\n\n ---------- Let's get started -----------\n\n Choose a number\n\n 0. Exit\n ----------------------------------------\n 1. Add a new habit\n ----------------------------------------\n \"\"\")\n\n if number_of_habits >= 1:\n print(\n \"\"\"\n 2. Check a habit off\n 3. Delete a habit\n ----------------------------------------\n \"\"\")\n if (number_of_habits == 1):\n print(\"\"\"\n Analysis -------------------------------\n\n 4. See my habit\n ----------------------------------------\n \"\"\")\n elif (number_of_habits > 1): #or (number_of_trackings >= 0):\n print(\n \"\"\"\n Analysis -------------------------------\n\n 4. See a habit\n 5. See all habits registered\n 6. See habits with same periodicity\n \"\"\")\n if (number_of_trackings > 0):\n print(\"\"\" \n 7. 
See my longest streak of all habits\n ----------------------------------------\n \"\"\")", "def accounts():", "def print_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n datas = {'ids': context.get('active_ids', [])}\n\n res = self.read(cr, uid, ids, context=context)\n res = res and res[0] or {}\n datas.update({'form': res})\n return self.pool['report'].get_action(cr, uid, ids, \n 'l10n_cl_hr_payroll.report_hrsalarybymonth', \n data=datas, context=context)", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"VNF or e2e Service ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-URL:\", self.URL, sep='')\n\n if self.related_phys_rsrc_ID_list != None:\n if len(self.related_phys_rsrc_ID_list) >0:\n print(indent, \"|-related/associated physical resource(s):\", sep='')\n for phys_resource_ID in self.related_phys_rsrc_ID_list:\n phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)\n if phys_resource_item != None:\n phys_resource_item.printout_all(indent_level+1)\n\n if self.related_cloud_virt_rsrc_ID_list != None:\n if len(self.related_cloud_virt_rsrc_ID_list) >0:\n print(indent, \"|-related/associated cloud virtual resource(s):\", sep='')\n for cloud_resource_ID in self.related_cloud_virt_rsrc_ID_list:\n cloud_resource_item = get_indexed_item_from_list(cloud_resource_ID, AutoResilGlobal.cloud_virtual_resource_list)\n if cloud_resource_item != None:\n cloud_resource_item.printout_all(indent_level+1)", "def show_user_account_home():\n\n user = User.query.filter_by(user_id=int(session['user_id'])).one()\n print user\n\n return render_template(\"base.html\")\n # return render_template(\"user_account.html\", user_id=user.user_id, name=user.first_name)\n #, user_id=user.user_id, email=email, name=first_name)", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Cloud Virtual Resource ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-URL:\", self.URL, sep='')\n\n if self.related_phys_rsrc_ID_list != None:\n if len(self.related_phys_rsrc_ID_list) >0:\n print(indent, \"|-related/associated physical resource(s):\", sep='')\n for phys_resource_ID in self.related_phys_rsrc_ID_list:\n phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)\n if phys_resource_item != None:\n phys_resource_item.printout_all(indent_level+1)", "def display(self):\n print(f'{self.first_name} {self.last_name}, Customer#: '\n f'{self.customer_id}\\n{self.address}\\n{self.phone_number}\\n'\n f'{self.create_invoice()}')", "def users(accountable, query):\n users = accountable.users(query)\n headers = ['display_name', 'key']\n if users:\n rows = [[v for k, v in sorted(u.items()) if k in headers]\n for u in users]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho('No users found for query {}'.format(\n query\n ), fg='red')", "def _get_account_info(self, user_id, action=''):\n if User.check_existing_user(user_id):\n user = User(user_id)\n accounts_info = 
dict(user.accounts)\n transaction_logs = ''\n for v in accounts_info.values():\n if action == '5':\n transaction_logs += v['transaction_log']\n del v['transaction_log']\n self.session.output(accounts_info, transaction_logs)\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to see user info ]')\n return False", "def account_summary(self):\n pass", "def print_taboo_spaces(warehouse_id):\n problem_file = \"./warehouses/warehouse_{:02d}.txt\".format(warehouse_id)\n wh = Warehouse()\n wh.load_warehouse(problem_file)\n print(wh)\n print(\"TABOO CELLS: \")\n taboo = taboo_cells(wh)\n print(taboo)", "def print_header(self):\n print()\n print(\"=\"*25)\n print()\n print(\"Have fun in your blackjack round!\")\n print()\n print(\"=\"*25)", "def simple_banking_management_functional():\n create_user('private', **USERS['Andreas'])\n create_user('company', **USERS['carrot_inc'])\n\n result = search_private_user('Andreas', 'Gustafsson')\n result_2 = search_company_user('carrot')\n\n register_account('savings', USERS['Andreas']['id_nr'])\n register_account('salary', USERS['Andreas']['id_nr'])\n\n deposit('savings', 100, USERS['Andreas']['id_nr'])\n deposit('salary', 20, USERS['Andreas']['id_nr'])\n\n withdraw('savings', 50, USERS['Andreas']['id_nr'])\n withdraw('salary', 30, USERS['Andreas']['id_nr'])\n\n print(BANK[USERS['Andreas']['id_nr']])", "def warehouse_print(warehouse_list):\n for warehouse_item in warehouse_list:\n warehouse_item_print(warehouse_item)", "def display_accounts_details():\n return Credentials.display_credentials()", "def print_db():\r\n try:\r\n conn = sqlite3.connect('account.db')\r\n c = conn.cursor()\r\n for row in c.execute(\"SELECT * FROM accounts\"):\r\n print(row)\r\n except sqlite3.DatabaseError:\r\n print(\"Error. 
Could not retrieve data.\")\r\n finally:\r\n if c is not None:\r\n c.close()\r\n if conn is not None:\r\n conn.close()", "def print_report(d):\n report_data = d.get_report_data()\n title = \"{:24} | {:12} | {:10} | {:20}\"\n dashes=67*('-');print(dashes)\n print(title.format('Donor Name','Total Given','Num Gifts','Average Gift'))\n strf_format = \"{:24} ${:12.2f} {:^10d} ${:12.2f}\"\n print(dashes)\n for donor in report_data:\n print(strf_format.format(*donor))\n print(dashes)", "def print_individuals(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Name', 'Gender', 'Birthday', 'Age', 'Alive', 'Death', 'Child', 'Spouse']\n for i in self.individuals.values():\n pt.add_row(i.get_values())\n print(pt)", "def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))", "def print_families(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Married', 'Divorced', 'Husband ID', 'Husband Name', 'Wife ID', 'Wife Name', 'Children']\n for f in self.families.values():\n pt.add_row(f.get_values())\n print(pt)", "def see_habit(self):\n # Clean up the console\n self.clear_console()\n # Prints the name of the application and instructions to the main menu\n self.back_to_menu_info()\n print(\"\"\"\n SEE A HABIT\n ________________________________________________\n \"\"\")\n # Gets the habits table of the DB\n habits_info = self.analytics.habits_table()\n # List of the names and ids of the registered habits in table format\n self.table_registered_habits()\n # Union of the habits table and the trackings table from the DB\n habits_trackings = self.analytics.habits_trackings_table()\n # A list of the trackings that have been recorded in the trackings table of the DB\n trackings = self.analytics.trackings_table()\n # A list with all habit identifiers in the habits table\n ids_habits_table = self.analytics.get_all_ids(habits_info)\n # A list with all habit identifiers in the trackings table\n ids_trackings_table = self.analytics.get_all_ids(trackings)\n\n while True:\n print('')\n id_n = pyip.inputNum(\"\"\"\n Write the ID of the habit you want to check :\n \"\"\")\n if id_n == 0:\n # back to the main menu\n self.run()\n elif id_n in ids_habits_table:\n if id_n in ids_trackings_table:\n # Select all rows belonging to the given habit id from the join of\n # habits table and trackings table\n one_habit_trackings_info = self.analytics.select_rows(\n habits_trackings, 0, id_n)\n # Gets the periodicity of the selected habit\n periodicity = one_habit_trackings_info[0][2]\n if len(one_habit_trackings_info) >= 1:\n # Clean up the console\n self.clear_console()\n\n print(\n \"\"\"\n ___________________________________\n - {} -\n ___________________________________\n Motivation: {}\n Description: {}\n Periodicity: {}\n -----------------------------------\n\n First tracking: {}\n \"\"\".format(one_habit_trackings_info[0][1],\n one_habit_trackings_info[0][3],\n one_habit_trackings_info[0][4],\n periodicity,\n # Gives the date when the first tracking was recorded\n self.analytics.start_habit(one_habit_trackings_info))\n )\n\n if len(one_habit_trackings_info) > 1:\n # A dictionary whose keys are the parts of the day in which\n # the habit was checked and whose values indicate the frecuency\n active_time_dictionary = self.analytics.active_time_dict(\n one_habit_trackings_info)\n # The highest value from the active time dictionary\n 
max_value_active_time = self.analytics.max_value(\n active_time_dictionary)\n # Most frequently part(s) of the day when the habit is checked off.\n most_active_time = self.analytics.most_active_time(\n active_time_dictionary,\n max_value_active_time)\n\n print(\n \"\"\"\n Last day of activity: {}\n\n You are more active during:\n {}\n \"\"\".format(\n # The date of the last tracking\n self.analytics.last_day(one_habit_trackings_info),\n # Parts of the day separated by commas\n self.analytics.display_elements(most_active_time, ', ')\n )\n )\n if periodicity == 'daily':\n print(\n \"\"\"\n Longest streak: {}\n Days of activity: {}\n \"\"\".format(\n # the longest streak of a daily habit\n self.analytics.longest_streak_periodicity(\n one_habit_trackings_info,\n 'daily'),\n # Number of days in which the habit has been checked off\n self.analytics.activity(\n 'daily',\n one_habit_trackings_info))\n )\n elif periodicity == 'weekly':\n print(\n \"\"\"\n Longest streak: {}\n Weeks of activity: {}\n \"\"\".format(\n # the longest streak of a weekly habit\n self.analytics.longest_streak_periodicity(\n one_habit_trackings_info,\n 'weekly'),\n # Number of weeks in which the habit has been checked off\n self.analytics.activity(\n 'weekly',\n one_habit_trackings_info))\n )\n\n if len(habits_info) > 1:\n # Return to the main menu or see another habit\n self.choice_stay_return('See another habit', self.see_habit)\n else:\n # Return to the main menu by selecting the number zero\n self.return_menu()\n\n else:\n # Select the row in the habits table that corresponds to the selected id\n one_habit_info = self.analytics.select_rows(habits_info, 0, id_n)\n # Clean up the console\n self.clear_console()\n print(\n \"\"\"\n ___________________________________\n - {} -\n ___________________________________\n\n You do not have any trackings yet\n Start today with * {} *\n and check it off!\n ___________________________________\n Motivation: {}\n Description: {}\n Periodicity: {}\n\n Creation day: {}\n ___________________________________\n \"\"\".format(one_habit_info[0][1],\n one_habit_info[0][1],\n one_habit_info[0][3],\n one_habit_info[0][4],\n one_habit_info[0][2],\n one_habit_info[0][-1])\n )\n if len(habits_info) > 1:\n # Return to the main menu or see another habit\n self.choice_stay_return('See another habit', self.see_habit)\n else:\n # Return to the main menu by selecting the number zero\n self.return_menu()", "def print_info(name, salary=3500):\n print('Name:', name)\n print('Salary:', salary)\n return", "def list(self, print_fn=None):\n if not print_fn:\n print_fn = print\n for name, count in zip(self.names, self.hashesperid):\n if name:\n print_fn(name + \" (\" + str(count) + \" hashes)\")", "def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return", "def print_user_host(host_list, user, ul):\n \n print('\\nHost-group info pertaining to {0}:'.format(user))\n print('-'.center(80, '-'))\n if len(ul) == 0:\n print('User is not a member of any user-lists.')\n else:\n print('User belongs to the following user-lists:')\n for u in ul:\n print(u)\n print()\n print('User has access to the following host-groups:')\n for hg in 
host_list:\n print(hg)\n print('-'.center(80, '-') + '\\n')\n return", "def print_business(business_object):\n # OLD ----------\n # print('Business name: ' + business_object['name'])\n # print('Address: ' + business_object['address'])\n # print('City: ' + business_object['city'])\n # print('State: ' + business_object['state'])\n # print('Average Ratings: ' + str(business_object['stars']) +\n # ' Review Count: ' + str(business_object['review_count']))\n # print('categories: ' + str(business_object['categories']))\n\n print(business_object['name'])\n print(f'Address: {business_object[\"address\"]}, '\n f'{business_object[\"city\"]}, {business_object[\"state\"]}')\n print('#############################')", "def printthankyou(donorname):\n print(THANK_YOU_LETTER.format(name=donorname, amount=donor_db[donorname][-1]))", "def table_registered_habits(self, title='YOUR HABIT(S)'):\n self.analytics.display_table(\n ('ID', 'HABIT'),\n list(self.analytics.select_columns(\n self.analytics.habits_table(),\n stop=2)),\n title)", "def print_all(self):\n print(\n \"\"\"\\nContents of hash table, with blank lines separating distinct\n linked lists:\"\"\".replace(' ', ''))\n\n for linked_list in self.main_array:\n print(linked_list)\n print('')", "def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])", "def print_state(self):\n print(self.identifier, \n self.gender, \n self.age,\n self.sexual_activity,\n self.disease_status,\n self.time_since_infection,\n self.number_of_partners,\n self.current_partners)", "def print_summary(self):\n #exec(\"print(storyline.{}_clause+', '+storyline.{}_clause.lower()+', '+storyline.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n #exec(\"print(self.{}_clause+', '+self.{}_clause.lower()+', '+self.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n lwr = \".lower()\"\n exec(\"print(\"+str(3*(\"self.{}_clause{}+',', \")).format(\"A\",\"\",\"B\",lwr,\"C\",lwr)+\"'\\b\\b')\")", "def power(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if len(account) == 0:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for name in account:\n a = Account(name, morphene_instance=stm)\n print(\"\\n@%s\" % a.name)\n a.print_info(use_table=True)", "def print_report(report_data):\n\n header = '\\nPROPERTY SUMMARY FOR \"{}\"\\n'.format(report_data['property_name'])\n print('* ' * (len(header) // 2))\n print(header)\n\n print('Property Type:'.ljust(25), report_data['property_type'])\n print('Number of Bedrooms:'.ljust(25), report_data['rooms'])\n print('Number of Bathrooms:'.ljust(25), report_data['bathrooms'])\n\n not_found = ['n/a'] # Print this if nothing found for category\n\n print('\\nAMENITIES:')\n\n for amenity in report_data['general_amenities']:\n print(' * ', amenity)\n\n print('\\nFAMILY AMENITIES:')\n\n for amenity in report_data['family_amenities'] or not_found:\n print(' * ', amenity)\n\n print('\\nSAFETY FEATURES:')\n\n for amenity in report_data['safety_feats'] or not_found:\n print(' * ', amenity)\n\n print('\\n')\n\n return", "def run():\n table = hr.get_hr_table_from_file()\n title_list = [\"ID\", \"Name\", \"BirthYear\"]\n options = [\"View records\",\n \"Add record\",\n \"Remove record\",\n \"Update record\",\n \"Which person is the oldest?\",\n \"Which person is the closet to average age?\"]\n\n\n choice = None\n while choice != \"0\":\n choice = terminal_view.get_choice_inner_menu(options, \"HR manager\")\n if choice == \"1\":\n 
terminal_view.print_table(table, title_list)\n elif choice == \"2\":\n record = terminal_view.get_inputs(title_list[1::],\"Please provide new item data\")\n table = hr.add(table, record)\n elif choice == \"3\":\n id_to_delete_table = terminal_view.get_inputs([\"ID\"],\"Item to delete\")\n id_to_delete = id_to_delete_table[0]\n table = hr.remove(table, id_to_delete)\n elif choice == \"4\":\n records = terminal_view.get_inputs(title_list,\"Edit item\")\n record_id = records[0]\n table = hr.update(table, record_id, records)\n elif choice == \"5\":\n oldest_person = hr.get_oldest_person(table)\n terminal_view.print_result(oldest_person, \"The oldest person: \")\n elif choice == \"6\":\n closest_to_average = hr.get_persons_closest_to_average(table)\n terminal_view.print_result(closest_to_average,\"The closest to average is: \")\n elif choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\")", "def accounts():\n pass", "def print_statistics(self):\n print 'Ran %s iterations in %0.3f seconds\\n' % (\n self.iterations, self.elapsed_time)\n\n print 'Overall Equity'\n for index in range(len(self.holdem_ranges)):\n range_short_form = '%r' % self.holdem_ranges[index]\n print 'P%s) %-15s %0.3f' % (\n index,\n range_short_form,\n float(self.win_stats.get(index, 0))/self.iterations)\n print '\\n'\n print 'Hand distribution for each player'\n for stats in self.player_stats:\n stats.print_report()", "def rrd_out(db):\n stats = basic_stats(db)\n print(\"rp:%d l:%d u:%d\" % (stats['rps'], stats['logins'], stats['users']))", "def display_profile(self):\n statement = f\"\"\"\n ------\n {self.name.upper()}\n ------\n Fee: {self.fee} -/Rs.\n Rating: {self.rating} STARS\n Qualification: {self.qualification}\n Speciality: {self.speciality}\n Language: {self.language}\n Working Hours: {self.working_hrs}\n Contact: {self.contact}\n Location: {self.location}\n \"\"\"\n print(statement)", "def print_individual(individual : Dict[str, str], keys: List[str], individualsDict):\n ind_str = \"\"\n for index, key in enumerate(keys):\n if index != 0:\n ind_str += \", \"\n\n if key == 'name':\n\n #US47 twins special symbol\n twins = {}\n for id, i in individualsDict.items():\n family = i[\"child\"]\n birthday = i[\"birthday\"]\n\n if family+birthday in twins:\n twins[family+birthday] = twins[family+birthday].append(i['id'])\n else:\n twins[family+birthday] = [i['id']]\n\n flatList = []\n for twin_lists in twins.values():\n if len(twin_lists) > 1:\n flatList = flatList + twin_lists\n\n # US44: underline if dead\n if not individual[\"alive\"]:\n ind_str += \"\\u001b[4m\"\n # blue for boy, red for girl\n ind_str += \"\\033[1;34;40m\" if individual[\"gender\"] == \"M\" else \"\\033[1;35;40m\"\n ind_str += f\"name = {individual['name']}\\033[0;37;40m\" # reset color\n ind_str += \"\\u001b[0m\" # reset text decoration\n \n if individual['id'] in flatList:\n ind_str += u'\\1071'\n else:\n ind_str += f\"{key} = {individual[key]}\"\n\n if key == 'birthday':\n ind_str += format_date(individual['birthday'])\n\n print(ind_str)", "def print_hangman(num_of_tries):\n print(HANGMAN_PHOTOS[str(num_of_tries)])", "def main():\n while True:\n employee_id = get_employee_input_int('TEST DATA: Enter employee ID to look up for the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} has a grade = {}, Hence gets {} per hours\\n'\n .format(employee.full_name, employee.grade, 
payscale.salary))\n HR_Options(employee, payscale)\n break", "def display_menu():\n print()\n print(\"Commands:\")\n print(\" quit - Quit\")\n print(\" new - Create new account\")\n print(\" display - Display account information\")\n print(\" deposit - Desposit money\")\n print(\" check - Write a check\")", "def test_hkd(self):\n self.assertTrue('cash_accounts' in self.port_values)\n\n cash_accounts = self.port_values['cash_accounts']\n self.assertEqual(len(cash_accounts), 4) # read in 4 sheets\n\n cash_account = self.extract_cash_account(cash_accounts, 'HKD')\n self.assertNotEqual(cash_account, {})\n \n self.assertEqual(cash_account['account_num'], '012-875-0-053124-1')\n self.assertEqual(cash_account['account_type'], 'Current Account')\n self.assertEqual(cash_account['bank'], 'Bank of China (Hong Kong) Ltd')\n self.assertEqual(cash_account['date'], datetime.datetime(2015,12,10))\n self.assertAlmostEqual(cash_account['balance'], 6536572.95)\n self.assertEqual(cash_account['fx_rate'], 1.0)\n self.assertAlmostEqual(cash_account['local_currency_equivalent'], 6536572.95)", "def print_statements(self):\n self.print_statements_when_needed(self.statements_insert,\n 'Insert statements for')\n self.print_statements_when_needed(self.statements_update,\n 'Update statements for')\n self.print_statements_when_needed(self.statements_delete,\n 'Delete statements for')", "def print_aldb(service):\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)", "def list_viewer(listt):\n\tif len(listt) == 0:\n\t\tprint(\"There are no elements\")\n\t\tprint()\n\telse:\n\t\ti = 0\n\t\tfor dictionary in listt:\n\t\t\ti += 1\n\t\t\tprint(f\"Account #{i} »»\")\n\t\t\tprint(\n\t\t\t\t\"\\tService Name: \", dictionary[\"service\"], \"\\n\",\n\t\t\t\t\"\\tUser Name: \", dictionary[\"user\"], \"\\n\",\n\t\t\t\t\"\\tPassword: \", dictionary[\"password\"], \"\\n\",\n\t\t\t\t)", "def display_imported_players(players_id_list):\r\n for player_id in players_id_list:\r\n print(players_table.get(doc_id=player_id))", "def print_output(blast_results):\n output = ''\n header = ['query ID', 'query length', 'subject ID', 'percent identity',\n 'query coverage']\n line = '\\t'.join(header) + '\\n'\n output += line\n for query in blast_results:\n length = str(query_length(query))\n coverage = query_coverage(query)\n coverage = '{:.2}'.format(coverage)\n identity = '{:.2f}'.format(float(query['% identity']))\n line = '\\t'.join([query['query acc.ver'], length, \n query['subject acc.ver'], identity,\n coverage])\n line += '\\n'\n output += line\n print(output)", "def print_report():\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_data):\n name = donor[\"name\"]\n total = sum(donor[\"donations\"])\n num_gift = len(donor[\"donations\"])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)", "def print_header(banner_name):\n print()\n print()\n print(\"----------------------------------------------------\")\n print(\" {0}\".format(banner_name))\n print(\"-----------------------------------------------------\")\n print()", "def displayHands(p_hand, d_hand):\n os.system('clear') # Call to OS clear the screen 
to clean up output\n print(\"\\nPlayer hand: \", p_hand.showHand())\n print(\"Player score: \", p_hand.handSum())\n\n print(\"\\nDealer hand: \", d_hand.showHand())\n print(\"Dealer score: \", d_hand.handSum())", "def get_holdings(self,account=None, verbose=False):\n \n # Imply account\n if account == None:\n account = self.params['account']\n account = int(account)\n \n # Assemble URL\n url = self.endpoints['base'] +\\\n 'accounts/' +\\\n str(account) +\\\n '/holdings.json'\n \n # Create auth\n session = requests.Session()\n auth = self.create_auth()\n req = requests.Request('GET',url,auth=auth).prepare()\n \n # Send Request\n self.holdings = session.send(req).json()\\\n ['response']['accountholdings']\n \n # Get accounts (necessary?)\n if self.accounts == []:\n self.get_accounts()\n \n return self.holdings", "def print_hands(self):\n # Clear the terminal and reprint round header\n os.system(\"clear\")\n self.print_header\n\n # Only display one of the dealers cards if they are still playing\n if not self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n print(\"UNKNOWN\")\n for card in self.dealer.cards:\n if card != self.dealer.cards[0]:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\"*25)\n print(\"TOTAL = ?\")\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n\n # Display the players cards and all of the dealers cards\n elif self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n for card in self.dealer.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.dealer.sum_cards()))\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n pass", "def odu100_get_aclconfigtable(host_id):\n try:\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n odu100_profile_id = []\n odu100_acl_conf_table = []\n odu100_profile_id = sqlalche_obj.session.query(\n Hosts.config_profile_id).filter(Hosts.host_id == host_id).one()\n if odu100_profile_id == None or odu100_profile_id == []:\n return {\"success\": 1, \"result\": \"\", \"detail\": \"\"}\n else:\n odu100_acl_conf_table = sqlalche_obj.session.query(Odu100RaAclConfigTable.aclIndex, Odu100RaAclConfigTable.macaddress, Odu100RaAclConfigTable.odu100_raAclConfigTable_id, Odu100RaAclConfigTable.raIndex).filter(\n Odu100RaAclConfigTable.config_profile_id == odu100_profile_id[0]).order_by(Odu100RaAclConfigTable.aclIndex).all()\n sqlalche_obj.sql_alchemy_db_connection_close()\n if odu100_acl_conf_table == None or odu100_acl_conf_table == [] or len(odu100_acl_conf_table) == 0:\n return {\"success\": 1, \"result\": [], \"detail\": \"\"}\n else:\n return {\"success\": 0, \"result\": odu100_acl_conf_table, \"detail\": \"\"}\n\n except ProgrammingError as e:\n return {\"success\": 1, \"result\": \"Some Programming Error Occurs\", \"detail\": \"\"}\n except AttributeError as e:\n return {\"success\": 1, \"result\": \"Some Attribute Error Occurs\", \"detail\": \"\"}\n except OperationalError as e:\n return {\"success\": 1, \"result\": \"Some Operational Error Occurs\", \"detail\": \"\"}\n except TimeoutError as e:\n return {\"success\": 1, \"result\": \"Timeout Error Occurs\", \"detail\": \"\"}\n except 
NameError as e:\n return {\"success\": 1, \"result\": \"Some Name Error Occurs\", \"detail\": \"\"}\n except UnboundExecutionError as e:\n return {\"success\": 1, \"result\": \"Unbound Execution Error Occurs\", \"detail\": \"\"}\n except DatabaseError as e:\n return {\"success\": 1, \"result\": \"Database Error Occurs,Contact Your Administrator\", \"detail\": \"\"}\n except DisconnectionError as e:\n return {\"success\": 1, \"result\": \"Database Disconnected\", \"detail\": \"\"}\n except NoResultFound as e:\n return {\"success\": 1, \"result\": \"No result Found For this opeartion\", \"detail\": \"\"}\n except UnmappedInstanceError as e:\n return {\"success\": 1, \"result\": \"Some Unmapped instance error\", \"detail\": \"\"}\n except NoReferenceError as e:\n return {\"success\": 1, \"result\": \"No reference Exists\", \"detail\": \"\"}\n except SAWarning as e:\n return {\"success\": 1, \"result\": \"Warning Occurs\", \"detail\": \"\"}\n except Exception as e:\n return {\"success\": 1, \"result\": \"Operation Failed,contact Administrator\", \"detail\": \"\"}\n\n finally:\n sqlalche_obj.sql_alchemy_db_connection_close()", "def create_report(self):\n # Base setup\n line_out = ''\n line_out += \"{:<15} | {:^15} | {:^30}\\n\".format(\"Name\", \"Donations\", \"Email\")\n line_out += (\"-\"*65)\n print(line_out)\n\n # Setup line format to recieve ordered donor info \n for name in self.all_donors:\n line = \"{:<15} | {:^15} | {:^30}\".format(name, self.r.hget(name, 'donations'), self.r.hget(name, 'email'))\n print(line)", "def print_user(self, user):\n status = \"active\"\n token = user.token\n\n if token in [\"finished\", \"revoked\"]:\n status = token\n\n if token is None:\n token = \"\"\n\n subid = \"%s\\t%s[%s]\" % (user.id, token, status)\n print(subid)\n return subid", "def print_tree(account, level=0):\r\n \"\"\" In the example output below, \"GE\" is the root account, \"Jet Engines\"\r\n and \"Appliances\" are first-degree ChildAccounts, and \"DoD Contracts\"\r\n and \"Washing Machines\" are second-degree ChildAccounts.\r\n\r\n > print_tree(general_electric)\r\n GE (Manufacturing, R&D): Daniel Testperson\r\n Jet Engines (Manufacturing, R&D, Aerospace): Daniel Testperson\r\n DoD Contracts (Defense, R&D, Aerospace): William Testperson\r\n Appliances (Manufacturing, Consumer Goods): Janet Testperson\r\n Washing Machines (Consumer Goods): Janet Testperson\r\n \"\"\"\r\n markets_output = \"\"\r\n # work a little magic to properly format the names of the market segments\r\n # specifically strip off the leading and trailing quotes and add a\r\n # separating comma\r\n for market in account.get_market_segments():\r\n markets_output += market.name.strip(\"\\'\") + \", \"\r\n markets_output = markets_output.strip(\"\\'\")\r\n\r\n # print a row to console\r\n print(\"{arrow}> {ac_name} ({markets}): {rep}\"\r\n .format(arrow=2*level*\"-\",\r\n ac_name=account.name,\r\n markets=markets_output[:-2],\r\n rep=account.get_sales_rep()))\r\n\r\n # recursively call print on the children (if any) Base Case: no children\r\n for child in account.get_children():\r\n print_tree(child, level=level+1)", "def CombatRoll(self):\t\t\n\t\tprint(self.name.Title() + \"CombatRoll\")", "def START(db, *args):\r\n \r\n import argparse\r\n AP = argparse.ArgumentParser('ffxi-tools ' + MODULE_NAME,\r\n description='Lists accounts')\r\n \r\n args = AP.parse_args(args=args)\r\n\r\n logins = db.get('select id, login from accounts order by login', ('accid', 'login'));\r\n\r\n if logins:\r\n print 'accid login'\r\n print '----- 
-----'\r\n for login in logins:\r\n print '%-16s %s' % (login['accid'], login['login'])\r\n else:\r\n print 'No accounts found'", "def full_table():\n #oen the the file\n list_of_current_account_objects = []\n opened_file = open('customers.txt')\n opened_file.readline()\n for line in opened_file: #get a list of all the customers accounts as objects\n line_array = line.split(\",\")\n customer = Account((line_array[0]+\" \"+line_array[1]),line_array[2],line_array[4])\n list_of_current_account_objects.append(customer)\n #update the savings & current variables for all accounts.\n for i in list_of_current_account_objects:\n i.set_sav_bal(account_bal(i,\"savings\"))\n i.set_cur_bal(account_bal(i,\"current\"))\n\n #print the answer\n print(\"customer customer account number-avings balance-current balance\")\n for i in list_of_current_account_objects:\n print(i.get_name()+\"---\"+i.get_acc_num()+\"---\"+str(i.get_sav_bal())+\"---\"+str(i.get_cur_bal()))\n print()", "def simple_banking_management_oop():\n bank = Bank('My_bank') # Initiate bank\n\n # Create users, choose between private and company, return user directly if needed\n ricky = bank.register_user('private', 'Ricky', 'Wysocki', 222222)\n bank.register_user('company', 'E_will_inc', 666666)\n bank.register_user('private', 'Paul', 'Mcbeth', 111111)\n bank.register_user('private', 'Page', 'Pierce', 121212)\n bank.register_user('private', 'Super', 'Man', 123456)\n bank.register_user('private', 'Ricky', 'Wysocki', 221122)\n\n # Search for user no match -> returns no match\n user = bank.search_user('Rikki', 'Whysolucky', 222222)\n print(user)\n\n # Search for user more than one match -> returns prompt to specify search and details about results\n user = bank.search_user('Ricky', 'Wysocki')\n print(user)\n\n # Search for user one match -> Returns user object\n user = bank.search_user('E_will_inc')\n print(user)\n\n # Same search works with different args for both private and company -> return user\n company_user = bank.search_user(666666)\n print(company_user)\n\n # Register an account, specify which type -> None\n ricky.register_account('savings')\n ricky.register_account('salary')\n\n # Deposit to specified account or access directly from account\n ricky.deposit('savings', 100)\n ricky.accounts['savings'].deposit(100)\n ricky.deposit('salary', 20)\n\n # Make a withdrawal if sufficient funds\n ricky.withdraw('savings', 50)\n\n # Prints an exception with explanation\n ricky.withdraw('salary', 30)\n ricky.accounts['salary'].withdraw(30)", "def print_response(response):\n for report in response.get('reports', []):\n rows = report.get('data', {}).get('rows', [])\n for row in rows:\n print(row)", "def main():\n user_answer = prompt_user_what_to_do_next()\n while 'q' != user_answer:\n list_of_all_accounts_known = ATMBankAccount.read_in_account_numbers_and_balances()\n if '1' == user_answer:\n starting_account_balance_ammount = prompt_user_for_starting_balance()\n create_an_account_for_user(list_of_all_accounts_known, int(starting_account_balance_ammount))\n elif '2' == user_answer:\n print_out_account_balances(list_of_all_accounts_known)\n elif '3' == user_answer:\n user_to_account_deposit = prompt_user_account_to_deposit()\n user_money_to_deposit = prompt_user_money_to_deposit()\n ATMBankAccount.deposit_to_account(list_of_all_accounts_known, user_to_account_deposit, user_money_to_deposit)\n print_out_account_balances(list_of_all_accounts_known)\n elif '4' == user_answer:\n user_to_account_withdrawl = prompt_user_to_withdrawl()\n 
user_money_to_withdrawl = prompt_user_money_to_withdrawl()\n ATMBankAccount.withdrawl_fund_from_account(list_of_all_accounts_known, user_to_account_withdrawl, user_money_to_withdrawl)\n print_out_account_balances(list_of_all_accounts_known)\n elif '5' == user_answer:\n user_account_to_get_interest = prompt_user_account_to_get_interest()\n ATMBankAccount.calculate_half_percent_interest_on_account(list_of_all_accounts_known, user_account_to_get_interest)\n print_out_account_balances(list_of_all_accounts_known)\n user_answer = prompt_user_what_to_do_next()\n break\n ATMBankAccount.write_out_account_numbers_and_balances(list_of_all_accounts_known)", "def __display_login_info(self):\n print(f'\\nYour card has been created\\n'\n f'Your card number:\\n'\n # f'{self.__card_display()}\\n' # uncomment this line and comment out line below for pretty display\n f'{self.card_number}\\n'\n f'Your card PIN:\\n'\n f'{self.__account_pin}\\n', )", "def print_sheet(score_sheet, total_score):\n print('-------------------------\\nYahtzee Score Sheet\\n-------------------------')\n name_list = ['Player', '1. Aces', '2. 2s', '3. 3s', '4. 4s', '5. 5s', '6. 6s', '7. 3 of a kind', '8. 4 of '\n 'a kind', '9. Full House', '10. Small Straight', '11. Large Straight', '12. Yahtzee', '13. Chance']\n temp_list = [\"\" if score == EMPTY_BOX() else score for score in score_sheet]\n for i in range(len(score_sheet)):\n print(f'{name_list[i]}:{temp_list[i]}')\n if EMPTY_BOX() not in score_sheet[slice(UPPER_START(), UPPER_STOP()+1)]:\n if sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)]) >= UPPER_BONUS_REQUIREMENT():\n print(f'Sum of Upper Section:{sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)])}\\nBonus:35')\n elif sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)]) < UPPER_BONUS_REQUIREMENT():\n print(f'Sum of Upper Section:{sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)])}\\nBonus:0')\n else:\n print('Sum of Upper Section:\\nBonus:')\n if EMPTY_BOX() not in score_sheet[slice(LOWER_START(), LOWER_STOP()+1)]:\n print(f'Sum of Lower Section:{sum(score_sheet[slice(LOWER_START(), LOWER_STOP()+1+1)])}')\n else:\n print('Sum of Lower Section:')\n if EMPTY_BOX() not in score_sheet:\n print(f'Total Score:{total_score}\\n-------------------------')\n else:\n print(f'Total Score:\\n-------------------------')", "def show_emp_bookings(self):\n try:\n emp_id = int(input(\"Enter Employee Id: \"))\n bookings = self.admin_repository.show_emp_bookings(emp_id)\n if bookings:\n for booking in bookings:\n print(\"Booking Id : {}\".format(booking[5]))\n print(\"Date : {}\".format(booking[0]))\n print(\"Pick up time : {}\".format(booking[1]))\n print(\"Cab_Number : {}\".format(booking[2]))\n print(\"Pick up location: {}\".format(booking[3]))\n print(\"Destination : {}\".format(booking[4]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n\n except Exception as e:\n print(\"Some Error occurred.\")\n return False", "def display_list(the_list):\n print(\"\\n===================================\")\n for person in the_list:\n print(\"{name:12s}\\t\\t{phone}\".format(name=person.name, phone=person.phone))\n if the_list == []:\n print(\"\\nNo entries found!\\n\")\n print(\"===================================\\n\")", "def hall_only(restaurant_only):\n hall = Hall(restaurant_only, max=50)\n return hall", "def check_hours():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n 
print(f\"{business_object['name']} hours are: \"\n f\"{business_object['hours']}\")", "def find_by_account_id(auth_account_id: str, page: int, limit: int):\n current_app.logger.debug(f'<search_purchase_history {auth_account_id}')\n statements, total = StatementModel.find_all_statements_for_account(auth_account_id, page, limit)\n\n statements_schema = StatementModelSchema()\n data = {\n 'total': total,\n 'page': page,\n 'limit': limit,\n 'items': statements_schema.dump(statements, many=True)\n }\n current_app.logger.debug('>statements_find_by_account_id')\n return data", "def print_all():\n print(\"1. Number of games is:\\n\\n\", count_games(\"game_stat.txt\"), \"\\n\")\n print(\"2. There is a game from a given year on the list:\\n\\n\", decide(\"game_stat.txt\", 2004), \"\\n\")\n print(\"3. The latest game on the list is:\\n\\n\", get_latest(\"game_stat.txt\"), \"\\n\")\n print(\"4. Number of games from the list in the given genre is:\\n\\n\", count_by_genre(\"game_stat.txt\", \"rpg\"), \"\\n\")\n print(\"5. List spot of the given game is:\\n\\n\", get_line_number_by_title(\"game_stat.txt\", \"counter-strike\"), \"\\n\")\n print(\"6. Alphabetically sorted games:\\n\\n\", \"\\n \".join(sort_abc(\"game_stat.txt\")), \"\\n\")\n print(\"7. List of genres:\\n\\n\", \"\\n \".join(get_genres(\"game_stat.txt\")), \"\\n\")\n print(\"8. Top sold FPP game premiered in:\\n\\n\", when_was_top_sold_fps(\"game_stat.txt\"), \"\\n\")", "def describe_user(self):\r\n print('\\nFirst Name: ' + self.first_name.title(), end='\\n',)\r\n print('Last Name: ' + self.last_name.title(), end='\\n')\r\n print('Address: ' + self.address.title(), end='\\n',)\r\n print('State: ' + self.state.title(), end='\\n',)\r\n print('Country: ' + self.country.title())" ]
[ "0.5709103", "0.5544263", "0.5517013", "0.5512406", "0.539924", "0.53783804", "0.534881", "0.53042066", "0.5273528", "0.5243377", "0.5196611", "0.5160384", "0.51566947", "0.5139505", "0.5105222", "0.5089654", "0.50697607", "0.50514734", "0.50506085", "0.5023857", "0.5022506", "0.49886104", "0.49775937", "0.4953652", "0.49419856", "0.493483", "0.49311975", "0.4913434", "0.49121898", "0.49002534", "0.48969662", "0.4896658", "0.48825583", "0.48644447", "0.48551667", "0.48493218", "0.4837219", "0.48282003", "0.4825614", "0.4817515", "0.48144293", "0.4803025", "0.47980517", "0.47959578", "0.47852957", "0.47628406", "0.47610602", "0.47542366", "0.474026", "0.47306556", "0.47271994", "0.47208992", "0.47200596", "0.4708246", "0.47068575", "0.4703391", "0.46918446", "0.46911308", "0.4680103", "0.4675185", "0.46712095", "0.4660471", "0.4625863", "0.46247894", "0.46246964", "0.46127722", "0.46054018", "0.46021682", "0.46009263", "0.45907626", "0.4586363", "0.4581697", "0.45790473", "0.45764524", "0.4573135", "0.45633778", "0.4563092", "0.45615372", "0.45609587", "0.45608208", "0.45604274", "0.45542416", "0.45509586", "0.45456508", "0.45387492", "0.45364425", "0.453413", "0.4533295", "0.4531376", "0.45179817", "0.4517091", "0.45155898", "0.45130444", "0.4512101", "0.45087385", "0.45058593", "0.45031556", "0.44969228", "0.44900495", "0.44853526" ]
0.7365047
0
Print salary and payment cheques with worker_ID. Print hall payment cheques with hall_ID
def issue_cheque(name, amount): pdf = FPDF('P', 'mm', 'A4') pdf.add_page('P') pdf.set_font('Times', 'B', 14) pdf.multi_cell(0, 5, 'Cheque Payment System') pdf.ln() pdf.multi_cell(0, 5, ('Pay: %s' % name)) pdf.ln() pdf.multi_cell(0, 5, ('The amount of: Rs. %s' % str(amount))) pdf.ln() pdf.multi_cell(0, 5, ('Issued on: %s' % str(time.strftime("%d/%m/%Y")))) pdf.ln() # Write generated output file to PDF pdf.output(('cheque_%s.pdf' % name), 'F')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_salary_list(Hall):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, ('Hall Salary List: Hall %s' % Hall.hall_ID))\n pdf.ln()\n\n worker_list = dbr.rebuild(\"worker\")\n title = \"Role\"\n wage = 0\n for key in worker_list:\n if worker_list[key].hall_ID == Hall.hall_ID:\n if isinstance(worker_list[key], mess_manager.MessManager):\n title = \"Mess Manager\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], clerk.Clerk):\n title = \"Clerk\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], attendant.Attendant):\n title = \"Attendant\"\n wage = worker_list[key].daily_wage\n\n pdf.multi_cell(0, 5, ('%s: %s (%s) - Rs. %s' % (worker_list[key].worker_ID,\n worker_list[key].name, title, wage)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('hall_salary_%s.pdf' % Hall.hall_ID), 'F')", "def print_statement(Hall):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, ('Hall Account Statement for Hall: %s' % Hall.name))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Mess Account: %s' % Hall.mess_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Salary Account: %s' % Hall.salary_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Repair Account: %s' % Hall.repair_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Rent Account: %s' % Hall.rent_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Others Account: %s' % Hall.others_account))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('hall_statement_%s.pdf' % Hall.hall_ID), 'F')", "def payroll_calculation():\n\n name = search_employee()\n if name == None:\n return\n accrual_month = month('Accrual month: ')\n accrual_year = year('Accrual year: ')\n accrual = f'{accrual_month}-{accrual_year}'\n salary_value = month_salary()\n salary_base = salary(salary_value)\n overtime = value_input('Overtime: ')\n absences = value_input('Absences: ')\n late = value_input('Late: ')\n bonus = value_input('Bonus: ')\n\n hourly_wage = round(salary_value / 220, 2)\n overtime_value = round(float(hourly_wage * 1.5), 2)\n overtime_total = round(overtime_value * overtime, 2)\n daily_wage = round(salary_value / 30, 2)\n absences_value = round(daily_wage * absences, 2)\n late_value = round(daily_wage * late / 60, 2)\n inss_value = inss(salary_base, overtime_total)\n irrf_value = irrf(salary_base, overtime_total, inss_value, bonus)\n sleep(2)\n\n\n\n header('EARNINGS')\n print(f'Salary: {salary_base}')\n print(f'Bonus: {bonus}')\n print(f'Overtime: {overtime_total }')\n earnings_total = round(salary_base + overtime_total + bonus, 2)\n sleep(2)\n\n print(line())\n print(f'Earnings total: {earnings_total}')\n print(line())\n sleep(2)\n\n header('DISCOUNTS')\n\n transportation_vouchers = round(salary_base * 6 / 100, 2)\n health_care = round(salary_base * 2 / 100, 2)\n dental_care = round(salary_base * 0.5 / 100, 2)\n meal_ticket = round(salary_base * 1 / 100, 2)\n\n print(f'absences: {absences_value}')\n print(f'late: {late_value}')\n print(f'transportation_vouchers: {transportation_vouchers}')\n print(f'health_care: {health_care}')\n print(f'dental_care: {dental_care}')\n print(f'meal_ticket: {meal_ticket}')\n print(f'inss_value: {inss_value}')\n print(f'irrf_value: {irrf_value}')\n\n discounts_total = round(absences_value + late_value + transportation_vouchers + health_care +\n dental_care + meal_ticket + inss_value + irrf_value, 2)\n\n print(line())\n print(f'Discounts_total : 
{discounts_total }')\n print(line())\n liquid_salary = round(earnings_total - discounts_total, 2)\n print(f'Liquid_salary: {liquid_salary} ')\n print(line())\n\n conn = sqlite3.connect('data/people_management.db')\n cursor = conn.cursor()\n cursor.execute(f\"\"\"\n INSERT INTO salary (name, salary ,bonus, overtime, absences_value, late_value, \n t_vouchers, health_care, dental_care, meal_ticket, inss, irrf, \n earnings, discounts, liquid_salary, accrual)\n VALUES ('{name}', '{salary_base}' ,'{bonus}', '{overtime_total}', '{absences_value}', \n '{late_value}', '{transportation_vouchers}', '{health_care}', '{dental_care}', \n '{meal_ticket}', '{inss_value}', '{irrf_value}', '{earnings_total}', '{discounts_total}', \n '{liquid_salary}', '{accrual}')\n \"\"\")\n conn.commit()\n conn.close()", "def hire_worker(self):\n\t\tfor i in range(self.num_worker):\n\t\t\ttrader = copy.deepcopy(Trader(self.period_days, self.difference_rate, self.stock_folder_path, self.option_folder_path, self.roe_ttm))\n\t\t\tprint ('worker {}'.format(i))\n\t\t\tself.workers.append(trader)", "def printSummary(self):\n\t\tweekWorkHours = None\n\t\tdayDelta = None\n\t\tfor num in self.workdays:\n\t\t\tday = self.workdays[num]\n\t\t\tif day.daytype == DayType.weekend:\n\t\t\t\tif weekWorkHours:\n\t\t\t\t\thours = weekWorkHours.total_seconds() // 3600\n\t\t\t\t\tmins = weekWorkHours.seconds // 60 % 60\n\t\t\t\t\tprinty('------{}hrs-----'.format(hours), 'y')\n\t\t\t\t\tweekWorkHours = None\n\t\t\t\t\tdayDelta = None\n\t\t\t\tprinty('{:02d}. (WE)'.format(num), 'w')\n\t\t\telif day.daytype == DayType.holiday:\n\t\t\t\tprinty('{:02d}. (Urlaub)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.illness:\n\t\t\t\tprinty('{:02d}. (Krank)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.overtime_free:\n\t\t\t\tprinty('{:02d}. (Überstundenausgleich)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.business_trip:\n\t\t\t\tprinty('{:02d}. (Dienstreise)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.work:\n\t\t\t\tdayDelta = day.getWorkingTime()\n\t\t\t\tworkhours = dayDelta.seconds // 3600\n\t\t\t\tworkrestminutes = dayDelta.seconds // 60 % 60\n\t\t\t\tabsday = datetime.strptime('{}.{}.{}'.format(num, self.monthNum, self.year),'%d.%m.%Y')\n\t\t\t\ttoday = datetime.today()\n\t\t\t\tpauseDelta = day.getPauseTime()\n\t\t\t\tpausehours = pauseDelta.seconds // 3600\n\t\t\t\tpauserestminutes = pauseDelta.seconds // 60 % 60\n\t\t\t\tif absday == today:\n\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'wH')\n\t\t\t\telif absday > today:\n\t\t\t\t\t# future days\n\t\t\t\t\tif len(day.timeblocks) == 0:\n\t\t\t\t\t\tprinty('{:02d}. ?'.format(num), 'g')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'g')\n\t\t\t\telse:\n\t\t\t\t\t# past days\n\t\t\t\t\tif dayDelta > timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n>')\n\t\t\t\t\telif dayDelta < timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'r>')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. 
{}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n')\n\t\t\tif weekWorkHours == None:\n\t\t\t\tweekWorkHours = dayDelta\n\t\t\telse:\n\t\t\t\tif dayDelta:\n\t\t\t\t\tweekWorkHours = weekWorkHours + dayDelta", "def get_wo_mthly_smry(self, workorder_browse):\n wo_summary_data = []\n wo_check_dict = {}\n no = 0\n if workorder_browse:\n for work_rec in workorder_browse:\n if work_rec.state and work_rec.state == \"done\":\n no += 1\n identification = \"\"\n repair_line_data = \"\"\n if work_rec.vehicle_id:\n identification += work_rec.vehicle_id.name\n if work_rec.vehicle_id.f_brand_id:\n identification += \" \" + work_rec.vehicle_id.f_brand_id.name\n if work_rec.vehicle_id.model_id:\n identification += \" \" + work_rec.vehicle_id.model_id.name\n for repaire_line in work_rec.repair_line_ids:\n if repaire_line.complete is True:\n if (\n repaire_line.repair_type_id\n and repaire_line.repair_type_id.name\n ):\n repair_line_data += (\n repaire_line.repair_type_id.name + \", \"\n )\n if work_rec.parts_ids:\n for parts_line in work_rec.parts_ids:\n if work_rec.id in wo_check_dict.keys():\n parts_data = {\n \"no\": -1,\n \"location\": \"\",\n \"type\": \"\",\n \"wo\": \"\",\n \"identification\": \"\",\n \"vin\": \"\",\n \"plate_no\": \"\",\n \"work_performed\": \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n wo_check_dict[work_rec.id] = work_rec.id\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n \"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n \"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"vehicle_make\": \"\",\n \"qty\": \"\",\n \"uom\": \"\",\n }\n wo_summary_data.append(parts_data)\n if not wo_summary_data:\n msg = _(\n \"Warning! 
\\n\\\n No data Available for selected work order.\"\n )\n raise UserError(msg)\n return wo_summary_data", "def print_report(self):\n assert len(self) == 1, 'This option should only be used for a single id at a time.'\n datas = {\n 'form': \n {\n 'company_id': self.company_id and [self.company_id.id] or [],\n 'warehouse_ids': [y.id for y in self.warehouse_ids],\n 'start_date': self.start_date,\n 'end_date': self.end_date,\n 'include_zero': self.include_zero,\n 'sort_order': self.sort_order,\n 'value': self.value,\n 'id': self.id,\n }\n }\n\n if [y.id for y in self.warehouse_ids] and (not self.company_id):\n self.warehouse_ids = []\n raise Warning(_('Please select company of those warehouses to get correct view.\\nYou should remove all warehouses first from selection field.'))\n return self.env.ref(\n 'most_selling_product.action_ir_most_selling_product'\n ).report_action(self, data=datas)", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def print_people_strategies():\n\t\tfor person in sorted(Simulation.community):\n\t\t\tSimulation.community[person].print_info()\n\t\tPerson.person_progression.write(\"--------------- END OF WEEK ---------------\" + \"\\n\")", "def report_printing(cls):\n while True:\n print('Donor Name' + ' ' * 16 + '| Total Given | Num Gifts | Average Gift')\n print('-' * 66)\n print(donor_db.create_report())\n print('Returning to main menu...\\n')\n return", "def run_payroll(self):\n self.employee_id()\n self.classification()\n self.employee_data()\n self.paymethod()\n pay_logfile = \"paylog.txt\"\n if os.path.exists(pay_logfile):\n os.remove(pay_logfile)\n self.issue_payment()", "def warehouse_print(warehouse_list):\n for warehouse_item in warehouse_list:\n warehouse_item_print(warehouse_item)", "def print_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n datas = {'ids': context.get('active_ids', [])}\n\n res = self.read(cr, uid, ids, context=context)\n res = res and res[0] or {}\n datas.update({'form': res})\n return self.pool['report'].get_action(cr, uid, ids, \n 'l10n_cl_hr_payroll.report_hrsalarybymonth', \n data=datas, context=context)", "def PrintSummary(self, dollarsPerKiloWattHour = 0.1149, dollarsPerDTH = 6.53535):\n\t\tprint()\n\t\tprint(\" RESULTS \")\n\t\tprint()\n\t\tprint(\"The Number of times the furnace turns on: \" + str(self.building_hvac.NumberOfTimesHeatingTurnedOn))\n\t\tprint(\"The Number of times the AC turns on: \" + str(self.building_hvac.NumberOfTimesCoolingTurnedOn))\n\t\tprint(\"The Current Temperature: \" + str(self.current_temperature) + \"C\")\n\t\tprint(\"The total Electrical power used: \" + str(self.building_hvac.GetElectricKilowattHours()) + \"KWH\")\n\t\tprint(\"The total Time: \" + str(self.building_hvac.TotalTimeInSeconds))\n\t\tprint(\"The total Time Heating was on: \" + str(self.building_hvac.TotalDurationHeatingOn))\n\t\tprint(\"The total Time Cooling was on: \" + str(self.building_hvac.TotalDurationCoolingOn))\n\t\tprint(\"The Total Gas Energy Used: \" + str(self.building_hvac.GetGasDTH()) + \" DTH\")\n\t\tprint(\"Electrical Cost: $\" + str(self.CalculateElectricEneregyCost()))\n\t\tprint(\"Gas Cost: $\" + str(self.CalculateGasEneregyCost()))", "def payment_report_gen(sid, bid, day):\n results = check_payment(sid, bid, day)\n if not results:\n print(\"Cannot Find a Transaction with sid: \" + str(sid) + \" bid: \" + str(bid) + \" day: \" + str(day))\n general_data = [str(value) for key, value in results[0].items()]\n order_title = [\"Renter Id\", 
\"Boat Id\", \"Date\", \"Price\"]\n row_title = [\"Pay Date\", \"Amount\"]\n order_format = \"{:>20}\"\n for i in range(len(order_title)):\n print(order_format.format(order_title[i] + \": \" + general_data[i]), end=\"\")\n print(\"\")\n print(\"----------------------------------------------------------------------------------\")\n if len(results) > 1:\n row_format = \"{:>20}\" * len(results[1])\n print(row_format.format(*row_title))\n print(\"----------------------------------------------------------------------------------\")\n for result in results[1:]:\n print(row_format.format(*[str(value) for key, value in result.items()]))", "def print_receipt(Student):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, 'Student Dues Payment Receipt')\n pdf.ln()\n pdf.multi_cell(0, 5, ('Student ID: %s' % Student.student_ID))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Name: %s' % Student.name))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Mess Fees: %s' % Student.mess_charge))\n pdf.ln()\n\n if Student.room_type == \"S\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"single_room_rent\")[0]\n elif Student.room_type == \"D\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"double_room_rent\")[0]\n\n pdf.multi_cell(0, 5, ('Room Rent: %s' % room_rent))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Amenities Charge: %s' % str(db.get(\"hall\", Student.hall_ID, \"amenities_charge\")[0])))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Total Amount Paid: %s' % str(Student.total_dues)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('receipt_%s.pdf' % Student.hall_ID), 'F')", "def print_table(ledger):\n\n table = PrettyTable() # defines a PrettyTable object\n\n table.field_names = [\n \"hospital\",\n \"patient\",\n \"status\",\n \"nonce\",\n \"prev_hash\",\n \"a\",\n \"b\",\n \"c\",\n \"current_hash\",\n ] # define field names for table\n\n for block in ledger:\n table.add_row(\n [\n block[\"hospital\"],\n block[\"patient\"],\n block[\"status\"],\n block[\"nonce\"],\n block[\"prev_hash\"],\n block[\"a\"],\n block[\"b\"],\n block[\"c\"],\n block[\"current_hash\"],\n ]\n ) # add data to table\n\n print(\"\\n\\n\" + color.BOLD + \"Printing Your Ledger:\" + color.END)\n print(table) # print prettytable of patient info", "def main():\n # create a list of test employees and managers\n testList = [\n {'type': 'employee', 'firstName': 'Mickey', 'lastName': 'Mouse', 'SSN': '100-12-3456', 'salary': 1500.00},\n {'type': 'manager', 'firstName': 'Walt', 'lastName': 'Disney', 'SSN': '100-00-0000', 'salary': 5000.00,\n 'title': 'Head Of Disneyland', 'yearBonus': 1000.00},\n {'type': 'employee', 'firstName': 'Donald', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 1000.00},\n {'type': 'manager', 'firstName': 'Minnie', 'lastName': 'Mouse', 'SSN': '999-99-999', 'salary': 10000.00,\n 'title': 'Head Of Mouse HouseHold', 'yearBonus': 15000.00},\n {'type': 'manager', 'firstName': 'Daisy', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 12000.00,\n 'title': 'Head Of Duck HouseHold', 'yearBonus': 10000.00}]\n\n # Define percentRaise (0.1 == 10%)\n percentRaise = 0.1\n\n # Create Employees and Managers Object using the Test data\n employeeList = loadEmployees(testList)\n\n # Sort employee List, which will ustilize Employee's __lt__ and __eq__ methods\n employeeList.sort()\n\n # Loop over Employee and Manager Objects\n print(\"Employees and Manager should be sorted by last name, then first\\n\")\n for employee in employeeList:\n if type(employee) == Manager:\n 
print(\"Manager:\")\n else:\n print(\"Employee:\")\n # Print Employee or Manager\n print(employee)\n # Give Raise to Employee or Manager\n employee.giveRaise(percentRaise)\n # Print New Salary\n print(\"With %.2f%% Raise, Salary: $%.2f\\n\" % (percentRaise * 100, employee.salary))\n\n # Employee docStrings\n print(\"\\nEmployee docstring for each method\")\n print(\"Employee.__doc__=\" + Employee.__doc__)\n print(\"Employee.__init__.__doc__=\" + Employee.__init__.__doc__)\n print(\"Employee.giveRaise.__doc__=\" + Employee.giveRaise.__doc__)\n print(\"Employee.__str__.__doc__=\" + Employee.__str__.__doc__)\n print(\"Employee.__eq__.__doc__=\" + Employee.__eq__.__doc__)\n print(\"Employee.__lt__.__doc__=\" + Employee.__lt__.__doc__)\n\n print(\"\\nManger docstring for each method\")\n print(\n \"Since Manager inherits from Employee, several of the methods ('giveRaise', '__eq__' and '__lt__') and the corresponding docstring will originate from the Employee class\\n\")\n print(\"Manager.__doc__=\" + Manager.__doc__)\n print(\"Manager.__init__.__doc__=\" + Manager.__init__.__doc__)\n print(\"Manager.giveRaise.__doc__=\" + Manager.giveRaise.__doc__)\n print(\"Manager.__str__.__doc__=\" + Manager.__str__.__doc__)\n print(\"Manager.__eq__.__doc__=\" + Manager.__eq__.__doc__)\n print(\"Manager.__lt__.__doc__=\" + Manager.__lt__.__doc__)", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def worklog(accountable):\n worklog = accountable.issue_worklog()\n headers = ['author_name', 'comment', 'time_spent']\n if worklog:\n rows = [[v for k, v in sorted(w.items()) if k in headers]\n for w in worklog]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho(\n 'No worklogs found for {}'.format(accountable.issue_key),\n fg='red'\n )", "def report(self):\n print(f\"Money: {self.CURRENCY}{self.profit}\")", "def print_sheet(score_sheet, total_score):\n print('-------------------------\\nYahtzee Score Sheet\\n-------------------------')\n name_list = ['Player', '1. Aces', '2. 2s', '3. 3s', '4. 4s', '5. 5s', '6. 6s', '7. 3 of a kind', '8. 4 of '\n 'a kind', '9. Full House', '10. Small Straight', '11. Large Straight', '12. Yahtzee', '13. 
Chance']\n temp_list = [\"\" if score == EMPTY_BOX() else score for score in score_sheet]\n for i in range(len(score_sheet)):\n print(f'{name_list[i]}:{temp_list[i]}')\n if EMPTY_BOX() not in score_sheet[slice(UPPER_START(), UPPER_STOP()+1)]:\n if sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)]) >= UPPER_BONUS_REQUIREMENT():\n print(f'Sum of Upper Section:{sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)])}\\nBonus:35')\n elif sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)]) < UPPER_BONUS_REQUIREMENT():\n print(f'Sum of Upper Section:{sum(score_sheet[slice(UPPER_START(), UPPER_STOP()+1)])}\\nBonus:0')\n else:\n print('Sum of Upper Section:\\nBonus:')\n if EMPTY_BOX() not in score_sheet[slice(LOWER_START(), LOWER_STOP()+1)]:\n print(f'Sum of Lower Section:{sum(score_sheet[slice(LOWER_START(), LOWER_STOP()+1+1)])}')\n else:\n print('Sum of Lower Section:')\n if EMPTY_BOX() not in score_sheet:\n print(f'Total Score:{total_score}\\n-------------------------')\n else:\n print(f'Total Score:\\n-------------------------')", "def create_report(self):\n # Base setup\n line_out = ''\n line_out += \"{:<15} | {:^15} | {:^30}\\n\".format(\"Name\", \"Donations\", \"Email\")\n line_out += (\"-\"*65)\n print(line_out)\n\n # Setup line format to recieve ordered donor info \n for name in self.all_donors:\n line = \"{:<15} | {:^15} | {:^30}\".format(name, self.r.hget(name, 'donations'), self.r.hget(name, 'email'))\n print(line)", "def create_report():\n names, totals, num_gifts, avg_gift = get_donor_summary(donors)\n print(f\"Donor Name{'':<20} | Total Given{'':>0} | Num Gifts{'':>0} | Average Gift{'':>0}\")\n print(f\"-\" * 72)\n for name, total, num_gift, avg_gift in zip(names, totals, num_gifts, avg_gift):\n print(f\"{name:<32}${total:>11}{num_gift:>12} ${avg_gift:>13}\")\n return None", "def main():\n while True:\n employee_id = get_employee_input_int('TEST DATA: Enter employee ID to look up for the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} has a grade = {}, Hence gets {} per hours\\n'\n .format(employee.full_name, employee.grade, payscale.salary))\n HR_Options(employee, payscale)\n break", "def print_report():\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_data):\n name = donor[\"name\"]\n total = sum(donor[\"donations\"])\n num_gift = len(donor[\"donations\"])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)", "def get_personnel():\r\n if len(off) == 0:\r\n print(\"There are no office workers\")\r\n else:\r\n for i in off:\r\n print(str(i))", "def show_flight_schedule_of_employee(self, staff_ob):\n\n print(\"Continue to pick dates\")\n print(\"\\nB Back\\nC Continue\\n\")\n\n action_str = self.choose_action([\"b\", \"c\"])\n while action_str == False:\n action_str = self.choose_action([\"b\", \"c\"])\n\n if action_str == \"b\":\n return\n\n elif action_str == \"c\":\n \n valid_interval = False\n while valid_interval != True:\n date_from = self.get_date_from()\n while date_from == False:\n date_from = self.get_date_from()\n date_to = self.get_date_to()\n while date_to == False:\n date_to = self.get_date_to()\n valid_interval = self.get_valid_interval(date_from, 
date_to)\n\n flights_on_asked_time = self.llapi.get_employee_schedule_by_date(staff_ob, date_from, date_to)\n \n counter = 1\n if len(flights_on_asked_time) == 0:\n print(f\"\\n{staff_ob.name} has no flights on selected period\")\n\n else:\n print(self.LENGTH_STAR * \"*\")\n print(f\"{staff_ob.name.upper()}'S FLIGHT SCHEDULE\")\n \n for flight_ob in flights_on_asked_time:\n print(flight_ob.print_schedule(counter))\n counter += 1\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def compensate(recruiter, worker_id, email, dollars, sandbox):\n out = Output()\n config = get_config()\n config.load()\n mode = \"sandbox\" if sandbox else \"live\"\n do_notify = email is not None\n no_email_str = \"\" if email else \" NOT\"\n\n with config.override({\"mode\": mode}):\n rec = by_name(recruiter)\n if not click.confirm(\n '\\n\\nYou are about to pay worker \"{}\" ${:.2f} in \"{}\" mode using the \"{}\" recruiter.\\n'\n \"The worker will{} be notified by email. \"\n \"Continue?\".format(worker_id, dollars, mode, recruiter, no_email_str)\n ):\n out.log(\"Aborting...\")\n return\n\n try:\n result = rec.compensate_worker(\n worker_id=worker_id, email=email, dollars=dollars, notify=do_notify\n )\n except Exception as ex:\n out.error(\n \"Compensation failed. The recruiter reports the following error:\\n{}\".format(\n ex\n ),\n )\n return\n\n out.log(\"HIT Details\")\n out.log(tabulate.tabulate(result[\"hit\"].items()), chevrons=False)\n out.log(\"Qualification Details\")\n out.log(tabulate.tabulate(result[\"qualification\"].items()), chevrons=False)\n out.log(\"Worker Notification\")\n out.log(tabulate.tabulate(result[\"email\"].items()), chevrons=False)", "def computeAssignments(solution, h, data, sumW, checkers, hini=None):\n minHours = data[\"minHours\"]\n hours = data[\"hours\"]\n z = solution[\"z\"]\n w = solution[\"w\"]\n\n mustWork = []\n canWork = []\n\n # sort nurses, first by those who work\n sorted_nurses = sorted(range(data[\"nNurses\"]), key=lambda n: solution[\"z\"][n], reverse=True)\n # print(solution[\"z\"])\n # print(sorted_nurses)\n # print(\"\")\n\n # for each nurse\n for n in sorted_nurses:\n\n # canWork Check-------------------------------\n canWork_check = checkIfCanWork_fast(solution, h, n, data, sumW, checkers=checkers, hini=hini)\n if canWork_check:\n\n\n mustWork_check = checkIfMustWork_fast(solution, h, n, data, sumW, True, checkers, hini)\n if mustWork_check:\n mustWork.append(n)\n else: \n canWork.append(n)\n\n # # mustWork Check-------------------------------\n # # avoid repeating canWork_check\n # mustWork_check = checkIfMustWork_fast(solution, h, n, data, sumW, canWork_check, checkers, hini)\n # if mustWork_check:\n # mustWork.append(n)\n # else:\n \n return mustWork, canWork", "def print_analysis_prices(pv, demand,retail,export, param, E,isCommunity=False,hh=None):\n RemainingSOC=E['LevelOfCharge'][-1]\n timestep = param['timestep']\n SelfConsumption = np.sum(E['inv2load']) * timestep # AC\n TotalFromGrid = np.sum(E['grid2load']) * timestep # AC\n TotalToGrid = np.sum(E['inv2grid']) * timestep # AC\n TotalLoad = demand.sum() * timestep # AC\n #TotalBattToLoad = np.sum(E['store2load']) * timestep # AC\n TotalBattToGrid = np.sum(E['store2grid']) * timestep # AC\n TotalPV = pv.sum() * timestep # DC\n TotalBatteryGeneration = np.sum(E['store2inv']) * timestep # DC\n TotalBatteryConsumption = np.sum(E['pv2store']) * timestep # DC\n if 
'inv_losses' in E.keys():\n BatteryLosses=E['batt_losses'].sum()*timestep\n InverterLosses=E['inv_losses'].sum()*timestep\n else:\n BatteryLosses = TotalBatteryConsumption * (1 - param['BatteryEfficiency'])\n InverterLosses = (TotalPV - BatteryLosses-RemainingSOC) * (1 - param['InverterEfficiency'])\n SelfConsumptionRate = SelfConsumption / TotalPV * 100 # in %\n SelfSufficiencyRate = SelfConsumption / TotalLoad * 100\n Bill=((E['grid2load'] * timestep) * retail - (E['inv2grid'] * timestep ) * export).sum()\n Batt_revenue=((E['store2load']*param['InverterEfficiency']*timestep*retail-\n E['pv2store']*param['InverterEfficiency']*timestep*export)).sum()\n \n print ('Total yearly consumption: {:1g} kWh'.format(TotalLoad))\n print ('Total PV production: {:1g} kWh'.format(TotalPV))\n print ('Self Consumption: {:1g} kWh'.format(SelfConsumption))\n print ('Total fed to the grid: {:1g} kWh'.format(TotalToGrid))\n print ('Total bought from the grid: {:1g} kWh'.format(TotalFromGrid))\n print ('Self consumption rate (SCR): {:.3g}%'.format(SelfConsumptionRate))\n print ('Self sufficiency rate (SSR): {:.3g}%'.format(SelfSufficiencyRate))\n print ('Amount of energy provided by the battery: {:1g} kWh'.format(TotalBatteryGeneration))\n print ('Total battery losses: {:1g} kWh, i.e., {:1g}% of the total PV'.format(BatteryLosses,BatteryLosses/TotalPV*100))\n #print('Total energy from battery to the load {:1g} kWh'.format(TotalBattToLoad))\n print('Total energy from battery to the grid {:1g} kWh'.format(TotalBattToGrid))\n #print ('Total inverter losses: {:1g} kWh'.format(InverterLosses))\n #print ('Total inverter losses: {:1g} kWh'.format(InverterLosses))\n print ('Total inverter losses: {:1g} kWh, i.e., {:1g}% of the total PV'.format(InverterLosses,InverterLosses/TotalPV*100))\n \n \n TotalCurtailment=np.sum(E['inv2curt'])*timestep # DC\n print ('Total curtailment : {:1g} kWh'.format(TotalCurtailment)) \n residue = TotalPV + TotalFromGrid - TotalToGrid - BatteryLosses - InverterLosses - TotalLoad - TotalCurtailment - RemainingSOC\n print ('Residue (check): {:1g} kWh'.format(residue))\n PV_check = TotalPV - SelfConsumption - TotalToGrid - BatteryLosses - InverterLosses - TotalCurtailment - RemainingSOC\n print ('PV Residue (check): {:1g} kWh'.format(PV_check))\n \n print(bcolors.WARNING + 'Maximum power injected into the grid is {:1g} kW'.format(E['inv2grid'].max())+bcolors.ENDC)\n print(bcolors.WARNING + 'Maximum power drained from the grid is {:1g} kW'.format(E['grid2load'].max())+bcolors.ENDC)\n print (bcolors.WARNING + 'Total bill: {:1g}\\n\\n'.format(Bill)+bcolors.ENDC)\n print (bcolors.WARNING + 'Total Batt_revenue: {:1g}\\n\\n'.format(Batt_revenue)+bcolors.ENDC)\n \n if isCommunity==False:\n AverageDepth = TotalBatteryGeneration / (365 * param['BatteryCapacity'])\n Nfullcycles = 365 * AverageDepth \n print ('Number of equivalent full cycles per year: {:1g} '.format(Nfullcycles))\n print ('Average Charging/Discharging depth: {:1g}\\n\\n'.format(AverageDepth))\n \n out = { 'SCR': SelfConsumptionRate, # \n 'SSR':SelfSufficiencyRate, # \n 'EFC': Nfullcycles, # \n 'Demand_peak': E['grid2load'].max(), # \n 'Inj_peak': E['inv2grid'].max(), #\n 'avg_dod': AverageDepth, #\n 'bill': Bill,\n 'Batt_revenue':Batt_revenue,\n 'Batt_penetration':param['batt_penetration'],\n 'PV_penetration':param['pv_penetration'],\n 'seed':param['seed'],\n 'hh':hh\n }\n else:\n out = { 'SCR': SelfConsumptionRate, # \n 'SSR':SelfSufficiencyRate, # \n 'EFC': None, # \n 'Demand_peak': E['grid2load'].max(), # \n 'Inj_peak': 
E['inv2grid'].max(), #\n 'avg_dod': None, #\n 'bill': Bill,\n 'Batt_revenue':Batt_revenue,\n 'Batt_penetration':param['batt_penetration'],\n 'PV_penetration':param['pv_penetration'],\n 'seed':param['seed'],\n 'hh':hh\n }\n return out", "def report(self):\n log = self._array.state()\n result = []\n for record in log:\n result.append(f\"{record.worker_name()}\\t${record.task_payment()}\")\n return \"\\n\".join(result)", "def __init__(self, name, hall_ID, password, monthly_salary,\n rebuild=False, worker_ID=None):\n\n # The rebuild flag, if true, denotes that the object is being made from\n # data already present in the database\n # If False, a new data row is added to the specific table\n if not rebuild:\n self.worker_ID = db.add(\"worker\")\n db.update(\"worker\", self.worker_ID, \"worker_type\", \"M\")\n self.password = password\n else:\n self.worker_ID = worker_ID\n self._password = password\n\n self.monthly_salary = monthly_salary\n worker.Worker.__init__(self, self.worker_ID, name, hall_ID)", "def print(self):\n size_bid = len(self.bid)\n size_offer = len(self.offer)\n print(\"Book[%s]: %d bids, %d offers --> mid @ %f\" % (self.security,\n size_bid, size_offer, self.mid()))\n print(\"{0: ^32} | {1: ^32}\".format(\"bid\", \"offer\"))\n print(\"{0:^10},{1:^10},{2:^10} | {3:^10}, {4:^10}, {5:^10}\".format(\n \"count\", \"qty\", \"price\", \"price\", \"qty\", \"count\"))\n\n empty_level = OrderBookLevel(\"-\", \"-\", \"-\")\n for i in range(max(size_bid, size_offer)):\n bid = self.bid[-(i+1)] if i < size_bid else empty_level\n offer = self.offer[i] if i < size_offer else empty_level\n print(\"{0:^10},{1:^10},{2:^10} | {3:^10}, {4:^10}, {5:^10}\".format(\n bid.order_count, bid.qty, bid.price, offer.price, offer.qty, offer.order_count))", "def employee_data(self):\n self.paymethod()\n self.classification()\n for i in self.emp_id:\n if self.clsf[i] == \"Salaried\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][7]]\n elif self.clsf[i] == \"Hourly\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][8],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][8]]\n elif self.clsf[i] == \"Commissioned\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][9],\n self.emp_dict[i][10],self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3],self.emp_dict[i][4],self.emp_dict[i][7],self.emp_dict[i][9]]\n else:\n print(\"Error\")\n print(self.emp_data)\n return self.emp_data", "def print_info(name, salary=3500):\n print('Name:', name)\n print('Salary:', salary)\n return", "def get_shift_report_info_waiter(self, sh_reg_id, is_manager_called=False):\n try:\n staff_id = self.db_handler.get_shift_registration_by_shift_reg_id(sh_reg_id)[2]\n is_supervisor = self.is_staff_supervisor_on_shift(sh_reg_id, staff_id)\n msg = ''\n\n if not is_manager_called:\n if is_supervisor:\n 
msg += f'{emojize(\" :cop:\", use_aliases=True)}Ви були головним на цій зміні!\\n'\n\n check_in, check_out, rating, payment = self.db_handler.get_waiter_personal_info_from_shift_registration(sh_reg_id)\n\n msg += f'{emojize(\" :heavy_plus_sign:\", use_aliases=True)}check-in: {check_in if check_in is not None and check_in !=\"\" else \"Інформація тимчасово відсутня\"}\\n'\\\n f'{emojize(\" :heavy_minus_sign:\", use_aliases=True)}check-out: {check_out if check_out is not None and check_out !=\"\" else \"Інформація тимчасово відсутня\"}\\n' \\\n f'{emojize(\" :hourglass:\", use_aliases=True)}на зміні: {check_out - check_in}\\n'\\\n f'{emojize(\" :chart_with_upwards_trend:\", use_aliases=True)}Рейтинг: {rating if rating is not None and rating !=\"\" else \"Інформація тимчасово відсутня\"}\\n'\\\n f'{emojize(\" :moneybag:\", use_aliases=True)}Нараховано: *{payment if payment is not None and payment !=\"\" else \"Інформація тимчасово відсутня\"}*\\n'\n\n return msg\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def __str__(self):\n return f'Employee #{self.employee_id} ({self.job_group.name}, ${self.job_group.wage}) worked {self.hours_worked} hours on {self.date_worked}.'", "def print_donor_totals_report():\n # # Creating list to hold donors info for printing\n update_lists()\n try:\n print()\n title = ['Donor Name', '| Total Given ', '| Num Gifts',\n ' | Average Gift']\n print('{:<20}{:>14}{:^14}{:>14}'.format(title[0], title[1],\n title[2], title[3]))\n print('-'*65)\n print()\n for donor in donor_totals_list:\n average_gift = float(donor.donation_total) / donor.num_donations\n print('{:<22}{}{:>12.2f}{:>10}{:>8}{:>12.2f}'.format(donor.fullname, '$', donor.donation_total,\n donor.num_donations, '$', average_gift))\n print()\n\n except Exception as e:\n logger.info(f'Error printing donor list at {donor.fullname}')\n logger.info(e)", "def print_results(weeks, days, bus_days, total):\n print(\"{} remaining.\\n\".format(total))\n print(\"{} weeks and {} days.\".format(weeks, days - (weeks * 7)))\n print(\"{} business days.\".format(bus_days))", "def print_all(jobs):\n\n if len(jobs) == 0:\n print('print_all() recieved empty input')\n return\n\n for job in jobs:\n if job.is_relevant:\n print(job)\n else:\n continue", "def display_hall_of_fame(self) -> None:\n print(\"Hall of fame\")\n for env, dico in self.score_dic.items():\n print(\"Environment :\", env)\n for team, score in sorted(dico.items()):\n print(\"team: \", team, \"mean: \", score[0], \"std: \", score[1])", "def printPayment(self):\n print self.output()", "def run_sc(no_prods, prev_ledg_update, list_of_workers, no_prod):\n \n list_of_rands = []\n\n for worker_info in reversed(list_of_workers):\n print(worker_info[0])\n if check_fees(worker_info[3]) == True:\n print(\"Worker \", worker_info[0], \"paid their fees\")\n\n elif check_fees(worker_info[3]) == False:\n \n print(\"Worker \", worker_info[0], \"did not pay their fees\")\n list_of_workers.remove(worker_info)\n \n continue \n \n if check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == True:\n print(\"Worker \", worker_info[0], \"has a well formed random\")\n \n\n elif check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == False:\n print(\"Worker \", worker_info[0], \"failed to produce a well formed random\")\n list_of_workers.remove(worker_info)\n\n continue\n \n\n 
list_of_rands.append(worker_info[1])\n\n global_rand = gen_big_rand(list_of_rands)\n\n if global_rand == 0:\n print(\"Something went wrong global_rand was 0\")\n\n dist_list = get_dist_from_big_rand(global_rand, list_of_workers) \n PIDs = find_prod_ids(dist_list, no_prod)\n\n for producer in PIDs:\n print (\"Worker -->\", producer, \"has been selected as a producer for this cycle\")", "def which_hw(bot, update, args, session=session):\n print(\"Calling which_hw\")\n\n if not args:\n result = db_actions.query_all_homework(session)\n else:\n result = db_actions.query_homework_by_subject(args[0], session)\n \n header_string = \"ID- DATE ADDED - SUBJECT - HOMEWORK\\n\"\n result_list = [ \"{} - {}/{}/{} - {} - {} \".format(\n i.id, \n i.date_added.day, i.date_added.month, i.date_added.year,\n i.subject, \n i.todo \n ) \n for i in result ]\n result_text = header_string + \"\\n\".join(result_list)\n bot.send_message(chat_id=update.message.chat_id, text=result_text)", "def main():\n trades = get_trades()\n _print_trades(trades)\n\n print(\"\\n# Cost basis per asset\")\n _cost_basis_per_asset(trades)\n\n for year in range(2015, 2019):\n trades_for_year = _filter_trades_by_time(trades, year)\n _print_balances(trades_for_year, year)\n _print_agg_trades(trades_for_year, year)", "def print_report(d):\n report_data = d.get_report_data()\n title = \"{:24} | {:12} | {:10} | {:20}\"\n dashes=67*('-');print(dashes)\n print(title.format('Donor Name','Total Given','Num Gifts','Average Gift'))\n strf_format = \"{:24} ${:12.2f} {:^10d} ${:12.2f}\"\n print(dashes)\n for donor in report_data:\n print(strf_format.format(*donor))\n print(dashes)", "def view_batter_bysalary(self):\n conn = rs.create_connection(\"dailyfantasyscraper.db\")\n salary = \"$\" + sal.get()\n position = \"P\"\n batter_salary = (salary, position)\n cur = conn.cursor()\n cur.execute(\n \"SELECT * FROM rotowiredk WHERE salary <= ? and position != ? 
\", batter_salary)\n result = cur.fetchall()\n conn.commit()\n conn.close()\n\n for item in result:\n print(item)\n tree.insert('', 'end', values=item)", "def job_changes(self):\n cols = \"{:25}{:12.1f}\"\n cols2 = \"{:25}{:12.1f}{:12.1f}\"\n\n lines = [\"Benefit from job creation: \" + self.plant.name + \"\\n\"]\n\n row7 = self.farmer.labor()[1]\n row1 = self.farmer.labor_cost()[1]\n row8 = self.reseller.driving_work()[1]\n row2 = self.reseller.driving_wages()[1]\n row11 = self.reseller.loading_work()[1]\n row12 = self.reseller.loading_wages()[1]\n row9 = self.cofiring_plant.cofuel_om_work()[1]\n row3 = self.cofiring_plant.cofuel_om_wages()[1]\n row6 = -self.coal_work_lost[1]\n row5 = -self.coal_wages_lost[1]\n row10 = self.labor[1]\n row4 = self.wages[1]\n\n display_as(row6, \"FTE\")\n display_as(row7, \"FTE\")\n display_as(row8, \"FTE\")\n display_as(row9, \"FTE\")\n display_as(row10, \"FTE\")\n display_as(row11, \"FTE\")\n\n lines.append(cols2.format(\"Biomass collection\", row7, row1))\n lines.append(cols2.format(\"Biomass transportation\", row8, row2))\n lines.append(cols2.format(\"Biomass loading\", row11, row12))\n lines.append(cols2.format(\"O&M\", row9, row3))\n lines.append(cols2.format(\"Mining\", row6, row5))\n lines.append(cols2.format(\"Total\", row10, row4))\n lines.append(\"\")\n lines.append(cols.format(\"Area collected\", self.supply_chain.area()))\n lines.append(\n cols.format(\"Collection radius\", self.supply_chain.collection_radius())\n )\n lines.append(\n cols.format(\"Maximum transport time\", self.reseller.max_trip_time())\n )\n lines.append(cols.format(\"Number of truck trips\", self.reseller.truck_trips[1]))\n lines.append(\"\")\n lines.append(\"Mining job lost from co-firing at \" + self.plant.name + \"\\n\")\n lines.append(cols.format(\"Coal saved\", self.coal_saved[1]))\n lines.append(\n cols.format(\"Productivity\", self.mining_parameter.productivity_underground)\n )\n lines.append(cols.format(\"Job lost\", self.coal_work_lost[1]))\n lines.append(cols.format(\"Job lost\", display_as(self.coal_work_lost[1], \"FTE\")))\n lines.append(\n cols.format(\"Wage\", display_as(self.mining_parameter.wage_mining, \"USD/hr\"))\n )\n lines.append(cols.format(\"Wage lost\", self.coal_wages_lost[1]))\n return \"\\n\".join(lines)", "def report_calc_lattice(self):\n print(\" h k q_obs q_calc\")\n q_calc = np.sqrt(self.calc_q_square())\n for a, b, c, d in zip(self.h, self.k, self.q, q_calc):\n print(\"{0: 1d} {1: 1d} {2: .3f} {3: .3f}\".format(a, b, c, d))", "def func(self):\n from commands.base_commands.guest import census_of_fealty\n\n fealties = census_of_fealty()\n table = PrettyTable([\"{wFealty{n\", \"{w#{n\"])\n for fealty in fealties:\n table.add_row([fealty, fealties[fealty]])\n self.msg(table)", "def donor_report():\n \"\"\"print(\"{:<15}{:5}{:5}{}\".format(\"Donor Name\", \"| Total Given\", \"| Num Gifts\", \"| Average Gift\"))\n print(\"{:-<70}\".format(\"\"))\n \n for i in range(len(donors)):\n print(\"{:25s} ${:11.2f} {:9s} ${:12.2f}\".format((donors[i][0]), sum(donors[i][1]), len(donors[i][1]),\n sum(donors[i][1]) // len(donors[i][1])))\"\"\"", "def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])", "def getEmployeePaycheck(self, employee, header, reader):\n # extract the paycheck date and normalize it\n date = datetime.datetime.strptime(header[0], '%m/%d/%y').date()\n # make a paycheck\n paycheck = Paycheck(date=date)\n # save it\n employee.paychecks[paycheck.date] = paycheck\n\n # the gross pay\n paycheck.gross 
= float(header[5].strip())\n # the net pay\n paycheck.net = float(header[12].strip())\n\n # extract the paycheck info\n self.getIncomeAndDeductions(paycheck=paycheck, record=header)\n # process the remaining lines\n for record in reader:\n # if the zeroth field isn't empty\n if record[0]:\n # we are done with this paycheck\n return record\n # otherwise, get more\n self.getIncomeAndDeductions(paycheck=paycheck, record=record)\n\n # all done\n return", "def printPassbook(self) :\n for expense in self.__passbook:\n print(expense.toString())", "def printHam(ham: Dict[str, Any], digits: int = 4) -> None:\n\n qubitNum = ham['circuit']['qubits']\n\n print(f\"\\n====================\\n1. Basic information\\n====================\\n\")\n print(f\"Title: `{ham['file']['title']}`\")\n print(f\"Qubits: {qubitNum}\")\n print(f\"System energy level: {ham['circuit']['sys_level']}\")\n print(f\"Sampling interval: {ham['circuit']['dt']} ns\")\n print(f\"Circuit duration: {ham['circuit']['max_time_ns']} ns\")\n print(f\"Calculation steps: {ham['circuit']['max_time_dt']}\")\n\n # Obtain the max length name\n maxNameLengthDrift = 0 if len(ham[\"drift\"]) == 0 else max([len(key) for key in ham[\"drift\"]])\n maxNameLengthControl = 0 if len(ham[\"control\"]) == 0 else max([len(key) for key in ham[\"control\"]])\n maxNameLength = str(max(max(maxNameLengthDrift, maxNameLengthControl), 10))\n\n # Print abstract of operator\n print(f\"\\n============\\n2. Operators\\n============\\n\")\n qubitFormat = \"{0: <5} {1: <7} {2: <\" + maxNameLength + \"} {3: <9} {4: <6} {5: <6}\"\n print(qubitFormat.format('-' * 5, '-' * 7, '-' * 10, '-' * 9, '-' * 6, '-' * 6))\n print(qubitFormat.format(\"Qubit\", \"Type\", \"Name\", \"On qubits\", \"Pulses\", \"Amp\"))\n for qubit in range(qubitNum):\n print(qubitFormat.format('-' * 5, '-' * 7, '-' * 10, '-' * 9, '-' * 6, '-' * 6))\n for key in ham[\"drift\"]:\n drifts = ham[\"drift\"][key]\n if qubit in drifts[\"on_qubits\"]:\n print(qubitFormat.format(qubit, \"Drift\", key, f\"{drifts['on_qubits']}\", 0, f\"{drifts['amp']}\"))\n for key in ham[\"control\"]:\n ctrls = ham[\"control\"][key]\n if qubit in ctrls[\"on_qubits\"]:\n print(qubitFormat.format(qubit, \"Control\", key, f\"{ctrls['on_qubits']}\",\n len(ctrls['waveforms']), \"-\"))\n\n # Print abstract of waveforms\n def paraRound(para: Dict[str, Any]) -> Union[Dict[str, Any], None]:\n \"\"\" Reduce the length of pulse Parameters \"\"\"\n if para is None:\n return None\n else:\n for key in para:\n para[key] = round(para[key], digits)\n return para\n\n print(f\"\\n============\\n3. 
Waveforms\\n============\\n\")\n qubitFormat = \"{0: <9} {1: <\" + maxNameLength + \"} {2: <20} {3: <5} {4: <7} {5: <45}\"\n print(qubitFormat.format('-' * 9, '-' * 10, '-' * 15, '-' * 5, '-' * 7, '-' * 45))\n print(qubitFormat.format(\"On qubits\", \"Control\", \"Waveform\", \"Start\", \"Duration\", \"Params (Sequences)\"))\n for key in ham[\"control\"]:\n ctrls = ham[\"control\"][key]\n if len(ctrls['waveforms']) > 0:\n print(qubitFormat.format('-' * 9, '-' * 10, '-' * 15, '-' * 5, '-' * 7, '-' * 45))\n for wave in ctrls['waveforms']:\n waveName = \"\"\n wavePara = \"\"\n if wave['func'] is None:\n waveName = \"Manual Sequence\"\n wavePara = f\"Sequence contains {len(wave['sequence'])} pieces\"\n elif callable(wave['func']):\n waveName = \"Manual Wave\"\n wavePara = f\"{paraRound(wave['para'])}\"\n elif isinstance(wave['func'], str):\n waveName = wave['func']\n wavePara = f\"{paraRound(wave['para'])}\"\n print(qubitFormat.format(f\"{ctrls['on_qubits']}\", key, waveName, wave['insert_ns'],\n wave['duration_ns'], wavePara))", "def driver() :\n\t\n\t#The lists for the first name, last name and the favorite number for all employees.\n\tfname_list = [] \n\tlname_list = []\n\tfavorite_number = []\n\n\t#The list used in calculating the frequency of the numbers at a specific slot. \n\tfreq = [[] for _ in xrange(6)]\n\n\t#The result list containing the Powerball winning number.\n\tres=[]\n\tinput_choice=\"\"\n\n\t# Taking the user inputs until N or n is entered as input.\n\twhile 1 : \n\t\tinput_choice = raw_input(\"Enter employee info? [Y/N] \") \n\n\t\t# if the user inputs lowercase y or n it would still work.\n\t\tif input_choice in ['y','Y']:\n\n\t\t\tdata_update(fname_list,lname_list,favorite_number,freq)\n\n\t\telif input_choice in ['n','N'] :\n\t\t\t\n\t\t\tbreak\n\n\t\telse :\n\t\t\tprint \"Invalid Choice\"\n\t\t\tcontinue\t\t\n\n\tn_employees = len(fname_list)\n\tcounter = 0\n\tprint \"\\n\\n\"\n\n\t#Printing the user names and their favorite numbers to stdout.\n\twhile counter < n_employees :\n\t\tprint fname_list[counter] + \" \" + lname_list[counter] + \" \" + \" \".join(map(str,favorite_number[counter][:-1])) + \" Powerball: \" + str(favorite_number[counter][5])\n\t\tcounter += 1\n\n\tprint \" \\n\\n \"\n\n\t#If No employee info was entered.\n\tif n_employees==0:\n\t\tprint \"No Employee Found\"\n\n\t#Calculating the numbers with max frequency in each slot. If not unique, a random number would be used.\n\telse : \n\t\titr = 0\n\t\twhile itr < 6 :\n\t\t\tcount=Counter(freq[itr])\n\n\t\t\t#There is just one number to choose from in this slot. \n\t\t\tif len(count)==1 :\n\t\t\t\tres.append(count.most_common()[0][0])\n\n\t\t\t#There is no unique number with max frequency.\t\n\t\t\telif count.most_common()[0][1] == count.most_common()[1][1] :\n\t\t\t\tif itr < 5 :\n\t\t\t\t\tres.append(random.randint(1,69))\n\t\t\t\telse :\n\t\t\t\t\tres.append(random.randint(1,26))\n\n\t\t\t#The number with max frequency is unique. \t\n\t\t\telse :\n\t\t\t\tres.append(count.most_common()[0][0])\n\n\t\t\titr += 1\n\n\t\t#Printing out the winning Powerball number.\n\t\tprint \"Powerball winning number:\\n\"\n\t\tprint \" \".join(map(str,res[:-1])) + \" Powerball: \" + str(res[5])\n\t\n\treturn", "def create_report():\n print(\"Donor: | $ Total | Donations | $ Average |\")\n print(\"-\"*76)\n for item in donors:\n amt_total = float(sum(item[1]))\n num_total = int(len(item[1]))\n # Thousand separator as default. 
Careful with the space if we get some big donors.\n print(\"{:<26}|${:>15,.2f}|{:>15}|{:>15,.2f}\".format(item[0], amt_total, num_total, amt_total/num_total))", "def print_hands(self):\n # Clear the terminal and reprint round header\n os.system(\"clear\")\n self.print_header\n\n # Only display one of the dealers cards if they are still playing\n if not self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n print(\"UNKNOWN\")\n for card in self.dealer.cards:\n if card != self.dealer.cards[0]:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\"*25)\n print(\"TOTAL = ?\")\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n\n # Display the players cards and all of the dealers cards\n elif self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n for card in self.dealer.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.dealer.sum_cards()))\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n pass", "def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')", "def print_solution(data, manager, routing, assignment):\n total_distance = 0\n total_load = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n # print(plan_output)\n total_distance += route_distance\n total_load += route_load\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/route/route_vehicle{vehicle_id}.txt\", \"w\") as file:\n file.write(plan_output)\n file.close()\n print(\"aaa\")\n print('Total cost for all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/load_dist_{data['num_vehicles']}vehicles.txt\", \"w\") as file:\n out_file = \"\"\n out_file += str(total_load) + \",\" + str(total_distance)\n file.write(out_file)\n file.close() # OPEN AND ANALYZE LATER WITH PANDAS", "def assignNurses(solution, hini, data):\n\n demand = data[\"demand\"]\n pending = solution[\"pending\"]\n hours = data[\"hours\"]\n sumW = [0] * data[\"nNurses\"]\n\n\n checkers = initCheckers(data)\n\n z = solution[\"z\"]\n w = solution[\"w\"]\n\n\n 
for h in range(hours):\n\n #print(\" for loop h=\"+str(h))\n\n solution[\"mustWork_count\"] = 0\n \n mustWork, canWork = computeAssignments(solution, h, data, checkers=checkers, sumW=sumW )\n\n # print(\"h=\" + str(h))\n # print(\"mustWork\")\n # print(mustWork)\n # print(\"canWork\")\n # print(canWork)\n # print(\"hini:\")\n # print(hini)\n # print(\"demand\")\n # print(data[\"demand\"])\n # print(\"pending\")\n # print(solution[\"pending\"])\n\n\n \n # try to assign if pending[h] > 0 and h >= hini[n]\n for n in mustWork:\n # print(\"nurse :\" + str(n) + \" h: \" + str(h) + \" pending: \")\n # print(pending)\n update_checkers(solution, data, n, h, 1,checkers)\n w[n][h] = 1\n sumW[n] += 1\n pending[h] -= 1\n if z[n] == 0:\n z[n] = 1\n solution[\"cost\"] += 1\n \n #print(\"w[\" + str(n) + \",\" + str(h) + \"] = 1\")\n # pp.pprint(solution[\"w\"])\n\n for n in canWork:\n\n # print(\"nurse :\" + str(n) + \" h: \" + str(h) + \" pending: \")\n # print(pending)\n if pending[h] + hini[h] > 0:\n update_checkers(solution, data, n, h, 1,checkers) \n w[n][h] = 1\n sumW[n] += 1\n pending[h] -= 1\n if z[n] == 0:\n z[n] = 1\n solution[\"cost\"] += 1\n \n #print(\"w[\" + str(n) + \",\" + str(h) + \"] = 1\")\n # print(\"w[\" + str(n) + \"]\")\n # pp.pprint(solution[\"w\"]) \n\n # pp.pprint(data)\n # pp.pprint(solution[\"cost\"])\n # print(\"\")\n\n # compute feasibility: if unfeasible -> fitness should be inf\n if not isFeasible(solution, data):\n # assign the max cost\n #solution[\"cost\"] = 200000 * data[\"nNurses\"]\n solution[\"cost\"] = 100 * solution[\"cost\"]\n # print(data[\"demand\"])\n # print(solution[\"pending\"])\n # print(\"\")", "def submission_summary_info(self, job_id):\n ## TODO: Post-sanity checks, 2009-01-08\n #sanity = self.form[\"pdbfile\"].value\n chains = mysql.job_get_chain_sizes(job_id).rstrip(\";\")\n\n ## E.g.,\n # name: CHAINA\n # selected: True\n # chain_id: A\n # length: 39\n # preview: MET ILE TYR ALA GLY\n # desc: Chain A (39 Amino Acid Residues)\n sum = '<table class=\"status_table\">'\n sum += '<tr class=\"status_table_head\">'\n sum += '<th>Chain<th>Analyze</th><th>Residues</th>'\n #sum += '<th>Preview</th>\n sum += '<th>Residue type</th>'\n sum += '<th>Ignored residues/atoms</th>'\n next_chain = ''\n #for list in summary_data:\n for c in chains.split(';'):\n chid, length, selected, type = misc.parse_chains(c)\n #if next_chain != list[\"chain_id\"]:\n if next_chain != chid:\n sum += '</tr>'\n row1 = True\n next_chain = chid\n if row1:\n sum += '<tr class=\"status_table_row1\">'\n else:\n sum += '<tr class=\"status_table_row2\">'\n row1 = not row1\n\n ## Chain id\n sum += '<td class=\"c\">%s</td>' % chid\n\n ## Analyze (i.e., chain selected by user)\n if selected == \"1\":\n sum += '<td class=\"c\">True</td>'\n else:\n sum += '<td class=\"c\">False</td>'\n sum += '<td class=\"c\">%s</td>' % length\n\n ## Preview\n #sum += '<td>%s ...</td>' % list[\"preview\"]\n\n ## Residue type\n if type == \"aa\":\n sum += '<td class=\"c\">amino acid</td>'\n elif type == \"na\":\n sum += '<td class=\"c\">nucleic acid</td>'\n elif type == \"ot\":\n sum += '<td class=\"c\">other</td>'\n\n ## Ignored residues/atoms\n sum += '<td class=\"c\">none</td>'\n\n sum += '</tr></table>'\n\n return sum", "def print_report(donors_list):\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_list):\n name = donor[0]\n total 
= sum(donor[1])\n num_gift = len(donor[1])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Challenge Definition ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-challenge type:\", self.challenge_type, sep='')\n\n print(indent, \"|-challenge code ID:\", self.challenge_code_ID, sep='')\n\n print(indent, \"|-associated recipient ID:\", self.recipient_ID, sep='')\n recipient = get_indexed_item_from_list(self.recipient_ID, AutoResilGlobal.recipient_list)\n if recipient != None:\n recipient.printout_all(indent_level+1)\n\n print(indent, \"|-info about cloud virtual impacted resource(s):\", self.impacted_cloud_resources_info, sep='')\n\n if self.impacted_cloud_resource_ID_list != None:\n if len(self.impacted_cloud_resource_ID_list) >0:\n print(indent, \"|-associated cloud virtual impacted resource(s):\", sep='')\n for cloud_resource_ID in self.impacted_cloud_resource_ID_list:\n cloud_resource_item = get_indexed_item_from_list(cloud_resource_ID, AutoResilGlobal.cloud_virtual_resource_list)\n if cloud_resource_item != None:\n cloud_resource_item.printout_all(indent_level+1)\n\n print(indent, \"|-info about physical virtual impacted resource(s):\", self.impacted_phys_resources_info, sep='')\n\n if self.impacted_phys_resource_ID_list != None:\n if len(self.impacted_phys_resource_ID_list) >0:\n print(indent, \"|-associated physical impacted resource(s):\", sep='')\n for phys_resource_ID in self.impacted_phys_resource_ID_list:\n phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)\n if phys_resource_item != None:\n phys_resource_item.printout_all(indent_level+1)\n\n print(indent, \"|-CLI command to start challenge:\", self.start_challenge_CLI_command_sent, sep='')\n\n print(indent, \"|-CLI command to stop challenge:\", self.stop_challenge_CLI_command_sent, sep='')\n\n # TODO: self.start_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)\n # TODO: self.stop_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)", "def print_taboo_spaces(warehouse_id):\n problem_file = \"./warehouses/warehouse_{:02d}.txt\".format(warehouse_id)\n wh = Warehouse()\n wh.load_warehouse(problem_file)\n print(wh)\n print(\"TABOO CELLS: \")\n taboo = taboo_cells(wh)\n print(taboo)", "def ticket_salida(self):\n total = 0.0\n for x in self.mi_parqueo:\n total=x.cobro=total\n\n print(\"El costo total es de :\",total)", "def print_auction_result(self):\n self._auctioneer.print_winner()\n self._auctioneer.print_bidders()", "def display_balance_eq(self,opt=1):\r\n react = self.dataframe\r\n eq_bilanR = \" + \".join([f'{elem[0]} {elem[1]}' for elem in zip(list(react.loc[react['type']=='r','coef']),list(react.loc[react['type']=='r','esp']))])\r\n eq_bilanP=\" + \".join([f'{elem[0]} {elem[1]}' for elem in zip(list(react.loc[react['type']=='p','coef']),list(react.loc[react['type']=='p','esp']))])\r\n txt = f'{self.info}\\n{eq_bilanR} --> {eq_bilanP}'\r\n if opt == 1: \r\n print(txt)\r\n else:\r\n return txt", "def concertTicket():\n\n\n p =(input (\"What is the ticket price?\"))\n price = float (p)\n\n w = (input (\"What is your hourly wage?\"))\n wage = float (w)\n\n h = price/wage\n hours = round(h,2)\n\n print (\"You need to work\", hours, \"hours to buy your\",\n 
\"ticket\")", "def budget_for_necessities():\n print(\"========== Displaying hotel options ==========\")\n for i in range(len(hotel_list)):\n print(\" -- Enter\", i+1, \"to choose -- \")\n hotel_list[i].print_hotel()\n print(\" \")\n while True:\n try:\n hotel_num = int(input(\"Please choose your hotel option (Enter a number between 1 to 7): \"))\n hotel_num -= 1\n if hotel_num in range(len(hotel_list)): break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n while True:\n try:\n hotel_stay = int(input(\"Please enter the duration (in days) of your stay: \"))\n if hotel_stay > 0: break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n user_hotel = hotel_list[hotel_num]\n user_hotel_price = user_hotel.get_price()\n user_hotel_name = user_hotel.get_name()\n # display car option and ask for user input\n print(\"\\n======== Displaying rental car options =========\")\n for i in range(len(car_list)):\n print(\" -- Enter\", i+1, \"to choose -- \")\n car_list[i].print_car()\n print(\" \")\n while True:\n try:\n car_num = int(input(\"Please choose your car rental option (Enter a number between 1 to 6): \"))\n car_num -= 1\n if car_num in range(len(hotel_list)): break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n while True:\n try:\n car_rental_day = int(input(\"Please enter the duration (in days) of your car rental: \"))\n if car_rental_day > 0: break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n # calculate user's total cost for car rental and hotel\n user_car = car_list[car_num]\n user_car_price = user_car.get_price()\n user_car_name = user_car.get_name()\n total_hotel_cost = hotel_stay * user_hotel_price\n total_car_rental_cost = car_rental_day * user_car_price\n print(\"\\n=== Displaying your hotel and car rental information ===\")\n print(\"Hotel: \", user_hotel.get_name())\n print(\"Hotel total cost: $\", total_hotel_cost)\n print(\"Car Rental: \", user_car.get_name())\n print(\"Car rental total cost: $\", total_car_rental_cost)\n print(\" \")\n # calculate remaining budget based on hotel and car's cost and/or ask for higher budget\n user_budget.calculate_new_budget(total_hotel_cost + total_car_rental_cost)\n print(\" \")\n return total_hotel_cost, total_car_rental_cost, user_hotel_name, user_car_name", "def print_donor_report(database):\n name_max = 30\n\n rpt_title = \"Donor Name\" + ' ' * (name_max - 9) + \"| Total Given | Num Gifts | Average Gift\"\n print(rpt_title)\n print(\"-\" * len(rpt_title))\n\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n query = (Donor\n .select(Donor.name,\n fn.COUNT(Donation.amount).alias('ccount'),\n fn.SUM(Donation.amount).alias('csum'),\n fn.AVG(Donation.amount).alias('cavg'))\n .join(Donation, JOIN.LEFT_OUTER)\n .group_by(Donor.name)\n )\n\n for d in query:\n print(f\"{d.name:{name_max}} $ {d.csum:>10.2f} {d.ccount:>9} ${d.cavg:>12.2f}\")\n\n database.close()", "def main():\n \n welcome()\n myBill = get_bill_amt()\n pct = get_tip_pct()\n tip = calc_tip(myBill, pct)\n show_results(myBill, tip, pct)", "def user_story_18(self):\n for f1 in self.families.values():\n for f2 in self.families.values():\n if f2.husb_id in f1.children and f2.wife_id in f1.children:\n try:\n print(f\"US18 - {self.individuals[f2.husb_id].name} and {self.individuals[f2.wife_id].name} are siblings and are married on line {f2._married_line}\")\n except KeyError:\n print(f'US18 - Siblings married each other.')", "def 
print_processor(print_que):\n print(termcolor.colored(\"!--DO NOT CLOSE--!\", \"red\"))\n print(len(print_que))\n ID_LIMIT = 40\n run = True\n jobs_ran = 0\n while run:\n Q_Jobs = 0\n if len(print_que) > 0:\n if \"10.56.54.162\" in print_que[0]:\n Q_Jobs = print_status(\"10.56.54.162\")\n else:\n Q_Jobs = print_status(\"10.56.54.156\")\n if Q_Jobs >= ID_LIMIT:\n print(\"Printed so Far: \", str(jobs_ran))\n print(\"Waiting For Jobs to Clear Up\")\n # input(\n # \"Please Confirm Printers Will Support 40 More Job IDS before pressing enter: \")\n jobs_ran = 0\n time.sleep(100)\n continue\n if len(print_que) > 0:\n if(\"banner\" not in print_que[0]):\n os.system(print_que[0])\n print((str(print_que[0]).replace(\n \"C:/Windows/System32/lpr.exe -S 10.56.54.\", \"\").replace(\n '-P PS \"C:/S/SO/', \"\").split(\"-J\")[0]))\n print_que.pop(0)\n jobs_ran += 1\n else:\n print(termcolor.colored(\"\\n!--PROCESSING CAUGHT UP--!: \", \"green\"))\n run = False\n jobs_ran += 1", "def issue_payment(self):\n with open ('paylog.txt', 'w') as log:\n for i in self.emp_data:\n if self.emp_data[i][0] == \"Hourly\":\n if self.emp_data[i][1] == \"Direct Deposit\":\n hours = 0\n for q in self.timecard[i]:\n hours += q\n pay = hours * float(self.emp_data[i][2])\n pay = (\"{0:.2f}\".format(pay))\n log.write(\"{}{}{}{}{}{}{}{}{}{}\".format(\"Check for \",self.emp_id[i] ,\" for $\",pay,\n \" deposited successfully into account number \",\n self.emp_data[i][4],\" routing number \",self.emp_data[i][3],\n '\\n','\\n'))\n elif self.emp_data[i][1] == \"Mailed Check\":\n hours = 0\n for q in self.timecard[i]:\n hours += q\n pay = hours * float(self.emp_data[i][6])\n pay = (\"{0:.2f}\".format(pay))\n log.write(\"{}{}{}{}{}{}{}{}{}{}{}{}{}{}\".format(\"Check for \", self.emp_id[i],\" for $\",pay,\n \" successfully sent to \", self.emp_data[i][2], \" \",\n self.emp_data[i][3],\", \" , self.emp_data[i][4],\" \",\n self.emp_data[i][5],'\\n','\\n'))\n else:\n log.write(\"{}{}{}{}\".format(\"Error with processing payment for \", self.emp_id[i],'\\n','\\n'))\n elif self.emp_data[i][0] == \"Salaried\":\n if self.emp_data[i][1] == \"Direct Deposit\":\n pay = float(self.emp_data[i][2]) / 24\n pay = (\"{0:.2f}\".format(pay))\n log.write(\"{}{}{}{}{}{}{}{}{}{}\".format(\"Check for \",self.emp_id[i] ,\" for $\",pay,\n \" deposited successfully into account number \",\n self.emp_data[i][4],\" routing number \",self.emp_data[i][3],\n '\\n','\\n'))\n elif self.emp_data[i][1] == \"Mailed Check\":\n pay = float(self.emp_data[i][6]) / 24\n pay = (\"{0:.2f}\".format(pay))\n log.write(\"{}{}{}{}{}{}{}{}{}{}{}{}{}{}\".format(\"Check for \", self.emp_id[i],\" for $\",pay,\n \" successfully sent to \", self.emp_data[i][2], \" \",\n self.emp_data[i][3], \", \", self.emp_data[i][4],\" \",\n self.emp_data[i][5],'\\n','\\n'))\n else:\n log.write(\"{}{}{}{}\".format(\"Error with processing payment for \", self.emp_id[i],'\\n','\\n'))\n elif self.emp_data[i][0] == \"Commissioned\":\n if self.emp_data[i][1] == \"Direct Deposit\":\n commission = 0\n for q in self.receipts[i]:\n sale = float(q)\n total = sale * (float(self.emp_data[i][3]) * 0.01)\n commission += total\n pay = commission + float(self.emp_data[i][2])\n pay = (\"{0:.2f}\".format(pay))\n log.write(\"{}{}{}{}{}{}{}{}{}{}\".format(\"Check for \",self.emp_id[i] ,\" for $\",pay,\n \" deposited successfully into account number \",\n self.emp_data[i][4],\" routing number \",self.emp_data[i][3],\n '\\n','\\n'))\n elif self.emp_data[i][1] == \"Mailed Check\":\n commission = 0\n for q in 
self.receipts[i]:\n sale = float(q)\n total = sale * (float(self.emp_data[i][7]) * 0.01)\n commission += total\n pay = commission + (float(self.emp_data[i][6]) / 24)\n pay = (\"{0:.2f}\".format(pay))\n log.write(\"{}{}{}{}{}{}{}{}{}{}{}{}{}{}\".format(\"Check for \", self.emp_id[i],\" for $\",pay,\n \" successfully sent to \", self.emp_data[i][2],\" \",\n self.emp_data[i][3],\", \", self.emp_data[i][4],\" \",\n self.emp_data[i][5], '\\n','\\n'))\n else:\n log.write(\"{}{}{}{}\".format(\"Error with processing payment for \", self.emp_id[i],'\\n','\\n'))", "def main():\n while True:\n tasks = ['1. Вывести полную информацию по id книги.',\n '2. Вывести полную информацию о книге по ISBN.',\n '3. Подсчитать количество книг по заданному году издания.',\n '4. Подсчитать среднюю стоимость книг по каждому издательству.',\n '5. Вывести информацию о самой дорогой книге(ах) по заданным издательству и году издания.',\n '6. Завершить работу программы.']\n for i in tasks:\n print(i)\n\n while True:\n try:\n choice = int(input('Введите номер операции от 1 до 6: '))\n if choice < 1 or choice > 6:\n raise Exception\n break\n except:\n print('Неверный ввод, попробуйте еще раз.')\n\n if choice == 1:\n while True:\n try:\n input_id = int(input('Введите id книги: '))\n break\n except:\n print('Неверный ввод, попробуйте еще раз.')\n if book_by_id(input_id) == {}:\n print('К сожалению,нет книги с таким id.')\n else:\n for key, value in book_by_id(input_id).items():\n print(key + ':', value)\n\n elif choice == 2:\n while True:\n try:\n input_isbn = input('Введите ISBN книги: ')\n input_isbn = input_isbn.split('-')\n new_isbn = ''\n for i in range(len(input_isbn)-1):\n new_isbn += str(input_isbn[i]) + '-'\n new_isbn = new_isbn + input_isbn[-1]\n break\n except:\n print('Неверный ввод, попробуйте еще раз.')\n if book_by_isbn(new_isbn) == {}:\n print('К сожалению,нет книги с таким ISBN.')\n else:\n for key, value in book_by_isbn(new_isbn).items():\n print(key + ':', value)\n\n elif choice == 3:\n while True:\n try:\n input_year = int(input('Введите год: '))\n break\n except:\n print('Неверный ввод, попробуйте еще раз.')\n if books_by_year(input_year) == 0:\n print('К сожалению,нет книг такого года издания.')\n else:\n print(books_by_year(input_year))\n\n elif choice == 4:\n for key, value in average().items():\n print(key + ':', value)\n\n elif choice == 5:\n while True:\n try:\n input_publisher = input('Введите издательство: ')\n input_year = int(input('Введите год: '))\n break\n except:\n print('Неверный ввод, попробуйте еще раз.')\n if the_most_expensive(input_publisher, input_year) == {}:\n print('К сожалению, нет такой книги(книг).')\n else:\n for key, value in the_most_expensive(input_publisher, input_year).items():\n print(key + ':', value)\n\n elif choice == 6:\n break", "def main():\n # group_id = get_group_id() This would be used if I had\n # the appropriate privileges\n group_id = 15000022833\n setup_logger()\n ticket_ids = get_newhire_tickets(group_id)\n for ticket_id in ticket_ids:\n update_ticket_info(ticket_id)", "def print_report(resources_list, available_balance):\n water_left = resources_list[\"water\"]\n milk_left = resources_list[\"milk\"]\n coffee_left = resources_list[\"coffee\"]\n total_money = available_balance[\"money\"]\n return f\"Water: {water_left}ml \\nMilk: {milk_left}ml \\nCoffee: {coffee_left}g \\nMoney: ${total_money:0.2f}\"", "def print_sales_report(melons_by_salesperson):\n\n for salesperson, melons_sold in melons_by_salesperson.items():\n print(f'{salesperson} sold {melons_sold} 
melons')", "def _printTruckRec(self, tNode):\n count = self.countTrucks(tNode)\n print(f'Total number of vehicles entered in the warehouse: {count}')\n self.inorder(tNode)\n print('------------------------------------')", "def print_all_donor_donations():\n print(\"\\nList of Donors and Donations\")\n print(\"\\nDonor Name - Donation Date - Donation Amount:\")\n print(\"-\"*40)\n for donation in donor_donations_list:\n print(f'{donation.fullname} - {donation.donation_date} - ${donation.donation_amount:,.2f}')\n print()", "def display(self):\n print(f'{self.first_name} {self.last_name}, Customer#: '\n f'{self.customer_id}\\n{self.address}\\n{self.phone_number}\\n'\n f'{self.create_invoice()}')", "def create_report():\n report = list()\n for name, donations in donor_db.items():\n report.append([name, sum(donations), len(donations), sum(donations) / len(donations)])\n \n sorted_report = sorted(report, key=lambda x: -x[1])\n \n print(\"Donor Name | Total Given | Num Gifts | Average Gift\\n\")\n print('------------------------------------------------------------------')\n for row in sorted_report:\n print(\"{:25} ${:13.2f}{:11d} ${:13.2f}\".format(row[0], row[1], \n row[2], row[3]))\n return \"report printed successfully\"", "def alert_worker_evaluated(hirer,worker):\n message = loader.get_template(\n 'alerts/worker_evaluated.txt').render(\n {'worker': worker, 'hirer': hirer})\n\n return message", "def print_thank_you():\n while True:\n print()\n response = input(''' sub_menu --- Donation Entries ---\n Type your Full Name,\n or 'list' to display list of the donor names,\n or 'quit' to quit and return to main program prompt.\n Enter your choice here: > ''')\n\n if response.lower().strip() in [\"q\", \"quit\"]:\n break\n elif response.lower().strip() in [\"l\", \"list\"]:\n print_donor_list()\n continue\n else:\n record_id = None\n for index, existing_donor in enumerate(donors_data):\n # get a match an existing record\n if existing_donor[\"name\"].lower().strip() == response.lower().strip():\n record_id = index\n break\n\n amount = input(\"Enter Donation amount: \")\n try:\n donate_amount = float(amount)\n except ValueError:\n print(\"The donation amount must be numeric!\")\n\n # add or update a record\n if record_id is None: # new record\n print('Add a new record entry')\n donors_data.append({\"name\": response, \"donations\": [donate_amount]})\n donor_to_print = donors_data[-1]\n else: # existing record\n print('Update an existing record entry')\n donors_data[record_id][\"donations\"].append(donate_amount)\n donor_to_print = donors_data[record_id]\n\n # now print a thank you letter\n print_letter(donor_to_print)", "def printthankyou(donorname):\n print(THANK_YOU_LETTER.format(name=donorname, amount=donor_db[donorname][-1]))", "def displayHands(p_hand, d_hand):\n os.system('clear') # Call to OS clear the screen to clean up output\n print(\"\\nPlayer hand: \", p_hand.showHand())\n print(\"Player score: \", p_hand.handSum())\n\n print(\"\\nDealer hand: \", d_hand.showHand())\n print(\"Dealer score: \", d_hand.handSum())", "def get_shift_report_info_manager(self, shift_id):\n try:\n _, _, pro, mid, beg, supervisor_id, _, _, _, _, _, _, _, _, price = self.db_handler.get_shift_extended_info_by_id(shift_id)\n curr_name = 'грн.'\n shift_registrations = self.db_handler.get_shift_registrations_by_shift_id(shift_id)\n\n res = f'На зміну було зареєстровано:\\n' \\\n f'{emojize(\":full_moon:\", use_aliases=True)}Професіоналів: {pro}\\n' \\\n f'{emojize(\":last_quarter_moon:\", use_aliases=True)}Середнього 
рівня: {mid}\\n' \\\n f'{emojize(\":new_moon:\", use_aliases=True)}Початківців: {beg}\\n' \\\n f'{emojize(\":moneybag:\", use_aliases=True)}Ціна за подію: {price} {curr_name}\\n' \\\n f'{\"-\" * 20}\\n' \\\n f'Статистика працівників:\\n' \\\n f'{\"-\" * 20}\\n'\n\n for sh_reg in shift_registrations:\n usr = self.db_handler.get_staff_by_id(sh_reg[2])\n staff_str = f'{usr[3]} {usr[1]} {usr[2]}\\n'\n res += staff_str\n\n if str(usr[0]) == str(supervisor_id):\n res += f'{emojize(\":cop:\", use_aliases=True)}Головний на зміні\\n'\n\n res += self.get_shift_report_info_waiter(sh_reg[0], is_manager_called=True)\n res += f'{\"-\" * 20}\\n'\n\n return res\n\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def main():\n print_hist_new(histogram_new(get_pledge_list()))", "def h1(pid, cno, mdl, mat, wng):\n try:\n cur = con.cursor()\n print()\n\n chassisnos = get_values(\"BELONGS_TO\",\"ChassisNo\")\n models = get_values(\"BELONGS_TO\",\"Model\")\n\n if wng is None:\n print(\"This is an invalid input\")\n return False\n\n if pid is None and (cno is None or mdl is None) and mat is None:\n print(\"This is an invalid input\")\n return False\n\n if pid is not None:\n try:\n pid = int(pid)\n except:\n print(\"Enter valid Personnel ID\")\n return False\n if pid in get_values(\"BELONGS_TO\",\"IDnumber\"):\n pquery = \"SELECT * FROM BELONGS_TO WHERE IDnumber={}\".format(pid)\n cur.execute(pquery)\n p_row = cur.fetchall()[0]\n if p_row[\"WingName\"] != wng:\n print(f\"Personnel cannot belong to two wings. Already in {p_row['WingName']} wing\")\n return False\n else:\n pid = \"NULL\"\n\n if cno is not None and mdl is not None:\n try:\n cno = int(cno)\n except:\n print(\"Enter valid Chassis No\")\n return False\n mdl = \"'\" + mdl + \"'\"\n if (cno,mdl[1:-1]) in [(chassisnos[i],models[i]) for i in range(len(models))]:\n cquery = \"SELECT * FROM BELONGS_TO WHERE ChassisNo={} and Model={}\".format(cno,mdl)\n cur.execute(cquery)\n c_row = cur.fetchall()[0]\n if c_row[\"WingName\"] != wng:\n print(\"Vehicle cannot belong to two wings\")\n return False\n else:\n cno = \"NULL\"\n mdl = \"NULL\"\n\n if mat is None:\n mat = \"NULL\"\n else:\n mat = \"'\" + mat + \"'\"\n\n query = \"INSERT INTO BELONGS_TO (IDnumber, ChassisNo, Model, MatName, WingName) VALUES ({},{},{},{},'{}')\".format(pid,cno,mdl,mat,wng)\n print(query)\n cur.execute(query)\n con.commit()\n return True\n\n except Exception as e:\n con.rollback()\n print(\"Failed to insert into database\")\n print(\">>\", e)\n return False", "def tiskni_hru (hriste):\n for radek in hriste:\n for cislo in radek:\n print(cislo, end=' ')\n print()", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"\\nTest Definition ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-associated test case ID:\", self.test_case_ID, sep='')\n test_case = get_indexed_item_from_list(self.test_case_ID, AutoResilGlobal.test_case_list)\n if test_case != None:\n test_case.printout_all(indent_level+1)\n\n print(indent, \"|-test code ID:\", self.test_code_ID, sep='')\n\n print(indent, \"|-associated challenge def ID:\", self.challenge_def_ID, sep='')\n challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)\n if challenge_def != None:\n challenge_def.printout_all(indent_level+1)\n\n 
if self.VNF_ID_list != None:\n if len(self.VNF_ID_list) >0:\n print(indent, \"|-associated VNFs:\", sep='')\n for VNF_ID in self.VNF_ID_list:\n VNF_item = get_indexed_item_from_list(VNF_ID, AutoResilGlobal.VNF_Service_list)\n if VNF_item != None:\n VNF_item.printout_all(indent_level+1)\n\n if self.associated_metrics_ID_list != None:\n if len(self.associated_metrics_ID_list) >0:\n print(indent, \"|-associated metrics:\", sep='')\n for Metric_ID in self.associated_metrics_ID_list:\n Metric_item = get_indexed_item_from_list(Metric_ID, AutoResilGlobal.metric_definition_list)\n if Metric_item != None:\n Metric_item.printout_all(indent_level+1)\n\n if self.recipient_ID_list != None:\n if len(self.recipient_ID_list) >0:\n print(indent, \"|-associated recipients:\", sep='')\n for recipient_ID in self.recipient_ID_list:\n recipient_item = get_indexed_item_from_list(recipient_ID, AutoResilGlobal.recipient_list)\n if recipient_item != None:\n recipient_item.printout_all(indent_level+1)\n\n if self.test_CLI_command_sent_list != None:\n if len(self.test_CLI_command_sent_list) >0:\n print(indent, \"|-associated CLI commands:\", sep='')\n for CLI_command in self.test_CLI_command_sent_list:\n print(\" \"*INDENTATION_MULTIPLIER, \"|- \", CLI_command, sep='')\n\n # TODO: self.test_API_command_sent_list (depends how API commands are stored: likely a list of strings)", "def print_report(self):\n print '=' * 20 + ' %s ' % self.label + '=' * 20\n print '%-20s%5s\\t%4s\\t%4s\\t%4s\\t%4s' % (\n 'Hand' + '=' * 16, '#', 'Frac', 'W', 'Tie', 'L')\n for hand, result_dict in self.counts.iteritems():\n total_for_hand = sum(result_dict.itervalues())\n if total_for_hand == 0:\n win_frac = 0.0\n tie_frac = 0.0\n loss_frac = 0.0\n else:\n win_frac = float(result_dict[WIN_RESULT])/total_for_hand\n tie_frac = float(result_dict[TIE_RESULT])/total_for_hand\n loss_frac = float(\n result_dict[LOSS_RESULT])/total_for_hand\n print '%-20s%5d\\t%0.3f\\t%0.3f\\t%0.3f\\t%0.3f' % (\n hand, total_for_hand, float(total_for_hand)/self.total_items,\n win_frac, tie_frac, loss_frac)", "def get_personnel():\r\n if len(exe) == 0:\r\n print(\"There are no executives\")\r\n else:\r\n for i in exe:\r\n print(str(i))", "def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)", "def print_report(self, energy_trial, accept):\n minres = self.storage.get_lowest()\n print(\"basinhopping step %d: f %g trial_f %g accepted %d \"\n \" lowest_f %g\" % (self.nstep, self.energy, energy_trial,\n accept, minres.fun))", "def main():\n print(\"\\033c\")\n # read from database gather list of payments due\n query = (\n \"SELECT amount, nominator, start, number, type FROM stakes \"\n + \"WHERE (type='base_amount' OR type='reward') AND due<? 
\"\n + \"AND status='processing'\"\n )\n values = (munix_nonce(),)\n payments_due = sql_db(query, values)\n print(payments_due)\n choice = input(\"\\ny + Enter to make these payments, or just Enter to abort\\n\")\n\n if choice == \"y\":\n keys = {\n \"custodian\": CUSTODIAN,\n \"password\": getpass(\n f\"\\nInput Pybitshares Password for {CUSTODIAN} and press ENTER:\\n\"\n ),\n }\n payment_parent(payments_due, keys)", "def by_hkl(self, hkl=None):\n \n # this is a simple print statement, does not need to be optimized\n if hkl is None:\n id1 = self.hkl_labels\n seqs = range(len(id1))\n else:\n seqs = None\n for id, label in enumerate(self.hkl_labels):\n hkl0 = list(label[0]['hkl']) #label['multiplicity']\n if hkl == hkl0:\n seqs = [id]\n\n if seqs is not None:\n print(' 2theta d_hkl hkl Intensity Multi')\n for i in seqs:\n print('{:8.3f} {:8.3f} [{:2d} {:2d} {:2d}] {:8.2f} {:8d}'.format(\\\n self.theta2[i], self.d_hkls[i], \\\n self.hkl_labels[i][0][\"hkl\"][0], \\\n self.hkl_labels[i][0][\"hkl\"][1], \\\n self.hkl_labels[i][0][\"hkl\"][2], \\\n 100*self.xrd_intensity[i]/max(self.xrd_intensity),\n self.hkl_labels[i][0][\"multiplicity\"]))\n else:\n print('This hkl is not in the given 2theta range')", "def display(self):\r\n return str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nSalary: $\" + str(self.salary)))" ]
[ "0.66752523", "0.6188767", "0.5423987", "0.5204655", "0.5056237", "0.49969152", "0.49781322", "0.49219614", "0.49140453", "0.49073628", "0.48915973", "0.48814258", "0.48631328", "0.48400798", "0.48394734", "0.48239037", "0.4811467", "0.48080567", "0.48048052", "0.4777241", "0.4775193", "0.47670245", "0.4744654", "0.47442472", "0.47372025", "0.4737008", "0.47357783", "0.47322732", "0.4724555", "0.47146687", "0.47087544", "0.47037315", "0.46913084", "0.46889246", "0.46856993", "0.46802613", "0.46744722", "0.46722633", "0.46624088", "0.46452335", "0.46276042", "0.46242306", "0.4622747", "0.46198893", "0.4612655", "0.4599203", "0.45981437", "0.459256", "0.45886794", "0.45765108", "0.45741904", "0.45652288", "0.4563712", "0.4558988", "0.4545509", "0.45402062", "0.45316854", "0.4531556", "0.453025", "0.45244455", "0.45209938", "0.45125884", "0.4510474", "0.45049903", "0.4504609", "0.45036712", "0.44882756", "0.44811198", "0.4471863", "0.44700855", "0.44616446", "0.44557777", "0.44500506", "0.44480732", "0.44467902", "0.44457904", "0.44384232", "0.44357058", "0.4435232", "0.44306183", "0.44282436", "0.4428035", "0.44168", "0.44114885", "0.44087812", "0.43993872", "0.43987197", "0.43980518", "0.43953687", "0.43932205", "0.43892658", "0.43824214", "0.43807793", "0.43713963", "0.43650156", "0.43621123", "0.4360749", "0.4351229", "0.4347968", "0.43478957" ]
0.4374707
93
Print list of all employees and respective salary details for specified hall Take dict of Worker objects as parameter
def generate_salary_list(Hall):
    pdf = FPDF('P', 'mm', 'A4')
    pdf.add_page('P')
    pdf.set_font('Times', 'B', 14)

    pdf.multi_cell(0, 5, ('Hall Salary List: Hall %s' % Hall.hall_ID))
    pdf.ln()

    worker_list = dbr.rebuild("worker")
    title = "Role"
    wage = 0

    for key in worker_list:
        if worker_list[key].hall_ID == Hall.hall_ID:
            if isinstance(worker_list[key], mess_manager.MessManager):
                title = "Mess Manager"
                wage = worker_list[key].monthly_salary
            elif isinstance(worker_list[key], clerk.Clerk):
                title = "Clerk"
                wage = worker_list[key].monthly_salary
            elif isinstance(worker_list[key], attendant.Attendant):
                title = "Attendant"
                wage = worker_list[key].daily_wage

            pdf.multi_cell(0, 5, ('%s: %s (%s) - Rs. %s' % (worker_list[key].worker_ID, worker_list[key].name, title, wage)))
            pdf.ln()

    # Write generated output file to PDF
    pdf.output(('hall_salary_%s.pdf' % Hall.hall_ID), 'F')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def process_employees_salary(self, employees_info: List[List[str]]) -> None:\n pass", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_sal_slip_list(self, as_dict=False):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.name\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=as_dict)\n\n\t\treturn emp_list", "def print_statement(Hall):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, ('Hall Account Statement for Hall: %s' % Hall.name))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Mess Account: %s' % Hall.mess_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Salary Account: %s' % Hall.salary_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Repair Account: %s' % Hall.repair_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Rent Account: %s' % Hall.rent_account))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Others Account: %s' % Hall.others_account))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('hall_statement_%s.pdf' % Hall.hall_ID), 'F')", "def main():\n # create a list of test employees and managers\n testList = [\n {'type': 'employee', 'firstName': 'Mickey', 'lastName': 'Mouse', 'SSN': '100-12-3456', 'salary': 1500.00},\n {'type': 'manager', 'firstName': 'Walt', 'lastName': 'Disney', 'SSN': '100-00-0000', 'salary': 5000.00,\n 'title': 'Head Of Disneyland', 'yearBonus': 1000.00},\n {'type': 'employee', 'firstName': 'Donald', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 1000.00},\n {'type': 'manager', 'firstName': 'Minnie', 'lastName': 'Mouse', 'SSN': '999-99-999', 'salary': 10000.00,\n 'title': 'Head Of Mouse HouseHold', 'yearBonus': 15000.00},\n {'type': 'manager', 'firstName': 'Daisy', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 12000.00,\n 'title': 'Head Of Duck HouseHold', 'yearBonus': 10000.00}]\n\n # Define percentRaise (0.1 == 10%)\n percentRaise = 0.1\n\n # Create Employees and Managers Object using the Test data\n employeeList = loadEmployees(testList)\n\n # Sort 
employee List, which will ustilize Employee's __lt__ and __eq__ methods\n employeeList.sort()\n\n # Loop over Employee and Manager Objects\n print(\"Employees and Manager should be sorted by last name, then first\\n\")\n for employee in employeeList:\n if type(employee) == Manager:\n print(\"Manager:\")\n else:\n print(\"Employee:\")\n # Print Employee or Manager\n print(employee)\n # Give Raise to Employee or Manager\n employee.giveRaise(percentRaise)\n # Print New Salary\n print(\"With %.2f%% Raise, Salary: $%.2f\\n\" % (percentRaise * 100, employee.salary))\n\n # Employee docStrings\n print(\"\\nEmployee docstring for each method\")\n print(\"Employee.__doc__=\" + Employee.__doc__)\n print(\"Employee.__init__.__doc__=\" + Employee.__init__.__doc__)\n print(\"Employee.giveRaise.__doc__=\" + Employee.giveRaise.__doc__)\n print(\"Employee.__str__.__doc__=\" + Employee.__str__.__doc__)\n print(\"Employee.__eq__.__doc__=\" + Employee.__eq__.__doc__)\n print(\"Employee.__lt__.__doc__=\" + Employee.__lt__.__doc__)\n\n print(\"\\nManger docstring for each method\")\n print(\n \"Since Manager inherits from Employee, several of the methods ('giveRaise', '__eq__' and '__lt__') and the corresponding docstring will originate from the Employee class\\n\")\n print(\"Manager.__doc__=\" + Manager.__doc__)\n print(\"Manager.__init__.__doc__=\" + Manager.__init__.__doc__)\n print(\"Manager.giveRaise.__doc__=\" + Manager.giveRaise.__doc__)\n print(\"Manager.__str__.__doc__=\" + Manager.__str__.__doc__)\n print(\"Manager.__eq__.__doc__=\" + Manager.__eq__.__doc__)\n print(\"Manager.__lt__.__doc__=\" + Manager.__lt__.__doc__)", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def get_personnel():\r\n if len(off) == 0:\r\n print(\"There are no office workers\")\r\n else:\r\n for i in off:\r\n print(str(i))", "def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def print_info(name, salary=3500):\n print('Name:', name)\n print('Salary:', salary)\n return", "def employees_earning(table):\n\n product_index = 1\n employee_id_index = 2\n amount_sold_index = 4\n\n person_id_index = 0\n person_name_index = 1\n\n game_index = 0\n price_index = 3\n\n store_table = store.get_table()\n store.check_table(store_table)\n hr_table = hr.get_table('model/hr/persons.csv')\n money_earned = {}\n for person in hr_table:\n person_id = person[person_id_index]\n person_name = person[person_name_index]\n money_earned[person_name] = 0\n for record in table:\n product_id = record[product_index]\n employee_id = record[employee_id_index]\n amount_sold = int(record[amount_sold_index])\n if person_id == employee_id:\n for game in store_table:\n game_id = game[game_index]\n if game_id == product_id:\n game_price = 
int(game[price_index])\n money_earned[person_name] += int(amount_sold * game_price)\n return money_earned", "def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def __str__(self):\n return f'Employee #{self.employee_id} ({self.job_group.name}, ${self.job_group.wage}) worked {self.hours_worked} hours on {self.date_worked}.'", "def make_salaried(self,salary,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"2\"\n print(\"{}{}\".format(name,\" was successfully changed to be a salaried employee\"))\n self.emp_dict[id][7] = salary\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]", "def 
list_employees(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"employees\", command=\"directory\")\n root = ET.fromstring(result)\n for cat in root:\n if cat.tag != \"employees\":\n continue\n for item in cat:\n emp_id = next(iter(item.values()))\n emp_ret = {\"id\": emp_id}\n for details in item:\n emp_ret[next(iter(details.values()))] = details.text\n ret[emp_ret[order_by]] = emp_ret\n return ret", "def display_hall_of_fame(self) -> None:\n print(\"Hall of fame\")\n for env, dico in self.score_dic.items():\n print(\"Environment :\", env)\n for team, score in sorted(dico.items()):\n print(\"team: \", team, \"mean: \", score[0], \"std: \", score[1])", "def main():\n while True:\n employee_id = get_employee_input_int('TEST DATA: Enter employee ID to look up for the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} has a grade = {}, Hence gets {} per hours\\n'\n .format(employee.full_name, employee.grade, payscale.salary))\n HR_Options(employee, payscale)\n break", "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def employee_data(self):\n self.paymethod()\n self.classification()\n for i in self.emp_id:\n if self.clsf[i] == \"Salaried\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][7]]\n elif self.clsf[i] == \"Hourly\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][8],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif 
self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][8]]\n elif self.clsf[i] == \"Commissioned\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][9],\n self.emp_dict[i][10],self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3],self.emp_dict[i][4],self.emp_dict[i][7],self.emp_dict[i][9]]\n else:\n print(\"Error\")\n print(self.emp_data)\n return self.emp_data", "def print_sales_report(melons_by_salesperson):\n\n for salesperson, melons_sold in melons_by_salesperson.items():\n print(f'{salesperson} sold {melons_sold} melons')", "def payroll_calculation():\n\n name = search_employee()\n if name == None:\n return\n accrual_month = month('Accrual month: ')\n accrual_year = year('Accrual year: ')\n accrual = f'{accrual_month}-{accrual_year}'\n salary_value = month_salary()\n salary_base = salary(salary_value)\n overtime = value_input('Overtime: ')\n absences = value_input('Absences: ')\n late = value_input('Late: ')\n bonus = value_input('Bonus: ')\n\n hourly_wage = round(salary_value / 220, 2)\n overtime_value = round(float(hourly_wage * 1.5), 2)\n overtime_total = round(overtime_value * overtime, 2)\n daily_wage = round(salary_value / 30, 2)\n absences_value = round(daily_wage * absences, 2)\n late_value = round(daily_wage * late / 60, 2)\n inss_value = inss(salary_base, overtime_total)\n irrf_value = irrf(salary_base, overtime_total, inss_value, bonus)\n sleep(2)\n\n\n\n header('EARNINGS')\n print(f'Salary: {salary_base}')\n print(f'Bonus: {bonus}')\n print(f'Overtime: {overtime_total }')\n earnings_total = round(salary_base + overtime_total + bonus, 2)\n sleep(2)\n\n print(line())\n print(f'Earnings total: {earnings_total}')\n print(line())\n sleep(2)\n\n header('DISCOUNTS')\n\n transportation_vouchers = round(salary_base * 6 / 100, 2)\n health_care = round(salary_base * 2 / 100, 2)\n dental_care = round(salary_base * 0.5 / 100, 2)\n meal_ticket = round(salary_base * 1 / 100, 2)\n\n print(f'absences: {absences_value}')\n print(f'late: {late_value}')\n print(f'transportation_vouchers: {transportation_vouchers}')\n print(f'health_care: {health_care}')\n print(f'dental_care: {dental_care}')\n print(f'meal_ticket: {meal_ticket}')\n print(f'inss_value: {inss_value}')\n print(f'irrf_value: {irrf_value}')\n\n discounts_total = round(absences_value + late_value + transportation_vouchers + health_care +\n dental_care + meal_ticket + inss_value + irrf_value, 2)\n\n print(line())\n print(f'Discounts_total : {discounts_total }')\n print(line())\n liquid_salary = round(earnings_total - discounts_total, 2)\n print(f'Liquid_salary: {liquid_salary} ')\n print(line())\n\n conn = sqlite3.connect('data/people_management.db')\n cursor = conn.cursor()\n cursor.execute(f\"\"\"\n INSERT INTO salary (name, salary ,bonus, overtime, absences_value, late_value, \n t_vouchers, health_care, dental_care, meal_ticket, inss, irrf, \n earnings, discounts, liquid_salary, accrual)\n VALUES ('{name}', '{salary_base}' ,'{bonus}', '{overtime_total}', '{absences_value}', \n '{late_value}', '{transportation_vouchers}', '{health_care}', '{dental_care}', \n '{meal_ticket}', '{inss_value}', '{irrf_value}', '{earnings_total}', '{discounts_total}', \n '{liquid_salary}', '{accrual}')\n \"\"\")\n 
conn.commit()\n conn.close()", "def call_all_functions(input: list) -> None:\n for emp in input:\n print(\"{0} {1} {2} {3} {4}\".format(emp.get_first_name(), emp.get_last_name(),emp.get_family(), emp.get_salary(), emp.get_department()))", "def __str__(self):\n return \"Employee attributes {}, {}, {} ,{}, {}, {}\". \\\n format(self._last_name, self._first_name, self._address, self._phone_number,\n self._start_date, self._salary)", "def business_info():\n print \"hello\"\n \n yelp_ids_empty = {}\n yelp_ids_dict = yelp_to_salon_list_SF('nail salon', yelp_ids_empty)\n\n businesses = {}\n\n # for business in yelp_ids_dict:\n # businesses = {\n # business.yelp_id: {\n # \"yelpID\": business.yelp_id,\n # \"businessName\": business.business_name,\n # \"busLat\": business.bus_lat,\n # \"busLong\": business.bus_long,\n # \"address\": business.address,\n # \"phone\": business.phone\n # }\n # }\n\n\n return jsonify(yelp_ids_dict)", "def main():\n name = input(\"Please enter in your name: \")\n\n \"\"\"Ask the user to enter a number if they are a Director, Manager or Staff.\"\"\"\n \"\"\"This will check and make sure the user only enters in 1,2, \n or 3 and a number greater than zero\"\"\"\n while True:\n try:\n designation_number = int(input(\"Please enter in \\n1 for Director \"\n \"\\n2 for Manager \\n3 for Staff\\n\"))\n if 0 < designation_number <= 3:\n break\n print(\"Invalid number entered.\")\n except Exception as e:\n print(e)\n \"\"\"Gets the user salary and makes sure is a number and greater than 0\"\"\"\n while True:\n try:\n salary = float(input(\"Please enter in your salary: \"))\n if salary <= 0:\n print(\"Your salary must be at least 1 dollar. Please enter a number greater than zero.\")\n else:\n break\n except ValueError:\n print(\"Oops! That was not a valid number. Try again...\")\n\n \"\"\"Create Employee\"\"\"\n employee1 = employee.Employee()\n employee1.set_name(name)\n employee1.set_designation(designation_number)\n employee1.set_salary(salary)\n print(employee1)", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... 
\n \n return render_template(\"employee_display.html\", employees = employees)", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def atten_employee(list_emp, name):\r\n with open(\"attendance_log.txt\", \"w\") as attendance_by_emp:\r\n attendance_by_emp.seek(0)\r\n attendance_by_emp.write(\"Employee Attendance Report:\\n\")\r\n for worker in list_emp:\r\n if worker.name == name:\r\n attendance_by_emp.write(\"%s-\\n\" % worker.name)\r\n for date in worker.attendance:\r\n attendance_by_emp.write(\"\\t\" + date + '\\n')\r\n print(\"Report issued!\\n\")\r\n return\r\n print(\"%s is not in employee log\\n\" % name)\r\n return", "def __init__(self, name, hall_ID, password, monthly_salary,\n rebuild=False, worker_ID=None):\n\n # The rebuild flag, if true, denotes that the object is being made from\n # data already present in the database\n # If False, a new data row is added to the specific table\n if not rebuild:\n self.worker_ID = db.add(\"worker\")\n db.update(\"worker\", self.worker_ID, \"worker_type\", \"M\")\n self.password = password\n else:\n self.worker_ID = worker_ID\n self._password = password\n\n self.monthly_salary = monthly_salary\n worker.Worker.__init__(self, self.worker_ID, name, hall_ID)", "def get_all_huns(self):\n for name1 in self.plist.keys():\n for name2 in self.plist.keys():\n if name1 < name2: \n nkey = name1+\":\"+name2\n self.result[nkey] = self.get_hun(name1, name2)", "def show_emp_bookings(self):\n try:\n emp_id = int(input(\"Enter Employee Id: \"))\n bookings = self.admin_repository.show_emp_bookings(emp_id)\n if bookings:\n for booking in bookings:\n print(\"Booking Id : {}\".format(booking[5]))\n print(\"Date : {}\".format(booking[0]))\n print(\"Pick up time : {}\".format(booking[1]))\n print(\"Cab_Number : {}\".format(booking[2]))\n print(\"Pick up location: {}\".format(booking[3]))\n print(\"Destination : {}\".format(booking[4]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n\n except Exception as e:\n print(\"Some Error occurred.\")\n return False", "def home_office(ctx, year=CURRENT_YEAR):\n ss = open_spreadsheet('Home Office %s' % year)\n\n worksheet = ss.worksheet('Monthly fees')\n categories = defaultdict(Decimal)\n\n for row in worksheet.get_all_records():\n categories['hoa assessments'] += get_decimal(row['hoa assessments'])\n categories['homeowners insurance'] += get_decimal(row['homeowners insurance'])\n categories['mortgage'] += get_decimal(row['mortgage'])\n categories['utilities (gas & electric)'] += \\\n get_decimal(row['electric']) + get_decimal(row['gas'])\n\n data = [(k.capitalize(), v) for k, v in categories.items()]\n\n data += [\n (f'Total for {year}', sum(categories.values())),\n (f'Office rent for {year}', sum(categories.values()) / 4),\n ('Repairs & maintenance', get_rm_total(ss)),\n ]\n table = AsciiTable(data, 'Home office')\n table.inner_heading_row_border = False\n print(table.table)", "def show_all_employees(self):\n try:\n 
employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def do_hire(self):\n return f\"{self} is hiring employees\"", "def get_homework(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('homework')]\n hw_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n hw_fields[key] = value\n additional_fields_group = ['group_key'] # Additional fields that could be passed in args\n additional_fields_teacher = ['teacher_UID', 'teacher_key']\n teacher_fields = {}\n group_fields = {}\n for key, value in fields.items():\n if key in additional_fields_teacher:\n if key == 'teacher_UID':\n teacher_fields['UID'] = value\n else:\n teacher_fields[key] = value\n if key in additional_fields_group:\n group_fields[key] = value\n group = None if len(group_fields) == 0 else self.get_group(**group_fields)\n teacher = None if len(teacher_fields) == 0 else self.get_teacher(**teacher_fields)\n query = Homework.select().filter(**hw_fields)\n if group is not None:\n query = query.where(Homework.group == group)\n if teacher is not None:\n query = query.where(Homework.teacher == teacher)\n hws = [i for i in query]\n # Expect a single value if search by unique fields, list if by non-unique, by group or by teacher\n return hws if len(hws) > 1 else hws[0] if len(hws) == 1 else None", "def findHierarchy(self):\n def __recursiveHelper(key_name, output, indent):\n if key_name in self.relations:\n for employee in self.relations[key_name].employees:\n output += \" \" * indent + str(employee) +\"\\n\"\n # return __recursiveHelper(employee, output, indent+1)\n __recursiveHelper(employee, output, indent+1)\n else:\n print(output)\n return output\n\n\n #experimenting with Iter() and next() iterators/generators\n #and a while loop in the recursive function:\n\n # def __recursiveHelper(key_name, output, indent):\n # if key_name in self.relations:\n # employees = iter(self.relations[key_name].employees)\n # employee = next(employees, \"stop\")\n # while employees and employee != 'stop':\n # output += \" \" * indent + str(employee) +\"\\n\"\n # __recursiveHelper(next(employees, \"stop\"), output, indent+1)\n # else:\n # employee = next(employees, \"stop\")\n #\n # else:\n # return output\n\n\n\n\n\n output = \"\"\n indent = -1\n # self.relations is a dictionary of manager-name string keys.\n # The employees of None are the top-ranking managers.\n # only issue:\n # having trouble returning the concatenated output\n # from the recursive function:\n return __recursiveHelper(None, output, indent+1)", "def print_families(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Married', 'Divorced', 'Husband ID', 'Husband Name', 'Wife ID', 'Wife Name', 'Children']\n for f in self.families.values():\n pt.add_row(f.get_values())\n print(pt)", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = 
all_certs)", "def printSummary(self):\n\t\tweekWorkHours = None\n\t\tdayDelta = None\n\t\tfor num in self.workdays:\n\t\t\tday = self.workdays[num]\n\t\t\tif day.daytype == DayType.weekend:\n\t\t\t\tif weekWorkHours:\n\t\t\t\t\thours = weekWorkHours.total_seconds() // 3600\n\t\t\t\t\tmins = weekWorkHours.seconds // 60 % 60\n\t\t\t\t\tprinty('------{}hrs-----'.format(hours), 'y')\n\t\t\t\t\tweekWorkHours = None\n\t\t\t\t\tdayDelta = None\n\t\t\t\tprinty('{:02d}. (WE)'.format(num), 'w')\n\t\t\telif day.daytype == DayType.holiday:\n\t\t\t\tprinty('{:02d}. (Urlaub)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.illness:\n\t\t\t\tprinty('{:02d}. (Krank)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.overtime_free:\n\t\t\t\tprinty('{:02d}. (Überstundenausgleich)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.business_trip:\n\t\t\t\tprinty('{:02d}. (Dienstreise)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.work:\n\t\t\t\tdayDelta = day.getWorkingTime()\n\t\t\t\tworkhours = dayDelta.seconds // 3600\n\t\t\t\tworkrestminutes = dayDelta.seconds // 60 % 60\n\t\t\t\tabsday = datetime.strptime('{}.{}.{}'.format(num, self.monthNum, self.year),'%d.%m.%Y')\n\t\t\t\ttoday = datetime.today()\n\t\t\t\tpauseDelta = day.getPauseTime()\n\t\t\t\tpausehours = pauseDelta.seconds // 3600\n\t\t\t\tpauserestminutes = pauseDelta.seconds // 60 % 60\n\t\t\t\tif absday == today:\n\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'wH')\n\t\t\t\telif absday > today:\n\t\t\t\t\t# future days\n\t\t\t\t\tif len(day.timeblocks) == 0:\n\t\t\t\t\t\tprinty('{:02d}. ?'.format(num), 'g')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'g')\n\t\t\t\telse:\n\t\t\t\t\t# past days\n\t\t\t\t\tif dayDelta > timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n>')\n\t\t\t\t\telif dayDelta < timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'r>')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. 
{}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n')\n\t\t\tif weekWorkHours == None:\n\t\t\t\tweekWorkHours = dayDelta\n\t\t\telse:\n\t\t\t\tif dayDelta:\n\t\t\t\t\tweekWorkHours = weekWorkHours + dayDelta", "def display(self):\r\n return str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nSalary: $\" + str(self.salary)))", "def getEmployees(self):\n return self.employees", "def get_emp_data(self,employee):\n\t\temp = None\n\t\tfind_by = employee.find_elements_by_tag_name\n\t\tif str(type(employee)) != \"<type 'NoneType'>\" and main.is_desktop():\n\t\t\t# columns = employee.find_elements_by_tag_name(\"td\")\n\t\t\temp = {\n\t\t\t\t'name': find_by('td')[0].text,\n\t\t\t\t'id': find_by('td')[1].text,\n\t\t\t\t'status': find_by('td')[2].text,\n\t\t\t\t'election': find_by('td')[3].text,\n\t\t\t\t'date_changed': find_by('td')[4].text\n\t\t\t}\n\t\telif str(type(employee)) != \"<type 'NoneType'>\":\n\t\t\temp = {\n\t\t\t\t'name': find_by('div')[2].text,\n\t\t\t\t'id': find_by('div')[3].text[13:],\n\t\t\t\t'status': find_by('div')[4].text[8:], #Fail 4:20p, StaleEl\n\t\t\t\t'election': find_by('div')[5].text[17:], #Fail 4:15p, StaleEl\n\t\t\t\t'date_changed': find_by('div')[6].text[14:]\n\t\t\t}\n\n\t\t# raw_input(str(emp))\n\t\treturn emp", "def get_employee_information(user_name: str, employee_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name=user_name, action=Action.EMPLOYEE_INFO.value,\n store_name=store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(employee_name)", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def get_employees(self):\n return self.employees", "def test_search_employee_displays_employee_names(self):\n # add some data to the database\n test_employees = [\n {'id': 1, 'name': \"Test Employee 1\"},\n {'id': 2, 'name': \"Test Employee 2\"}\n ]\n for employee in test_employees:\n e = db_manager.Employee.get_or_create(name=employee['name'])\n # give each employee an associated logentry\n db_manager.LogEntry.create(\n employee=e[0],\n date=datetime.date(2018, 1, 2),\n task_name='Test task {}'.format(employee['id']),\n duration=employee['id'],\n notes='Note'\n )\n\n title = \"\\nSEARCH BY EMPLOYEE\" + \"\\n\"\n employee_rows = \"\"\n for employee in test_employees:\n employee_rows += \"{}) 
{}\\n\".format(employee['id'],\n employee['name'])\n\n expected_output = (title +\n employee_rows)\n\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n example_input = '1'\n with patch('builtins.input', side_effect=example_input):\n self.menu.search_employee()\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())", "def warehouse_print(warehouse_list):\n for warehouse_item in warehouse_list:\n warehouse_item_print(warehouse_item)", "def HR_Options(employee, payScale):\n menu = (\n\n '\\nOptions\\n'\n '\\t1)Get Employee\\n'\n '\\t2)Add Employee\\n'\n '\\t3)Delete Employee\\n'\n '\\t4)Update Employee Grade\\n'\n '\\t5)Time-Sheet'\n '\\t6)Salary_SLip\\n'\n '\\t7)Get All Employee'\n '\\t8)Quit\\n'\n 'Choose from above mentioned options'\n )\n\n while True:\n Option_choice = get_employee_input_int(menu)\n if Option_choice == 1:\n get_employee()\n elif Option_choice == 2:\n add_employee()\n elif Option_choice == 3:\n delete_employee()\n elif Option_choice == 4:\n update_employee(employee)\n elif Option_choice == 5:\n enter_time_sheet()\n elif Option_choice == 6:\n emp_salary_slip(payScale)\n elif Option_choice ==7:\n get_all_employee()\n elif Option_choice == 8:\n exit(0);\n else:\n print(\"Invalid Entry !!! Please choose Option between (1-6)\")\n continue\n return", "def employees(self) -> object:\n return self._employees", "def make_employee_dict(names, ID_numbers, salaries, email_addresses):\r\n d = dict()\r\n for i in range(len(names)):\r\n d[ID_numbers[i]] = Employee(names[i], ID_numbers[i], salaries[i], email_addresses[i])\r\n return d", "def print_input(L, E, H, s, fileName):\n\tf = open(fileName, 'w')\n\tf.write(str(len(L))+'\\n') #The first line of the input should contain a single integer, which equals the number of locations\n\tf.write(str(len(H))+'\\n') #The second line should also be an integer, which equals the number of homes\n\tfor location in L:\n\t\tf.write(location+' ')\n\tf.write('\\n')\n\tfor home_id in H:\n\t\tf.write(L[home_id]+' ')\n\tf.write('\\n')\n\tf.write(s+'\\n')\n\tfor i in range(len(E)):\n\t\tfor j in range(len(E)):\n\t\t\tf.write(str(E[i][j]) + ' ')\n\t\tf.write('\\n')\n\tf.close()", "def hospital_resident(residents, hospitals, optimal=\"resident\"):\n\n if optimal == \"resident\":\n return resident_optimal(residents, hospitals)\n if optimal == \"hospital\":\n return hospital_optimal(hospitals)", "def working_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n working_employees_list = []\r\n line_list = []\r\n\r\n for i,line in enumerate(work_trips_by_date): \r\n\r\n for line in employee_list:\r\n if line[0] in work_trips_by_date[i]:\r\n working_employees_list.append(line[2]+','+line[6]+','+work_trips_by_date[i][0])\r\n \r\n return working_employees_list", "def excel_out(employees_dict, path):\n # Create workbook and worksheet\n try:\n workbook = xlsxwriter.Workbook(path)\n except:\n return False\n worksheet = workbook.add_worksheet(name='Прокуратура')\n # Add format to workbook\n format_headers_po = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 14,\n 'font_name': 'Times New Roman',\n 
'bg_color': '#FFCA28',\n 'border': 2})\n format_headers_department = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 13,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFD54F',\n 'border': 2})\n format_headers_division = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFE082',\n 'border': 2})\n format_header = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFF59D',\n 'border': 2})\n employee_format_b = workbook.add_format( {'align': 'left',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n employee_format = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n format_attribute = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 10,\n 'font_name': 'Times New Roman',\n 'border': 1})\n\n # Set width of columns and height of rows\n worksheet.set_default_row(40, False)\n worksheet.set_column(0, 0, 5)\n worksheet.set_column(1, 1, 25)\n worksheet.set_column(2, 2, 21)\n worksheet.set_column(3, 3, 21)\n worksheet.set_column(4, 4, 21)\n\n # Begin from row\n row = 0\n\n # Parser for employees dictionary\n for po in employees_dict:\n # Прокуратура\n worksheet.merge_range(row, 0, row, 4, data=po.name, cell_format=format_headers_po)\n row += 1\n # Атрибуты Прокуратуры\n row = add_attribute(po, worksheet, row, format_attribute)\n # Header\n row = add_header(worksheet, row, format_header)\n # Работники Прокуратуры\n if 'employees' in employees_dict[po]:\n for num, employee in enumerate(employees_dict[po]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Управление\n if 'departments' in employees_dict[po]:\n for department in employees_dict[po]['departments']:\n worksheet.merge_range(row, 0, row, 4, data=department.name, cell_format=format_headers_department)\n row += 1\n # Атрибуты Управления\n row = add_attribute(department, worksheet, row, format_attribute)\n # Работники Управления\n if 'employees' in employees_dict[po]['departments'][department]:\n for num, employee in enumerate(employees_dict[po]['departments'][department]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n # Отдел Управления\n if 'divisions' in employees_dict[po]['departments'][department]:\n for division in employees_dict[po]['departments'][department]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['departments'][department]['divisions'][division], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Отдел Прокуратуры\n if 'divisions' in employees_dict[po]:\n for division in employees_dict[po]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in 
enumerate(employees_dict[po]['divisions'][division], 1):\n row += add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n try:\n workbook.close()\n except:\n return False\n return True", "def hospital_download():\r\n name = request.args[\"address\"]\r\n hospitals = get_zipcode_hospitals(name)\r\n\r\n return Response(hospitals.to_json(), 200, mimetype=\"application/json\")", "def salary_data(driver):\n try:\n _base = driver.find_element_by_xpath('/descendant::p[@class=\"salary-data-amount\"][1]').text\n _total = driver.find_element_by_xpath('/descendant::p[@class=\"salary-data-amount\"][2]').text\n _base_range = driver.find_element_by_xpath('/descendant::p[@class=\"salary-data-range\"][1]').text\n _total_range = driver.find_element_by_xpath('/descendant::p[@class=\"salary-data-range\"][2]').text\n return {\n \"base\" : ''.join(list(filter(lambda c: c.isdigit(), _base))),\n \"total\" : ''.join(list(filter(lambda c: c.isdigit(), _total))),\n \"base_range\": _base_range,\n \"total_range\": _total_range\n }\n except Exception as e:\n print(\"error acquiring salary info\")\n print(e)\n pass\n return {\"base\": \"\", \"total\": \"\", \"base_range\": \"\", \"total_range\": \"\"}", "def employees(self, employees: object):\n\n self._employees = employees", "def hospital_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)", "def generate_milestone_data(supervisor_employee_dict, all_employee_dict, run_date):\n supervisor_milestone_list = []\n for supervisor_id in supervisor_employee_dict:\n supervisor_milestone_dict = {}\n employees = supervisor_employee_dict[supervisor_id]\n employee_dict = {}\n\n milestone_counter = 0\n\n # Remove the supervisor from all the employees leaving the non-managers behind\n all_employee_dict.pop(supervisor_id, None)\n supervisor_milestone_dict['supervisor_id'] = supervisor_id\n\n for emp in employees:\n hire_date = emp.get('hire_date')\n emp_id = emp.get('employee_id')\n anv_dates = calculate_anniversary_dates(\n hire_date,\n run_date\n )\n\n # This is built to support employees that share a common milestone date\n for date in anv_dates:\n group = employee_dict.setdefault(date, [])\n group.append(emp_id)\n\n # Sort the dict by date by converting into tuple and sorting\n milestone_tuple = [(v, k) for k, v in employee_dict.iteritems()]\n sorted_ms_tup = sorted(milestone_tuple, key=itemgetter(1))\n upcoming_milestone_list = []\n\n for employee_id_list, milestone_date in sorted_ms_tup:\n for emp_id in employee_id_list:\n\n # Do not print out more than 5 milestones\n if milestone_counter == 5:\n break\n\n upcoming_milestone = {\n 'employee_id': emp_id,\n 'anniversary_date': str(milestone_date)\n }\n upcoming_milestone_list.append(upcoming_milestone)\n milestone_counter += 1\n\n supervisor_milestone_dict['upcoming_milestones'] = upcoming_milestone_list\n supervisor_milestone_list.append(supervisor_milestone_dict)\n\n return supervisor_milestone_list, all_employee_dict", "def create_employee_structure(employees):\n employees_dict = {}\n for employee in position_sort(employees):\n if not employee.is_secretary:\n adder(employees_dict, employee.prosecutors_office, {'employees': [], 'departments': {}, 'divisions': {}})\n if employee.prosecutors_office and employee.department and employee.division:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n 
adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'divisions', {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office and employee.department:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'employees', [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['employees'].append(employee)\n elif employee.prosecutors_office and employee.division:\n adder(employees_dict[employee.prosecutors_office]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office:\n employees_dict[employee.prosecutors_office]['employees'].append(employee)\n return employees_dict", "def worklog(accountable):\n worklog = accountable.issue_worklog()\n headers = ['author_name', 'comment', 'time_spent']\n if worklog:\n rows = [[v for k, v in sorted(w.items()) if k in headers]\n for w in worklog]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho(\n 'No worklogs found for {}'.format(accountable.issue_key),\n fg='red'\n )", "def query_worklog(self, emp_id=None):\n\n query = \"select * from worklog\"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def available_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n available_employees_list = []\r\n total_sets = set()\r\n set_list = []\r\n\r\n for i in range(len(work_trips_by_date)):\r\n set_list.append(set(work_trips_by_date[i])) \r\n \r\n total_sets = set_list[0]\r\n \r\n if len(work_trips_by_date) != 1: \r\n for i in range(1,len(set_list)):\r\n total_sets.update(set_list[i])\r\n\r\n for line in employee_list:\r\n if line[0] not in total_sets:\r\n available_employees_list.append(line)\r\n\r\n row_names = ['id', 'name' ,'role' ,'rank'] #return columns\r\n employee_index_list = self.find_index_from_header('employee', row_names)\r\n filtered_available_employees = self.filter_by_header_index(employee_index_list, available_employees_list)\r\n\r\n available_employees_list.pop(0)\r\n\r\n return filtered_available_employees", "def atten_date(list_emp, name, start_rep, end_rep):\r\n with open(\"attendance_log.txt\", \"w\") as attendance_by_emp:\r\n # writes new\\re writes attendance_log from the beginning\r\n attendance_by_emp.seek(0)\r\n attendance_by_emp.write(\"Employee Attendance Report %s-%s:\\n\" % (start_rep, end_rep))\r\n for worker in list_emp:\r\n if worker.name == name:\r\n # found worker name in list\r\n attendance_by_emp.write(\"%s-\\n\" % worker.name)\r\n for date in worker.attendance:\r\n # writing dates in same representation for comparison\r\n date_log = time.strptime(date[:10:], \"%d/%m/%Y\")\r\n start_date = time.strptime(start_rep, \"%d/%m/%Y\")\r\n end_date = time.strptime(end_rep, \"%d/%m/%Y\")\r\n # comparing dates\r\n if date_log > start_date:\r\n if date_log < end_date:\r\n attendance_by_emp.write(\"\\t\" + date + '\\n')\r\n # finished going through dates\r\n print(\"Report issued!\\n\")\r\n return\r\n # worker not found in list\r\n 
print(\"Sorry, worker not in log\")\r\n return", "def print_individuals(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Name', 'Gender', 'Birthday', 'Age', 'Alive', 'Death', 'Child', 'Spouse']\n for i in self.individuals.values():\n pt.add_row(i.get_values())\n print(pt)", "def hospital_viewer():\r\n name = request.args[\"address\"]\r\n hospitals = get_zipcode_hospitals(name)\r\n hospitals['coordinate'] = 'end_point='+hospitals['name'].astype(str)+'&'+'end_lng=' + hospitals['lon'].astype(str)+'&'+'end_lat='+hospitals['lat'].astype(str)\r\n\r\n\r\n if len(hospitals) > 0:\r\n\r\n #genetrate folium map\r\n hospitals_coordinates = hospitals[[\"lat\", \"lon\"]].values.tolist()\r\n\r\n map=make_folium_map(hospitals_coordinates)\r\n\r\n return render_template(\r\n \"page3_2h.html\",\r\n num_hospitals=get_num_hospitals(name),\r\n address=name,\r\n hospitals=hospitals[[\"name\", \"address\", \"contact\", \"coordinate\"]].values,\r\n map=map._repr_html_()\r\n )\r\n else:\r\n\r\n lng=get_address(name)[1]\r\n lat=get_address(name)[0]\r\n near_hospital = find_5near_hospitals(lng, lat)\r\n near_hospital['coordinate'] = 'end_point='+near_hospital['name'].astype(str)+'&'+'end_lng=' + near_hospital['lon'].astype(str)+'&'+'end_lat='+near_hospital['lat'].astype(str)\r\n\r\n return render_template(\r\n \"page3_2h_nohospital.html\",\r\n address=name,\r\n near_hospital_table=near_hospital[[\"name\", \"address\", \"contact\", \"coordinate\", \"distance\"]].values,\r\n )", "def retrieve_teams():\n #print \"Print the number of teams and the members on team\"\n employee_list_total = []\n employee_number_list = []\n\n # List for keeping used numbers\n for temp in range(1000, 3000):\n employee_number_list.append([None, False]) \n\n # Read how many teams that shall be given\n stdin_input = sys.stdin.readline()\n \n try:\n # Test if input was numeric\n no_of_teams = int(stdin_input)\n \n input_rows = []\n \n # Read in all teams from stdin\n for i in range(0, no_of_teams):\n input_rows.append(sys.stdin.readline())\n \n except ValueError:\n print \"Error: Wrong input format\"\n sys.exit()\n\n for row in input_rows:\n # Split team into two members\n team = row.split()\n\n # Test if two members are given\n if len(team) != 2:\n print \"Error: Two team members must be given: Program will exit!\"\n sys.exit()\n\n temp_empl = [0, 0]\n \n try :\n # Loop both team members on row and check if the are in the list\n for i in range(0, 2):\n # Check for team on position teamnumber-1000\n if employee_number_list[int(team[i])-1000][1] == False:\n # Employee is not found in list, add it!\n temp_empl[i] = Employee(team[i]) \n employee_list_total.append(temp_empl[i])\n # Set employee to been found\n employee_number_list[int(team[i])-1000][1] = True\n # Set reference to the employee object \n employee_number_list[int(team[i])-1000][0] = temp_empl[i]\n else:\n # Retrive the employee object\n temp_empl[i] = employee_number_list[int(team[i])-1000][0]\n \n except ValueError:\n print \"Error: Input must be numeric. 
Program will exit!\"\n sys.exit()\n \n i = 0 \n for i in range(0, 2):\n # Add co_workers to respectivly employee\n if i == 0:\n temp_empl[i].add_co_worker(temp_empl[1])\n else:\n temp_empl[i].add_co_worker(temp_empl[0])\n \n # Return the list of employees\n return employee_list_total", "def test_employees_by_salary_index(self):\n key=\"employees-by-salary\"\n emps_by_salary = {50000: ['5'], 75000: ['4'], 80000: ['3'], 120000: ['2'],\n 100000: ['1']}\n self.mapper.map(\"select id, salary from redmate.employees\") \\\n .to_sorted_set(key_pattern=key, score=\"salary\")\n self.mapper.run()\n\n for sal in emps_by_salary.items():\n self.assertEqual(sal[1],\n self.redis.zrangebyscore(key, sal[0] - 1, sal[0] + 1))", "def __init__(self,name,empid,designation,experience):\n self.name = name\n self.empid = empid\n self.designation = designation\n self.experience = experience\n self.salary = self.cal_sal()", "def run():\n table = hr.get_hr_table_from_file()\n title_list = [\"ID\", \"Name\", \"BirthYear\"]\n options = [\"View records\",\n \"Add record\",\n \"Remove record\",\n \"Update record\",\n \"Which person is the oldest?\",\n \"Which person is the closet to average age?\"]\n\n\n choice = None\n while choice != \"0\":\n choice = terminal_view.get_choice_inner_menu(options, \"HR manager\")\n if choice == \"1\":\n terminal_view.print_table(table, title_list)\n elif choice == \"2\":\n record = terminal_view.get_inputs(title_list[1::],\"Please provide new item data\")\n table = hr.add(table, record)\n elif choice == \"3\":\n id_to_delete_table = terminal_view.get_inputs([\"ID\"],\"Item to delete\")\n id_to_delete = id_to_delete_table[0]\n table = hr.remove(table, id_to_delete)\n elif choice == \"4\":\n records = terminal_view.get_inputs(title_list,\"Edit item\")\n record_id = records[0]\n table = hr.update(table, record_id, records)\n elif choice == \"5\":\n oldest_person = hr.get_oldest_person(table)\n terminal_view.print_result(oldest_person, \"The oldest person: \")\n elif choice == \"6\":\n closest_to_average = hr.get_persons_closest_to_average(table)\n terminal_view.print_result(closest_to_average,\"The closest to average is: \")\n elif choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\")", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def employee_home(cls):\n return cls.__home(cls._logger)", "def load_employees(self):\n empcsv = open('employees.csv','r')\n emp_temp = []\n empcsv = empcsv.readlines()[1:]\n for line in empcsv:\n for i in line.split(','):\n if line == 0:\n pass\n else:\n emp_temp.append(i)\n employee = emp_temp[0::13]\n data_1 = []\n data = []\n for i in emp_temp:\n if i in employee:\n pass\n else:\n data_1.append(i)\n for i in range(26):\n data_temp = data_1[(i * 12):((i + 1) * 12)]\n data.append(data_temp)\n for i in range(len(employee)):\n self.emp_dict[employee[i]] = data[i]\n #print(self.emp_dict)\n for i in self.emp_dict:\n self.emp_dict[i] = [x.replace('\\n', '') for x in self.emp_dict[i]]\n return self.emp_dict", "def display_list(the_list):\n print(\"\\n===================================\")\n for person in the_list:\n print(\"{name:12s}\\t\\t{phone}\".format(name=person.name, phone=person.phone))\n if the_list == []:\n print(\"\\nNo entries found!\\n\")\n print(\"===================================\\n\")", "def check_hours():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(f\"{business_object['name']} hours 
are: \"\n f\"{business_object['hours']}\")", "def print_table(ledger):\n\n table = PrettyTable() # defines a PrettyTable object\n\n table.field_names = [\n \"hospital\",\n \"patient\",\n \"status\",\n \"nonce\",\n \"prev_hash\",\n \"a\",\n \"b\",\n \"c\",\n \"current_hash\",\n ] # define field names for table\n\n for block in ledger:\n table.add_row(\n [\n block[\"hospital\"],\n block[\"patient\"],\n block[\"status\"],\n block[\"nonce\"],\n block[\"prev_hash\"],\n block[\"a\"],\n block[\"b\"],\n block[\"c\"],\n block[\"current_hash\"],\n ]\n ) # add data to table\n\n print(\"\\n\\n\" + color.BOLD + \"Printing Your Ledger:\" + color.END)\n print(table) # print prettytable of patient info", "def make_commissioned(self,salary,commission,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"3\"\n print(\"{}{}\".format(name,\" was successfully changed to be a commissioned employee\"))\n self.emp_dict[id][7] = salary\n self.emp_dict[id][9] = commission\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def alert_worker_evaluated(hirer,worker):\n message = loader.get_template(\n 'alerts/worker_evaluated.txt').render(\n {'worker': worker, 'hirer': hirer})\n\n return message", "def print_people_strategies():\n\t\tfor person in sorted(Simulation.community):\n\t\t\tSimulation.community[person].print_info()\n\t\tPerson.person_progression.write(\"--------------- END OF WEEK ---------------\" + \"\\n\")", "def print_business(business_object):\n # OLD ----------\n # print('Business name: ' + business_object['name'])\n # print('Address: ' + business_object['address'])\n # print('City: ' + business_object['city'])\n # print('State: ' + business_object['state'])\n # print('Average Ratings: ' + str(business_object['stars']) +\n # ' Review Count: ' + str(business_object['review_count']))\n # print('categories: ' + str(business_object['categories']))\n\n print(business_object['name'])\n print(f'Address: {business_object[\"address\"]}, '\n f'{business_object[\"city\"]}, {business_object[\"state\"]}')\n print('#############################')", "def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return self.schema.dump(employees, many=True), 200", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def query_employee(self, employee_inputs):\n\n query = \"select * from employee where \"\n row_names = [\n \"emp_ID\", \"Region_ID\", \"Emp_Lname\", \"Emp_Mi\", \"Emp_Fname\",\n \"Emp_Hiredate\"\n ]\n filled_attributes = []\n\n row_index = 0\n row_options = []\n for item in employee_inputs:\n if item is not None:\n row_options.append(row_index)\n filled_attributes.append(item)\n row_index += 1\n\n j = 0\n for i in row_options:\n if j == 0:\n query += \"{}='{}' \".format(row_names[i], filled_attributes[j])\n else:\n query += \"and {}='{}' \".format(row_names[i],\n filled_attributes[j])\n j += 1\n\n try:\n self.dbCursor.execute(query)\n 
return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def earned_hw_scores(self):\r\n return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]", "def print_all(jobs):\n\n if len(jobs) == 0:\n print('print_all() recieved empty input')\n return\n\n for job in jobs:\n if job.is_relevant:\n print(job)\n else:\n continue", "def generate_payslip_data(employee_data):\n payslip_data = []\n\n for employee in employee_data:\n gross_income = monthly_gross_income(employee['annual_salary'])\n income_tax = monthly_income_tax(\n employee['annual_salary'], tax_brackets)\n net_income = monthly_net_income(\n gross_income, income_tax)\n super_amount = monthly_super_amount(\n gross_income, employee['super_rate'])\n\n payslip_data.append({\n 'full_name': employee['first_name'] + ' ' + employee['last_name'],\n 'payment_period': employee['payment_period'],\n 'gross_income': gross_income,\n 'income_tax': income_tax,\n 'net_income': net_income,\n 'super_amount': super_amount\n })\n\n return payslip_data", "def __init__(self, enclosures, employees):\n self.enclosures = enclosures\n self.employees = employees\n self.speciesList = [Monkey.getSpeciesInfo(), Gorilla.getSpeciesInfo(), PolarBear.getSpeciesInfo()]", "def test_format_name_salary(self):\n\t\tmy_employee = Employee('justin', 'williams', 80_000)\n\t\tmy_employee.format_name_salary()\n\t\tself.assertEqual(my_employee.format_name_salary(), 'Justin Williams 80000')", "def show_employee_menu(self):\n \n action_str = \"\"\n\n while True:\n print(self.LENGTH_STAR * \"*\")\n print(\"EMPLOYEES MENU\\n\")\n print(\"1 Print overview of all employees\")\n print(\"2 Pilots\")\n print(\"3 Cabin Crew\")\n print(\"B Back\\n\")\n\n action_str = self.choose_action([\"1\", \"2\" ,\"3\" ,\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"1\", \"2\", \"3\", \"b\"])\n\n if action_str == \"1\":\n self.show_overview_of_all_employees()\n\n elif action_str == \"2\":\n self.show_pilot_or_crew_menu(self.PILOT)\n\n elif action_str == \"3\":\n self.show_pilot_or_crew_menu(self.CREW)\n\n elif action_str == \"b\":\n return", "def get_employee_training(employee_id):\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(TrainingProgramEmployee)\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM\n hrapp_trainingprogramemployee te\n WHERE\n te.employee_id = ?\n \"\"\", (employee_id, ))\n\n return db_cursor.fetchall()", "def company_data(driver):\n try:\n stats_selector = \"ul.company-growth-stats.stats-list li\"\n company_stats = driver.find_elements_by_css_selector(stats_selector)\n company_info = [stat.text for stat in company_stats]\n except Exception as e:\n print(\"error acquiring company info\")\n print(e)\n else:\n try:\n employees = list(filter(lambda text: 'employees' in text, company_info))\n num_employees = ''.join(list(filter(lambda c: c.isdigit(), employees[0])))\n except Exception as e:\n num_employees = \"\"\n pass\n try:\n tenure = list(filter(lambda text: 'tenure' in text, company_info))\n avg_tenure = ''.join(list(filter(lambda c: c in '0123456789.', tenure[0])))\n except Exception as e:\n avg_tenure = \"\"\n pass\n company_info = {\n \"avg_tenure\" : avg_tenure, \n \"num_employees\" : num_employees\n }\n return {\"avg_tenure\" : avg_tenure, \"num_employees\" : num_employees}", "def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if context is None:\n context = {}\n if 'emp_hours' 
in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('emp.luggage_transfer.hours'),\n context.get('emp_hours'), [\"employee\"], context)\n args.append(('id', 'not in', [isinstance(d['employee'], tuple) and d['employee'][0] or d['employee'] for d in emp_ids]))\n if 'mission_line' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('mission_line'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n if 'illness' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.illness'),\n context.get('illness'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n\n if 'same' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('same'), [\"employee_id\"], context)\n args.append(('id', 'in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n \n if 'alternative_setting_id' in context:\n old_ids = super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, context={}, limit=limit)\n\n alternative_setting_id = context.get('alternative_setting_id')\n setting_obj = self.pool.get('hr.alternative.setting')\n alternative_setting_id = setting_obj.browse(cr, uid, alternative_setting_id)\n degrees_ids = [\n x.id for x in alternative_setting_id.degrees_ids]\n degrees_ids += degrees_ids\n degrees_ids = tuple(degrees_ids)\n\n departments_ids = [\n x.id for x in alternative_setting_id.departments_ids]\n departments_ids += departments_ids\n departments_ids = tuple(departments_ids)\n\n ex_employees_ids = [\n x.id for x in alternative_setting_id.employees_ids]\n ex_employees_ids += ex_employees_ids\n ex_employees_ids = tuple(ex_employees_ids)\n\n\n old_ids_tuple = [x[0] for x in old_ids] + [x[0] for x in old_ids]\n old_ids_tuple = tuple(old_ids_tuple)\n\n accessed_ids = self.search(cr, uid, [])\n accessed_ids += accessed_ids\n accessed_ids = tuple(accessed_ids)\n\n if not old_ids_tuple:\n old_ids_tuple = (0,0)\n \n if not departments_ids:\n departments_ids = (0,0)\n cr.execute(\n ''' Select emp.id,(SELECT MAX(date) as max_date\n FROM hr_alternative_process_line\n WHERE employee_id=emp.id and state='confirmed')date\n from hr_employee emp\n where emp.degree_id in %s \n and emp.department_id not in %s \n and emp.state = 'approved' \n and emp.payroll_state = 'khartoum' \n and emp.id in %s \n and emp.gender='male' \n and emp.id in %s \n and emp.id not in %s \n order by date NULLS LAST''', (degrees_ids,departments_ids,old_ids_tuple,accessed_ids,ex_employees_ids))\n history = cr.dictfetchall()\n new_ids = []\n while True:\n try:\n new_ids.append( history.pop()['id'] )\n except:\n break\n\n temp = dict(old_ids)\n old_ids = [x for x in old_ids if x[0] in new_ids]\n #new_ids = [x for x in new_ids if x in accessed_ids]\n #print \"..........................temp\",new_ids\n #print \"......................\",[(x, temp.get(x,False) ) for x in new_ids]\n #print \"......................\",sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n return sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n\n return super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)", "def showInfo(p,personDict):\n info1 = 
personDict['EnterpriseID'][p[0]]\n info2 = personDict['EnterpriseID'][p[1]]\n print (\"Person A:\",info1)\n print (\"Person B:\",info2)" ]
[ "0.6291626", "0.60116225", "0.5826728", "0.574985", "0.5696388", "0.5555719", "0.551769", "0.546262", "0.54150295", "0.5407232", "0.5379048", "0.5360772", "0.5356221", "0.53249127", "0.52363753", "0.52281964", "0.5221268", "0.5192776", "0.5192633", "0.51646", "0.5135642", "0.51298136", "0.5125488", "0.5119958", "0.51164687", "0.51129633", "0.50949", "0.50139856", "0.50103647", "0.49830675", "0.49729797", "0.49685016", "0.49571908", "0.4954461", "0.49385574", "0.49065405", "0.4905044", "0.48869818", "0.48855042", "0.48812824", "0.48745084", "0.4871745", "0.4849028", "0.48361784", "0.4828211", "0.4818096", "0.48110893", "0.48100483", "0.48076323", "0.4786485", "0.47718814", "0.47716033", "0.47680813", "0.47419697", "0.47297236", "0.47133914", "0.4708869", "0.4699253", "0.46925697", "0.46651775", "0.46635532", "0.46542192", "0.4647708", "0.46456003", "0.46351743", "0.46258122", "0.46203232", "0.46107423", "0.46098322", "0.46008015", "0.4599737", "0.45993465", "0.45981425", "0.45861217", "0.45761177", "0.45758832", "0.45752722", "0.45535204", "0.45487404", "0.4545335", "0.45377055", "0.45340788", "0.45283154", "0.4521991", "0.45176846", "0.45164967", "0.45139578", "0.4505787", "0.45019177", "0.44988418", "0.4497968", "0.44956914", "0.44835946", "0.44790044", "0.44776174", "0.44693795", "0.4468155", "0.4464608", "0.44627473", "0.44586283" ]
0.7293566
0
Print receipts related to the specified Student
Contains all three amounts paid: mess fees, room rent, amenities charge
def print_receipt(Student):

    pdf = FPDF('P', 'mm', 'A4')
    pdf.add_page('P')
    pdf.set_font('Times', 'B', 14)

    pdf.multi_cell(0, 5, 'Student Dues Payment Receipt')
    pdf.ln()
    pdf.multi_cell(0, 5, ('Student ID: %s' % Student.student_ID))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Name: %s' % Student.name))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Mess Fees: %s' % Student.mess_charge))
    pdf.ln()

    if Student.room_type == "S":
        room_rent = db.get("hall", Student.hall_ID, "single_room_rent")[0]
    elif Student.room_type == "D":
        room_rent = db.get("hall", Student.hall_ID, "double_room_rent")[0]

    pdf.multi_cell(0, 5, ('Room Rent: %s' % room_rent))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Amenities Charge: %s' % str(db.get("hall", Student.hall_ID, "amenities_charge")[0])))
    pdf.ln()
    pdf.multi_cell(0, 5, ('Total Amount Paid: %s' % str(Student.total_dues)))
    pdf.ln()

    # Write generated output file to PDF
    pdf.output(('receipt_%s.pdf' % Student.hall_ID), 'F')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_student_report(self):\n \n period_type = self.parameter_dict.get(\"period_type\", \"monthly\")\n insert_gender_markers = self.parameter_dict.get(\n \"insert_gender_markers\", False)\n period = [(self.start_date,self.end_date)]\n for student in self.students:\n self.table_data.append(self._generate_single_student_report_line(\n student,period, False))\n self.keys_list.append(\"\")\n self.table_descriptor = \\\n [('name','string','Name'),\n ('days_present','number', 'Days Present'),\n ('percent_present', 'number', '% Present')]", "def display_student(s_info):\n print('')\n print('Your information:')\n print(f'{s_info.student_id} - {s_info.first_name} {s_info.last_name}')", "def generate_report_sheet(self, subjects):\n\t\tif self.is_student:\n\t\t\treport_sheet = []\n\t\t\t# For each subject, find all student assessments\n\t\t\tfor subject in subjects:\n\t\t\t\tsubject_data = {\n\t\t\t\t\t'subject': subject.name\n\t\t\t\t}\n\t\t\t\tsubject_grades = {}\n\t\t\t\tassessment_types = AssessmentType.objects.filter(student_assessments__subject=subject).annotate(\n\t\t\t\t\tnumber=models.Count('student_assessments'), max_score=models.Sum('student_assessments__max_score'))\n\t\t\t\tfor assessment_type in assessment_types:\n\t\t\t\t\t# Probably will optimize this later, but ...\n\t\t\t\t\ttype_weight = StudentAssessmentTypeWeight.objects.filter(subject=subject, assessment_type=assessment_type)[0]\n\t\t\t\t\tsubject_grades[assessment_type.name] = {\n\t\t\t\t\t\t'max_score': assessment_type.max_score,\n\t\t\t\t\t\t'actual_score': 0,\n\t\t\t\t\t\t'max_percentage': type_weight.weight,\n\t\t\t\t\t\t'actual_percentage': 0,\n\t\t\t\t\t}\n\t\t\t\t\tassessments = subject.student_assessments.filter(assessment_type=assessment_type)\n\t\t\t\t\tfor assessment in assessments:\n\t\t\t\t\t\t# Assuming only one grade for now\n\t\t\t\t\t\tstudent_grade = assessment.grades.filter(student=self)[0]\n\t\t\t\t\t\tsubject_grades[assessment_type.name]['actual_score'] += student_grade.score\n\t\t\t\t\tactual_score = subject_grades[assessment_type.name]['actual_score']\n\t\t\t\t\tmax_score = subject_grades[assessment_type.name]['max_score']\n\t\t\t\t\tmax_percentage = type_weight.weight\n\t\t\t\t\tsubject_grades[assessment_type.name]['actual_percentage'] = (float(actual_score)/max_score)*max_percentage\n\t\t\t\tsubject_data['grades'] = subject_grades\n\t\t\t\treport_sheet.append(subject_data)\n\t\t\t# Use final grades to to determine score out of (weight) for each type\n\t\t\t# Determine final grade for the subject\n\t\t\t# Determine final grade (average) overall\n\t\t\tprint('Generated report sheet: {}'.format(report_sheet))\n\t\t\treturn report_sheet\n\t\telse:\n\t\t\tprint('Cannot generate a report sheet for a non-student')", "def check_status_book_students() -> None:\r\n print(f\"Returning back requests : {global_req['back']}\")\r\n print(f\"Applying for new book's requests: {global_req['new_req']}\")\r\n print(\"Format [(Student NAME, Book NAME), (Student NAME, Book NAME)]\")", "def issue_student_admission_letter(Student, body):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, 'Student Admission Letter')\n pdf.ln()\n pdf.multi_cell(0, 5, ('Name: %s' % Student.name))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Address: %s' % Student.address))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Contact Number: %s' % Student.contact_number))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Hall Allotted: %s' % str(db.get(\"hall\", Student.hall_ID, \"name\")[0])))\n pdf.ln()\n 
pdf.multi_cell(0, 5, ('Room Allotted: %s' % Student.room_no))\n pdf.ln()\n pdf.ln()\n pdf.multi_cell(0, 5, ('%s' % body))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('admission_letter_%s.pdf' % Student.student_ID), 'F')", "def student_summary(self, student_id, request, activity):\n try:\n student = User.objects.get(id=student_id)\n except User.DoesNotExist:\n return HttpResponseNotFound(\"Cet étudiant ne fait pas partie de ce cours\")\n\n if not activity.is_member(student):\n return HttpResponseNotFound(\"Cet étudiant ne fait pas partie de ce cours\")\n\n activities = [acti for acti in activity.indexed_activities() if acti.open]\n indexed_pl = {a: a.indexed_pl() for a in activities}\n all_pl = []\n for indexed in indexed_pl.values():\n all_pl += list(indexed)\n teacher_list = activity.teacher.all()\n tl_id = [t.id for t in teacher_list]\n student_list = activity.student.exclude(id__in=tl_id)\n nb_student = len(student_list) if student_list else 1\n\n grades_query = HighestGrade.objects.filter(activity__in=activities,\n pl__in=all_pl,\n user__in=student_list)\n d_grade = dict()\n for g in grades_query:\n if g.grade is not None:\n d_grade[(g.user.id, g.pl.id)] = int(g.grade)\n\n tp = list()\n for a in activities:\n question = list()\n for pl in a.indexed_pl():\n all_mark = list()\n for s in student_list:\n if (s.id, pl.id) in d_grade:\n ms = max([0, d_grade[(s.id, pl.id)]])\n else:\n ms = 0\n all_mark.append(ms)\n if (student.id, pl.id) not in d_grade:\n mark_student = 0\n else:\n mark_student = max([0, d_grade[(student.id, pl.id)]])\n state = Answer.pl_state(pl, student)\n question.append({\n 'state': state,\n 'name': pl.json['title'],\n 'all_mark': all_mark,\n 'mark': mark_student,\n 'mean': round(sum(all_mark) / (5*nb_student), 2),\n 'min': round(min(all_mark) / 5, 2),\n 'max': round(max(all_mark) / 5, 2),\n })\n len_tp = len(question) if question else 1\n all_grouped_mark = list()\n for i in range(nb_student):\n all_grouped_mark.append(sum([q['all_mark'][i] for q in question]) / len_tp)\n tp.append({\n 'name': a.activity_data['title'],\n 'activity_name': a.name,\n 'id': a.id,\n 'width': str(100 / len_tp),\n 'pl': question,\n 'all_mark': all_grouped_mark,\n 'mark': round(sum([q['mark'] for q in question]) / (5*len_tp), 2),\n 'mean': round(sum(all_grouped_mark) / (5*nb_student), 2),\n 'min': round(min(all_grouped_mark) / 5, 2),\n 'max': round(max(all_grouped_mark) / 5, 2),\n })\n\n len_act = sum([len(t['pl']) for t in tp]) if [len(t['pl']) for t in tp] else 1\n all_act_mark = list()\n for i in range(nb_student):\n sum_mark = 0\n for t in tp:\n sum_mark += sum([e['all_mark'][i] for e in t['pl']])\n all_act_mark.append(sum_mark / len_act)\n course_mark = sum([sum([e['mark'] for e in t['pl']]) for t in tp]) / len_act\n return render(request, 'activity/activity_type/course/student_summary.html', {\n 'state': [i for i in State if i != State.ERROR],\n 'course_name': activity.name,\n 'student': student,\n 'activities': tp,\n 'course_id': activity.id,\n 'mark': round(course_mark / 5, 2),\n 'mean': round(sum(all_act_mark) / (5*nb_student), 2),\n 'min': round(min(all_act_mark) / 5, 2),\n 'max': round(max(all_act_mark) / 5, 2),\n 'nb_more': sum([1 for m in all_act_mark if m > course_mark]),\n 'nb_less': sum([1 for m in all_act_mark if m < course_mark]),\n })", "def report(entries: List[StudentEntry]):\n pass", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = 
hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # a list of (project_title, grade) for a given student\n titles_grades = hackbright.get_grades_by_github(github)\n\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n titles_grades=titles_grades)\n\n return html", "def print_sales_report(melons_by_salesperson):\n\n for salesperson, melons_sold in melons_by_salesperson.items():\n print(f'{salesperson} sold {melons_sold} melons')", "def __ui_list_grades_by_student(self):\n student_id = input(\"Give student ID: \")\n try:\n list_of_grades = self.__grade_controller.get_grades_by_student(student_id)\n if len(list_of_grades) == 0:\n print(\"Student doesn't have any grade.\")\n return\n\n for g in list_of_grades:\n print(str(g))\n\n except GradeException as ge:\n print(ge)\n return", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def view_all_students():\n message = ''\n global conn\n with conn:\n rows = select_all_students(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Student Table', message)", "def payment_report_gen(sid, bid, day):\n results = check_payment(sid, bid, day)\n if not results:\n print(\"Cannot Find a Transaction with sid: \" + str(sid) + \" bid: \" + str(bid) + \" day: \" + str(day))\n general_data = [str(value) for key, value in results[0].items()]\n order_title = [\"Renter Id\", \"Boat Id\", \"Date\", \"Price\"]\n row_title = [\"Pay Date\", \"Amount\"]\n order_format = \"{:>20}\"\n for i in range(len(order_title)):\n print(order_format.format(order_title[i] + \": \" + general_data[i]), end=\"\")\n print(\"\")\n print(\"----------------------------------------------------------------------------------\")\n if len(results) > 1:\n row_format = \"{:>20}\" * len(results[1])\n print(row_format.format(*row_title))\n print(\"----------------------------------------------------------------------------------\")\n for result in results[1:]:\n print(row_format.format(*[str(value) for key, value in result.items()]))", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n\n return html", "def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))", "def student(identificator):\n student_table = db.get_table('student')\n student = student_table.get(identificator)\n if student is None:\n abort(404)\n discipline = db.get_table('discipline')\n disciplines = discipline.get()\n scores = student_table.get_scores(identificator)\n for each in disciplines:\n if each['id'] not in scores:\n scores[each['id']] = {'score': '', 'id': 0}\n form = StudentForm()\n return render_template(\n 'student.html', student=student,\n form=form, 
disciplines=disciplines,\n scores=scores\n )", "def compute_mess_payment(self, student_table):\n\n mess_total = 0.\n\n for key in student_table:\n if student_table[key].hall_ID == self.hall_ID:\n mess_total = mess_total + student_table[key].mess_charge\n\n return mess_total", "def _generate_single_student_report_line(self, student_record, periods,\n use_period_separator = True, separator = None,):\n line = [student_record.full_name_lastname_first()]\n for period in periods:\n # if requested insert a separator between each period but\n # not between the name and the first period\n if (use_period_separator and (len(line) > 1)):\n line.append(separator)\n school_days, days_present = \\\n student_record.attendance.get_summary(\n period[0], period[1])\n if not school_days:\n school_days = 1\n percent_present = round((100.0 *days_present / school_days), 1)\n days_absent = school_days - days_present\n percent_absent = 100.0 - percent_present\n line.extend((days_present, percent_present))\n #days_absent, percent_absent))\n return line", "def get_student():\n\n github = request.args.get('github')\n\n # print (\"aaaaaa\",hackbright.get_student_by_github(github))\n\n # if hackbright.get_student_by_github(github):\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # html = render_template(\"student_info.html\",\n # first = first,\n # last = last,\n # github=github)\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github)", "def student_summary() -> str:\n db_path: str = \"810_startup.db\"\n\n try:\n db: sqlite3.Connection = sqlite3.connect(db_path)\n except sqlite3.OperationalError:\n return f'Error: Unable to open database at path {db_path}'\n else:\n query: str = \"select students.Name, students.CWID, grades.Course, grades.Grade, instructors.Name from students,grades,instructors where students.CWID=StudentCWID and InstructorCWID=instructors.CWID order by students.Name\"\n data: Dict[str, str] = [{'Name': name, 'CWID': cwid, 'Course': course, 'Grade': grade, 'Instructor': instructor} for name, cwid, course, grade, instructor in db.execute(query)]\n\n db.close()\n\n return render_template(\n 'students.html',\n title = 'Stevens Repository',\n table_title = 'Students Summary',\n students = data)", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)", "def find_some_item_from_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n print('{:10s}{:10s}{:10s}'.format('일련번호', '평균', 'Grade'))\n print(target_list[['average', 'grade']].to_string(header=False, col_space=10))", "def print_all_students(filename): \n records = records_from_file(filename)\n students_and_id = all_students(records)\n print(\"All students:\")\n for id_num, student in sorted(students_and_id.items()):\n stud_first_name = student[0]\n stud_last_name = student[1] \n print((\" {0}: {1} {2}\").format(id_num, stud_first_name, \n stud_last_name.upper()))\n print()", "def searchStudent():\n os.system(\"cls\")\n print(\"Input Number: \")\n print(\"Press q to abort\\nPress enter if you done typing\")\n keyin = msvcrt.getwch()\n if keyin == 'q':\n print(\"Searching Aborted.\")\n return\n state, s, hashID = t.search(keyin)\n os.system(\"cls\")\n for k in s:\n 
print(k)\n while 1:\n print(\"Input Number:\", keyin)\n print(\"Press q to abort\\nPress enter if you done typing\")\n keyinnow = msvcrt.getwch()\n if keyinnow == 'q':\n print(\"Searching Aborted.\")\n return\n elif keyinnow == '\\x08':\n keyin = keyin[:-1]\n elif keyinnow == '\\r':\n break\n else:\n keyin += keyinnow\n os.system(\"cls\")\n state, s, hashID = t.search(keyin)\n for j in s:\n print(j)\n number = keyin\n state, s, hashID = t.search(number)\n if state == 1:\n student = ht.getIndex(hashID, number)\n inp1 = 0\n while inp1 != 4:\n inp1 = int(input(\"1. View Student\\n2. Delete Student\\n3. Edit Student\\n4. Exit\\n\"))\n if inp1 == 1:\n print(\"Name:\", student.data.name)\n print(\"Number:\", student.data.number)\n print(\"GPA:\", student.data.gpa)\n print(\"Field:\", student.data.field)\n if inp1 == 2:\n deleteStudent(hashID, number)\n break\n if inp1 == 3:\n editStudent(hashID, number)\n break\n else:\n print(\"student doesn't exist.\")", "def print_receipt(self) -> typing.List[str]:\n lines = []\n euro_total=0\n usd_total=0\n gbp_total=0\n\n for item in self._items.items():\n euro_price = self._get_product_price(item[0]) * item[1]\n usd_price = self.get_price_in_currency(euro_price,\"USD\")\n gbp_price = self.get_price_in_currency(euro_price,\"GBP\")\n\n euro_total += euro_price\n usd_total += usd_price\n gbp_total += gbp_price\n\n euro_price_string = \"€%.2f\" % euro_price\n usd_price_string = \"$%.2f\" % usd_price\n gbp_price_string = \"£%.2f\" % gbp_price\n \n lines.append(item[0] + \" - \" + str(item[1]) + ' - ' + euro_price_string + ' - ' + \\\n usd_price_string + ' - ' + gbp_price_string)\n \n euro_total_str=\"€%.2f\" % euro_total\n usd_total_str=\"$%.2f\" % usd_total\n gbp_total_str=\"£%.2f\" % gbp_total\n\n lines.append(\"Total = \"+euro_total_str+ ' - ' + usd_total_str + ' - ' + gbp_total_str)\n logging.info(str(datetime.now())+': Receipt =' +str(lines))\n return lines", "def input_student_progression():\r\n global progress, trailer, retriever, exclude\r\n\r\n credit = None\r\n credit_list = []\r\n\r\n while True:\r\n credit_list = []\r\n for name in[\"Pass\", \"Defer\", \"Fail\"]:\r\n\r\n while True:\r\n credit = Student_Validation.data_type_validation(name)\r\n if credit is None:\r\n continue\r\n else:\r\n range_check = Student_Validation.range_validation(credit)\r\n if range_check:\r\n credit_list.append(credit)\r\n break\r\n total = credit_list[0] + credit_list[1] + credit_list[2]\r\n if total == 120:\r\n break\r\n else:\r\n cprint(\"Total is incorrect\", \"magenta\")\r\n continue\r\n\r\n # calling outcomes()\r\n progression = outcomes(credit_list)\r\n cprint(progression, \"yellow\") # printing progression of the student\r\n\r\n # calculating no of progressions\r\n if progression == \"Progress\":\r\n progress += 1\r\n elif progression == \"Progress(module trailer)\":\r\n trailer += 1\r\n elif progression == \"Do not progress – module retriever\":\r\n retriever += 1\r\n else:\r\n exclude += 1\r\n\r\n # returning calculated progressions\r\n return [progress, trailer, retriever, exclude]", "def student_format(student):\r\n s = student\r\n return '{0}, {1}, {2}, {3}'.format(s[0], s[1], s[2], s[3])", "def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n html = render_template('student_info.html',\n first=first,\n last=last,\n github=github)\n return html", "def summary_print(self):\r\n self.ensure_one()\r\n self.sent = True\r\n #return self.env['ir.actions.report'].report_action(self, 
'proandsys_purchase_14.summary_landed_report')\r\n return self.env.ref('proandsys_purchase_14.summary_landedcost').report_action(self)", "def _progress_summary(student, request, course):\r\n with manual_transaction():\r\n field_data_cache = FieldDataCache.cache_for_descriptor_descendents(\r\n course.id, student, course, depth=None\r\n )\r\n # TODO: We need the request to pass into here. If we could\r\n # forego that, our arguments would be simpler\r\n course_module = get_module_for_descriptor(student, request, course, field_data_cache, course.id)\r\n if not course_module:\r\n # This student must not have access to the course.\r\n return None\r\n\r\n submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))\r\n\r\n chapters = []\r\n # Don't include chapters that aren't displayable (e.g. due to error)\r\n for chapter_module in course_module.get_display_items():\r\n # Skip if the chapter is hidden\r\n if chapter_module.hide_from_toc:\r\n continue\r\n\r\n sections = []\r\n\r\n for section_module in chapter_module.get_display_items():\r\n # Skip if the section is hidden\r\n with manual_transaction():\r\n if section_module.hide_from_toc:\r\n continue\r\n\r\n graded = section_module.graded\r\n scores = []\r\n\r\n module_creator = section_module.xmodule_runtime.get_module\r\n\r\n for module_descriptor in yield_dynamic_descriptor_descendents(section_module, module_creator):\r\n course_id = course.id\r\n (correct, total) = get_score(\r\n course_id, student, module_descriptor, module_creator, scores_cache=submissions_scores\r\n )\r\n if correct is None and total is None:\r\n continue\r\n\r\n scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))\r\n\r\n scores.reverse()\r\n section_total, _ = graders.aggregate_scores(\r\n scores, section_module.display_name_with_default)\r\n\r\n module_format = section_module.format if section_module.format is not None else ''\r\n sections.append({\r\n 'display_name': section_module.display_name_with_default,\r\n 'url_name': section_module.url_name,\r\n 'scores': scores,\r\n 'section_total': section_total,\r\n 'format': module_format,\r\n 'due': get_extended_due_date(section_module),\r\n 'graded': graded,\r\n })\r\n\r\n chapters.append({\r\n 'course': course.display_name_with_default,\r\n 'display_name': chapter_module.display_name_with_default,\r\n 'url_name': chapter_module.url_name,\r\n 'sections': sections\r\n })\r\n\r\n return chapters", "def printPassbook(self) :\n for expense in self.__passbook:\n print(expense.toString())", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def get_student():\n\n github = request.args.get('github')\n if not github:\n return \"Please enter a student!\"\n\n student = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n if not student:\n return \"There is no student with github \\\"{}\\\".\".format(github)\n\n first, last, github = student\n # return \"{acct} is the GitHub account for {first} {last}\".format(\n # acct=github, first=first, last=last)\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n return html", "def send_mail_to_student(student, param_dict):\r\n\r\n # add some helpers and microconfig subsitutions\r\n if 'course' in param_dict:\r\n param_dict['course_name'] = param_dict['course'].display_name_with_default\r\n param_dict['site_name'] = 
microsite.get_value(\r\n 'SITE_NAME',\r\n param_dict.get('site_name', '')\r\n )\r\n\r\n subject = None\r\n message = None\r\n\r\n message_type = param_dict['message']\r\n\r\n email_template_dict = {\r\n 'allowed_enroll': ('emails/enroll_email_allowedsubject.txt', 'emails/enroll_email_allowedmessage.txt'),\r\n 'enrolled_enroll': ('emails/enroll_email_enrolledsubject.txt', 'emails/enroll_email_enrolledmessage.txt'),\r\n 'allowed_unenroll': ('emails/unenroll_email_subject.txt', 'emails/unenroll_email_allowedmessage.txt'),\r\n 'enrolled_unenroll': ('emails/unenroll_email_subject.txt', 'emails/unenroll_email_enrolledmessage.txt'),\r\n }\r\n\r\n subject_template, message_template = email_template_dict.get(message_type, (None, None))\r\n if subject_template is not None and message_template is not None:\r\n subject = render_to_string(subject_template, param_dict)\r\n message = render_to_string(message_template, param_dict)\r\n\r\n if subject and message:\r\n # Remove leading and trailing whitespace from body\r\n message = message.strip()\r\n\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n\r\n send_mail(subject, message, from_address, [student], fail_silently=False)\r\n\r\n return True\r\n else:\r\n return False", "def __ui_list_students(self):\n try:\n print(str(self.__student_controller))\n except RepositoryException as re:\n print(re)\n return", "def return_book():\r\n ret_status = status_check()\r\n name = input('Enter Your name :')\r\n if name.lower() in ret_status['total_students']:\r\n if name.lower() in ret_status['reserved_students']:\r\n global_req[\"back\"].append((name.lower(), students_di[name.lower()][-1]))\r\n print(\"\\n You request is updated please contact admin for further details\")\r\n else:\r\n print(\"\\n No book is assigned to you, You can request for new book\")\r\n else:\r\n print(f\"You don't have Membership please contact Admin\")\r\n logging.warning(f\"You({name}) don't have Membership please contact Admin\")", "def renter_accounting_report_gen(sid, start, end):\n results = renter_accounting(sid, start, end)\n print(\"Name: \" + results[0])\n sum_value = 0\n row_title = [\"Date\", \"Boat\", \"Rent\", \"Payment\", \"Sum\"]\n row_format = \"{:>15}\" * len(row_title)\n print(row_format.format(*row_title))\n for result in results[1]:\n temp = list(result.keys()) + [value for key, value in list(result.values())[0].items()]\n if temp[2]:\n sum_value += temp[3]\n temp[2] = \"\"\n else:\n sum_value -= temp[3]\n temp[2] = temp[3]\n temp[3] = \"\"\n temp.append(sum_value)\n print(row_format.format(*[str(x) for x in temp]))", "def call_transfer_fund(self):\n ## 1) Create expense line for current student\n ## 2) Create Deposite lines for oney transfer student\n\n ## 1\n student_pool = self.env['op.student']\n partner_obj = self.env['res.partner']\n employee_pool = self.env['hr.employee']\n\n if not self.pin_varification:\n raise except_orm(_('Warning!'),\n _(\"Enter Valid PIN to proceed!\"))\n\n\n student_id = student_pool.search([('user_id', '=', self._uid)])\n\n ## Validate Enter PIN\n if student_id:\n self.validate_current_user_pin(student_id)\n\n expense_vals = {\n 'name': student_id.id,\n 'amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s\" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n 
'create_invoice': False,\n # 'student_id': student_id.id,\n }\n\n student_expenses_id = self.env['student.expenses'].sudo().create(expense_vals)\n self.total_expense_balance = student_id.stud_balance_amount\n\n ## Get employee form account id\n employee_id = employee_pool.sudo().search([('ean13', '=', self.account_no)])\n\n ## Search EMployee By Employee ID\n search_by_id_employee_id = employee_pool.sudo().search([('identification_id', '=', self.account_no)])\n\n ## Search by student matrix ID\n search_by_id_student_id = student_pool.sudo().search([('gr_no', '=', self.account_no)])\n\n if not self.account_no:\n ## Logic for search by User Name\n employee_id = self.pass_employee_id.sudo()\n student_id = self.pass_student_id.sudo()\n else:\n ## Get partner form account id\n student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if student_id:\n deposite_vals = {\n 'name': student_id.id,\n # 'amount': self.amount_to_transfer,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n if not self.account_no:\n trans_student_id = student_id.sudo()\n else:\n trans_student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if trans_student_id:\n self.total_deposite_balance = trans_student_id.stud_balance_amount\n elif employee_id:\n deposite_vals = {\n 'name': employee_id.id,\n 'employee_id': employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = employee_id.available_balance\n\n elif search_by_id_employee_id:\n deposite_vals = {\n 'name': search_by_id_employee_id.id,\n 'employee_id': search_by_id_employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_employee_id.available_balance\n\n elif search_by_id_student_id:\n deposite_vals = {\n 'name': search_by_id_student_id.id,\n 'employee_id': search_by_id_student_id.gr_no,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_student_id.stud_balance_amount\n\n # return True\n compose_form = self.env.ref('deposite_management.transfer_confirmation_popup_view', False)\n\n try:\n template_id = self.env.ref('deposite_management.email_template_student_fund_transfer', False)\n except ValueError:\n template_id = False\n values = self.env['email.template'].generate_email(template_id.id, self.id)\n\n ## Append Student email id to send mail\n if values and 'email_to' in values:\n values['email_to'] = 
student_id.sudo().email\n mail_id = self.env['mail.mail'].sudo().create(values)\n if mail_id:\n mail_send_id = mail_id.send()\n\n try:\n template_id_new = self.env.ref('deposite_management.email_template_student_fund_transfer_self_notification', False)\n except ValueError:\n template_id_new = False\n values_new = self.env['email.template'].generate_email(template_id_new.id, self.id)\n ## Append email id to send mail\n if values_new and 'email_to' in values_new:\n if student_id and trans_student_id:\n values_new['email_to'] = trans_student_id.email\n elif employee_id:\n values_new['email_to'] = employee_id.sudo().work_email\n mail_id_new = self.env['mail.mail'].sudo().create(values_new)\n if mail_id_new:\n mail_send_id = mail_id_new.send()\n ## return wizard after click on Fund Transfer Button\n return {\n 'name': _('Fund Transfer Done'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fund.confirmation.msg',\n 'view_id': compose_form.id,\n 'target': 'new',\n }", "def receipt(basket):\n\n cprint(\"\"\"\\n\\n Item Price Discount Final Price\n------------------------------------------------------------------\"\"\")\n sigma_all = sum([e[1] for e in basket])\n sigma_discount = 0\n for name, price, discount in basket:\n discounted_price = (100 - discount) / 100 * price\n cprint(\"| %16s | £%10.2f | %3d\" % (name, price, discount) + \"%\" + f\" | £%10.2f |\" % discounted_price)\n sigma_discount += discounted_price\n cprint(\"|________________________________________________________________|\")\n\n cprint(\"\\n\\nTotal Price: £%.2f\" % sigma_all)\n cprint(\"Total Discount: £%.2f\" % (sigma_all - sigma_discount))\n cprint(\"Final Price: £%.2f\" % sigma_discount)\n\n cprint(\"\\nThank you for shopping at \" + SHOP_NAME)", "def progress_summary(student, request, course):\r\n with manual_transaction():\r\n return _progress_summary(student, request, course)", "def sa_summary_pdf(sa_id):\n pass", "def main():\n student_info = prompt_student()\n display_student(student_info)", "def print_report():\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_data):\n name = donor[\"name\"]\n total = sum(donor[\"donations\"])\n num_gift = len(donor[\"donations\"])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)", "def get_messages(self, student):\n students = self.choose_students(student)\n return ((to_student, self.generate_messages(student, to_student)) for to_student in students)", "def student_account(request, s_id):\n try:\n student_object = Student.objects.select_related('user').values('user_id',\n 'user__username',\n 'user__first_name',\n 'user__last_name',\n 'user__email',\n 'parent_tel_numb',\n 'parent_f_name',\n 'parent_patronimic',\n 'parent_l_name',\n 'student_group_id').get(user_id=s_id)\n\n student = dict(id=student_object['user_id'], username=student_object['user__username'],\n first_name=student_object['user__first_name'],\n last_name=student_object['user__last_name'],\n parent_tel_numb=student_object['parent_tel_numb'],\n parent_f_name=student_object['parent_f_name'],\n parent_patronimic=student_object['parent_patronimic'],\n parent_l_name=student_object['parent_l_name'], email=student_object['user__email'],\n student_group_id=student_object['student_group_id'], 
student_group=\"\")\n\n if student['student_group_id'] is None:\n student['student_group'] = \"No group\"\n else:\n student['student_group'] = '%s%s' % (StudentGroup.objects.get(id=student['student_group_id']).year,\n StudentGroup.objects.get(id=student['student_group_id']).name)\n\n return render(request, 'main/student_account.html', {'student': student})\n except ObjectDoesNotExist:\n return HttpResponse(\"The student could not be found\")", "def print_squad_questions(subject=None):\n\n squad_data = import_squad_data()\n\n if subject:\n if subject == \"all\":\n squad_records = squad_data\n else:\n squad_records = squad_data.loc[squad_data[\"subject\"] == subject]\n if squad_records.empty:\n print(\"Subject not found in SQuAD dev-v2.0 dataset.\")\n return\n else:\n print(squad_data[\"subject\"].unique())\n print(\n \"Please specify a subject from the list above, or choose 'all', e.g. print_squad_questions(nlp.import_squad_data(), subject='Normans'\"\n )\n return\n\n for _, row in squad_records.iterrows():\n print(\"\\n=============================\")\n print(\"Id: \", row[\"id\"])\n print(\"Reading from: \", row[\"subject\"])\n print(\"\\nContext: \", row[\"context\"])\n print(\"--\")\n print(\"Question: \", row[\"question\"])\n print(\"Answer: \", row[\"answer\"])", "def classmates(self, request, pk=None):\n\n obj = self.get_object().subject\n try:\n query = models.Students.objects.filter(student_sub__subject=obj)\n serializer = self.get_serializer(query, many=True)\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def Reports(request):\n assert isinstance(request, HttpRequest)\n iscapable =False\n if request.user.username in get_librarians():\n iscapable=True;\n #getting books per each department\n booksperdepart={}\n borrowedperStudent={}\n ordersplacedbylibrairans={}\n \n books=get_valid_Books()\n invent=get_Inv()\n for k,v in books.items():\n if v.dpt_id.name not in booksperdepart.keys():\n booksperdepart[v.dpt_id.name]=v.invt.qty\n else:\n li =booksperdepart[v.dpt_id.name]\n li+=v.invt.qty\n booksperdepart[v.dpt_id.name]=li\n libmem =get_libmems()\n borrowed=get_Borrowed()\n for k,v in borrowed.items():\n composite=v.cwid.cwid.stu_name+\" - \"+v.cwid.cwid.stu_id\n if composite not in borrowedperStudent.keys():\n borrowedperStudent[composite]=1\n else:\n li =borrowedperStudent[composite]\n li+=1\n borrowedperStudent[composite]=li\n librianorders=get_LibrarianOrders()\n for k,v in librianorders.items():\n composite=v.lb_id.name+\" - \"+v.lb_id_id\n if composite not in ordersplacedbylibrairans.keys():\n ordersplacedbylibrairans[composite]=[list([v.i_id.i_id.title,v.qty,v.i_id.i_id.dpt_id.name,v.status])]\n else:\n li =ordersplacedbylibrairans[composite]\n li.append(list([v.i_id.i_id.title,v.qty,v.i_id.i_id.dpt_id.name,v.status]))\n ordersplacedbylibrairans[composite]=li\n\n \n\n\n \n\n return render(\n request,\n 'app/reports.html',\n {\n 'title':'Reports Page',\n 'perdptbks':list(zip(booksperdepart.keys(),booksperdepart.values())),\n 'peruserbks':list(zip(borrowedperStudent.keys(),borrowedperStudent.values())),\n 'perlibrarian':list(zip(ordersplacedbylibrairans.keys(),ordersplacedbylibrairans.values())),\n 'iscapable':iscapable,\n 'year':datetime.now().year,\n }\n )", "def select_student_enrollment_detailed(self, student_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, 
cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN course_enrollments ce\n ON ce.course_section_id = cs.course_section_id AND ce.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE ce.student_id = ?\"\"\",\n (student_id,),\n )\n return cursor.fetchall()", "def grade_report(course):\n report = []\n for st in course.get_students():\n try:\n average = sum(course.get_grades(st)) / len(course.get_grades(st))\n report.append(str(st) + '\\'s mean grade is: ' + str(average) + '.')\n except ZeroDivisionError:\n report.append(str(st) + ' has no grades.')\n return '\\n'.join(report)", "def search():\n student_to_find=request.args.get(\"student\", None)\n print(f\"A buscar: {student_to_find}\")\n student_list=search_student(student_to_find)\n return render_template(\"search.html\",student_list_result=student_list)", "def test_get_students_for_contact(self):\n pass", "def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n\n\n rows = hackbright.list_projects(github)\n\n return render_template (\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows\n )", "def print_the_contents_of_all_entries(self):\n\n if len(self.student_list):\n self.print_dataframe(self.student_list)\n else:\n print('There is no contents to show')", "def view_students(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Students',\n\t}\n\treturn render(request, \"viewStudent.html\", context_dict)", "def req_advisor(std_deviation):\r\n students = []\r\n for i in records:\r\n if int(i[i.find(',')+1:]) < std_deviation:\r\n students.append(i[:i.find(',')])\r\n if students != []:\r\n print(\"List of students who need to see an advisor:\")\r\n for i in students:\r\n print(i)", "def print_results(list_object1, list_object2):\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n\n print()\n print(\"{:>{}}\".format(\"Student ID\",STUDENT_COLUMN),end=\"\")\n\n for i in range(len(list_object1)):\n print(\"{:>{}}\".format(list_object1[i][0],GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(\"Course grade\",GENERAL_COLUMN))\n\n for tuple_element in list_object2:\n\n print(\"{:>{}}\".format(tuple_element[0],STUDENT_COLUMN),end=\"\")\n\n for i, value in enumerate(tuple_element[1]):\n print(\"{:>{}}\".format(value,GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(round(tuple_element[-1],2),GENERAL_COLUMN))", "def send_mail_to_student(student, param_dict):\r\n\r\n # add some helpers and microconfig subsitutions\r\n if 'course' in param_dict:\r\n param_dict['course_name'] = param_dict['course'].display_name_with_default\r\n\r\n param_dict['site_name'] = microsite.get_value(\r\n 'SITE_NAME',\r\n param_dict['site_name']\r\n )\r\n\r\n subject = None\r\n message = None\r\n\r\n # see if we are running in a microsite and that there is an\r\n # activation email template definition available as configuration, if so, then render that\r\n message_type = param_dict['message']\r\n\r\n email_template_dict = {\r\n 'allowed_enroll': (\r\n 'emails/enroll_email_allowedsubject.txt',\r\n 'emails/enroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_enroll': (\r\n 'emails/enroll_email_enrolledsubject.txt',\r\n 'emails/enroll_email_enrolledmessage.txt'\r\n ),\r\n 'allowed_unenroll': (\r\n 'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_unenroll': (\r\n 
'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_enrolledmessage.txt'\r\n ),\r\n 'add_beta_tester': (\r\n 'emails/add_beta_tester_email_subject.txt',\r\n 'emails/add_beta_tester_email_message.txt'\r\n ),\r\n 'remove_beta_tester': (\r\n 'emails/remove_beta_tester_email_subject.txt',\r\n 'emails/remove_beta_tester_email_message.txt'\r\n ),\r\n }\r\n\r\n subject_template, message_template = email_template_dict.get(message_type, (None, None))\r\n if subject_template is not None and message_template is not None:\r\n subject = render_to_string(subject_template, param_dict)\r\n message = render_to_string(message_template, param_dict)\r\n\r\n if subject and message:\r\n # Remove leading and trailing whitespace from body\r\n message = message.strip()\r\n\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n\r\n send_mail(subject, message, from_address, [student], fail_silently=False)", "def _get_notifications(self):\r\n student = self._student('GET')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n self._success_response({\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_required': student.num_required,\r\n 'count_graded': student.num_graded,\r\n 'count_available': student.num_pending\r\n })", "def problem6(self, s):\n\n print(\"Correct responses:\")\n postal_problem(grading=True)\n print(\"\\nStudent responses:\")\n x = s.postal_problem()\n if x is not None:\n print x\n \n return self.grade(10)", "def display(self):\n print(f'{self.first_name} {self.last_name}, Customer#: '\n f'{self.customer_id}\\n{self.address}\\n{self.phone_number}\\n'\n f'{self.create_invoice()}')", "def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)", "def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)", "def student_dashboard(self, request, activity, session):\n if request.method == \"GET\" and request.GET.get(\"studentid\"):\n if int(request.GET.get(\"studentid\")) == request.user.id:\n return self.student_summary(request.GET.get(\"studentid\"), request, activity)\n raise PermissionDenied()", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_pass = marks.filter(name='Зачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_4 < counter_5:\n students_to_return.append(student)\n \n \n students_to_send = 
normalize_students(students_to_return)\n return Response(students_to_send)", "def add_student(self, student: 'Student') -> None:\n # Add HOUSEHOLD attributes to the schools' composition\n self.total += 1\n self.composition += student.household.attributes\n self.students[student.idx] = student\n self.has_space = (self.total < self.capacity)", "def __str__(self):\n return \"student:\"+str(self.name)+\":\"+str(self.age)+\":\"+str(self.major)", "def get_student():\n\n # github = \"jhacks\"\n github = request.args.get('github','jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n return render_template(\"student_info.html\" , first=first, gorilla=last, giraffe=github)\n # return \"%s is the GitHub account for %s %s\" % (github, first, last)", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n return render_template('student_info.html',\n first=first,\n last=last,\n github=github)\n \n\n #return \"{} is the GitHub account for {} {}\".format(github, first, last)", "def general_export(request):\n export_fields = OrderedDict(GENERAL_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(archived=False).order_by('klass__name', 'last_name', 'first_name')\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n elif field in ('dispense_ecg', 'dispense_eps', 'soutien_dys'):\n values.append('Oui' if line[field] is True else '')\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('general_export')", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_pass = marks.filter(name='Зачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_4 > counter_5:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def _warn_no_students(self):\n message = \"<tr><h2>No student records were found</h2></tr>\"\n self.add_element(message,True,0,True)", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_2 = marks.filter(name='2')\n mark_id_3 = marks.filter(name='3')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n 
students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n\n counter_2 = 0\n counter_3 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n if str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_2 == 0 and counter_3 == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_2 = marks.filter(name='2')\n mark_id_3 = marks.filter(name='3')\n mark_id_4 = marks.filter(name='4')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n\n counter_2 = 0\n counter_3 = 0\n counter_4 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n if str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n \n if str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_2 == 0 and counter_3 == 0 and counter_4 == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_pass = marks.filter(name='Зачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def generate_roster_pdf(sched_act_ids, include_instructions):\n\n pdf_buffer = BytesIO()\n h_margin = 1 * inch\n v_margin = 0.5 * inch\n doc = SimpleDocTemplate(pdf_buffer, pagesize=letter,\n rightMargin=h_margin, leftMargin=h_margin,\n topMargin=v_margin, bottomMargin=v_margin)\n\n elements = []\n\n styles = getSampleStyleSheet()\n styles.add(ParagraphStyle(name=\"Center\", alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"BlockLetter\", fontSize=60, leading=72, alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"BlockLetterSmall\", fontSize=30, leading=72, alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"BlockLetterSmallest\", fontSize=20, 
leading=72, alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"ActivityAttribute\", fontSize=15, leading=18, alignment=TA_RIGHT))\n\n for i, said in enumerate(sched_act_ids):\n sact = EighthScheduledActivity.objects.get(id=said)\n\n sponsor_names = sact.get_true_sponsors().values_list(\"first_name\",\n \"last_name\")\n sponsors_str = \"; \".join(l + \", \" + f for f, l in sponsor_names)\n\n room_names = sact.get_true_rooms().values_list(\"name\", flat=True)\n if len(room_names) == 1:\n rooms_str = \"Room \" + room_names[0]\n else:\n rooms_str = \"Rooms: \" + \", \".join(r for r in room_names)\n\n block_letter = sact.block.block_letter\n\n if len(block_letter) < 4:\n block_letter_width = 1 * inch\n block_letter_width += (0.5 * inch) * (len(block_letter) - 1)\n block_letter_style = \"BlockLetter\"\n elif len(block_letter) < 7:\n block_letter_width = 0.4 * inch\n block_letter_width += (0.3 * inch) * (len(block_letter) - 1)\n block_letter_style = \"BlockLetterSmall\"\n else:\n block_letter_width = 0.3 * inch\n block_letter_width += (0.2 * inch) * (len(block_letter) - 1)\n block_letter_style = \"BlockLetterSmallest\"\n\n header_data = [[\n Paragraph(\"<b>Activity ID: {}<br />Scheduled ID: {}</b>\".format(sact.activity.id, sact.id), styles[\"Normal\"]),\n Paragraph(\"{}<br/>{}<br/>{}\".format(sponsors_str,\n rooms_str,\n sact.block.date.strftime(\"%A, %B %-d, %Y\")),\n styles[\"ActivityAttribute\"]),\n Paragraph(block_letter, styles[block_letter_style])\n ]]\n header_style = TableStyle([\n (\"VALIGN\", (0, 0), (0, 0), \"TOP\"),\n (\"VALIGN\", (1, 0), (2, 0), \"MIDDLE\"),\n (\"TOPPADDING\", (0, 0), (0, 0), 15),\n (\"RIGHTPADDING\", (1, 0), (1, 0), 0),\n ])\n\n elements.append(Table(header_data, style=header_style, colWidths=[2 * inch, None, block_letter_width]))\n elements.append(Spacer(0, 10))\n elements.append(Paragraph(sact.full_title, styles[\"Title\"]))\n\n num_members = sact.members.count()\n num_members_label = \"{} Student{}\".format(num_members, \"s\" if num_members != 1 else \"\")\n elements.append(Paragraph(num_members_label, styles[\"Center\"]))\n elements.append(Spacer(0, 5))\n\n attendance_data = [[\n Paragraph(\"Present\", styles[\"Heading5\"]),\n Paragraph(\"Student Name (ID)\", styles[\"Heading5\"]),\n Paragraph(\"Grade\", styles[\"Heading5\"])\n ]]\n\n members = []\n for member in sact.members.all():\n members.append((\n member.last_name + \", \" + member.first_name,\n (member.student_id if member.student_id else \"User {}\".format(member.id)),\n int(member.grade) if member.grade else \"?\"\n ))\n members = sorted(members)\n\n for member_name, member_id, member_grade in members:\n row = [\"\", \"{} ({})\".format(member_name, member_id), member_grade]\n attendance_data.append(row)\n\n # Line commands are like this:\n # op, start, stop, weight, colour, cap, dashes, join, linecount, linespacing\n attendance_style = TableStyle([\n (\"LINEABOVE\", (0, 1), (2, 1), 1, colors.black, None, None, None, 2),\n (\"LINEBELOW\", (0, 1), (0, len(attendance_data)), 1, colors.black),\n (\"TOPPADDING\", (0, 1), (-1, -1), 6),\n (\"BOTTOMPADDING\", (0, 1), (-1, -1), 0),\n (\"BOTTOMPADDING\", (0, 0), (-1, 0), 5),\n ])\n\n elements.append(Table(attendance_data, style=attendance_style, colWidths=[1.3 * inch, None, 0.8 * inch]))\n elements.append(Spacer(0, 15))\n instructions = \"\"\"\n <b>Highlight or circle</b> the names of students who are <b>absent</b>, and put an <b>\"X\"</b> next to those <b>present</b>.<br />\n If a student arrives and their name is not on the roster, please send them to 
the <b>8th Period Office</b>.<br />\n If a student leaves your activity early, please make a note. <b>Do not make any additions to the roster.</b><br />\n Before leaving for the day, return the roster and any passes to 8th Period coordinator, Joan Burch's mailbox in the <b>main office</b>. For questions, please call extension 5046 or 5078. Thank you!<br />\"\"\"\n elements.append(Paragraph(instructions, styles[\"Normal\"]))\n\n if i != len(sched_act_ids) - 1:\n elements.append(PageBreak())\n\n doc.build(elements)\n return pdf_buffer", "def pdf_body(input_for,desc_dir):\n res = []\n wt = []\n for item in os.listdir(desc_dir):\n filename=os.path.join(desc_dir,item)\n with open(filename) as f:\n line=f.readlines()\n weight=line[1].strip('\\n')\n name=line[0].strip('\\n')\n print(name,weight)\n res.append('name: ' +name)\n wt.append('weight: ' +weight)\n print(res)\n print(wt)\n new_obj = \"\" \n \n for i in range(len(res)):\n if res[i] and input_for == 'pdf':\n new_obj += res[i] + '<br />' + wt[i] + '<br />' + '<br />'\n return new_obj", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_2 = marks.filter(name='2')\n mark_id_pass = marks.filter(name='Зачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_3 = 0\n counter_2 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_2 == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def cost(period_exams):\n\n student_periods = {}\n\n for aperiod in period_exams:\n for e in period_exams[aperiod]:\n for s in es.exam_students[e]:\n if s not in student_periods:\n student_periods[s] = [aperiod]\n else:\n student_periods[s].append(aperiod)\n\n numofstudents = len(student_periods)\n\n cost = 0\n d = 0\n cost_value = [16, 8, 4, 2, 1]\n for s in student_periods:\n mycal = sorted(student_periods[s])\n for (i, eachexam) in enumerate(sorted(mycal)):\n for j in range(i+1, i+6):\n if j < len(mycal):\n d = mycal[j] - mycal[i]\n if d > 5:\n cost += 0\n else:\n cost += cost_value[d-1]\n\n print(\"-\"*15)\n print(f'Total cost of this problem is {cost/numofstudents:.4f}')\n print(\"-\"*15)\n return(cost/numofstudents)", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_2 = marks.filter(name='2')\n mark_id_pass = marks.filter(name='Зачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_3 = 0\n counter_2 = 0\n counter_pass = 0\n 
counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_pass += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_pass == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def print_invoice(request, invoice_number):\n\n data = Invoice.objects.get(number=invoice_number)\n\n sub_total = sum([a.get(\"total_cost\") for a in data.items])\n s_gst_val = float(sub_total) * (float(data.s_gst) / 100)\n c_gst_val = float(sub_total) * (float(data.c_gst) / 100)\n\n data.addressed_to = data.addressed_to.replace(\"\\n\", \"<br>\")\n\n return render(request,\n \"invoice/invoice_print.html\",\n {\n \"data\": data,\n \"sub_total\": sub_total,\n \"s_gst_value\": s_gst_val,\n \"c_gst_value\": c_gst_val\n })", "def student_rentals(self, student_id:int) -> list:\n self.cursor.execute(\"\"\" \n SELECT name, brand, monthly_cost, start_date, end_date, ri_id AS id, rental_id\n FROM rental\n NATURAL JOIN rental_instrument\n WHERE CURRENT_DATE < end_date \n AND\n student_id = %s\n AND\n (CURRENT_DATE < terminated\n OR\n terminated IS NULL);\n \"\"\", [student_id])\n self.db.commit()\n return self._cursor_result()", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_3 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_pass[0].id):\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_3 < counter_4 + counter_5 and counter_3 > 0:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_3 = 0\n counter_all = 0\n\n for 
record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_3 > counter_4 + counter_5:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_pass = marks.filter(name='Зачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_3 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_2 = marks.filter(name='2')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request) \n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_3 = 0\n counter_2 = 0\n counter_pass = 0\n counter_not_pass = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_pass += 1\n counter_all += 1\n \n elif str(mark) == str(mark_id_not_pass[0].id):\n counter_not_pass += 1\n counter_all += 1\n\n else:\n counter_all += 1\n \n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_not_pass == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def display(self):\r\n return 
str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nSalary: $\" + str(self.salary)))", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_2 = marks.filter(name='2')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n mark_id_not_appointed = marks.filter(name='Неявка')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_pass = 0\n counter_not_pass = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_2[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_pass += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_pass[0].id):\n counter_not_pass += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_appointed[0].id):\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_pass < counter_not_pass:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def give_extra_credit(grades,netids,bonus):\n # No accumulator. 
This is a procedure\n \n for student in netids:\n if student in grades: # Test if student is a key in grades\n grades[student] = grades[student]+bonus", "def display(self):\n statement = f\"\"\"\n ------\n By {self.prescribed_by.name.upper()}\n ------\n Patient Detail!\n Name: {self.prescribed_to.name.capitalize()}\n Age: {self.prescribed_to.age}\n Gender: {self.prescribed_to.gender}\n Prescribed Medicines!\"\"\"\n print(statement)\n self.display_cure()", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_4 = marks.filter(name='4')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_4[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_pass[0].id):\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_2 = marks.filter(name='2')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n mark_id_not_appointed = marks.filter(name='Неявка')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_pass = 0\n counter_not_pass = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_2[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_pass += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_pass[0].id):\n counter_not_pass += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_appointed[0].id):\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_pass > counter_not_pass:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def generate_section_report(self):\n summary_line = [\"No Students in Section\",0,0,0,0,0,0,0.0]\n day_type_list, dates_list = \\\n load_daytypes_lists(self.start_date, self.total_days,\n self.section, self.total_days)\n self.num_schooldays = len(dates_list)\n if len(self.students):\n summary_line[0] = \"Averages\"\n for i in xrange(0,self.total_days):\n if (((day_type_list[i][0] & \\\n SchoolDB.models.StudentAttendanceRecord.school_day)) or\n ((day_type_list[i][1] & \\\n SchoolDB.models.StudentAttendanceRecord.school_day))):\n #skip non school days\n self.keys_list.append(\"\")\n self.table_data.append(\n self._generate_section_report_day(\n dates_list[i], summary_line)[0])\n 
if (len(self.table_data)):\n for i in range(1,8):\n summary_line[i] = \\\n round((float(summary_line[i])/ len(self.table_data)),1)\n self.table_data.append(summary_line)\n self.table_descriptor = \\\n [('date','string','Date'),\n ('m_en', 'number', 'Male En'),\n ('f_en', 'number', 'Female En'),\n ('m_morn', 'number', 'Male Morn'),\n ('f_morn', 'number', 'Female Morn'),\n ('m_aft', 'number', 'Male Aft'),\n ('f_aft', 'number', 'Female Aft'),\n ('percent', 'number', '% Present')]", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_2 = marks.filter(name='2')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_2[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_pass[0].id):\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def _getStudentEntries(self, program_entity, student_entity,\n params, id, user, prefix):\n\n items = []\n\n timeline_entity = program_entity.timeline\n\n if timeline_helper.isAfterEvent(timeline_entity,\n 'student_signup_start'):\n # add a link to show all projects\n items += [(ghop_redirects.getListStudentTasksRedirect(\n program_entity, {'url_name':'ghop/student'}),\n \"List my Tasks\", 'any_access')]\n\n items += super(View, self)._getStudentEntries(program_entity,\n student_entity, params, id, user, prefix)\n\n return items", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_5 = marks.filter(name='5')\n mark_id_4 = marks.filter(name='4')\n mark_id_3 = marks.filter(name='3')\n mark_id_2 = marks.filter(name='2')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n mark_id_not_appointed = marks.filter(name='Неявка')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_5 = 0\n counter_4 = 0\n counter_3 = 0\n counter_2 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_5[0].id):\n counter_5 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_pass[0].id):\n counter_all += 1\n\n elif str(mark) == str(mark_id_not_appointed[0].id):\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and 
counter_2 > counter_3 + counter_4 + counter_5:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)" ]
[ "0.58656114", "0.5852727", "0.5595382", "0.55868745", "0.5516554", "0.5511955", "0.5490234", "0.54839575", "0.5436576", "0.541411", "0.5412061", "0.53985006", "0.53951246", "0.5368056", "0.53509426", "0.5329534", "0.5324563", "0.5310762", "0.5279635", "0.527889", "0.5268582", "0.52622575", "0.5260029", "0.5233126", "0.5229564", "0.51993275", "0.5197813", "0.5186582", "0.517136", "0.51639354", "0.5162838", "0.5161532", "0.51438177", "0.5142861", "0.5131198", "0.5128228", "0.5116403", "0.51105785", "0.5110277", "0.5103836", "0.5071059", "0.50645226", "0.5055595", "0.5046465", "0.5034812", "0.50267017", "0.49916637", "0.49902552", "0.49778977", "0.4971713", "0.49627325", "0.49475232", "0.49467137", "0.49450517", "0.49438405", "0.4942534", "0.4935168", "0.4929968", "0.4926985", "0.49268326", "0.4926012", "0.49163342", "0.49063143", "0.48996222", "0.48939046", "0.4882703", "0.48824936", "0.48699543", "0.4863561", "0.4853294", "0.48369643", "0.48324764", "0.48316583", "0.48304322", "0.48254937", "0.48202392", "0.48148823", "0.48103067", "0.48021734", "0.48001996", "0.4793308", "0.47928953", "0.47914618", "0.47912657", "0.47886625", "0.47862053", "0.4783326", "0.4782581", "0.47718745", "0.47689223", "0.4768103", "0.47676387", "0.47605145", "0.47583967", "0.47572023", "0.47569713", "0.47562575", "0.4754963", "0.4750357", "0.47502753" ]
0.75747764
0
Print letter for Student at time of admission Contains details as provided by the Student
def issue_student_admission_letter(Student, body): pdf = FPDF('P', 'mm', 'A4') pdf.add_page('P') pdf.set_font('Times', 'B', 14) pdf.multi_cell(0, 5, 'Student Admission Letter') pdf.ln() pdf.multi_cell(0, 5, ('Name: %s' % Student.name)) pdf.ln() pdf.multi_cell(0, 5, ('Address: %s' % Student.address)) pdf.ln() pdf.multi_cell(0, 5, ('Contact Number: %s' % Student.contact_number)) pdf.ln() pdf.multi_cell(0, 5, ('Hall Allotted: %s' % str(db.get("hall", Student.hall_ID, "name")[0]))) pdf.ln() pdf.multi_cell(0, 5, ('Room Allotted: %s' % Student.room_no)) pdf.ln() pdf.ln() pdf.multi_cell(0, 5, ('%s' % body)) pdf.ln() # Write generated output file to PDF pdf.output(('admission_letter_%s.pdf' % Student.student_ID), 'F')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_student(s_info):\n print('')\n print('Your information:')\n print(f'{s_info.student_id} - {s_info.first_name} {s_info.last_name}')", "def __str__(self):\n return \"student:\"+str(self.name)+\":\"+str(self.age)+\":\"+str(self.major)", "def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)", "def display(self):\n return f'{self._last_name},{self._first_name}:({self._student_id}) {self._major} gpa:{self._gpa}'", "def printinfo(assign, question):\n print(\"Last Name: Bell\")\n print (\"First Name: Daniel\")\n print(\"Student ID: 282911\")\n print(\"Course: CPSC 231\")\n print(\"Tutorial Section: T02\")\n print(\"Assignment: %d\" %assign)\n print(\"Question: %s\" %question)\n print(\"\")", "def output_patient(patient):\n print(\"{} {} is a {} years old {}.\".format(patient[\"First\"],\n patient[\"Last\"],\n patient[\"Age\"],\n patient[\"Gender\"],))\n print(\"TSH data: {}.\".format(patient[\"TSH Data\"]))\n print(\"{} {}'s result is {} .\\n\".format(patient[\"First\"],\n patient[\"Last\"],\n patient[\"TSH Result\"],))\n return", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def toStudentString(self):\r\n return \"{0}th year, section {1}, {2} {3}\".format(self.batch, self.batch_id, self.batch, self.batch_id)", "def display(self):\n statement = f\"\"\"\n ------\n By {self.prescribed_by.name.upper()}\n ------\n Patient Detail!\n Name: {self.prescribed_to.name.capitalize()}\n Age: {self.prescribed_to.age}\n Gender: {self.prescribed_to.gender}\n Prescribed Medicines!\"\"\"\n print(statement)\n self.display_cure()", "def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"", "def tell(self):\n print('Name {}, Age {}'. format(self.name, self.age), end=\" \")", "def __str__(self):\n return \"%s %s %s %s\" % (self.assignment.course.code, self.assignment.code, self.student.username, self.sub_date)", "def display_profile(self):\n statement = f\"\"\"\n ------\n {self.name.upper()}\n ------\n Fee: {self.fee} -/Rs.\n Rating: {self.rating} STARS\n Qualification: {self.qualification}\n Speciality: {self.speciality}\n Language: {self.language}\n Working Hours: {self.working_hrs}\n Contact: {self.contact}\n Location: {self.location}\n \"\"\"\n print(statement)", "def course_info(self):\n print(\"Course name: {}\".format(self._course_name))\n print(\"Lead teacher: {}\".format(self._teacher))\n\n if len(self._students) == 0:\n print(\"Course does not enrolled by any student\")\n else:\n print(\"Enrolled: {}/{}\".format(len(self._students), self._total_place))", "def student_format(student):\r\n s = student\r\n return '{0}, {1}, {2}, {3}'.format(s[0], s[1], s[2], s[3])", "def show_game_mission(self):\n print_bold(\"Mission:\")\n print(\" 1. Fight with the enemy.\")\n print(\" 2. Bring all the huts in the village under your control\")\n print(\"---------------------------------------------------------\\n\")", "def print_letter(donor):\n message = \"Dearest, {}. Thank you so much for your generosity with your most recent donation of ${}. 
\\nSincerely.\"\n print(message.format(donor[0], donor[1][-1]))", "def __str__(self):\n return f\"{self.semester} | {self.school} | {self.position} | {self.class_name}\"", "def __str__(self):\n print('=' * 20, \"Subject Information\", '=' * 20)\n print(\"Subject Name: {}\".format(self.name))\n print(\"Pulse Data Length for general questions\")\n print(self.pulse_length[0:20])\n print(\"Number of general Questions: {}\".format(\n len(self.pulse_data[0])))\n print(\"Pulse Data Length for video 1\")\n print(\"Number of questions for video 1: {}\".format(\n len(self.pulse_data[1])))\n print(self.pulse_length[20:40])\n print(\"Pulse Data Length for video 2\")\n print(\"Number of questions for video 2: {}\".format(\n len(self.pulse_data[0])))\n print(self.pulse_length[40:60])\n print('Label Data')\n print(self.label_data)\n print('Label Data shape: {}'.format(self.label_data.shape))\n\n return ''", "def display(self):\r\n return str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nSalary: $\" + str(self.salary)))", "def __ui_statistics_sort_alpha(self, discipline_name):\n sorted_list = self.__student_controller.get_sorted_students_for_discipline(discipline_name)\n if len(sorted_list) == 0:\n print(\"There is no student enrolled at the given discipline!\")\n return\n\n for student in sorted_list:\n print(str(student) + \"\\n\")", "def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))", "def display(self):\r\n return str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nHourly Pay $\" + str('%.2f' % self.hourly_pay)))", "def __str__(self):\r\n stu_info = super().__str__()\r\n return f'{stu_info} {self.thesis}'", "def show_game_mission():\n print_bold(\"Misija:\")\n print(\"\\tOdaberi kućicu u kojoj se Talion može odmoriti ...\")\n print_bold(\"SAVJET:\")\n print(\"PAZI kako biraš jer neprijatelji su blizu!\")\n print_dotted_line()", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def get_student_name(student_information):\n return student_information[0]", "def main():\r\n future_student = \"Future begins\"\r\n print_message(future_student)\r\n print_message(\"Dreams begin\")\r\n print_message(\"Aspirations begin\")", "def main():\n student_info = prompt_student()\n display_student(student_info)", "def print_event(self):\n\n list_of_names = [str(c) for c in self.__list_of_contacts]\n joined_names = ', '.join(list_of_names)\n table = [[str(self._title)],[\"Date: \"+str(self._date)],[\"Time: \"+str(self._start)+\" - \"+str(self._end)],[\"Participants: \"+str(joined_names)]]\n print(tabulate(table, tablefmt='grid'))", "def printCardDetails(self):\n \n print(\"\\nCard Details - {self.name}\\nCard Number: {self.cardNum}\\nExpiry Date (mm/yy): {self.cardExp[0]:02d}/{self.cardExp[1]:02d}\".format(self=self))", "def __statistics_students_enrolled(self):\n try:\n discipline_name = input(\"Give discipline discipline_name: \")\n 
self.__discipline_validator.validate_name(discipline_name)\n except DisciplineException as de:\n print(de)\n return\n\n menu_string = \"\\t1. Sort alphabetically \\n\\t\" \\\n \"2. Sort descending by average \\n\\t\" \\\n \"0. Exit\"\n command = self.__ui_read_command(menu_string)\n\n if command == '1':\n self.__ui_statistics_sort_alpha(discipline_name)\n elif command == '2':\n self.__ui_statistics_sort_avg(discipline_name)\n elif command == '0':\n return", "def current_subject(self):\n return \"%s: %s\" % (self.name, self.phase)", "def student_version():\r\n print(\"------------------------------------------------------------\")\r\n word = \"Student Version\"\r\n for i in word:\r\n cprint(i, \"blue\", end=\"\")\r\n time.sleep(0.05)\r\n print()\r\n outcome = input_student_progression()\r\n return outcome", "def print_header():\n print('------------------------------------')\n print(' Lesson04')\n print(' Kata Fourteen Assignment')\n print('------------------------------------\\n')", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def announce(outcome, who):\r\n print(who, 'rolled a', outcome)\r\n print(draw_number(outcome))", "def printESInfo(self,timeStamp=-1):\n dict = self.makeESQuery(timeStamp)\n\tkeyList = dict.keys()\n\tkeyList.sort()\n\tprint \"Requested timeStamp:\",timeStamp \n\tprint \"--------------------------------\"\n\tprint \" time \"\n\tprint \" grade \"\n\tprint \" minRun maxRun details \"\n\tprint \"--------------------------------\"\n\tfor key in keyList:\n\t list = dict[key]\n\t print\n\t if key=='0' or key==0: \n\t key = '00000000'\n\t print key,\n\t usedGrade = \"\"\n\t usedSVName= \"\"\n\t for item in list:\n\t\tgrade = item[0]\n\t\tminR = item[1]\n\t\tmaxR = item[2]\n\t\tif minR==1: minR = '000001'\n\t\tif maxR==1: maxR = '000001'\n\t\tsvName= item[3]\n\t\tif usedGrade==grade:\n\t\t output = \" %s %s\"%(minR,maxR)\n\t\telse:\n\t\t usedGrade =grade\n\t\t output = \"\\n\"\n\t\t output+=\" %s\\n\"%grade\n\t\t output+=\" %s %s\"%(minR,maxR)\n\t\tif usedSVName!=svName:\n\t\t output+=\" %s\"%svName\n\t\t usedSVName = svName\n\t\tprint output", "def show_game_mission():\n print_bold(\"任务:\")\n print(\"\\t选择李维可以休息的小屋...\")\n print_bold(\"TIP:\")\n print(\"保持警惕,周围有敌人!\")\n print_dotted_line()", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n #Mostrar_Grande = long_name.upper()\r\n #return long_name.upper()\r\n #return Mostrar_Grande #Funciona Com Return TAMBÉM, mas olhe na linha 39 como seria necessário usar.\r\n print(long_name.upper())", "def __str__(self):\n # Use 'Unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.verbose_name()\n else:\n term = 'Unknown'\n\n exam_unicode = '{term} {number} {type} for {course}'.format(\n term=term,\n number=self.get_exam_number_display(),\n type=self.get_exam_type_display(),\n course=self.course_instance.course)\n if self.instructors:\n instructors = ', '.join([i.last_name for i in self.instructors])\n return '{}, taught by {}'.format(exam_unicode, instructors)\n else:\n return '{} (Instructors Unknown)'.format(exam_unicode)", "def __repr__(self) -> str:\n return f\"{self.title} that starts at {self.start} and ends at {self.end} and is taught by {self.instructor}\"", "def print_state(self):\n print(self.identifier, 
\n self.gender, \n self.age,\n self.sexual_activity,\n self.disease_status,\n self.time_since_infection,\n self.number_of_partners,\n self.current_partners)", "def get_student_name(self):\n return self.__student_name", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def get_raw_information(self):\n try:\n info = self.student_attendance_record.get_period_info(\n self.start_date, self.day_periods)\n return (self.student_name, self.student_gender, info)\n except AttributeError:\n raise AttributeError, \\\n \"Failed to get student attendance record for: %s\" \\\n %unicode(self.student)", "def generate_student_report(self):\n \n period_type = self.parameter_dict.get(\"period_type\", \"monthly\")\n insert_gender_markers = self.parameter_dict.get(\n \"insert_gender_markers\", False)\n period = [(self.start_date,self.end_date)]\n for student in self.students:\n self.table_data.append(self._generate_single_student_report_line(\n student,period, False))\n self.keys_list.append(\"\")\n self.table_descriptor = \\\n [('name','string','Name'),\n ('days_present','number', 'Days Present'),\n ('percent_present', 'number', '% Present')]", "def display_simple(self):\n print(\"\") \n print(\"Date: {}\".format(self.date))\n print(\" Task name: {}\".format(self.task_name))\n print(\" Time spent: {} minutes\".format(self.time_spent))\n print(\" Notes: {}\".format(self.notes))\n print(\" Task number: {}\".format(self.task_number))\n print(\"\")", "def __str__(self):\n return \"%s, %02d, %s\" % (self.code, self.section, self.semester)", "def _generate_single_student_report_line(self, student_record, periods,\n use_period_separator = True, separator = None,):\n line = [student_record.full_name_lastname_first()]\n for period in periods:\n # if requested insert a separator between each period but\n # not between the name and the first period\n if (use_period_separator and (len(line) > 1)):\n line.append(separator)\n school_days, days_present = \\\n student_record.attendance.get_summary(\n period[0], period[1])\n if not school_days:\n school_days = 1\n percent_present = round((100.0 *days_present / school_days), 1)\n days_absent = school_days - days_present\n percent_absent = 100.0 - percent_present\n line.extend((days_present, percent_present))\n #days_absent, percent_absent))\n return line", "def user_story_01(self):\n td=datetime.today()\n for person in self.individuals.values():\n pb=person.birthday\n pd=person.death\n if pb !=\"NA\" and pb>td:\n print(f'US01 - {person.name} birthday after today on line {person._birthday_line}')\n if pd !=\"NA\" and pd>td:\n print(f'US01 - {person.name} death after today on line {person._death_line}')\n for family in self.families.values():\n fm=family.married \n fd=family.divorced\n if fm !=\"NA\" and fm>td:\n print(f'US01 - {self.individuals[family.wife_id].name} marriage after today on line {family._married_line}')\n if fd !=\"NA\" and fd>td:\n print(f'US01 - {self.individuals[family.husb_id].name} divorce after today on line {family._divorced_line}')", "def __str__(self):\n\t\ttime = \"%d-%d-%d\" % (self.created.day, self.created.month, self.created.year)\n\t\treturn \"%s %s | score: %d\" % (self.username, time, self.score)", "def __str__(self):\n return self._str_hsp_header() + \"\\n\" + self._str_aln()", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n 
self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance", "def print_mission(self): \n #Download mission from vehicle\n missionlist = self.download_mission()\n \n #Add commands\n for cmd in missionlist:\n commandline=\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (cmd.seq,cmd.current,cmd.frame,cmd.command,cmd.param1,cmd.param2,cmd.param3,cmd.param4,cmd.x,cmd.y,cmd.z,cmd.autocontinue)\n print commandline", "def print_phase_header(phase: str) -> None:\n print(f'\\n#### {phase.upper()} PHASE ####\\n')", "def __str__(self):\n return f'{self.mission} {self.category}: {self.title} / {self.created_at}'", "def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating", "def day_009_1():\n student_scores = {\n \"Harry\": 81,\n \"Ron\": 78,\n \"Hermione\": 99,\n \"Draco\": 74,\n \"Neville\": 62,\n }\n\n student_grades = {}\n\n for student in student_scores:\n if student_scores[student] <= 70:\n student_grades[student] = \"Fail\"\n elif student_scores[student] <= 80:\n student_grades[student] = \"Acceptable\"\n elif student_scores[student] <= 90:\n student_grades[student] = \"Exceeds Expectations\"\n else:\n student_grades[student] = \"Outstanding\"\n\n print(student_grades)", "def __repr__(self):\n return '<Student {} {}>'.format(self.first_name, self.last_name)", "def print_all_students(filename): \n records = records_from_file(filename)\n students_and_id = all_students(records)\n print(\"All students:\")\n for id_num, student in sorted(students_and_id.items()):\n stud_first_name = student[0]\n stud_last_name = student[1] \n print((\" {0}: {1} {2}\").format(id_num, stud_first_name, \n stud_last_name.upper()))\n print()", "def describe_user(self):\n print(\"\\n Name: \" + self.f_name.title() + ' ' + self.l_name.title())\n print(\"Age: \" + str(self.age)) \n print(\"Birthplace: \" + self.birthplace.title())", "def print_intro(self):\n \n print('Did you know that birds hold the record for longest animal migrations?')", "def __str__(self):\n return f\"This player has {self.hand} for a current total of {self.total} and {self.aceCount} Aces \" \\\n f\"valued at a soft 11. This player is a dealer: {self.dealer}.\"", "def __str__(self):\n start = f\"{self.start:%y/%m/%d %H:%M} - \" if self.start else \"\"\n return f\"Course run {self.id!s} starting {start:s}\"", "def __str__(self):\r\n to_print = (\"Name: \" + self.name + \", Age: \" +\r\n str(self.age) + \", Hobbys: \" + str(self.hobbys))\r\n return to_print", "def __str__(self):\n return \"%s (graded by %s at %s)\" % (self.submission, self.grader, self.date)", "def _printable(self):\n toPrint = \"Time Info header. \"\n toPrint += \"timestamp: \" + str(self.datetime) + \" \"\n\n return toPrint", "def first_entrance(self):\n print(\"A medium-sized room with a track in the center surrounded by benches. \"\n \"A tablet rests on a stand at the end of the track. 
\"\n \"A horrendous, rotten smell seems to be coming \"\n \"from the Terra Communications Room, which is, oddly, locked by a \"\n \"bulky, old-school mechanical lock...\")", "def __str__(self):\n return str(self.indenter.user.first_name) + ' ' \\\n + str(self.indenter.user.last_name) + ' ' \\\n + str(self.created_at)", "def __str__(self):\n return self.name + ' - ' + str(self.date_of_contact) + ' - ' + str(self.specialty)", "def select_student_enrollment_detailed(self, student_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN course_enrollments ce\n ON ce.course_section_id = cs.course_section_id AND ce.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE ce.student_id = ?\"\"\",\n (student_id,),\n )\n return cursor.fetchall()", "def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")", "def print_summary(self):\n #exec(\"print(storyline.{}_clause+', '+storyline.{}_clause.lower()+', '+storyline.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n #exec(\"print(self.{}_clause+', '+self.{}_clause.lower()+', '+self.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n lwr = \".lower()\"\n exec(\"print(\"+str(3*(\"self.{}_clause{}+',', \")).format(\"A\",\"\",\"B\",lwr,\"C\",lwr)+\"'\\b\\b')\")", "def req_advisor(std_deviation):\r\n students = []\r\n for i in records:\r\n if int(i[i.find(',')+1:]) < std_deviation:\r\n students.append(i[:i.find(',')])\r\n if students != []:\r\n print(\"List of students who need to see an advisor:\")\r\n for i in students:\r\n print(i)", "def format(self):\r\n\r\n earth = \"???\" if self.maskearth else self.earth\r\n air = \"???\" if self.maskair else self.air\r\n fire = \"???\" if self.maskfire else self.fire\r\n water = \"???\" if self.maskwater else self.water\r\n\r\n if any((self.earth, self.fire, self.water)):\r\n statsline = f'Stats: {earth}/{air}/{fire}/{water}'\r\n elif self.air:\r\n statsline = f'Air: {air}'\r\n else:\r\n statsline = ''\r\n\r\n return (\r\n f'Character {self.name}, [{self.token}]. 
'\r\n f'Init: {self.init} {statsline} Owner: {self.user.name}'\r\n )", "def show_data(self, ):\r\n return print('society_name : {}\\n'\r\n 'flat : {}\\n'\r\n 'house_no : {}\\n'\r\n 'no_of_members : {}\\n'\r\n 'income : {}\\n '\r\n .format(self.society_name, self.flat, self.house_no, self.no_of_members, self.income))", "def print_fullname(self):\n print('Full name : ', self.first_name + self.Last_name)\n return \"\"", "def print_fullname(self):\n print('Full name : ', self.first_name + self.Last_name)\n return \"\"", "def print_fullname(self):\n print('Full name : ', self.first_name + self.Last_name)\n return \"\"", "def display_short(self):\n print(f'{self.name.upper()} ({self.speciality.upper()})')", "def __str__(self):\n return f'Character name: {self.name}\\nhealth: {self.health}\\n' \\\n f'strength: {self.strength}\\nchance dodge: ' \\\n f'{round(self.chance_dodge, 2)}\\nchance critical:' \\\n f' {round(self.chance_critical, 2)} '", "def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()", "def __str__(self):\r\n return 'Autor: {user}.\\nText: {text}'.format(user=self.author, text=self.text)", "def print(self):\r\n self.print_avec_separateur()", "def __str__(self):\n return f'{self.date} - {self.opponent_abbr}'", "def print_card(card):\n\n titles = [\"Ones\", \"Twos\", \"Threes\", \"Fours\", \"Fives\", \"Sixes\", \n \"One pair\", \"Two Pairs\", \"Three of\", \"Four of\", \"Straigth\",\n \"Big straight\", \"House\", \"Yatzy\"]\n \n print(\"+---------+-----------------+-------+\")\n print(\"| Index | Name | Score |\")\n print(\"+---------+-----------------+-------+\")\n\n for i in range(len(card)):\n print(\"| {:>7} | {:<15} | {:<5} |\".format(i, titles[i], card[i]))\n\n print(\"+---------+-----------------+-------+\")", "def __str__(self):\n _str = indent(self.name, color = self.color)\n _str += indent(\"-\"*len(self.name))\n _str += indent(\"( \" + self.movemement + \" - \"+ self.symbole + \" )\")\n _str += indent(\"Awareness: \" + str(self.awareness))\n for _iel in self.abilities:\n _str += indent(_iel)\n if self.description is not None:\n _str += indent(\"-\"*45 + \"\\n\")\n _str += indent(self.description)\n _str += indent(\"-\"*45 + \"\\n\")\n _str += indent(BOLD_BLACK + \" \"*15 + \"Combat Stats\" + RESET)\n _str += indent(\"-\"*45 + \"\\n\")\n _str += indent(BLUE + \" Horror\" + RESET + \" Toughness\"\\\n + RED + \" Combat\" + RESET)\n _str += indent(BLUE + \"Rating Damage \"\\\n + RED + \"Rating Damage\")\n _str += indent(BLUE + \" \" + str(self.horror_rating)\\\n + \" \"*8 + str(self.horror_damage) + RESET\\\n + \" \"*8 + str(self.toughness) + RED\\\n + \" \"*8 + str(self.combat_rating)\\\n + \" \"*8 + str(self.combat_damage) + RESET)\n _str += indent(\"-\"*45 + \"\\n\")\n return _str", "def create_letter(cls, donor_status, donor_name, donation_amt):\n if donor_status == 0:\n letter_text = '''\n Dear {0},\n\n Thank you for your very kind donation of ${1:.2f}, and for your continuing support.\n\n Your generous contribution will be put to very good use.\n\n Sincerely,\n -The Team\n '''.format(donor_name, donation_amt)\n return letter_text\n elif donor_status == 1:\n letter_text = '''\n Dear {0},\n\n Thank you for your very kind donation of ${1:.2f}.\n\n Your generous contribution will be put to very good use.\n\n Sincerely,\n -The Team\n '''.format(donor_name, donation_amt)\n 
return letter_text\n elif donor_status == 2:\n return ('''\n Dear {0},\n\n Thank you for your very kind contribution(s) totaling ${1:.2f}.\n\n We would like you to know that your generous donation(s) will be put to very good use.\n\n Sincerely,\n -The Team\n '''.format(donor_name, donation_amt))", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def print_intro(self):\n \n print(\"Did you know that most insect migrations are intergenerational, meaning that offspring continue their parent's journey?\\n\")", "def _printable(self):\n toPrint = \"Measurement Outcome header. \"\n toPrint += \"measurement outcome: \" + str(self.outcome) + \" \"\n\n return toPrint", "def print_letter(donor):\n msg = get_thankyou_message(donor)\n print(msg)", "def schedule_text():", "def print_info(c, timestamp):\r\n print(f\"\\n[{timestamp}] [{id(c)}] [Fitness: {c.fitness()}]\\n \" +\r\n f\"Age: {c.age} seconds, F.Eaten: {c.food_eaten}, P.Eaten: {c.poison_eaten}\\n\" +\r\n f\"currHP: {c.health}, Gen: {c.gen}, Childs: {c.childs}\\n\" +\r\n f\"DNA: {c.dna}\\n\" +\r\n f\"FoodAttr: {c.food_attraction}, PoisonAttr: {c.poison_attraction}\\n\" +\r\n f\"FoodDist: {c.food_dist}, PoisonDist: {c.poison_dist}\\n\" +\r\n f\"MaxHealth: {c.max_health}, MaxVel: {c.max_vel}, Size: {c.size}\\n\" +\r\n f\"MaxSteer: {c.max_steer_force}, DirAngleMult: {c.dir_angle_mult}\\n\")", "def describe_user(self):\n print(self.first_name.title() + \" \" + self.last_name.title() +\n \" is a \" + str(self.age) + \" year old who identifies as \" +\n self.gender + \".\")", "def draw(self):\n print(self.Firstname, self.LastName)", "def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))", "def print_section(self, s):\n section = s.upper()\n\n self.print_newline()\n self.print_newline()\n self._write('%s\\n' % section)\n self._write('%s\\n' % ('-' * len(section)))\n self.print_newline()", "def print_seq(self):\n names, values = [], []\n for each in self.minions:\n names.append(each.name)\n values.append(f'{each.atk}/{each.dfs}')\n t = PrettyTable()\n t.add_row(names)\n t.add_row(values)\n print(t)" ]
[ "0.6675286", "0.6378189", "0.6343893", "0.6320618", "0.6295893", "0.6235386", "0.61603206", "0.6133385", "0.6130784", "0.6011712", "0.6004627", "0.5992418", "0.5982091", "0.59453845", "0.5931745", "0.59027773", "0.5890217", "0.58893615", "0.585697", "0.5848341", "0.5838587", "0.5809452", "0.57592124", "0.5755029", "0.5714626", "0.5707981", "0.5706553", "0.57057977", "0.5684904", "0.56631744", "0.56581503", "0.56547266", "0.56527406", "0.5637643", "0.5627992", "0.56254137", "0.5618264", "0.5599353", "0.5598423", "0.5591961", "0.55909026", "0.554981", "0.55449206", "0.5527606", "0.55165195", "0.55071694", "0.5494398", "0.5494235", "0.5491863", "0.5485089", "0.5480433", "0.5477359", "0.54701734", "0.54653823", "0.54576516", "0.54492104", "0.5442282", "0.543872", "0.5437099", "0.542465", "0.540786", "0.54074615", "0.5402189", "0.5400505", "0.53973985", "0.53929317", "0.5390702", "0.5389731", "0.5385258", "0.5380735", "0.5379623", "0.5375072", "0.5370672", "0.53706473", "0.5358965", "0.5358789", "0.5356881", "0.5356577", "0.5356577", "0.5356577", "0.53514683", "0.53308755", "0.53230935", "0.5320408", "0.5314559", "0.53068274", "0.53052", "0.53012276", "0.52834034", "0.5272252", "0.52718186", "0.5268673", "0.5261448", "0.52516705", "0.5248595", "0.5247417", "0.523775", "0.5236046", "0.52350837", "0.522919" ]
0.6613428
1
Initialize an object by loading the credentials using a credentials file and YAML key
def __init__(self, cred_file, yaml_key): self.premium_search_args = load_credentials(cred_file, yaml_key=yaml_key, env_overwrite=False)
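A minimal usage sketch for the snippet above, assuming the searchtweets package (which provides load_credentials); the class name, credentials file path, YAML key, and query string below are illustrative assumptions, not part of the original record.

from searchtweets import load_credentials, gen_rule_payload, collect_results

class PremiumSearchClient:
    def __init__(self, cred_file, yaml_key):
        # Same pattern as above: resolve the premium search credentials once at init,
        # without letting environment variables override the YAML file.
        self.premium_search_args = load_credentials(
            cred_file, yaml_key=yaml_key, env_overwrite=False
        )

    def search(self, query, max_results=100):
        # Build a premium search rule and collect results with the stored credentials.
        rule = gen_rule_payload(query, results_per_call=100)
        return collect_results(
            rule, max_results=max_results, result_stream_args=self.premium_search_args
        )

# Hypothetical call site (file path and key are placeholders):
# client = PremiumSearchClient("~/.twitter_keys.yaml", "search_tweets_premium")
# tweets = client.search("python lang:en")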
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, passwd=None, yamlname=None): # Passwd is in the clear so be careful\n if yamlname is None:\n utilities.log.error('No json configuration was provided for iRODS: Abort')\n sys.exit(1)\n if passwd is None:\n utilities.log.error('No passwd was provided for iRODS: Abort')\n sys.exit(1)\n self.config=utilities.load_config(yamlname)\n self.passwd = passwd", "def from_file(cls, filename, **kwargs):\n return super(Credentials, cls).from_file(filename, **kwargs)", "def __init__(self, username=None, password=None, file=None, token=None):\n FILE_NAME = 'client_secrets.json'\n \n self.response = None\n self.keycloak_openid = KeycloakOpenID(\n server_url=self.SERVER_URL,\n client_id=self.CLIENT_ID,\n realm_name=self.REALM_NAME,\n )\n # Get WellKnow\n self.config_well_know = self.keycloak_openid.well_know()\n #check file\n if file is not None:\n if Path(file).is_dir():\n file += \"client_secrets.json\"\n if not str(file).endswith('.json'):\n file += '.json'\n else:\n file = Path(qc_qubosolv.__file__.rsplit('/', 1)[0] + '/' + FILE_NAME)\n #check username and password\n if username is not None and password is not None:\n if Path(file).exists():\n with open(file) as config_file:\n data = json.load(config_file)\n data['username'] = username\n data['password'] = password\n with open(file, 'w') as config_file:\n json.dump(data, config_file, indent = 4)\n else:\n with open(Path(file), 'w') as config_file:\n account = {\n 'username': username,\n 'password': password\n }\n json.dump(account, config_file, indent = 4)\n elif username is None and password is None:\n if not Path(file).exists():\n raise ValueError(\"File does not exist and username/password is not given\")\n else:\n pass\n elif username is None or password is None:\n raise ValueError(\"Only Username or Password is given, but both are needed\")\n \n # load token or username/password\n if token is not None:\n self.configuration = qc_qubosolv_api.Configuration()\n self.configuration.access_token = token\n else:\n with open(Path(file)) as config_file:\n data = json.load(config_file)\n try:\n response_token = self.keycloak_openid.token(\n data['username'],\n data['password']\n )\n except KeyError:\n raise ValueError(\n \"username and password is not saved, therefore token \"\n \"cannot be generated or token is not given\"\n )\n self.configuration = qc_qubosolv_api.Configuration()\n self.configuration.access_token = response_token[\"access_token\"]", "def __init__(self, user, password, domain, project, auth_url,\n key_file=None, cert_file=None):\n self.user = user\n self.password = password\n self.domain = domain\n self.project = project\n self.auth_url = auth_url\n self.key_file = key_file\n self.cert_file = cert_file\n self.auth_token = None\n self.catalog = None\n\n # Authenticate to keystone and save the token for future requests\n res = self._authenticate()\n if res['status'] == 201:\n self.auth_token = res['headers']['x-subject-token']\n self.catalog = self._extract_catalog(json.loads(res['body']))\n else:\n LOG.error('Keystone authentication failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def load_creds(self):\n home = expanduser(\"~\")\n with open(os.path.join(home, 'creds.json')) as creds_file:\n self.creds_data = json.load(creds_file)", "def __init__(self, openrc_file=None, password=None, no_env=False, interactive=False, use_admin=False):\n self.creds = {}\n self.api_version = 2\n # List of accepted keys for Keystone version 2 and 3\n self.auth_keys = {\n 2: ['auth_url', 
'username', 'password', 'token', 'user_id', 'trust_id', 'tenant_id', 'tenant_name'],\n 3: ['auth_url', 'username', 'password', 'token', 'token_id', 'user_id', 'user_domain_id',\n 'user_domain_name', 'trust_id', 'domain_id', 'domain_name', 'project_id', 'project_name',\n 'project_domain_id', 'project_domain_name']\n }\n\n # Make sure we have something to load from\n if not openrc_file and no_env:\n raise CredError('No OpenRC file specified and no environment flag set. No credentials to load')\n\n # Load in OpenRC file\n if openrc_file:\n if not os.path.isfile(openrc_file):\n raise CredError('OpenRC file %s not found' % openrc_file)\n self.loadrc(openrc_file)\n\n # Load in environment if no_env is False\n if not no_env:\n self.loadenv()\n\n # Set password if specified\n if password:\n if 'username' in self.creds:\n self.creds['password'] = password\n else:\n self.creds['token'] = password\n\n # Check for required credentials\n if 'auth_url' not in self.creds:\n raise CredError('OS_AUTH_URL is missing from OpenRC file and environment')\n\n # Check for project if admin mode is disabled\n if not use_admin:\n found = False\n for name in ['tenant_name', 'tenant_id', 'project_name', 'project_id']:\n if name in self.creds:\n found = True\n if not found:\n raise CredError('Project information is missing from OpenRC file and environment')\n\n # Warn if no region_name\n if 'region_name' not in self.creds:\n logging.warning('OS_REGION_NAME is missing from OpenRC file and environment. May cause issues')\n self.creds['region_name'] = None\n\n # Password is used when there is a username, otherwise it needs a token\n auth_type = 'password'\n if 'username' not in self.creds:\n auth_type = 'token'\n\n if auth_type not in self.creds:\n # Fail out if interactive is false\n if not interactive:\n raise CredError('OS_PASSWORD and OS_TOKEN missing from OpenRC file and environment')\n # Ask user for password / token if we don't have one\n password = ''\n while len(password) == 0:\n ask_str = 'Enter your OpenStack %s for %s on region %s: ' % (auth_type,\n self.creds['auth_url'],\n self.creds['region_name'])\n password = getpass.getpass(ask_str)\n self.creds[auth_type] = password\n\n # Set API version to 3 if needed\n if self.creds['auth_url'][-2:] == 'v3':\n self.api_version = 3", "def __init__(self):\n\n # open json config file that reads in information\n config_path = open(\"config.json\", \"r\")\n config_json = config_path.read()\n config_dict = json.loads(config_json)\n\n # assign object variables\n self.project_id = config_dict[\"project-id\"]\n self.bucket_name = config_dict[\"bucket-name\"]\n self.location_id = config_dict[\"key-location\"]\n self.key_ring_id = config_dict[\"key-ring-id\"]\n self.crypto_key_id = config_dict[\"crypto-key-id\"]\n self.service_account_email = config_dict[\"service-account-email\"]\n\n # close the file\n config_path.close()", "def __init__(self, config: Union[str, Path, TextIOWrapper] = None):\n if not isinstance(config, TextIOWrapper):\n config = Path(config) if config else Path(self._DEFAULT_LOCATION)\n config = config.expanduser().absolute()\n with open(config, 'r') as fp:\n self._config = json.load(fp)\n else:\n self._config = json.load(config)\n self._store = self._config.get('credsStore', None)\n if self._store not in self._SUPPORTED_STORES:\n raise UnsupportedStore(f'Credential store \"{self._store}\" not supported')\n # TODO: Support the other methods besides secretservice when we can actually test with them\n self._cmd = ['docker-credential-secretservice', 'get']", "def 
__init__(self):\n with open('config.json', encoding='UTF-8') as json_data_file:\n self.config = json.load(json_data_file)\n self._get_credential()\n self.file_tree = [{}] * 100", "def __init__(self, creds_file):\n self.creds_file = creds_file\n self.service = None\n self.creds = None\n self.courses = None\n self.scopes = None\n self.client_id = None\n self.client_secret = None\n self.hostname = None", "def __init__(self, username, password):\n self.username = username\n self.password = password\n self.privkey = None\n\n # sets self.privkey\n self.__set_or_create_key_if_not_exist()", "def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }", "def from_json_file(cls, filename: str):\n # Import standard modules\n from json import load\n\n with open(filename) as file_obeject:\n credentials = load(file_obeject)\n\n key = credentials.get('API_KEY')\n secret = credentials.get('API_SECRET')\n url = credentials.get('URL')\n\n if not all([key, secret]):\n err = (\n '`API_KEY` and `API_SECRET` are mandatory attributes.\\n'\n 'Please make sure they are contained in your `.json` file'\n )\n KeyError(err)\n\n return cls(key, secret, url)", "def __init__(self):\n try:\n with open(os.path.expanduser(\"~/.dkeyrc\"), 'r') as f:\n self.__cfgdata = json.load(f)\n except Exception as e:\n print(\"Error: Unable to load config JSON at ~/.dkeyrc -- %s\" % (e))\n sys.exit(1)", "def _authenticate_from_file(self, credentials):\n self._gauth.LoadCredentialsFile(credentials)", "def __init__(self, credentials=None, certificate=None):\n\n if credentials is not None:\n self.credentials = credentials\n if certificate is not None:\n self.certificate = certificate", "def load_credentials(self, credentials_file):\n credentials = ET.parse(credentials_file)\n self.db_host = credentials.find('db_host').text\n self.db_port = credentials.find('db_port').text\n if self.db_port is not None:\n self.db_port = int(self.db_port)\n self.db_user = credentials.find('db_user').text\n self.db_name = credentials.find('db_name').text\n self.db_password = credentials.find('db_password').text", "def __init__(self, credentials_name,credentials_user_name,fname, lname,email, credentials_site, credentials_password):\n self.credentials_name = credentials_name\n self.credentials_user_name = credentials_user_name\n self.fname = fname\n self.lname = lname\n self.email = email\n self.credentials_site = credentials_site\n self.credentials_password = credentials_password", "def __init__(self, name=None):\n\n conf = Config()[\"cloudmesh\"]\n super().__init__(name)\n\n self.user = Config()[\"cloudmesh\"][\"profile\"][\"user\"]\n self.spec = conf[\"cloud\"][name]\n self.cloud = name\n\n self.default = self.spec[\"default\"]\n self.cloudtype = self.spec[\"cm\"][\"kind\"]\n\n self.cred = self.spec[\"credentials\"]\n self.default = self.spec[\"default\"]\n self.project_id = self.cred[\"auth\"][\"project_id\"]\n\n # pprint(self.cred)\n\n self.cloudman = openstack.connection.Connection(**self.cred)\n\n # self.default_image = deft[\"image\"]\n # self.default_size = deft[\"size\"]\n # self.default.location = cred[\"datacenter\"]\n\n try:\n self.public_key_path = conf[\"profile\"][\"publickey\"]\n self.key_path = path_expand(\n 
Config()[\"cloudmesh\"][\"profile\"][\"publickey\"])\n f = open(self.key_path, 'r')\n self.key_val = f.read()\n except:\n raise ValueError(\"the public key location is not set in the \"\n \"profile of the yaml file.\")", "def __init__(self, **kwargs):\n self._username = kwargs.get('username', current_app.config.get('WORDAI_API_EMAIL', None))\n self._password = kwargs.get('password', current_app.config.get('WORDAI_API_PASSWORD', None))\n self._hash = kwargs.get('hash', current_app.config.get('WORDAI_API_KEY', None))", "def __init__(self,\n container_path: str,\n local_path : str,\n credentials: Dict[str, Any] = None):\n self._container_path = container_path\n self._local_path = local_path\n self._credentials = credentials", "def __init__(self, auth_key, auth_secret):\n\n self._auth_key = auth_key\n self._auth_secret = auth_secret", "def load(cred_path: Path):\n logger.debug(f\"Retrieving credentials from {cred_path}\")\n if not cred_path.exists():\n raise Exception(f\"Authentication file {cred_path} does not exist.\")\n\n with open(cred_path, mode=\"r\", encoding=\"utf-8\") as cred:\n content = yaml.full_load(cred)\n return AzureCredentials(\n endpoint=content.get(\"endpoint\"), key=content.get(\"key\"),\n )", "def __init__(self,\n account,\n config_file='',\n auth_type='basic',\n connector_type='snowflake_connector'\n ):\n self.account = account\n self.config_file = config_file\n self.auth_type = auth_type\n self.connector_type = connector_type\n self._get_credentials()\n self._set_connection()", "def __init__(self, config_file_name=\"config.json\"):\n with open(config_file_name, \"r\") as config:\n f = dict(json.load(config))\n for key, value in f.items():\n setattr(self, key, value)", "def __init__(self, account_name, user_name, password):\n self.account_name = account_name\n self.user_name = user_name\n self.password = password", "def __init__(\n self,\n creds=None,\n credential_path=\"\",\n credential_scopes=[\"https://www.googleapis.com/auth/drive\"],\n token_prefix=\"GoogleDrive_\",\n token_suffix=\"\",\n ):\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(\n credential_path, credential_scopes, token_prefix, token_suffix\n )", "def setUp(self):\n self.credentials = {\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"[email protected]\",\n \"password\": \"fglZfYmr%?,\",\n }", "def __init__(self, username, password):\n\n self._username = username\n self._password = password", "def load_credentials(cred=\"credentials_prod.json\"):\n if isinstance(cred, dict):\n # Easy way to handle if a function was handed valid credentials\n pass\n elif isinstance(cred, str):\n with open(cred, 'r') as f:\n cred = json.load(f)\n else:\n raise ValueError(\"Invalid input cred={0}\".format(cred))\n\n # Check for correct entries\n cred_keys = [ \"access_token\", \"expires_in\", \"refresh_token\", \"scope\", \"token_type\"]\n for k in cred_keys:\n if k not in cred:\n raise ValueError(\"Credentials missing key {0}\".format(k))\n return cred", "def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)", "def __init__(self, account, usernames, passwords):\n self.account = account\n self.usernames = usernames\n self.passwords = passwords", "def __init__(__self__, *,\n credentials: pulumi.Input['ContainerRegistryBasicCredentialsArgs']):\n pulumi.set(__self__, \"credentials\", credentials)", "def 
_load_credentials(creds_file=None):\n\n creds = None\n\n # Validate the credentials file\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit('Could not find a credentials.json file. ' \\\n 'Either pass one as argument or make sure credentials.json exists in ' \\\n 'the current directory or ' + expanduser('~'))\n\n # Creates CACHE_DIR if it does not exist\n # mode 0x777 (the default) is used because the system's umask value is masked out first\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n\n return creds", "def __init__(self, os_creds, keypair_settings):\n super(self.__class__, self).__init__(os_creds)\n\n self.keypair_settings = keypair_settings\n self.__delete_keys_on_clean = True\n\n # Attributes instantiated on create()\n self.__keypair = None", "def __init__(self):\n #print (\"Object created\")\n self.apikey='acc_4fc1a435b3188b5'\n self.secret = 'f49c4be14a048d5de7e7f6c564b52022'\n self.fileToIdMap = {}", "def __init__(self, name=None):\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))", "def __init__(self, config_file_name=\"config.json\"):\n self.config_file_name = config_file_name\n self._config = self._open_config_file()", "def __init__(self, file_name, host, username=None, password=None):\n self._file_name = file_name\n self._host = host\n self._username = username\n self._password=password", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def load(cls):\n cls._api_key = \"\"\n cls._token = \"\"\n data = None\n\n try:\n data = literal_eval(cls.config_file.read_text())\n cls._api_key = data[\"key\"]\n cls._token = data[\"token\"]\n except Exception:\n pass\n\n return data", "def __init__(self):\n self.charm_config = hookenv.config()\n self.kv = unitdata.kv()\n if not self.synapse_signing_key_file:\n self.synapse_signing_key_file = \"{}/{}.signing.key\".format(\n self.synapse_conf_dir, self.get_server_name()\n )", "def __init__(self, username, password):\n\n self.username = username\n self.password = password", "def __init__(self, rds_manager_path, os_path=False):\n self.key = hashlib.md5('admin_pass'.encode()).digest()\n if os_path:\n # encrypted file, MUST be stored in repository\n self.passfile_enc = os.path.join(\n rds_manager_path, 'credentials.txt.enc'\n 
)\n # decrypted file\n self.passfile_dec = os.path.join(\n rds_manager_path, 'credentials.txt'\n )\n else:\n # encrypted file, MUST be stored in repository\n self.passfile_enc = '{}/{}'.format(\n rds_manager_path, 'credentials.txt.enc'\n )\n # decrypted file\n self.passfile_dec = '{}/{}'.format(\n rds_manager_path, 'credentials.txt'\n )", "def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]", "def __init__(self):\r\n self.load_config()\r\n self.login()", "def _load_key(self, path):\n with open(path, 'r') as f:\n self._key = f.readline().strip()\n self._secret = f.readline().strip()", "def __init__(__self__, *,\n password: str,\n username: str):\n pulumi.set(__self__, \"password\", password)\n pulumi.set(__self__, \"username\", username)", "def __init__(self, config_file):\n defaults = {'kmsauth_user_key': None, 'logging_level_option': 'INFO'}\n ConfigParser.RawConfigParser.__init__(self, defaults=defaults)\n self.read(config_file)\n\n if not self.has_section(SECTION):\n raise ValueError(\n \"Missing {0} configuration section.\".format(SECTION)\n )\n\n for option in ['kmsauth_key', 'kmsauth_to_context']:\n if not self.has_option(SECTION, option):\n raise ValueError(\"{0} not set.\".format(option))", "def _load_credentials(self, path):\r\n with open(path) as file:\r\n raw_credentials = json.load(file)[\"credentials\"]\r\n self._credentials.clear()\r\n for r in raw_credentials: # each app\r\n self._credentials.append(Credentials(\r\n consumer_key=r[\"consumerKey\"],\r\n consumer_secret=r[\"consumerSecret\"],\r\n access_token=r[\"accessToken\"],\r\n access_secret=r[\"accessSecret\"]))", "def __init__(\n self,\n audience,\n subject_token_type,\n token_url,\n credential_source,\n *args,\n **kwargs\n ):\n\n self.interactive = kwargs.pop(\"interactive\", False)\n super(Credentials, self).__init__(\n audience=audience,\n subject_token_type=subject_token_type,\n token_url=token_url,\n credential_source=credential_source,\n *args,\n **kwargs\n )\n if not isinstance(credential_source, Mapping):\n self._credential_source_executable = None\n raise exceptions.MalformedError(\n \"Missing credential_source. The credential_source is not a dict.\"\n )\n self._credential_source_executable = credential_source.get(\"executable\")\n if not self._credential_source_executable:\n raise exceptions.MalformedError(\n \"Missing credential_source. An 'executable' must be provided.\"\n )\n self._credential_source_executable_command = self._credential_source_executable.get(\n \"command\"\n )\n self._credential_source_executable_timeout_millis = self._credential_source_executable.get(\n \"timeout_millis\"\n )\n self._credential_source_executable_interactive_timeout_millis = self._credential_source_executable.get(\n \"interactive_timeout_millis\"\n )\n self._credential_source_executable_output_file = self._credential_source_executable.get(\n \"output_file\"\n )\n\n # Dummy value. This variable is only used via injection, not exposed to ctor\n self._tokeninfo_username = \"\"\n\n if not self._credential_source_executable_command:\n raise exceptions.MalformedError(\n \"Missing command field. 
Executable command must be provided.\"\n )\n if not self._credential_source_executable_timeout_millis:\n self._credential_source_executable_timeout_millis = (\n EXECUTABLE_TIMEOUT_MILLIS_DEFAULT\n )\n elif (\n self._credential_source_executable_timeout_millis\n < EXECUTABLE_TIMEOUT_MILLIS_LOWER_BOUND\n or self._credential_source_executable_timeout_millis\n > EXECUTABLE_TIMEOUT_MILLIS_UPPER_BOUND\n ):\n raise exceptions.InvalidValue(\"Timeout must be between 5 and 120 seconds.\")\n\n if self._credential_source_executable_interactive_timeout_millis:\n if (\n self._credential_source_executable_interactive_timeout_millis\n < EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_LOWER_BOUND\n or self._credential_source_executable_interactive_timeout_millis\n > EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_UPPER_BOUND\n ):\n raise exceptions.InvalidValue(\n \"Interactive timeout must be between 30 seconds and 30 minutes.\"\n )", "def __init__(self):\n fd = open(\"conf/redis_config.json\", \"r\")\n tmp = fd.read()\n data = json.loads(tmp)\n self.database = redis.StrictRedis(\n host=data[\"host\"], \n port=data[\"port\"], \n password=None,\n decode_responses=True\n )\n self.key = data[\"key\"]", "def __init__(self,account_name, username, password):\n self.account_name = account_name\n self.username = username\n self.password = password", "def __init__(self, pw_file, provision=False):\r\n self.filename = pw_file\r\n self.scanner = re.compile(_RE)\r\n pass_re = re.compile(_PASS_RE)\r\n self.pass_validator = lambda x: True if pass_re.match(x) else False\r\n\r\n # Run some basic tests to check if the settings file is valid\r\n if self.filename is None:\r\n print('Settings variable PASSWORD_FILE not set')\r\n exit()\r\n\r\n if not os.access(os.path.dirname(self.filename), os.W_OK):\r\n print('The user lacks privileges to access/modify '\r\n 'the password file.')\r\n exit()\r\n\r\n if not provision:\r\n if not os.path.exists(self.filename):\r\n print('Credential file missing please run the provision script '\r\n 'first.')\r\n exit()", "def __init__(self, config_file='/etc/sfa/ldap_config.py'):\n\n try:\n execfile(config_file, self.__dict__)\n\n self.config_file = config_file\n # path to configuration data\n self.config_path = os.path.dirname(config_file)\n except IOError:\n raise IOError, \"Could not find or load the configuration file: %s\" \\\n % config_file", "def __get_credentials_from_config(self):\n cr = ConfigFileReader()\n\n self.username = cr.get_value(Config.EDUROAM_USER)\n debug(\"Username set to : \" + self.username)\n self.password = cr.get_value(Config.EDUROAM_PWD)", "def set_credentials():", "def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']", "def __init__(self, **options):\n\n super().__init__(**options)\n\n self._private_key = None\n self._public_key = None\n\n self._load_keys(**options)", "def __init__(self):\n\n self._authorization = None\n self._last_used = datetime.utcnow() - timedelta(hours=10)\n\n self._resource_owner_key = None\n self._resource_owner_secret = None\n\n self._consumer_key = etrade_config.oauth_consumer_key\n self._consumer_secret = etrade_config.oath_consumer_secret\n\n self._auth_file_path = etrade_config.auth_file_path\n self._user_name = etrade_config.user_name\n 
self._user_pwd = etrade_config.user_pwd", "def __init__(self, *args, **kwargs):\n psk_file = kwargs.get('psk_file', PSK_FILE)\n self.psk = self._load_psk(psk_file)", "def test_init_json(self, mock_creds):\n pk = \"pk\"\n email = \"email\"\n file_data = '{\"private_key\": \"%s\", \"client_email\": \"%s\"}' % (pk, email)\n\n file_mock = mock.mock_open(read_data=file_data)\n with mock.patch.object(moves.builtins, 'open', file_mock):\n credentials.Credentials('key.json')\n mock_creds.assert_called_once_with(email, pk, mock.ANY)", "def __init__(self, username=None, password=None, apitoken=None):\n self.__credentials = None\n self.__headers = {}\n if apitoken:\n self.authenticate_by_token(apitoken)\n if username and password:\n self.authenticate(username, password)", "def _load_config(self, config_data):\r\n # Username and password are required.\r\n\r\n for item in [self.CONFIG_NAME_USER, self.CONFIG_NAME_KEY]:\r\n if item not in config_data:\r\n raise ValueError('Error: Missing %s in configuration.' % item)\r\n\r\n configuration = Configuration()\r\n\r\n # Add to the final configuration (required)\r\n\r\n configuration.username = config_data[self.CONFIG_NAME_USER]\r\n configuration.password = config_data[self.CONFIG_NAME_KEY]\r\n\r\n # Proxy\r\n\r\n if self.CONFIG_NAME_PROXY in config_data:\r\n configuration.proxy = config_data[self.CONFIG_NAME_PROXY]\r\n\r\n # Cert File\r\n\r\n if self.CONFIG_NAME_SSL_CA_CERT in config_data:\r\n configuration.ssl_ca_cert = config_data[\r\n self.CONFIG_NAME_SSL_CA_CERT]\r\n\r\n # Keep config values with class instance, and load api client!\r\n\r\n self.config_values = config_data\r\n\r\n try:\r\n self.api_client = ApiClient(configuration)\r\n\r\n except Exception as error:\r\n\r\n if 'Proxy' in type(error).__name__:\r\n raise ValueError(\r\n 'The specified proxy ' +\r\n config_data[self.CONFIG_NAME_PROXY] +\r\n ' is not valid, please check your proxy settings')\r\n else:\r\n raise ValueError(\r\n 'Unauthorized: you must download an API key or export '\r\n 'credentials to the environment. 
Please see\\n ' +\r\n 'https://github.com/Kaggle/kaggle-api#api-credentials ' +\r\n 'for instructions.')", "def __init__(self, url, username, password):\n if url[-1] == \"/\":\n self._url = url[:-1]\n else:\n self._url = url\n\n params = 'username=%s&password=%s' % (username, password)\n r = requests.post(urljoin(self._url, Client._auth_resource) + params)\n \n if r.status_code != requests.status_codes.codes.ok:\n raise ClientAuthenticationFailed()\n\n try:\n data = r.json()\n token_id = data['tokenId']\n except Exception, e:\n raise ClientException(r.status_code, \n \"Some error has ocurred getting the token value from %s\" % r.text)\n\n self._token_id = token_id\n self._username = username", "def load_from_env(cls, config):\n try:\n base64_config = config.decode('base64')\n cls.providers = yaml.safe_load(base64_config)\n return cls\n except binascii.Error:\n cls.providers = yaml.safe_load(config)\n return cls\n except yaml.YAMLError:\n raise ImproperlyConfigured('PROVIDER_CONFIG must be valid YAML, and Base64-encoded or plaintext')", "def __init__(self, access_key, secret_key, **kwargs):\r\n pass", "def __init__(self, api_url, credentials):\n self.api_url = api_url\n while self.api_url.endswith(\"/\"):\n self.api_url = self.api_url[:-1]\n self.credentials = credentials\n self.username = credentials.username\n self.token = credentials.token", "def __init__(self, bot, name, default_settings=None):\n if default_settings is None:\n default_settings = {}\n self.bot = bot\n self.name = name\n self.default_settings = default_settings\n\n # set up storage for settings and load from persistent file\n self.settings_path = pathlib.Path(\".settings\", f\"{self.name}.yml\")\n self.id_dict = load_persistent_settings(self.settings_path)", "def __init__(self, account, user_username, user_password):\n self. account = account\n self. 
user_username = user_username\n self.user_password = user_password", "def __init__(self, file_handle):\n config = ConfigParser.ConfigParser()\n config.readfp(file_handle)\n self.database_address_ = config.get('General', 'database_address')\n self.google_developer_key_ = config.get('Google', 'developer_key')\n self.google_cref_ = config.get('Google', 'cref')", "def _load_credentials(self, datasource):\n\n self.credentials = datasource.credentials # Access the credentials\n\n # If there are credentials then make the api call\n if self.credentials:\n self.credentials = yaml.load(self.credentials)\n if self._validate_credentials():\n return self.credentials[\"client_id\"], self.credentials[\"client_secret\"]\n\n raise InvalidOrMissingCredentials(\"client_id and client_secret are missing or invalid\")", "def __init__(self, key_id: str, user: str, password: str):\n\n self.key_id = key_id\n self.user = user\n self.password = password\n self.con_strategy = \"unknown\"\n self.session = requests.Session()\n self.session.auth = (user, password)\n self.__fields = None\n if self.key_id == \"localhost\":\n self.local_ip_list = \"127.0.0.1\"\n self.local_ip = \"127.0.0.1\"\n self.port = \"52199\"\n self.con_strategy = \"local\"", "def __init__(self, token=None, token_path=\"tokens.txt\", username=None, password=None,\n grant_type=\"api-password\", client_id=\"brandwatch-api-client\",\n api_url=\"https://api.brandwatch.com/\"):\n self.api_url = api_url\n self.oauthpath = \"oauth/token\"\n\n if token:\n self._update_by_test_auth(username, token)\n self._write_auth(token_path)\n elif username is not None and password is not None:\n self._update_by_auth(username, password, token_path, grant_type, client_id)\n self._write_auth(token_path)\n elif username is not None:\n self._read_auth(username, token_path)\n else:\n raise KeyError(\"Must provide valid token, username and password,\"\n \" or username and path to token file\")", "def init():\n file_name = 'config.json'\n home_directory_path = str(Path.home())\n config_file_directory = home_directory_path+\"/.config/files/\"\n full_path = config_file_directory + file_name\n\n if os.path.isfile(full_path) and os.access(full_path, os.R_OK): # Readable Config file exists and is valid\n try:\n with open(full_path) as file:\n json_file = json.load(file)\n load_json_and_arguments(json_file)\n\n except ValueError as exception:\n raise ValueError(\"Invalid JSON configuration file\")\n\n elif not os.path.isfile(full_path): # Config file doesn't exist yet, create it\n\n if not os.path.exists(config_file_directory): # Make the directory if that doesn't exist as well\n os.makedirs(config_file_directory)\n\n get_account_info(full_path)\n\n else:\n raise IOError(\"Config file: \" + full_path + \" not accessible\")", "def __init__(self):\n self.base_url = credentials.api['base_url']\n self.config_path = credentials.api['config_path']\n self.group_base = credentials.api['group_base']\n self.session = requests.session()\n self.uidaruba = self.login()", "def __init__(self, base='', *path_parts):\n self._config = {}\n self.path = join(base, *path_parts)\n\n if not isfile(self.path):\n raise ImproperlyConfigured('Not a file')\n\n with open(self.path, 'r') as secret_file:\n content = secret_file.read()\n\n for line in content.splitlines():\n if line and not line.startswith('#'):\n line_parts = line.split('=', 1)\n self._config[line_parts[0]] = line_parts[1]", "def __init__(self, vault_file, vault_pass):\n if not exists(expanduser(vault_file)):\n raise Exception(f\"No such file: 
{vault_file}\")\n if not isinstance(vault_pass, bytes):\n raise Exception(\"Vault pass must be instance of `bytes`\")\n\n self.vault_file = vault_file\n self.vault_pass = vault_pass", "def __init__(self, conf_file_location: str, template_dir: str, target_dir: str, hard_reset: bool):\n self.config: Config = yaml_loader.load(conf_file_location, Config)\n self.massage_config_file()\n self.config_dict: Dict = as_dict(self.config)\n self.template_dir = template_dir\n self.target_dir = target_dir\n self.hard_reset = hard_reset", "def __init__(self,account,username, password):\n self.user_name = username\n self.password = password\n self.account = account", "def __init__(self,username, password):\n self.username = username\n self.password = password", "def __init__(self, settings):\n self._read_config(settings)", "def __init__(self, name, email, username, password):\n self.name = name\n self.email = email\n self.username = username\n # I don't trust people to always give me hashed passwords\n hash_pass = md5.new(password + config.get('security', 'salt')).digest()\n self.password = hash_pass", "def __init__(self, authenticator, username, password):\n super(ScriptAuthorizer, self).__init__(authenticator)\n self._username = username\n self._password = password", "def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)", "def __init__(self, configFile=\"oauth2.ini\", additionalSection=None):\n self.configFile = configFile\n self.additionalSection = additionalSection\n self.__initConfiguration() # init self.conf, self.orgConf\n self.__initCacheSection() # init self.cacheSection\n self.accessToken = None\n self.refreshToken = None\n self.__loadCacheTokens()", "def authenticate(self):\r\n\r\n config_data = {}\r\n\r\n # Step 1: try getting username/password from environment\r\n config_data = self.read_config_environment(config_data)\r\n\r\n # Step 2: if credentials were not in env read in configuration file\r\n if self.CONFIG_NAME_USER not in config_data \\\r\n or self.CONFIG_NAME_KEY not in config_data:\r\n if os.path.exists(self.config):\r\n config_data = self.read_config_file(config_data)\r\n else:\r\n raise IOError('Could not find {}. Make sure it\\'s located in'\r\n ' {}. 
Or use the environment method.'.format(\r\n self.config_file, self.config_dir))\r\n\r\n # Step 3: load into configuration!\r\n self._load_config(config_data)", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def load(cls, path, password=None):\n with open(path) as f:\n keystore = json.load(f)\n if not keys.check_keystore_json(keystore):\n raise ValueError('Invalid keystore file')\n return Account(keystore, password, path=path)", "def setUp(self):\n # instantiate an object by populating with dummy values.\n self.new_credential = Credentials(\"MySpace\", \"Ghostke99\", \"daimaMkenya001\")", "def __init__(self, my_data, my_auth):\n self.user = my_auth.user\n self.password = my_auth.password\n self.my_data = my_data", "def __init__(self, username, file_path):\r\n self.username = username\r\n self.file_path = file_path", "def __init__(self, **kwds):\n self.system=self.username=self.password=\"\"\n if kwds.has_key(\"system\"):\n self.system=kwds[\"system\"]\n if kwds.has_key(\"username\"):\n self.username=kwds[\"username\"]\n if kwds.has_key(\"password\"):\n self.password=kwds[\"password\"]\n if kwds.has_key(\"element\"):\n self.fromElement(kwds[\"element\"])", "def __init__(self, yaml_dict):\n self._params = self._get_params_from_yaml_dict(yaml_dict)", "def __init__(self,account,username, password):\n self.account = account\n self.username = username\n self.password = password", "def __init__(__self__, *,\n db_name: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n server: pulumi.Input[str],\n admin_secret: Optional[pulumi.Input[str]] = None,\n admin_secret_key_vault: Optional[pulumi.Input[str]] = None,\n key_vault_to_store_secrets: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"db_name\", db_name)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"server\", server)\n if admin_secret is not None:\n pulumi.set(__self__, \"admin_secret\", admin_secret)\n if admin_secret_key_vault is not None:\n pulumi.set(__self__, \"admin_secret_key_vault\", admin_secret_key_vault)\n if key_vault_to_store_secrets is not None:\n pulumi.set(__self__, \"key_vault_to_store_secrets\", key_vault_to_store_secrets)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "def __init__(__self__, *,\n db_name: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n server: pulumi.Input[str],\n admin_secret: Optional[pulumi.Input[str]] = None,\n admin_secret_key_vault: Optional[pulumi.Input[str]] = None,\n key_vault_to_store_secrets: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"db_name\", db_name)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"server\", server)\n if admin_secret is not None:\n pulumi.set(__self__, \"admin_secret\", admin_secret)\n if admin_secret_key_vault is not None:\n pulumi.set(__self__, \"admin_secret_key_vault\", admin_secret_key_vault)\n 
if key_vault_to_store_secrets is not None:\n pulumi.set(__self__, \"key_vault_to_store_secrets\", key_vault_to_store_secrets)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "def __init__(self, file):\n self.__config = file\n with open(self.__config) as json_file:\n data = json.load(json_file)\n self.__data = data" ]
[ "0.7274843", "0.6939988", "0.69185185", "0.68165654", "0.6759032", "0.67555183", "0.6753559", "0.67516315", "0.6747947", "0.6732597", "0.6726737", "0.6692598", "0.6635135", "0.65862745", "0.6492167", "0.6491658", "0.64460856", "0.6417166", "0.64065987", "0.63984865", "0.6352587", "0.63439876", "0.6342038", "0.6324819", "0.6314338", "0.6312912", "0.6284323", "0.62835073", "0.6277517", "0.6272061", "0.6267762", "0.6258236", "0.6257071", "0.6256481", "0.62543553", "0.6252085", "0.62483007", "0.6244275", "0.62058496", "0.6180559", "0.6179659", "0.6178878", "0.61748165", "0.6174321", "0.61681813", "0.6166696", "0.615619", "0.61537963", "0.61492586", "0.61487854", "0.61214006", "0.6118412", "0.61126345", "0.6110398", "0.6106174", "0.6095888", "0.60938823", "0.60903424", "0.60879517", "0.6079556", "0.60690033", "0.6062587", "0.6059303", "0.6055922", "0.6037777", "0.6030285", "0.6025283", "0.6022477", "0.60206425", "0.60178965", "0.60110927", "0.60022616", "0.6001626", "0.5998099", "0.5995705", "0.5993675", "0.5993542", "0.5986326", "0.5978751", "0.5976168", "0.59733546", "0.59730315", "0.5970106", "0.59596634", "0.594256", "0.59418434", "0.5933204", "0.5926635", "0.5926635", "0.5926635", "0.5920596", "0.5917591", "0.5913006", "0.59129363", "0.5898219", "0.5895517", "0.5895295", "0.5887353", "0.5887353", "0.58869684" ]
0.728429
0
Get only the necessary fields of a tweet; returns a Tweet with the needed attributes as a dict
def get_tweet_attributes(self, tw): tw_dict = {} tw_dict['created_at'] = tw['created_at'] tw_dict['lang'] = tw['lang'] tw_dict['text'] = tw['text'] tw_dict['entities'] = tw['entities'] tw_dict['favorite_count'] = tw['favorite_count'] tw_dict['retweet_count'] = tw['retweet_count'] tw_dict['user_followers_cnt'] = tw['user']['followers_count'] tw_dict['user_following_cnt'] = tw['user']['friends_count'] tw_dict['user_id'] = tw['user']['id_str'] return tw_dict
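A self-contained usage sketch of the extractor above, rewritten as a standalone function for illustration; the sample tweet payload is fabricated and carries only the fields the method reads.

def get_tweet_attributes(tw):
    # Standalone rendering of the method above: keep only the fields needed downstream.
    return {
        'created_at': tw['created_at'],
        'lang': tw['lang'],
        'text': tw['text'],
        'entities': tw['entities'],
        'favorite_count': tw['favorite_count'],
        'retweet_count': tw['retweet_count'],
        'user_followers_cnt': tw['user']['followers_count'],
        'user_following_cnt': tw['user']['friends_count'],
        'user_id': tw['user']['id_str'],
    }

sample_tweet = {
    'created_at': 'Wed Aug 27 13:08:45 +0000 2008',
    'lang': 'en',
    'text': 'hello world',
    'entities': {'hashtags': [], 'urls': []},
    'favorite_count': 3,
    'retweet_count': 1,
    'user': {'followers_count': 120, 'friends_count': 80, 'id_str': '42'},
}

print(get_tweet_attributes(sample_tweet))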
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tweets_features(tweet):\n tweet = remove_stop_words(tweet)\n return {'TWEET': tweet}", "def extract_user_dict_from_tweet( tweet: Tweet ):\n if tweet.other_data and len( tweet.other_data ) > 0:\n # extract the json into a dict\n j = json.loads( tweet.other_data )\n # extract the user json from the created dict\n return json.loads( j[ 'user' ] )", "def process_tweet(tweet):\n d = {}\n d['hastags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]\n d['text'] = tweet['text']\n d['user'] = tweet['user']['screen_name']\n d['user_loc'] = tweet['user']['location']\n return d", "def format_tweet(tweet):\n user = tweet['user']\n return {\n 'tweet_id': tweet['id'],\n 'hashtag': HASHTAG,\n 'text': tweet['text'],\n 'created_at': tweet['created_at'],\n 'user': {\n 'user_id': user['id'],\n 'name': user['name'],\n 'handle': user['screen_name'],\n 'profile_image_url': user['profile_image_url'],\n 'profile_url': f\"https://twitter.com/{user['screen_name']}\"\n }\n }", "def extract_relevant(self):\n item_extraction = self.data\n my_dict = {'tweeted_time': item_extraction['created_at'],\n 'tweet_id': item_extraction['id'],\n # If the time comes when the below becomes more significant, it will be no trouble at all to make an\n # additional column for it, but delimiting it with a ` creates less clutter in the Database\n 'in_reply_to':\n \"NAME/\" + str(item_extraction['in_reply_to_screen_name']) + \"`\" +\n \"STATUSID/\" + str(item_extraction['in_reply_to_status_id_str']) + \"`\" +\n \"USERID/\" + str(item_extraction['in_reply_to_user_id_str']),\n 'lang': item_extraction['lang'],\n 'place': item_extraction['place'], 'source': item_extraction['source']}\n if item_extraction['place'] is not None:\n my_dict['place'] = item_extraction['place']['full_name']\n if 'retweeted_status' in item_extraction.keys():\n my_dict['original_author_id'] = item_extraction['retweeted_status']['user']['id']\n my_dict['original_author_handle'] = item_extraction['retweeted_status']['user']['screen_name']\n tester = item_extraction['retweeted_status']['text']\n cleaned = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", tester).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n # This final text will make it a lot easier to run NLP\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n else:\n my_dict['original_author_id'] = item_extraction['user']['id']\n my_dict['original_author_handle'] = item_extraction['user']['screen_name']\n cleaned = ' '.join(re.sub(\"(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", item_extraction['text']).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n return my_dict", "def get_params():\n return {\"tweet.fields\": \"id,text,author_id,conversation_id,\"\n \"created_at,geo,in_reply_to_user_id,lang,\"\n \"public_metrics,source\"}", "def get_tweet_info(item):\n \n tweet_info = {\n 'tweet_id': item.id,\n 'name': item.user.name,\n 'screen_name': item.user.screen_name,\n 'retweet_count': item.retweet_count,\n 'text': item.full_text,\n 'info_pulled_at': datetime.datetime.now(),\n 'created_at': item.created_at,\n 
'favourite_count': item.favorite_count,\n 'hashtags': item.entities['hashtags'],\n 'status_count': item.user.statuses_count,\n 'location': item.place,\n 'source_device': item.source\n }\n\n try:\n tweet_info['retweet_text'] = item.retweeted_status.full_text\n except:\n tweet_info['retweet_text'] = 'None'\n try:\n tweet_info['quote_text'] = item.quoted_status.full_text\n tweet_info['quote_screen_name'] = item.quoted_status.user.screen_name\n except:\n tweet_info['quote_text'] = 'None'\n tweet_info['quote_screen_name'] = 'None'\n\n return tweet_info", "def tweet_text(tweet):\n return tweet['text']", "def _map_status_fields(self, tweet):\n data = {\n # status\n \"date\": tweet.created_at.strftime('%Y-%m-%d %H:%M:%S'),\n \"id\": tweet.id_str,\n \"text\": tweet.text,\n \"truncated\": tweet.truncated,\n \"lang\": tweet.lang,\n # user\n \"user_id\": tweet.user.id_str,\n \"user_screen_name\": tweet.user.screen_name,\n \"user_verified\": tweet.user.verified,\n \"user_lang\": tweet.user.lang,\n # reply\n \"reply_to_id\": tweet.in_reply_to_status_id_str,\n # quote\n \"quoted_id\": None,\n \"quoted_text\": None,\n # retweet\n \"retweeted_id\": None,\n \"retweeted_text\": None\n }\n # full text\n try:\n data.update({\n \"text\": tweet.extended_tweet['full_text']\n })\n except AttributeError:\n pass\n # quote\n if hasattr(tweet, \"quoted_status\"):\n data.update({\"quoted_id\": tweet.quoted_status.id_str})\n try:\n data.update({\n \"quoted_text\":\n tweet.quoted_status.extended_tweet['full_text']\n })\n except AttributeError:\n data.update({\n \"quoted_text\":\n tweet.quoted_status.text\n })\n # retweet\n if hasattr(tweet, \"retweeted_status\"):\n data.update({\"retweeted_id\": tweet.retweeted_status.id_str})\n try:\n data.update({\n \"retweeted_text\":\n tweet.retweeted_status.extended_tweet['full_text']\n })\n except AttributeError:\n data.update({\n \"retweeted_text\":\n tweet.retweeted_status.text\n })\n data.update({\n \"tweet_url\":\n \"https://twitter.com/%s/status/%s\" %\n (tweet.user.screen_name, tweet.id_str)\n })\n return(data)", "def tweet_to_salmon_vars(self, tweet):\n # there might be more than one URL in the tweet. find the one on our domain.\n # https://dev.twitter.com/docs/tweet-entities\n link = None\n for url_data in tweet.get('entities', {}).get('urls', []):\n # expanded_url isn't always provided\n url = url_data.get('expanded_url') or url_data.get('url')\n if url and urlparse.urlparse(url).netloc == self.key().name():\n link = url\n\n # parse the timestamp, formatted e.g. 
'Sun, 01 Jan 2012 11:44:57 +0000'\n created_at = tweet.get('created_at')\n if created_at:\n created_at = re.sub(' \\+[0-9]{4}$', '', created_at)\n updated = datetime.datetime.strptime(created_at,\n '%a, %d %b %Y %H:%M:%S')\n updated = updated.isoformat()\n else:\n updated = ''\n\n return {\n 'id': util.tag_uri(self.DOMAIN, str(tweet.get('id'))),\n 'author_name': tweet.get('from_user_name'),\n 'author_uri': 'acct:%[email protected]' % tweet.get('from_user'),\n 'in_reply_to': link,\n 'content': tweet.get('text'),\n 'title': tweet.get('text'),\n 'updated': updated,\n }", "def tweet2features(tweet):\r\n features = {\r\n 'len(tweet)': len(tweet),\r\n 'avg_word_length': get_avg_word_len(tweet)\r\n }\r\n return features", "def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}", "def tweet(self):\n try: \n return self._parsed_tweet\n except:\n if self.item_json:\n self._parsed_tweet = json.loads(self.item_json)\n else:\n self._parsed_tweet = {}\n return self._parsed_tweet", "def tweet_time(tweet):\n return tweet['time']", "def extract_data(tweets):\n dbfile = open('Tweet_extracter\\countries.pkl', 'rb')\n countries = pickle.load(dbfile)\n n_tweets = {}\n for i, tweet in enumerate(tweets):\n n_tweets[i] = {}\n n_tweets[i][\"text\"] = tweet.full_text\n loc = tweet.user.location\n if location_handler(loc) and loc in countries:\n n_tweets[i][\"location\"] = tweet.user.location\n else:\n n_tweets[i][\"location\"] = \"None\"\n return n_tweets", "def fetch_tweets(self, screen_name, count):\n return {}", "def get_tweets(self):\r\n return self.tweets", "def flatten_tweets(tweets_json):\r\n tweets_list = []\r\n \r\n # Iterate through each tweet\r\n for tweet in tweets_json:\r\n tweet_obj = json.loads(tweet)\r\n \r\n # Store the user screen name in 'user-screen_name'\r\n tweet_obj['user-screen_name'] = tweet_obj['user']['screen_name']\r\n \r\n # Check if this is a 140+ character tweet\r\n if 'extended_tweet' in tweet_obj:\r\n # Store the extended tweet text in 'extended_tweet-full_text'\r\n tweet_obj['extended_tweet-full_text'] = tweet_obj['extended_tweet']['full_text']\r\n \r\n if 'retweeted_status' in tweet_obj:\r\n # Store the retweet user screen name in 'retweeted_status-user-screen_name'\r\n tweet_obj['retweeted_status-user-screen_name'] = tweet_obj['retweeted_status']['user']['screen_name']\r\n\r\n # Store the retweet text in 'retweeted_status-text'\r\n tweet_obj['retweeted_status-text'] = tweet_obj['retweeted_status']['text']\r\n \r\n tweets_list.append(tweet_obj)\r\n return tweets_list", "def getTweetById(tweetId):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n tmpTweet = api.get_status(tweetId, tweet_mode=\"extended\")\n tmpTweet._json['text']=tmpTweet._json['full_text']\n del (tmpTweet._json['full_text'])\n return tmpTweet._json", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = 
({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def strip_checkin(tweet):\n place = tweet['place']\n return {\n 'created_at': tweet['created_at'].isoformat(),\n 'retweeted': tweet['retweeted'],\n 'retweet_count': tweet['retweet_count'],\n 'in_reply_to_status_id': tweet['in_reply_to_status_id'],\n 'in_reply_to_screen_name': tweet['in_reply_to_screen_name'],\n 'in_reply_to_user_id': tweet['in_reply_to_user_id'],\n 'favorited': tweet['favorited'],\n 'favorite_count': tweet['favorite_count'],\n 'id': tweet['id'],\n 'text': tweet['text'],\n 'place': {\n 'place_type': place['place_type'],\n 'lng': place['bounding_box']['coordinates'][0][0][0],\n 'lat': place['bounding_box']['coordinates'][0][0][1],\n 'name': place['name'],\n 'full_name': place['full_name'],\n 'id': place['id'],\n 'category': place['category'],\n },\n 'user': {\n 'id': tweet['user']['id'],\n 'screen_name': tweet['user']['screen_name']\n }\n }", "def keep(attrs, tweets):\n new_tweets = []\n for tweet in tweets:\n stripped = {}\n for attr in attrs:\n stripped = merge(stripped, find_item(attr.split('.'), tweet))\n new_tweets.append(stripped)\n return new_tweets", "def processTweet(title, tweet, remove_title=False):\n\n # create a title regex and initialize a dictionary to hold results\n\n texp = r\"#?\" + r\" ?\".join(processTitle(title).split(\" \"))\n results = {}\n\n # retrieve author metadata\n\n results['author_id'] = tweet.author.id\n results['author_name'] = tweet.author.name\n results['author_verified'] = tweet.author.verified\n results['author_followers'] = tweet.author.followers_count\n results['author_friends'] = tweet.author.friends_count\n results['author_favorites'] = tweet.author.favourites_count\n results['author_statuses'] = tweet.author.statuses_count\n\n # retrieve tweet metadata\n\n results['tweet_id'] = tweet.id\n results['tweet_datetime'] = tweet.created_at.strftime('%Y-%m-%d %H:%m:%S')\n results['tweet_favorites'] = tweet.favorite_count\n results['tweet_retweets'] = tweet.retweet_count\n\n retweet = re.search('^RT @\\w+:', tweet.text)\n results['tweet_retweet'] = True if retweet else False\n\n mention = re.search('@\\w+', tweet.text)\n results['tweet_mention'] = True if mention and not retweet else False\n\n # retrieve raw tweet text and clean it up\n\n text = tweet.text.replace('\\n', '').replace(\"'\", \"\").replace('\"', '')\n text = re.sub(r'(RT )?@\\w+:?', '', text)\n text = re.sub(texp, '', text, flags=re.IGNORECASE) if remove_title else text\n text = re.sub(r' {2,}', ' ', text).strip()\n\n results['tweet_text'] = text\n return results", "def tweet_parser(tweet):\n tweet = tweet.encode()\n\n match = re.match(\n r'^@\\w{11} (@(?P<opponent>[^\\s]+) )?#(?P<game>\\w+)( (?P<move>[^\\s]+))?( (?P<message>.*))?$',\n tweet\n )\n\n if match:\n return match.groupdict()\n else:\n raise ValueError(\"Tweet not formatted correctly.\")", "def get_feature_set_PA(tweet):\n features= {}\n return features", "def tweet_cleaner(tweets):\n n_tweets = {}\n clean = cleaner()\n for tweet in tweets:\n text = clean.clean_text(tweets[tweet][\"text\"])\n if len(text) > 15:\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def get_feature_set_SA(tweet):\n features= {}\n return features", "def get_tweets():\n\n return Tweet.query.all()", 
"def extract_metadata(tweets):\n fg = FeatureGenerator()\n result = np.zeros((len(tweets), len(fg.structured_features)))\n for i, t in enumerate(tweets):\n features = fg.extract_structured_features_for_tweet(t)\n result[i] = features\n return result", "def get_tweets():\n broken_json = read_tweets()\n #\n # Remove the last comma and wrap in a json list\n #\n parsed = json.loads('[%s]' % broken_json[:-1])\n return parsed", "def extract_timestamp(tweet):\n\n if 'timestamp_ms' in tweet:\n timestamp_ms = int(tweet[\"timestamp_ms\"])\n timestamp_ms = timestamp_ms - timestamp_ms % 1000\n return timestamp_ms / 1000.\n elif 'created_at' in tweet:\n return parse(tweet['created_at']).timestamp()\n\n raise KeyError(\"Neither the 'timestamp_ms' attribute, nor the 'created_at' attribute could be found in the tweet.\")", "def _extract_feats(self, a_tweet):\n raise NotImplementedError", "def get_tweet_text(worker_response):\n return worker_response.get('fields').get('tweet')", "def get_retweets():\r\n\r\n retweets = models.Retweet.query.all()\r\n output = []\r\n\r\n for retweet in retweets:\r\n original_tweet = models.Tweet.query.get(retweet.post_id)\r\n retweet_data = {\r\n 'content': original_tweet.text_content,\r\n 'retweet_user': retweet.username,\r\n 'tweet_id': original_tweet.id,\r\n 'tweet_user': original_tweet.username,\r\n 'timestamp': retweet.timestamp.isoformat()\r\n }\r\n\r\n output.append(retweet_data)\r\n\r\n return {\"retweets\": output}", "def enrich(self, tweet):\n tweet = urlize_tweet(expand_tweet_urls(tweet))\n # parses created_at \"Wed Aug 27 13:08:45 +0000 2008\"\n\n if settings.USE_TZ:\n tweet['datetime'] = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y').replace(tzinfo=timezone.utc)\n else:\n tweet['datetime'] = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\n\n return tweet", "def process_tweets(tweets_response, keep_all=False, debug=False):\n tweets = tweets_response\n\n #print(json.dumps(tweets, indent=4, ensure_ascii=False))\n\n output_tweets = []\n for tweet in tweets:\n # loop through every tweet\n output_tweet = {}\n output_tweet['likes'] = 0\n for k, v in tweet.items():\n if k == \"favorite_count\" or k == \"retweeted_status\":\n # print('checking favorite_count at {}'.format(k))\n # print(v)\n if k == \"favorite_count\" and v:\n output_tweet['likes'] = v\n elif k == \"retweeted_status\" and v:\n # print(\"rt:\", v)\n try:\n output_tweet['likes'] = v['favorite_count']\n except:\n print('favorites not found')\n print(v)\n pass\n\n elif k == \"media\" and v:\n # turn media dict into img url\n output_tweet[k] = []\n for m in v:\n output_tweet[k].append(m['media_url_https'])\n\n elif k == \"id\" and v:\n # make url from id and dispose id\n output_tweet['url'] = \"https://twitter.com/anyuser/status/\" + str(v)\n\n elif k == \"retweet_count\":\n if v:\n if debug: print(' picking this: ', k, v)\n output_tweet[k] = v\n else:\n if debug: print(' skipping this: ', k, v)\n # not keeping those with 0 RT\n output_tweet[k] = 0\n\n elif k == \"created_at\":\n tweet_creation_time = str_2_datetime(v, input_format=time_format_twitter_created_at)\n tweet_checked_time = datetime.datetime.now(tz=pytz.utc)\n\n output_tweet['timestamp'] = {\n \"created\": datetime_2_str(tweet_creation_time, output_format=time_format_full_with_timezone),\n \"last_checked\": datetime_2_str(tweet_checked_time, output_format=time_format_full_with_timezone)\n }\n\n else:\n # keep k:v same\n if debug: print('keeping this: ', k, repr(v))\n output_tweet[k] = v\n\n print('num of 
likes: ', output_tweet['likes'])\n\n output_tweets.append(output_tweet)\n\n output = []\n if not keep_all:\n for o in output_tweets:\n if o['likes'] > 0 and o['retweet_count'] > 0:\n output.append(o)\n else:\n output = output_tweets\n\n return output", "def write_tweet(tweet):\n try:\n tweet_data = [tweet.date, tweet.content.encode('utf-8'), tweet.id, tweet.likeCount,\n tweet.replyCount,\n tweet.retweetCount, tweet.quoteCount,\n tweet.user.username, tweet.user.id, tweet.user.followersCount,\n tweet.user.friendsCount,\n tweet.user.statusesCount, tweet.user.verified, tweet.user.url, tweet.url]\n if tweet.mentionedUsers is not None:\n tweet_data.append([tweet.mentionedUsers])\n else:\n tweet_data.append(None)\n if tweet.quotedTweet is not None:\n tweet_data.append(tweet.quotedTweet.id)\n tweet_data.append(tweet.quotedTweet.content.encode('utf-8'))\n tweet_data.append(tweet.quotedTweet.user.username)\n tweet_data.append(tweet.quotedTweet.user.id)\n if tweet.quotedTweet.mentionedUsers is not None:\n tweet_data.append([tweet.quotedTweet.mentionedUsers])\n else:\n tweet_data.append(None)\n else:\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n return tweet_data\n except UnicodeEncodeError:\n pass", "def getbasics(tfinal):\n tfinal[\"screen_name\"] = df[\"user\"].apply(lambda x: x[\"screen_name\"])\n tfinal[\"user_id\"] = df[\"user\"].apply(lambda x: x[\"id\"])\n tfinal[\"followers_count\"] = df[\"user\"].apply(lambda x: x[\"followers_count\"])\n return tfinal", "def tweets_enrichment(self, tweets):\n tweet_dict = {}\n for tweet in tweets:\n new_tweet = self.tweet_enrichment(tweet)\n if new_tweet:\n tweet_dict.update(new_tweet)\n return tweet_dict", "def getAttributes(statuses):\n\n status_texts = [status['text']\n for status in statuses]\n screen_names = [status['user']['screen_name']\n for status in statuses]\n hashtag = [hashtag['text']\n for status in statuses\n for hashtag in status['entities']['hashtags']]\n #Take alphanumeric only from words\n words = [re.sub(r'\\W+', '', w) \n for t in status_texts \n for w in t.split()]\n return (status_texts, screen_names, \n util.stringNormalization(hashtag),\n util.stringNormalization(words))", "def parse_tweet(line):\n # The following regex just strips of an URL (not just http), any punctuations,\n # or Any non alphanumeric characters\n # http://goo.gl/J8ZxDT\n text = re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \",json.loads(line[1])[\"value\"]).strip()\n # remove terms <= 2 characters\n text = ' '.join(filter(lambda x: len(x) > 2, text.split(\" \")))\n\n return (line[0], text)", "def get_tweet(self, id):\r\n return self.tweets[id]", "def make_tweet(text, time, lat, lon):\n return {'text': text, 'time': time, 'latitude': lat, 'longitude': lon}", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# 
access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "def extract_tweets(path):\n dict_list = []\n\n for line in open(path):\n loaded = json.loads(line)\n dict_list.append(loaded)\n\n text = \"\"\n for item in dict_list:\n '''\n try:\n tweet = item[\"text\"]\n #filter(lambda x: x in set(string.printable), tweet)\n text += text\n except UnicodeEncodeError:\n pass\n '''\n tweet = str(item[\"text\"].encode('ascii', 'ignore'))\n #filter(lambda x: x in set(string.printable), tweet)\n text += tweet\n\n return text", "def extract_common_fields(self, data):\n member = data.get('member', {})\n return {'username': member.get('name'), 'email': member.get('email')}", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def get_tweets(self):\n\t\ttweets = ''\n\t\tfor each in self.tweets_posted:\n\t\t\ttweets += each.timeline_format() + '\\n'\n\t\ttweets = tweets.strip('\\n')\n\t\treturn tweets", "def get_feature_set_PB(tweet):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n return features", "def build_data(self, tid, tmsg, tent):\n if tid: # have twitter id?\n if tmsg: # have a twitter message?\n if tent: # have a twitter entitiy? 
(complicated)\n\n # gracefully fail if we screw up\n try:\n dtags = tent['hashtags']\n durls = tent['urls']\n\n # will this survive json if not str?\n dt_str = \"%s\" % dt.db_datetime_utc()\n\n #---\n # data structure for storage\n py_data = dict(id_str=tid, # tweet id\n message=tmsg, # msg sent\n hashtag=tent['hashtags'],# list of #tags \n urls=tent['urls'], # list of urls\n date_str=dt_str) # epoch in utc\n #---\n except:\n return False\n return py_data\n return False", "def getTweetsFromPheme(self):\n self.helper.buildDict4Tweets(self.folderpath)", "def extract_important(tweet_objects_list):\n # This section extracts important information such as most common hashtags\n hashtag_dictionary = {}\n for tweet in tweet_objects_list:\n if \"hashtags\" in tweet:\n for individual_hashtag in tweet[\"hashtags\"]:\n if not individual_hashtag[\"text\"].lower() in hashtag_dictionary:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] = 1\n else:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] += 1\n frequency = Counter(hashtag_dictionary)\n most_frequent_hashtags = frequency.most_common(50)\n\n user_dictionary = {}\n for tweet in tweet_objects_list:\n if \"user_mentions\" in tweet:\n for individual_user in tweet[\"user_mentions\"]:\n if not individual_user[\"screen_name\"] in user_dictionary:\n user_dictionary[individual_user[\"screen_name\"].lower()] = 1\n else:\n user_dictionary[individual_user[\"screen_name\"].lower()] += 1\n frequency = Counter(user_dictionary)\n most_frequent_users = frequency.most_common(50)\n symbol_dictionary = {}\n for tweet in tweet_objects_list:\n if \"symbols\" in tweet:\n for individual_symbol in tweet[\"symbols\"]:\n if not individual_symbol[\"text\"] in symbol_dictionary:\n symbol_dictionary[individual_symbol[\"text\"]] = 1\n else:\n symbol_dictionary[individual_symbol[\"text\"]] += 1\n frequency = Counter(symbol_dictionary)\n most_frequent_symbols = frequency.most_common(50)\n return most_frequent_hashtags, most_frequent_users, most_frequent_symbols", "def getHashtagsAndMentions(tweets):\n hashtags = Counter()\n mentions = Counter()\n plain = Counter()\n\n pattern = re.compile(r\"[^#@\\w'-]+\")\n\n for t in tweets:\n words = pattern.split(t.message)\n for word in words:\n # Ignore null strings caused by split characters at the end of a\n # message and remove standalone hyphens.\n if word and not word.startswith(\"-\"):\n # Increment count for the word in the Counter.\n if word.startswith(\"#\"):\n hashtags.update({word: 1})\n elif word.startswith(\"@\"):\n mentions.update({word: 1})\n else:\n # TODO: apply nltk.corpus.stopwords.words() here,\n # across languages. 
Consider that the stopwords cut off\n # before apostrophe, therefore check if the word\n # starts with the stopword.\n plain.update({word: 1})\n\n return hashtags, mentions, plain", "def map_tweepy_array (self, tweet):\n new_tweet = [tweet.created_at,\n tweet.id,\n tweet.id_str,\n tweet.truncated,\n tweet.text,\n str(constants.TRACKS),\n tweet.source,\n tweet.source_url,\n tweet.in_reply_to_status_id,\n tweet.in_reply_to_status_id_str,\n tweet.in_reply_to_user_id,\n tweet.in_reply_to_user_id_str,\n tweet.in_reply_to_screen_name,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.geo,\n tweet.coordinates,\n tweet.place,\n tweet.contributors,\n tweet.is_quote_status,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.favorited,\n tweet.retweeted,\n tweet.lang ]\n\n return new_tweet", "def make_response(self):\n params = {\n 'tweet.fields': 'created_at,public_metrics,entities',\n 'expansions': 'author_id',\n 'user.fields': 'description'\n }\n return self.response_limit(params)", "def twitter(n=1):\n tweet = get_tweet(TWITTER_NAME, n)\n tweet_info = {\n 'text': tweet.text,\n 'date': tweet.created_at.strftime('%A, %B %d'),\n 'time': tweet.created_at.strftime('%H:%M'),\n 'latest': (int(n) == 1), # True if n is one, else False.\n }\n return jsonify(tweet_info)", "def filter_tweet(tweet):\n if not filter_tweet_core(tweet):\n return False\n if bannedusers.search(tweet['user']['screen_name']) or (\n 'retweeted_status' in tweet and bannedusers.search(tweet['retweeted_status']['user']['screen_name'])):\n return False\n if tweet['user']['screen_name'] == credentials['username']: # Do not match self tweets :-)\n return False\n return True", "def read_tweets(filename: TextIO) -> Dict[str, List[tuple]]:\n \n #file_tweet = open(filename, 'r')\n \n tweet_dict = {} \n list_tuple = []\n tuple_tweet = ()\n text_tweet = ''\n \n for lines in filename.readline(): \n line = lines.strip()\n if '<<<EOT' not in line:\n if line != '\\n' and line.endswith(':'):\n username = line[0:line.find(':')].lower()\n elif line[0:14].isnumeric(): \n source = line.split(',')\n date = int(source[FILE_DATE_INDEX])\n source = source[FILE_SOURCE_INDEX]\n favourite_count = int(source[FILE_FAVOURITE_INDEX])\n retweet = int(source[FILE_RETWEET_INDEX])\n else: \n text_tweet += line \n else:\n tuple_tweet = (text_tweet, date, source,\n favourite_count, retweet)\n list_tuple = list_tuple + [tuple_tweet]\n tweet_dict[username] = list_tuple\n text_tweet = ''\n \n return tweet_dict", "def excludeTwitterTags(tweet):\n\ttwext = tweet['text'].lower()\n\tif tweet['entities']['hashtags']:\n\t\tfor hashtag in tweet['entities']['hashtags']:\n\t\t\ttwext = twext.replace(hashtag['text'].lower(),\"\")\n\t\t\tif tweet['entities']['user_mentions']:\n\t\t\t\tfor user_mention in tweet['entities']['user_mentions']:\n\t\t\t\t\ttwext = twext.replace(user_mention['screen_name'].lower(),\"\")\n\t\t\tif tweet['entities']['urls']:\n\t\t\t\tfor url in tweet['entities']['urls']:\n\t\t\t\t\ttwext = twext.replace(url['url'].lower(),\"\")\n\treturn twext", "def get_tweet_data(card):\n username = card.find_element_by_xpath('.//span').text\n handle = card.find_element_by_xpath('.//span[contains(text(), \"@\")]').text\n\n # get postdate & filter out ads\n try:\n postdate = card.find_element_by_xpath('.//time').get_attribute('datetime')\n except NoSuchElementException:\n return\n\n # content of tweet\n comment = card.find_element_by_xpath('.//div[2]/div[2]/div[1]').text\n responding = card.find_element_by_xpath('.//div[2]/div[2]/div[2]').text\n content = comment + 
responding\n\n # stats\n reply_count = card.find_element_by_xpath('//div[@data-testid=\"reply\"]').text\n retweet_count = card.find_element_by_xpath('//div[@data-testid=\"retweet\"]').text\n like_count = card.find_element_by_xpath('//div[@data-testid=\"like\"]').text\n\n single_tweet = (username, handle, postdate, content, reply_count, retweet_count, like_count)\n return single_tweet", "def save(self):\n return getattr(self, \"_tweets\", None)", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def process_tweet(json_data):\n text = json_data.get('text')\n\n # Strip URLs.\n for url in json_data.get('entities').get('urls', []):\n text = text.replace(url.get('url', ''), 'http')\n\n # Tokenize text.\n tokens = twitter_tokenizer.tokenize(text)\n\n # Remove punctuation and stopwords.\n tokens = [x for x in tokens if x not in punctuation_set and x not in stopwords_set]\n\n # Stem the tokens.\n if toggles['stem_tokens']:\n tokens = [stemmer.stem(x) for x in tokens]\n\n result = {}\n result['stemmed'] = tokens\n result['user'] = json_data.get('user')\n\n return result", "def filter_tweet_core(tweet):\n if not ('user' in tweet and 'screen_name' in tweet['user'] \\\n and 'text' in tweet):\n return False\n if not filter_tweet_text(tweet['text']):\n return False\n if bannedclients.search(tweet['source']):\n return False\n if bannedterms.search(tweet['text']):\n return False\n if bannedterms.search(tweet['user']['screen_name']):\n return False\n if 'entities' in tweet and bannedterms.search(json.dumps(tweet['entities'])):\n return False\n if not filter_tweet_entropy(tweet['text']):\n return False\n if not filter_tweet_cardspam(tweet):\n return False\n return True", "def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n # Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets", "def id_to_tweetinfo(arrayofdict):\n 
info_to_id_dict = {}\n for entry in arrayofdict:\n # we do not want retweets, so get the original tweet in case it is not in the dict\n entry_id = entry[\"id\"] if not \"retweeted_status\" in entry else entry[\"retweeted_status\"][\"id\"] \n info = entry if not \"retweeted_status\" in entry else entry[\"retweeted_status\"]\n info_to_id_dict[entry_id] = info\n return info_to_id_dict", "def read_tweets(self, month):\r\n tweet_dict = {}\r\n with open('tweets.csv', newline='') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n # Check if the post date is same as the month\r\n if month in row['post_date'] and \"2020\" in row['post_date']:\r\n # Check if the tweets is relevant to the selected currency\r\n if self.choice in row['tweets']:\r\n # Add the tweet with the date to the dictionary\r\n tweet_dict[row['tweets']] = row['post_date']\r\n csvfile.close()\r\n return tweet_dict", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def read_tweet(tweet_line):\n\ttweet = json.loads(tweet_line)\n\t#get text \n\ttry:\n\t\ttweet_text = tweet['text']\n\texcept:\n\t\treturn \"\"\n\t# get only tweets in english\n\tif \ttweet['lang'] != 'en':\n\t\treturn \"\"\n\treturn tweet_text.encode('utf-8')", "def compute_flatten_retweeted_status_user_attributes(row):\n retweeted_status_original_user_field_names = [\n 'id', 'name', 'screen_name', 'location', 'description', 'followers_count', 'friends_count',\n 'listed_count', 'favourites_count', 'statuses_count', 'created_at', 'time_zone', 'lang']\n\n if not pd.isnull(row[\"retweeted_status_user\"]):\n series = pd.read_json(json.dumps(row[\"retweeted_status_user\"]), typ='series')\n return series[retweeted_status_original_user_field_names]\n # So, context-sensitive menus will give us available function calls.\n # row = pd.Series(row)\n # row.append(pd.Series(retweeted_status_user_object_fields), ignore_index=True)\n # print(f\"{row}\")\n row[retweeted_status_original_user_field_names] = np.NaN\n return row[retweeted_status_original_user_field_names]", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def get_tweets(api):\n return api.user_timeline()", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def get_tweets(twitter, screen_name, 
num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def get_tweet_data(card): \n username= card.find_element_by_xpath('.//span').text\n handle= card.find_element_by_xpath('.//span[contains(text(),\"@\")]').text\n try:\n postdate=card.find_element_by_xpath('.//time').get_attribute('datetime')\n except NoSuchElementException:\n return\n text = card.find_element_by_xpath('.//div[@class=\"css-1dbjc4n\"]').text\n reply_count=card.find_element_by_xpath('.//div[@data-testid=\"reply\"]').text\n try:\n retweet_count = card.find_element_by_xpath('.//div[data-testid=\"retweet\"]').text\n except:\n retweet_count='0'\n try:\n like_count = card.find_element_by_xpath('.//div[@data-testid=\"like\"]').text\n except:\n like_count='0'\n \n tweet=(username,handle,postdate,text,reply_count,retweet_count,like_count)\n print(tweet)\n return tweet", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def format_tweets(driver):\n tweets_found = driver.find_elements_by_class_name('tweet') \n tweets = []\n for tweet in tweets_found:\n tweet_dict = {}\n tweet = tweet.get_attribute('innerHTML')\n bs = BeautifulSoup(tweet.strip(), \"lxml\")\n tweet_dict['username'] = bs.find('span', class_='username').text\n timestamp = float(bs.find('span', class_='_timestamp')['data-time'])\n tweet_dict['date'] = datetime.datetime.fromtimestamp(timestamp)\n tweet_dict['tweet_link'] = 'https://twitter.com' + bs.find('a', class_='js-permalink')['href']\n tweet_dict['text'] = bs.find('p', class_='tweet-text').text\n try:\n tweet_dict['images'] = [k['src'] for k in bs.find('div', class_=\"AdaptiveMedia-container\").find_all('img')]\n except:\n tweet_dict['images'] = []\n if len(tweet_dict['images']) > 0:\n tweet_dict['text'] = tweet_dict['text'][:tweet_dict['text'].index('pic.twitter')-1]\n tweets.append(tweet_dict)\n driver.close()\n return tweets", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n 
tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def get_saved_tweets(self, user):\n return [list(item.keys()) for item in self.userTweetsStat[user]][0]", "def inject_general_timeline():\n return dict(get_general_timeline=Tweet.get_general_timeline)", "def get(self):\n params = request.args.to_dict()\n _format = params.pop('format', None)\n results = TwitterStatus.filter_tweets(**params)\n from app.utils import write_to_csv\n if _format == 'csv':\n fieldnames = FIELD_TYPECAST_FUNC_MAPPING.keys()\n return write_to_csv(results, fieldnames)\n else:\n return {\n 'error': False,\n 'remark': 'success',\n 'data': results\n }", "def post_to_tuple(self, tweetId, post):\r\n return (tweetId, post.postId, post.title, post.mediaType, post.media, post.size, post.views, post.ups, post.downs, post.tag)", "def extract(self):\n if 'email' not in self._dict:\n raise ex.NoMemberEmailError\n\n extracted = dict(x for x in self._dict.items()\n if x[0] in ['member_id', 'email'])\n fields = dict(x for x in self._dict.items()\n if x[0] in self.account.fields.export_shortcuts())\n if fields:\n extracted['fields'] = fields\n\n return extracted", "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def html_ann_tweet(tweets):\r\n for tweet in tweets:\r\n\r\n # Fairly efficient way of dealing with the fact that these keys might not exist\r\n try:\r\n text = tweet['text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['full_text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['extended_tweet']['full_text']\r\n except:\r\n pass\r\n\r\n\r\n # Hashtags\r\n tweet['text_html_annotated'] = re.sub(r'\\B#\\w\\w+',\r\n '<span class=\"hashtag\">\\g<0></span>',\r\n text)\r\n\r\n # Usernames\r\n tweet['text_html_annotated'] = re.sub(r'(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@'\r\n r'([A-Za-z]+[A-Za-z0-9]+)',\r\n '<span class=\"user\">\\g<0></span>',\r\n tweet['text_html_annotated'])\r\n\r\n # Links\r\n tweet['text_html_annotated'] = re.sub(\r\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|'\r\n r'(?:%[0-9a-fA-F][0-9a-fA-F]))+', '<a href=\"\\g<0>\">\\g<0></a>',\r\n tweet['text_html_annotated'])\r\n\r\n return tweets", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def extractDDE(self, lang, username, screenname, description, tweets):\n if isinstance(tweets, list):\n tweets = ' '.join(tweets)\n form = {\n 'lang': lang,\n 'username': username,\n 'screenname': screenname,\n 'description': description,\n 'tweet': tweets\n }\n return self.POST('extract', {}, form)", "def make_tweet_dict( txt ):\n txtLow = ' ' + txt.lower() + ' '\n\n # result storage\n fvec = {}\n\n # search for each feature\n for test in testFeatures:\n\n key = test[0]\n\n fvec[key] = False;\n for tstr in test[1]:\n fvec[key] = fvec[key] or (txtLow.find(tstr) != -1)\n\n return fvec", "def recoverTweets(authors=[], words=[], removeRetweets=False, sortBy='newest',**kwargs):\n authors = mapToValid(authors)\n words = mapToValid(words)\n\n def getTopNTweets(retrievedTweets, numberOfTweets):\n \"\"\"Sort the retrievedTweets by sortBy specified and returns the top-N Tweets\"\"\"\n if sortBy=='newest':\n retrievedTweets = 
sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]\n\n def getTweetsByUser(username, maxTweets=1000):\n \"\"\"Returns a list of (json) objects representing the tweets for a specified Twitter username.\n If any words is queried, it will filter out every tweet that doesn't contain any of those words.\"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)\n\n def searchTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for a specified query.\n It doesn't work if any authors is specified.\n Then, startingDate and endingDate cannot be older than one week ago because of Twitter restrictions for standardAPI\n :reference: https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets\n \"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)\n\n\n def getTwitterscraperTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for the specified inputs.\n It's very useful to avoid restrictions such as number of requests or dates not older than 
7 days ago for twitterAPI (and tweepy).\n It will call the recoverTweets.sh script to properly query the API by twitterscraper.\n :reference: https://github.com/taspinar/twitterscraper\n \"\"\"\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets\n\n\n if \"maxTweets\" in kwargs:\n maxTweets=kwargs['maxTweets']\n else:\n maxTweets=1000\n\n if len(authors)==0 and len(words)==0:\n return(\"qua\") ###call sample function with maxTweets and (if any) dates\n if 'startingDate' in kwargs or 'endingDate' in kwargs:\n return getTwitterscraperTweets()\n\n if len(authors)!=0:\n tweets, splits, i = [], splitIntegerIntoIntegers(maxTweets,len(authors)), 0\n for author in authors:\n tweets.extend(getTweetsByUser(username=author, maxTweets=splits[i]))\n i+=1\n return tweets\n return getTweets()", "def remove_promotional_tweets(tweets):\n clean = cleaner()\n n_tweets = {}\n for tweet in tweets:\n if not clean.linkChecker(tweets[tweet][\"text\"]):\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def preprocess_tweet(tweet):\n\n\n clean_tweet, hashtags = separate_hastags_mentions_urls(tweet)\n clean_tweet = remove_emoji_punc(clean_tweet)\n return clean_tweet, hashtags", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def extract_times(raw_times_dict):\n actual_times = {}\n if raw_times_dict[\"realtime\"] is not None:\n actual_times[\"realtime\"] = raw_times_dict[\"realtime_t\"]\n\n if raw_times_dict[\"realtime_noloads\"] is not None:\n actual_times[\"realtime_noloads\"] = 
raw_times_dict[\"realtime_noloads_t\"]\n\n if raw_times_dict[\"ingame\"] is not None:\n actual_times[\"ingame\"] = raw_times_dict[\"ingame_t\"]\n\n return actual_times", "def extract_user_data_from_update(update: Update) -> Dict:\n user = update.effective_user.to_dict()\n\n return dict(\n user_id=user[\"id\"],\n is_blocked_bot=False,\n **{\n k: user[k]\n for k in [\"username\", \"first_name\", \"last_name\", \"language_code\"]\n if k in user and user[k] is not None\n },\n )", "def get_tweets(n=1):\n tweets = list(collection.find())[-n:]\n return tweets", "def get_trial_user_attrs(self, trial_id: int) -> Dict[str, Any]:\n return self.get_trial(trial_id).user_attrs", "def test_no_extra_fields():\n t_task = Task()\n t_dict = t_task._asdict()\n assert len(t_dict) <= 4" ]
[ "0.6789574", "0.67805123", "0.6629773", "0.6599947", "0.6406722", "0.6375403", "0.6339529", "0.62415105", "0.62412876", "0.6117636", "0.6082488", "0.605193", "0.60203075", "0.5877773", "0.58547664", "0.5826857", "0.57997686", "0.57727486", "0.5763892", "0.57532144", "0.57317895", "0.5725694", "0.5700882", "0.56928355", "0.56769747", "0.56625646", "0.5604443", "0.56005555", "0.55871916", "0.5561685", "0.5538002", "0.5537393", "0.5517026", "0.54820484", "0.5479626", "0.5471839", "0.54591036", "0.54542327", "0.5400135", "0.53789324", "0.5369175", "0.53619176", "0.535312", "0.53472924", "0.53375334", "0.53362787", "0.5324388", "0.53207046", "0.5317363", "0.5278102", "0.5271335", "0.5255146", "0.52531147", "0.5248522", "0.52256393", "0.522098", "0.52063876", "0.5205305", "0.5200038", "0.5179961", "0.5179271", "0.5177721", "0.51776505", "0.51754946", "0.5165027", "0.51542497", "0.515011", "0.5141073", "0.51380074", "0.51249474", "0.51032645", "0.50969154", "0.50922096", "0.50845", "0.5070152", "0.5059881", "0.50557196", "0.50547814", "0.5042027", "0.50359493", "0.5013876", "0.5009348", "0.5005967", "0.500426", "0.49945027", "0.49878877", "0.49877137", "0.49563083", "0.49557996", "0.49524415", "0.49375358", "0.49307293", "0.49300674", "0.49223378", "0.49152106", "0.49008122", "0.48969072", "0.48931366", "0.4893101", "0.4883378" ]
0.7178673
0
Sets the Search Query and maximum Tweets to be retrieved to save Quota
def premium_set_search_params(self, search_query, from_date, to_date, no_retweets=True, results_per_call=500): # Set a static Language Filter for English Tweets lang_filter = ' lang:en' if no_retweets: rt_filter = ' -is:retweet' # Adds an ignore Retweets tag to the (Altcoin) Query self.query = search_query + lang_filter + rt_filter else: # This Query includes all Tweets, also Retweets self.query = search_query + lang_filter # Sets the Rule for the Query to be executed (time frame & # of Results) self.rule = gen_rule_payload(self.query, results_per_call=results_per_call, from_date=from_date, to_date=to_date)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(self, query, maxhits=100):", "def search_tweets(q, count=100, result_type=\"recent\"):\n\n return t.search.tweets(q=q, result_type=result_type, count=count)", "def searchTweets():\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)", "def collect_tweets(redis_client, twitter_client, search_term):\n search = Search(redis_client, twitter_client, search_term)\n search.get_term_state()\n search.parse_term_state()\n search.set_query_string()\n search.set_execution_time()\n search.execute_query()\n search.incr_query_counters()\n search.set_newest_id()\n search.set_oldest_id()\n search.set_scenario()\n search.set_term_state()\n search.store_results()\n search.set_score()\n search.log_state()", "def search(self, query, limit=10):\n word_ids, url_ids = self.query(query, limit)\n selected_url = random.choice(url_ids)\n print(\"User selected url \\\"{}\\\"\".format(self.get_url_name(selected_url)))\n return SearchNet().train_query(word_ids, url_ids, selected_url)", "async def twitter_search(self, query, limit=5):\n try:\n results = await self.bot.loop.run_in_executor(None, self.api.search_users, query, limit)\n except tweepy.TweepError as e:\n log.error(str(e))\n raise TwitterError('Unknown error from the Twitter API, this has been logged.') from e\n if not results:\n raise TwitterError('No result.')\n\n embed = discord.Embed(colour=0x738bd7)\n for user in results:\n name = '{} - @{}'.format(user.name, user.screen_name)\n description = textwrap.shorten(user.description, 1024) if user.description else 'No description.'\n embed.add_field(name=name, value=description, inline=False)\n await self.bot.say(embed=embed)", "def query_tweets_once(query, limit=None, num_tweets=0):\n logging.info(\"Querying {}\".format(query))\n query = query.replace(' ', '%20').replace(\"#\", \"%23\").replace(\":\", \"%3A\")\n pos = None\n tweets = []\n try:\n while True:\n new_tweets, pos = query_single_page(\n INIT_URL.format(q=query) if pos is None\n else RELOAD_URL.format(q=query, pos=pos),\n pos is None\n )\n if len(new_tweets) == 0:\n logging.info(\"Got {} tweets for {}.\".format(\n len(tweets), query))\n return tweets\n\n logging.info(\"Got {} tweets ({} new).\".format(\n len(tweets) + num_tweets, len(new_tweets)))\n\n tweets += new_tweets\n\n if limit is not None and len(tweets) + num_tweets >= limit:\n return tweets\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. 
Returning tweets gathered \"\n \"so far...\")\n except BaseException:\n logging.exception(\"An unknown error occurred! Returning tweets \"\n \"gathered so far.\")\n\n return tweets", "def referencesearchservlet_max_pages(self, referencesearchservlet_max_pages):\n\n self._referencesearchservlet_max_pages = referencesearchservlet_max_pages", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def test_n_results_greater_than_500(self):\n\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n thing = SearchTweets(self.db, f)\n with patch.object(thing, '_SearchTweets__twitter_n_results', new_callable=PropertyMock(return_value=1070)):\n with patch.object(thing, '_SearchTweets__twitter_all_tweets',\n new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__multi_user', new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(thing, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with patch.object(thing, '_SearchTweets__save'):\n\n thing.search()\n\n self.assertEqual(thing.total_result, 1070)", "def max_results(self, max_results: float):\n\n self._max_results = max_results", "def twitter_search(a, q, x=10000000, t=100, s=None, m=-1):\n\n tweets = list()\n tweet_count = 0\n\n while tweet_count < x:\n try:\n if (m <= 0):\n if (not s):\n new_tweets = a.search(q = q, count = t)\n else:\n new_tweets = a.search(q = q, count = t, since_id = s)\n else:\n if (not s):\n new_tweets = a.search(q = q, count = t, max_id = (m - 1))\n else:\n new_tweets = a.search(q = q, count = t, max_id = (m - 1), since_id = s)\n\n if not new_tweets:\n break\n\n for tweet in new_tweets:\n tweets.append(tweet)\n\n tweet_count += len(new_tweets)\n m = new_tweets[-1].id\n\n except tweepy.TweepError as e:\n error = (-1, \"error:\" + str(e))\n return error\n\n search_results = (tweet_count, tweets)\n\n return search_results", "def perform_query(tweets_dict, index, tf, idf, rt, likes, score, get_input=True, query=None):\n print(\"Insert your query:\\n\")\n if get_input:\n query = input()\n ranked_docs = search(query, index, idf, tf, rt, likes, score) \n return query, ranked_docs", "def process_query(api, query):\n last_tweet_id = None if 'LastTweetId' not in query else int(query['LastTweetId']['N'])\n results = api.GetSearch(result_type=\"recent\", term=query['Term']['S'],\n count=25, lang=\"en\", since_id=last_tweet_id)\n new_tweets = []\n if results:\n latest_tweet_id = results[0].id\n for tweet in results:\n if last_tweet_id is not None and tweet.id <= last_tweet_id:\n break\n new_tweets.append(tweet)\n 
store_tweets(query, new_tweets)\n update_last_tweet(query, latest_tweet_id)\n return len(new_tweets)", "def limit(self, limit):\n self._limit = limit", "def request_data(self, search_query=None, app_index=0):\n tweet_obj_fields = utils.tweet_object_fields()\n tweet_fields = ','.join(tweet_obj_fields[\"twitter_fields\"])\n params = {'query': search_query, \n 'tweet.fields': tweet_fields}\n\n if search_query is None:\n raise AttributeError(\"No query parsed.\")\n\n base_url = \"https://api.twitter.com/2/tweets/search/recent?\"\n headers = self.get_bearer_header(app_index)\n response = requests.get(base_url, headers=headers, params=params)\n return response", "def collect_tweets(search_id, search_term, number_of_tweets):\n\n tweets = []\n for tweet in api_collector.collect(search_term, number_of_tweets):\n tweets.append((tweet.id_str, tweet.created_at, tweet.full_text))\n if len(tweets) == 0:\n search = Search.objects.get(pk=search_id)\n search.empty = True\n search.save()\n notify_searchers.delay(search_id)\n else:\n classify_tweets.delay(search_id, tweets)", "def search(query, max: int = None):\n for post in client.search(query, max=max):\n print(json.dumps(post))", "def __init__(self, find=None, near=None, max_results=3, **kwargs) -> None:\n super(YelpSpider, self).__init__(**kwargs)\n self.find = find\n self.near = near\n self.max_results = int(max_results)", "def search_field(self, field, query, index=None, doc_type=None):\r\n return self.search({\r\n 'query': {\r\n 'fuzzy_like_this_field': {\r\n field: {\r\n 'like_text': query\r\n ,'max_query_terms': 250\r\n }\r\n }\r\n }\r\n }, index=index, doc_type=doc_type, size=25)", "def __init__(self, search_type):\n self.search = search_type\n self.items = None\n self.lang = None\n self.limit = None\n self.limit_type = None\n self.database = None\n self.table = None\n self.access_token = None\n self.access_token_secret = None\n self.consumer_key = None\n self.consumer_secret = None", "def updateSearch(self, authenticationToken, search):\r\n pass", "def limit(self, limit):\n\n self._limit = limit", "def limit(self, limit):\n\n self._limit = limit", "def limit(self, limit):\n\n self._limit = limit", "def searchByKeywordPro(self, query, since=\"\", until=\"\", maxResults=None):\n\n tweetsList = []\n if(not maxResults):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n while(next_token):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n else:\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n while(next_token and maxResults > 0):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n for status in tweetsList:\n createdDate = parser.parse(str(status._json[\"created_at\"]).strip())\n createdDate = createdDate.replace(\n tzinfo=pytz.utc) - createdDate.utcoffset()\n status_refined = {\n 'keyword': query,\n '_id': status._json[\"id\"],\n 'created_at': createdDate,\n 'tweetText': status._json[\"text\"],\n 'hashtags': status._json[\"entities\"][\"hashtags\"],\n 'userLoc': status._json[\"user\"][\"location\"],\n 'tweetGeo': 
status._json[\"geo\"],\n 'tweetCoordinates': status._json[\"coordinates\"],\n 'tweetPlace': status._json[\"place\"],\n 'retweet': {},\n 'quote': {},\n }\n if hasattr(status, \"quoted_status\"):\n if \"extended_tweet\" in status._json[\"quoted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"text\"]\n status_refined['quote'] = {\n 'original_retweet_id': status._json[\"quoted_status\"][\"id\"],\n 'origUserLoc': status._json[\"quoted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"quoted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"quoted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"quoted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"quoted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"quote_count\"],\n }\n elif hasattr(status, \"retweeted_status\"):\n print(status._json[\"retweeted_status\"])\n if \"extended_tweet\" in status._json[\"retweeted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"text\"]\n status_refined['retweet'] = {\n 'original_retweet_id': status._json[\"retweeted_status\"][\"id\"],\n 'origUserLoc': status._json[\"retweeted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"retweeted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"retweeted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"retweeted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"retweeted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"retweet_count\"],\n }\n elif hasattr(status, \"extended_tweet\"):\n if \"extended_tweet\" in status._json.keys():\n status_refined['tweetText'] = status._json[\"extended_tweet\"][\"full_text\"]\n self.tweets.append(status_refined)\n return self.tweets", "def apply_limit(self, query, limit):\n if limit is not None:\n limit = int(limit)\n if limit < 0:\n raise ValueError(\"limit can not be a negative integer.\")\n query = query.limit(limit)\n return query", "def limit(self, limit):\n if limit is None:\n return self\n\n self.query = self.query.limit(limit)\n self._has_limit = True\n return self", "def _query_set_limit(query: str, limit: int) -> str:\n if limit < 0:\n return query\n\n # the query has the structure of \"section | section | section ...\"\n query_list = query.split('|')\n\n # split the query to sections and find limit sections\n changed = False\n for i, section in enumerate(query_list):\n section_list = section.split()\n # 'take' and 'limit' are synonyms.\n if section_list and section_list[0] == 'limit' or section_list[0] == 'take':\n query_list[i] = f\" limit {limit} \"\n changed = True\n\n # if the query have not been changed than limit is added to the query\n if not changed:\n query_list.append(f\" limit {limit} \")\n\n fixed_query = '|'.join(query_list)\n return fixed_query", "def set_limit(self, limit, truncated=False):\n self.limit = {'limit': limit, 'type': 'limit', 'truncated': truncated}", "def set_result_limit(self, data):\n self.add_payload('resultLimit', data)\n self._result_limit = self._uni(data)", "def search_term(self, search_term: str):\n\n self._search_term = search_term", "def limit(self, limit):\n self._limit = limit\n return self", 
"def get_tweets_from_search(api, search_string, parameters=\" -filter:retweets\", since=\"2021-08-09\", lang=\"en\", max_tweets=1000):\n\n tweet_list = []\n count = 0\n search = search_string\n params = parameters\n\n for tweet in tweepy.Cursor(api.search, q=search + params,\n count=100,\n tweet_mode=\"extended\",\n lang=lang,\n since=since,\n # until=\"2015-02-01\",\n ).items():\n tweet_list.append(tweet._json[\"full_text\"])\n count += 1\n if count == max_tweets:\n break\n print(count)\n return pd.DataFrame({\"text\": tweet_list})", "def set_limit(self, limit):\n self.limit = limit\n self._prune()", "def limit(self, limit: int) -> 'Query':\n self.limit_index = max(1, limit)\n return self", "def search_doc(self, field_key, query_string, number_of_results=100, get_more_suggestions=False):\n # Stores original query\n self.search_result.original_query = query_string\n # Query initialization\n qp = QueryParser(field_key, schema=self.ix.schema)\n qp.add_plugin(DateParserPlugin())\n q = qp.parse(query_string)\n # old_query = query.DateRange(\"release_date\",datetime.strptime(\"1995\",\"%Y\"),datetime.strptime(\"2000\",\"%Y\"))\n # allow_query = query.NumericRange(\"vote_average\",5, 10)\n # allow_query = query.Require(old_query,allow_query)\n # Only as long as 's' is open we can access results (iterator is returned)\n with self.ix.searcher(weighting=scoring.TF_IDF()) as s:\n # checks query for spelling errors\n corrected = s.correct_query(q, query_string)\n if corrected.query != q:\n self.search_result.corrected_query = \"Did you mean:\" + corrected.string\n # Updates query with closest corrected version\n q = qp.parse(corrected.string)\n # If more than one suggestion is required for spelling check\n if get_more_suggestions:\n suggestions = self.get_more_suggestions(query_string, field_key, corrected.string, s)\n self.search_result.set_item(\"suggested_spelling\", suggestions)\n # gets final search result from index\n results = s.search(q) #, filter = allow_query,limit=number_of_results)\n\n # Makeshift function for now, in order to store iterator in the search_result\n print (results[:5])\n print (len(results))\n # Stores result as a list\n self.search_result.set_item(field_key, list(results))", "def search_settings(self, search_settings):\n\n self._search_settings = search_settings", "def limit(self, limit):\n self._limit = limit\n\n return self", "def set_search_params(self, **kwargs):\n self._search_params = kwargs", "def limit(self, limit):\n\n self._limit = limit\n return self", "def search_machine(ID,machine):\n\tconsumer_key = machine['consumer_key']\n\tconsumer_secret = machine['consumer_secret']\n\taccess_token = machine['access_token']\n\taccess_secret = machine['access_secret']\n\tauth = OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_token, access_secret)\n\tapi = tweepy.API(auth, wait_on_rate_limit_notify=True)\n\n\t\"\"\"Search for tweets via Twitter Search API.\"\"\"\n\tsinceId = None\n\tmax_id = ID\n\ttweetsCounts = 0\n\tfinshed_job = False\n\twith open (outfile,'w+') as f:\n\t\twhile tweetsCounts < maxTweets:\n\t\t\ttry:\n\t\t\t\tif (max_id <= 0):\n\t\t\t\t\tif (not sinceId):\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq = query,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tcount = searchLimits)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query,\n\t\t\t\t\t\t\tcount = searchLimits,\n\t\t\t\t\t\t\tgeocode=geo,\n\t\t\t\t\t\t\tsinceId = sinceId)\n\t\t\t\telse:\n\t\t\t\t\tif (not sinceId):\n\t\t\t\t\t\tnew_tweets = 
api.search(\n\t\t\t\t\t\t\tq=query, \n\t\t\t\t\t\t\tcount=searchLimits,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tmax_id=str(max_id - 1))\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query, \n\t\t\t\t\t\t\tcount=searchLimits,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tmax_id=str(max_id - 1),\n\t\t\t\t\t\t\tsince_id=sinceId)\n\t\t\t\tif not new_tweets:\n\t\t\t\t\tprint(\"NO MORE TWEETS\")\n\t\t\t\t\tfinshed_job = True\n\t\t\t\t\tbreak\n\t\t\t\tfor tweet in new_tweets:\n\t\t\t\t\tif tweet.coordinates or tweet.place:\n\t\t\t\t\t\tjson.dump(tweet._json,f,ensure_ascii=False)\n\t\t\t\t\t\tf.write('\\n')\n\t\t\t\t\n\t\t\t\ttweetsCounts += len(new_tweets)\n\t\t\t\t#print(\"Downloaded {0} tweets\".format(tweetsCounts))\n\t\t\t\tmax_id = new_tweets[-1].id\n\t\t\texcept tweepy.RateLimitError as e:\n\t\t\t\tprint(machine['index'],'Time to sleep 15 mins') \n\t\t\t\tAPI_status[machine['index']] = False\n\t\t\t\tif machine['index'] == 0:\n\t\t\t\t\tAPI_status['time'] = time.time() + 901.00\n\t\t\t\treturn finshed_job,max_id\n\t\t\texcept tweepy.TweepError as e:\n\t\t\t\tlogging.error(str(e))\n\t\t\t\tbreak\n\tf.close()\n\treturn finshed_job,max_id", "def rest_api(self):\n self.__db_init('rest')\n api = self.__api_init()\n self.c.execute(\"SELECT MAX(id) FROM tweets\")\n db_max_id = self.c.fetchone()[0] \n try: \n most_recent = api.search(q=self.keyword, result_type='recent')[0].id\n except tweepy.TweepError as e:\n print(str(e.message[0]['message']) + \n ' Update api.ini with your proper credentials:')\n print(os.path.abspath(_path_finder('userconfig','api.ini')))\n sys.exit(-1)\n flag = 0\n while ( flag == 0 ):\n try:\n batch = 5000\n flag = batch\n for search_res in tweepy.Cursor(api.search, q=self.keyword,\n count=100, result_type=\"recent\", \n since_id=db_max_id, \n max_id=most_recent).items(batch):\n flag -= 1\n print(search_res.id, search_res.created_at)\n self.c.execute('''INSERT OR IGNORE INTO tweets (id, date) \n VALUES (?, ?)''', \n (search_res.id, search_res.created_at))\n except tweepy.TweepError as e:\n print('I caught an error:', e.message)\n flag = 0\n finally:\n self.c.execute(\"SELECT last_insert_rowid() from tweets\")\n rid = self.c.fetchone()[0]\n if rid:\n self.c.execute('''SELECT id FROM tweets WHERE\n rowid={0}'''.format(rid))\n rid = self.c.fetchone()[0]\n most_recent = rid - 1\n data = api.rate_limit_status()\n print(data['resources']['search'])\n self.conn.commit()\n self.conn.close()\n print('REST database file has been created/updated:') \n print(os.path.abspath(_path_finder(\n 'keydata','{0}_rest.db'.format(self.keyword))))", "def run_full(self):\n # Get a cursor of all the keywords in the databse\n keyword_cursor = self.mongo_controller.get_keyword_batch_cursor()\n\n # Go over each batch\n for batch in keyword_cursor:\n\n # Go over each keyword in the batch\n for keyword_dict in bson.decode_all(batch):\n\n keyword = Keyword.from_dict(keyword_dict) # Cast the keyword to a Keyword object\n twitter_results = self.crawler.search(keyword, limit=self.limit_requests) # Run the search\n self.__save_tweets(twitter_results) # Save all tweets to the DB", "async def bingsearch(self, *, text):\n settings = loadauth()\n operation = 'websearch'\n if settings['apikey'] == '' or settings['apikey'] == 'blank':\n return await self.bot.say(\"Missing or incorrect API key. 
Please \" +\n \"contact the owner to add an API key.\")\n apikey = settings['apikey']\n text, limit = self.limitget(text)\n result = self.getfrombing(apikey, text, limit, operation)\n bottext = self.obtainresult(result, operation)\n return await self.bot.say(bottext)", "def set_options(self, options_list):\n self._result_limit = options_list['result_limit'].get_value()", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def set_limit(self, errors):\n self.limit = errors", "def set_artist(self, artist: str) -> None:\n self.artist = artist\n # Rebuild the song's search query to include the artist defined.\n self.query_accuracy = 0\n self.__generate_search_query()", "def _add_better_search_words(self):\n for kw in self.better_search_kw:\n self.search_query += kw", "def test_n_results_less_than_10(self):\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n thing = SearchTweets(self.db, f)\n with patch.object(thing, '_SearchTweets__twitter_n_results', new_callable=PropertyMock(return_value=9)):\n with patch.object(thing, '_SearchTweets__twitter_all_tweets',\n new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__multi_user',\n new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(thing, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with patch.object(thing, '_SearchTweets__save'):\n\n thing.search()\n\n self.assertEqual(thing.total_result, 10)", "def limit_path_query(self, limit_path_query):\n\n self._limit_path_query = limit_path_query", "def testMaxResult(self):\n \"\"\" In this case we are asking for 520 tweets, and we return as first result 500 tweets \"\"\"\n \"\"\" we check that er make exactly 2 requests. 
\"\"\"\n\n response1 = {'meta': {'result_count': 500, 'next_token': 1}}\n response2 = {'meta': {'result_count': 20, 'next_token': 2}}\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n thing = SearchTweets(self.db, f)\n with patch.object(thing, '_SearchTweets__twitter_n_results', new_callable=PropertyMock(return_value=520)):\n with patch.object(thing, '_SearchTweets__connect_to_endpoint') as mock_method:\n with patch.object(thing, '_SearchTweets__multi_user', new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(thing, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with patch.object(thing, '_SearchTweets__save'):\n\n mock_method.side_effect = [response1, response2]\n thing.search()\n\n self.assertEqual(mock_method.call_count, 2)", "def search_text(self, search_text):\n\n self._search_text = search_text", "def getTweets(self, query, start, end):\n gettweets = Twitter.GetTweets(self.rootpath, self.folderpath,\n start, end, query)\n gettweets.start_getTweets()", "def set_parameters(self, population_size=40, num_tests=5, num_searches=5, num_enabled=17, bonus1=10, bonus2=1,\n **kwargs):\n kwargs.pop('num_searches_best', None)\n super().set_parameters(num_searches_best=0, local_searches=(mts_ls1v1, mts_ls2), **kwargs)", "def searchbar_changed(data):\n print('searching for ' + data['query'])\n if data['query'] != '':\n options = queue.instantiate_options()\n query = data['query'].replace(' ', '+')\n response = get_request(search_uri + query)\n songs = []\n is_explicit_list = []\n\n for track_obj in response.json()['tracks']['items']:\n song_obj, is_explicit = create_song(track_obj, return_is_explicit=True)\n songs.append(song_obj)\n is_explicit_list.append(is_explicit)\n \n if options['safe_mode'] == 'true':\n temp_songs = []\n for i in range(len(songs)):\n if not is_explicit_list[i]:\n temp_songs.append(songs[i])\n songs = temp_songs\n\n if len(songs) > 5:\n songs = songs[:5]\n\n serialized_songs = [song.to_dict() for song in songs]\n emit('suggestions_changed', serialized_songs)", "def limit(self, count):\n self._limit = count\n return self", "def test_search_result_limit(self):\n results = self.searcher.search(\"crossfit\", 1)\n expected_results = 6\n\n self.assertEqual(results[0].indexable.docid, expected_results)", "def getTweets(user,maxTweets=3000,count=0,tweetId=0,cacheKey=False,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.get_user_timeline_limited()\n if limit:\n print '*** TWITTER RATE-LIMITED: statuses.user_timeline:'+user+':'+str(count)+' ***'\n raise getTweets.retry(countdown = limit)\n else:\n args = {'screen_name':user,'exclude_replies':False,'include_rts':True,'trim_user':False,'count':200}\n if tweetId:\n args['max_id'] = tweetId\n \n okay, result = api.get_user_timeline(**args)\n \n if okay:\n print '*** TWITTER USER_TIMELINE: '+user+':'+str(tweetId)+' ***'\n if result:\n newCount = count + len(result)\n if maxTweets:\n if newCount > maxTweets: # No need for the task to call itself again.\n pushTweets.delay(result,user,cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.\n return\n else:\n pushTweets.delay(result,user)\n\n newTweetId = min([t['id'] for t in result]) - 1 \n # Not done yet, the task calls itself with an updated count and tweetId.\n 
getTweets.delay(user,maxTweets=maxTweets,count=newCount,tweetId=newTweetId,cacheKey=cacheKey,credentials=credentials)\n else:\n pushTweets.delay([],user,cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.\n else:\n if result == '404':\n setUserDefunct(user)\n cache.set('scrape_tweets','done')\n if result == 'limited':\n raise getTweets.retry(countdown = api.get_user_timeline_limited())", "def __generate_search_query(self) -> None:\n if self.query_accuracy < 100:\n if self.title is not None and self.title != '' and self.artist is not None and self.artist != '':\n # Use the title and the artist name to find more information about the song.\n query: str = self.title + ' ' + self.artist\n query = re.sub(self.__get_filter_regex(), '', query)\n self.query = query\n # Remove unnecessary information in order to get a simpler query version.\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 100\n return\n if self.query_accuracy < 50:\n # No title nor artist name available, use the filename as search query.\n filename: str = os.path.basename(self.original_path)\n filename = os.path.splitext(filename)[0]\n query: str = filename.lower()\n query = re.sub(self.__get_filter_regex(), '', query)\n query = query.replace('_', ' ')\n query = query.strip()\n self.query = query\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 50", "def set_timelimit(self, timelimit):\n self._timelimit = timelimit", "def __init__(self, userName = None, password = None, userAgent = None):\n\n self.limit = 50\n self.userName = userName\n self.password = password\n self.userAgent = userAgent", "def limit(self, limit):\n\n # Return between 1 and 250 results, defaults to 10\n return max(1, min(250, int(limit) if limit else 10))", "def search_boost(self, search_boost):\n\n self._search_boost = search_boost", "def suggest(self, name, query, count=SUGGESTION_COUNT, params=None):\n url = f\"{self.suggestions_url}/suggestions/api/4_1/rs/suggest/{name}\"\n data = {\"query\": query, \"count\": count}\n if params:\n data.update(params)\n response = self._post(url, data)\n return response[\"suggestions\"]", "def search(self, query):", "def search(self, q):\n self.__query = q\n self.scrape_page()", "def search_result(q, since_id=None, max_id=None):\n client = TwitterClient()\n url = (\"https://api.twitter.com/1.1/search/tweets.json?count=100&q=%s\" %\n quote(q, safe=''))\n if since_id:\n url += \"&since_id=%s\" % since_id\n if max_id:\n url += \"&max_id=%s\" % max_id\n resp = client.fetch(url)\n\n statuses = resp[\"statuses\"]\n\n if len(statuses) > 0:\n new_max_id = int(statuses[-1][\"id_str\"]) + 1\n else:\n new_max_id = max_id\n\n if max_id == new_max_id:\n logging.info(\"no new tweets with id < %s\", max_id)\n return [], max_id\n\n return statuses, new_max_id", "def limit(self, limit):\n self._evaluated = False\n self._limit = limit\n return self", "def modify_search_settings(self):\n want_to_exit = False\n while want_to_exit == False:\n\n print('_____ Current Settings _____\\n'\n ' good_word_tolerance = %d\\n' % self.bot_squad[0].good_word_tolerance,\n 'bad_word_tolerance = %d\\n' % self.bot_squad[0].bad_word_tolerance,\n 'min_years_exp = %d\\n' % self.bot_squad[0].min_years_exp,\n 'min_str_len = %d\\n' % self.bot_squad[0].min_str_len,\n 'page_limit = %d\\n' % self.bot_squad[0].page_limit,)\n\n for bot in self.bot_squad:\n print(' %s is seeded with URL:' % bot.name)\n print(' %s\\n' % bot.base_url)\n\n print('Choose parameter to 
modify:\\n'\n '____________________________________\\n'\n ' 1-good_word_tolerance | q-Quit\\n'\n ' 2-bad_word_tolerance | w-Seed URLs\\n'\n ' 3-min_years_exp | e-Site Toggles\\n'\n ' 4-min_str_len | r-Filter Tuning\\n'\n ' 5-page_limit |\\n'\n '_______________ Input ______________\\n')\n my_input = input()\n\n if my_input == '1':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/good_word_tolerance.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('good_word_tolerance changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '2':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/bad_word_tolerance.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('bad_word_tolerance changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '3':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/min_years_exp.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('min_years_exp changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '4':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/min_str_len.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('min_str_len changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '5':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/page_limit.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('page_limit changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == 'q':\n want_to_exit = True\n print('Returning to main menu')\n continue\n\n if my_input == 'w':\n print('Instructions: edit seed URLs directly in the .txt files:\\n'\n ' trunk/branch/indeed_bot.txt\\n'\n ' trunk/branch/monster_bot.tx\\n'\n ' trunk/branch/craigs_bot.tx\\n')\n\n continue\n\n if my_input == 'e':\n print('WIP')\n continue\n\n if my_input == 'r':\n print('Instructions: edit keyword libraries directly in the .txt files:\\n'\n ' trunk/filters/essential_body.txt\\n'\n ' trunk/filters/excluded_body.txt\\n'\n ' trunk/filters/excluded_title.txt\\n')\n return\n\n print('Invalid input\\n')\n\n\n # TODO TODO TODO TODO TODO TODO TODO TODO\n # TODO TODO TODO TODO TODO TODO TODO TODO", "def get_tweets(keyword, max_tweets=200):\n\n # API keys.\n consumer_key = \"kNOG1klRMMUYbsjMuY5TKl4lE\"\n consumer_secret = \"ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v\"\n access_key = \"3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz\"\n access_secret = \"9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn\"\n\n # Initialize tweepy API object and authorize using API key.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n 
\"\"\" Get tweets.\"\"\"\n\n alltweets = []\n for status in tweepy.Cursor(\n api.search,\n q=keyword + \" -RT\", # the -RT flag excludes retweets.\n count=1000,\n result_type=\"recent\",\n include_entities=True,\n monitor_rate_limit=True,\n wait_on_rate_limit=True,\n lang=\"en\",\n ).items():\n\n # get text of the tweet, encoding as utf-8.\n text = str(status.text.encode(\"utf-8\"))\n\n # add to the data structure, alltweets, holding the tweets.\n alltweets.append(text)\n\n # if we've reached max_tweets, break.\n if len(alltweets) >= max_tweets:\n break\n\n return alltweets", "def get_search_suggestions(Resource=None, SuggestionQuery=None):\n pass", "def crawl(self):\n retrievedTweets = []\n\n count = 1\n \n today = datetime.datetime.now()\n today = today.replace(hour=23, minute=59, second=59, microsecond=999999)\n gap = 1\n yesterday = today - datetime.timedelta(gap) \n nextDay = yesterday + datetime.timedelta(gap)\n \n while True:\n try:\n lst = tweepy.Cursor(self.api.search, lang='en', q=self.keyword, count=50, until=nextDay.date(), result_type='popular').items(50)\n for tweet in lst:\n self.data = [tweet.created_at, tweet.id, tweet.text,\n tweet.user._json['screen_name'], tweet.user._json['name'], \n tweet.favorite_count, tweet.retweet_count, tweet.user.location]\n self.data = tuple(self.data)\n retrievedTweets.append(self.data)\n break\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n except StopIteration: \n break\n\n return retrievedTweets", "def get_tweets_upload_to_bq(users, min_date, max_date, result_limit, key, secret_key, project_id, table_id, **context):\n\n if context.get(\"yesterday_ds\"):\n df = get_users_tweets(users, context['yesterday_ds'], context['yesterday_ds'], result_limit, key, secret_key)\n else: \n df = get_users_tweets(users, min_date, max_date, result_limit, key, secret_key)\n upload_df_to_bq(df, project_id, table_id)\n\n return 'scraped tweets and uploaded to bq'", "def __prepare_query(self, query, stopwords=[], stemming_func=None):\n pass", "def __init__(self):\r\n self.tweets = []\r\n self.lcs = \"outliers\"\r\n self.importance = 0", "def after_search(self):\n self.search_number += 1\n\n if not self.store():\n logger.debug('''\n No results to store for keyword: \"{}\" in search engine: {}\n '''.format(\n self.query,\n self.search_engine_name)\n )\n\n if self.progress_queue:\n self.progress_queue.put(1)\n self.cache_results()", "def post_suggestions(self):\n\n # today is?\n # Note: We use UTC as the basis for our calculations, because\n # the Wikipedia API also returns timestamps as UTC, thus allowing\n # us to correctly post suggestions to new subscribers who saw\n # SuggestBot post to their user talk page earlier.\n now = datetime.utcnow()\n\n # Query to get all regular users of the current language versions\n getRegularsQuery = r\"\"\"SELECT *\n FROM {}\n WHERE lang=%(lang)s\n AND active=1\n AND retired=0\"\"\".format(config.regulars_table)\n\n # Query to update a specific user's status (to processing|idle|ready)\n setStatusQuery = r\"\"\"UPDATE {} SET status=%(status)s\n WHERE lang=%(lang)s\n AND username=%(username)s\"\"\".format(config.regulars_table)\n\n # Query to update a specific user's last recommendation time\n setLastrecQuery = r\"\"\"UPDATE {}\n SET last_rec=%(rectime)s\n WHERE lang=%(lang)s\n AND username=%(username)s\"\"\".format(config.regulars_table)\n\n # Query to get the time of the last suggestion posted\n getLastRecQuery = r\"\"\"SELECT MAX(last_rec) AS last_rec\n FROM {}\n WHERE lang=%(lang)s\n AND 
active=1\"\"\".format(config.regulars_table)\n\n # query to increment the number of recommendations count\n incRecCountQuery = r'''UPDATE {}\n SET n_recs=n_recs+1\n WHERE lang=%(lang)s\n AND username=%(user)s'''.format(config.regulars_table)\n\n \n # Query to set (or reset) the busy bit in the status info table\n updateStatusTableQuery = r\"\"\"UPDATE {status}\n SET daily_running=%(status)s\n WHERE lang=%(lang)s\"\"\".format(status=config.status_table)\n\n # Query to check the busy bit in the status info table, so that\n # multiple updates don't run at the same time (otherwise we'll get\n # double-posts (how do we know that? we tested it!))\n checkStatusTableQuery = r\"\"\"SELECT daily_running FROM {status}\n WHERE lang=%(lang)s\"\"\".format(status=config.status_table)\n\n # instantiate the database object, and connect\n myDb = db.SuggestBotDatabase()\n # if connection fails, fail too.\n if not myDb.connect():\n logging.error('unable to connect to the SuggestBot database')\n return(False)\n\n (dbconn, dbcursor) = myDb.getConnection()\n\n # Check if a job is already running\n dbcursor.execute(checkStatusTableQuery, {'lang': self._lang})\n row = dbcursor.fetchone()\n dbcursor.fetchall() # flush cursor\n\n if ord(row['daily_running']):\n logging.warning(\"SuggestBot is already posting to users on {0}wiki, exiting!\".format(self._lang))\n return(True)\n\n ## Instantiating bot so we can get suggestions\n sbot = suggestbot.SuggestBot(lang=self._lang)\n \n # Update the status of busyness to pretty busy...\n dbcursor.execute(updateStatusTableQuery, {'status': 1,\n 'lang': self._lang})\n dbconn.commit()\n\n # Figure out how long since we last ran.\n dbcursor.execute(getLastRecQuery, {'lang': self._lang})\n row = dbcursor.fetchone()\n dbcursor.fetchall() # flush cursor\n # Check that we got a row and that it's something...\n if row and row['last_rec']:\n timeSinceLastRun = now - row['last_rec']\n # If tSLR.days < 0, something's not right:\n if timeSinceLastRun.days < 0:\n logging.error(\"Time since last set of recs posted is negative, aborting!\")\n return(False)\n else:\n # We might see this branch the first time we're running...\n timeSinceLastRun = timedelta(0)\n\n # If it's more than one day since we last ran, we don't look\n # into the future, instead we'll just catch up. Otherwise,\n # we look half the distance into the future.\n # FIXME: this will bump people if one run runs a little long,\n # and the user is at the end of the next run. 
We should instead\n # store the start and end-time of the last run somewhere, perhaps\n # actually have a log, and then use the last start-time from the log.\n lookaheadTime = 0\n if timeSinceLastRun.days == 0:\n lookaheadTime = timeSinceLastRun.seconds / 2\n\n logging.info(\"looking {0} seconds ahead for due recs.\".format(lookaheadTime))\n\n # Store users who should get recs in this list:\n userQueue = list()\n\n dbcursor.execute(getRegularsQuery, {'lang': self._lang})\n done = False\n while not done:\n row = dbcursor.fetchone()\n if not row:\n done = True\n continue\n\n # The values of the row we currently use:\n lastRec = row['last_rec']\n period = row['period']\n username = row['username'].decode('utf-8')\n pagetitle = row['page_title']\n if pagetitle:\n pagetitle = pagetitle.decode('utf-8')\n design = row['design']\n\n recTemplate = config.templates[self._lang]['regulars']\n # If the user has chosen to use a different design from the default,\n # check if we have a template and possibly use that.\n if design:\n try:\n recTemplate = config.templates[self._lang][design]\n except KeyError:\n pass\n\n # If the user wants recs replaced, do so.\n replace = False\n if ord(row['replace_recs']):\n replace = True\n\n # FIXME: better to use the Subscriber object now, since it is\n # here and has slots for all the variables. Makes more sense.\n\n # if lastRec is None (NULL), they didn't receive any recs earlier,\n # which means it's definitely time to post.\n if not lastRec:\n ## print('lastRec is None/False, adding user')\n userQueue.append({'username': username,\n 'page': pagetitle,\n 'replace': replace,\n 'template': recTemplate,\n })\n continue\n\n # Use last rec and period to check if it's time to post or not\n if period == 0:\n # Add 28 days to last rec. This is stricly not always\n # \"once a month\", but it's a lot easier than trying to\n # handle overflow when the last recommendation occurred near\n # the end of the previous month (e.g. Jan to Feb). 
It also\n # has the added feature that recommendations usually happen on\n # the same day of the week.\n modLastRec = lastRec + timedelta(days=28)\n else:\n # add 'period' days to last rec\n modLastRec = lastRec + timedelta(days=period)\n\n # subtract the modified last rec from today\n timelapse = now - modLastRec\n\n # It's time to post recommendations if we're past this user's due\n # date, or if it's less than lookaheadTime seconds ahead.\n # This makes sure that we don't always bump users to the\n # next day's recommendations, which would otherwise mean\n # we'd consistently post a day late.\n if timelapse.days >= 0 \\\n or (timelapse.days == -1 and (86400 - timelapse.seconds) < lookaheadTime):\n # add {'username':username, 'page':page_title} to list\n userQueue.append({'username': username,\n 'page': pagetitle,\n 'replace': replace,\n 'template': recTemplate,\n })\n logging.info(\"Checked subscribers, found {n} users to post to.\".format(\n n=len(userQueue)))\n\n # (We shuffle the user list so it doesn't necessarily get processed in\n # alphabetical order, IIRC the results of this SELECT is in sorted\n # order because we use a primary key)\n if len(userQueue) > 0:\n shuffle(userQueue)\n\n # for each user on said list...\n for user in userQueue:\n # update database to processing\n dbcursor.execute(setStatusQuery,\n {'status': 'processing',\n 'lang': self._lang,\n 'username': user['username'].encode('utf-8')})\n dbconn.commit()\n\n logging.info(\"now getting recs for User:{username}\".format(\n username=user['username']))\n\n # Get recommendations and post...\n # Design and template is passed along based on what we looked\n # up earlier.\n success = sbot.recommend(username=user['username'],\n userGroup='suggest',\n filterMinor=True,\n filterReverts=True,\n page=user['page'],\n recTemplate=user['template'],\n replace=user['replace'])\n if success:\n # update database to idle, and update last_rec\n dbcursor.execute(setStatusQuery,\n {'status': 'idle',\n 'lang': self._lang,\n 'username': user['username'].encode('utf-8')})\n\n # we don't update the rec time on a test run...\n if not config.testrun:\n # Note: we call utcnow() to store the closest last recommendation\n # time in the database. 
If some slack is needed with regards to\n # posting time, we can instead alter the scheduling.\n dbcursor.execute(setLastrecQuery,\n {'rectime': datetime.utcnow(),\n 'lang': self._lang,\n 'username': user['username'].encode('utf-8')})\n # update count of number of recommendations for this user\n dbcursor.execute(incRecCountQuery,\n {'lang': self._lang,\n 'user': user['username'].encode('utf-8')})\n \n dbconn.commit()\n logging.info(\"Posted recs to User:{username}\".format(\n username=user['username']))\n\n # Update the status of busyness to pretty unbusy...\n dbcursor.execute(updateStatusTableQuery, {'status': 0,\n 'lang': self._lang})\n dbconn.commit()\n\n # disconnect from database\n myDb.disconnect()\n\n # ok, done\n return", "def limit(self, amount):\n self._limit = amount\n return self", "def search(request):\n\t\n\t# User's query\n\tquery = request.GET.get('query')\n\n\t# Search for 50 most popular tweets about user's query\n\ttweets = tweepy.Cursor(api.search, q=query, lang=\"en\", tweet_mode='extended', include_entities=True, result_type='popular').items(50)\n\n\t# Search for 20 most relevant news about user's query\n\tall_news = newsapi.get_everything(q=query, language='en', sort_by='relevancy')\n\n\t# Search for 25 hottest subreddits about user's query\n\tsubreddit = reddit.subreddit('all')\n\treddit_news = subreddit.search(query, limit=25, sort='hot')\n\n\tcontext = {\n\t\t\"tweets\": tweets, # most popular tweets\n\t\t\"all_news\": all_news, # most relevant google news\n\t\t\"reddit_news\": reddit_news # hottest subreddits\n\t}\n\n\treturn render(request, 'hashtrend/search.html', context)", "def __init__(self, con, freq_dict):\n\n self.target_con = con\n self.freq_dict = freq_dict\n self.max_results = 100\n\n self.get_records_sql = \"\"\"\n SELECT * FROM fts_target WHERE fts_target MATCH '{}' limit {};\n \"\"\"", "def search_from_raw_query(api, raw_query, **kwargs):\n tweets=api.GetSearch(raw_query=raw_query)\n return {\"tweets\":tweets}", "def search_users(self, filter=\"\", maxCount=\"\"):\n params = {\n \"f\" : \"json\",\n \"filter\" : filter,\n \"maxCount\" : maxCount\n }\n uURL = self._url + \"/users/search\"\n return self._con.post(path=uURL, postdata=params)", "def search(self, query, limit = 5000,\r\n weighting = None,\r\n sortedby = None, reverse = False):\r\n \r\n doc_reader = self.doc_reader\r\n \r\n t = time.time()\r\n if sortedby is not None:\r\n if isinstance(sortedby, basestring):\r\n sortedby = scoring.FieldSorter(sortedby)\r\n elif isinstance(sortedby, (list, tuple)):\r\n sortedby = scoring.MultiFieldSorter(sortedby)\r\n elif callable(sortedby):\r\n sortedby = sortedby()\r\n \r\n scored_list = sortedby.order(self, query.docs(self), reverse = reverse)\r\n scores = None\r\n docvector = BitVector(doc_reader.doc_count_all(),\r\n source = scored_list)\r\n if len(scored_list) > limit:\r\n scored_list = list(scored_list)[:limit]\r\n else:\r\n # Sort by scores\r\n topdocs = TopDocs(limit, doc_reader.doc_count_all())\r\n topdocs.add_all(query.doc_scores(self, weighting = weighting or self.weighting))\r\n \r\n best = topdocs.best()\r\n if best:\r\n # topdocs.best() returns a list like\r\n # [(docnum, score), (docnum, score), ... 
]\r\n # This unpacks that into two lists: docnums and scores\r\n scored_list, scores = zip(*topdocs.best())\r\n else:\r\n scored_list = []\r\n scores = []\r\n \r\n docvector = topdocs.docs\r\n t = time.time() - t\r\n \r\n return Results(self,\r\n query,\r\n scored_list,\r\n docvector,\r\n runtime = t,\r\n scores = scores)", "def __init__(self, twitter_consumer_key, twitter_consumer_secret,\n twitter_access_key, twitter_access_secret,\n search_terms, search_on='news',\n bitly_access_token='',\n news_api_key=''):\n\n # Access Keys and Secrets for Twitter API obtained at: https://developer.twitter.com/\n auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)\n auth.set_access_token(twitter_access_key, twitter_access_secret)\n\n # Store API object for access to Twitter REST API\n self.__api = tweepy.API(auth)\n\n # Term(s) to search news feeds or Twitter on\n self.search_terms = search_terms\n\n # Method TwitterBot will use to search on. Current options are 'news' or 'twitter'\n self.search_on = search_on\n\n # Access token for optional Bitly API: https://dev.bitly.com/\n self.__bitly_access_token = bitly_access_token\n\n # Access token for optional News API: https://newsapi.org/\n self.__news_api_key = news_api_key\n\n # Will store list of items scraped from news or Twitter\n self.list = []", "def set_query_string(self):\n\n if self.search_by == 'by-postal-code':\n self.querystring = {'postalCode': self.search_input, 'countryCode': \"US\"}\n else :\n self.querystring = {'city': self.search_input}", "def check_rate_limit(session, provided_iocs):\n rate_limit = session.rate_limit_status()[\"resources\"][\"search\"][\"/search/tweets\"]\n\n if rate_limit[\"remaining\"] == 0:\n reset_time = rate_limit[\"reset\"]\n rate_limit[\"reset\"] = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(reset_time))\n return rate_limit\n\n if len(provided_iocs) > rate_limit[\"remaining\"]:\n rate_limit = {\"Search term limit\": rate_limit[\"remaining\"],\n \"Total Search Terms Provided\": len(provided_iocs)}\n return rate_limit\n return", "def __init__(self, *args, **kwargs):\n self.es_conn = Elasticsearch(ELASTICSEARCH_CONN)\n self.size = kwargs.get(\"size\", 10)\n self.from_ = int(kwargs.get(\"from\", 0))\n to_limit = kwargs.get(\"to\")\n if to_limit:\n self.size = int(to_limit) - self.from_\n self.q_dict = kwargs.get(\"query\", {})\n self.fields = kwargs.get(\"fields\", None)\n #configuration to list all keys allowed for package model\n self.es_query_keys = kwargs.get(\"ES_QUERY_KEYS\", list())\n #configuration to list date type keys in package model\n self.es_date_keys = kwargs.get(\"ES_DATE_KEYS\", list())\n self.sort = kwargs.get('sort', \"_score:desc\")", "def limit_package_query(self, limit_package_query):\n\n self._limit_package_query = limit_package_query", "def get_query(self, q, request):\r\n \r\n return self.model.objects.filter(filter__icontains=q).order_by('filter')[:50]", "def __init__(self, api, geo, query):\n self.api = api\n # self.db = db\n self.geo = geo\n self.query = query\n\n # API rate call limit.\n self.limit = 100", "def setRatingFilter(self, min = 0, max = 100):\n self._updateMovieList = self._updateMovieList or self._filterRatingMin != min or self._filterRatingMax != max\n self._filterRatingMin, self._filterRatingMax = min, max", "def setRatingFilter(self, min = 0, max = 100):\n self._updateMovieList = self._updateMovieList or self._filterRatingMin != min or self._filterRatingMax != max\n self._filterRatingMin, self._filterRatingMax = min, max", "def 
setRatingFilter(self, min = 0, max = 100):\n self._updateMovieList = self._updateMovieList or self._filterRatingMin != min or self._filterRatingMax != max\n self._filterRatingMin, self._filterRatingMax = min, max", "def streamTweets(words = [], authors = [], timeLimit=120, removeRetweets=False, **kwargs):\n if 'stream' not in globals():\n global stream\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n listener = StdOutListener(removeRetweets=removeRetweets)\n auth = api.auth\n stream = tweepy.Stream(auth, listener, tweet_mode='extended')\n else:\n stream.listener.setRemoveRetweets(removeRetweets)\n stream.listener.resetTweets()\n\n words = mapToValid(words)\n authors = mapToValid(authors)\n if not words and not authors:\n words=[\"the\", \"i\", \"to\", \"a\", \"and\", \"'s\", \"is\", \"in\", \"it\", \"you\", \"of\", \"for\", \"on\", \"my\", \"that\", \"e\", \"with\", \"me\", \"do\", \"have\", \"ciao\", \"o\", \"u\", \"cool\", \"good\", \"nice\", \"#\", \"*\", \":\", \";\", \",\", \".\", \"?\", \"-\", \"%\", \"$\", \"€\", \"!\", \"(\", \")\", \"=\", \"'\"]\n\n #myQuery = ' OR '.join(kwargs[\"words\"])\n if authors:\n kwargs[\"follow\"]=[user.id_str for user in list(map(api.get_user,authors))]\n else:\n kwargs[\"track\"]=words\n #if removeRetweets:\n # myQuery += \" -filter:retweets\"\n\n #myQuery += ' from:'\n #myQuery += ' OR from:'.join(kwargs[\"authors\"])\n #print(myQuery)\n import signal\n # Register the signal function handler\n signal.signal(signal.SIGALRM, __streamHandler__)\n # Define a timeout for your function\n signal.alarm(timeLimit)\n try:\n __stream__(stream,**kwargs)\n except Exception:\n print(\"Streaming over after time period of\", timeLimit, \"seconds... Retrieved\", len(stream.listener.getTweets()), \"tweets.\")\n stream.disconnect()\n if authors and words:\n print(\"Filtering out tweets that don't contain the specified words...\")\n myTweets=[]\n for tweet in stream.listener.getTweets():\n if 'full_text' in tweet:\n tweet['text'] = tweet['full_text']\n del (tweet['full_text'])\n if any(containsWord(tweet['text'],word) for word in words):\n myTweets.append(tweet)\n print(\"Done. Retrieved\", len(myTweets), \"tweets written by the authors specified and containing (any of) the words specified.\")\n return myTweets\n return stream.listener.getTweets()", "def fetch_tweets(self, screen_name, count):\n return {}", "def setmaxsize(self, maxsize):\n self.maxsize = maxsize", "def __init__(self, query_set, per_page_limit, optional_count_query_set=None,\n allow_empty_first_page=True):\n self.query_set = query_set\n self.per_page_limit = per_page_limit\n self.optional_count_query_set = optional_count_query_set\n self.allow_empty_first_page = allow_empty_first_page\n self.__total_pages = self.__count = None\n self.__iter_page = 1", "def get_tweets_count_times(twitter, count, query=None):\n # get id to start from\n oldest_id, newest_id = _get_oldest_id(query=query)\n newest_id = newest_id or oldest_id\n\n all_tweets = []\n i = 0\n while i < count:\n i += 1\n # use search api to request 100 tweets. 
Twitter returns the most recent (max_id) first\n if oldest_id <= newest_id:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, count=TWEETS_PER_SEARCH, twitter=twitter)\n else:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id, count=TWEETS_PER_SEARCH, twitter=twitter)\n rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')\n rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')\n\n if not len(tweets):\n # not rate limitted, just no tweets returned by query\n oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000\n break\n elif isinstance(tweets, dict):\n # rate limit hit, or other twython response error\n print(tweets)\n break\n\n all_tweets.extend(tweets)\n\n # determine new oldest id\n tweet_ids = {t['id'] for t in tweets}\n if oldest_id:\n tweet_ids.add(oldest_id)\n oldest_id, newest_id = min(tweet_ids), max(tweet_ids)\n if rate_limit_remaining == 1:\n time.sleep(rate_limit_reset - time.time())\n\n save_tweets(all_tweets, query=query)\n\n # set id to start from for next time\n _set_oldest_id(oldest_id, newest_id, query=query)\n\n if len(all_tweets) == 0:\n os.remove(make_oldest_id_path(query))\n\n return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')" ]
[ "0.6628925", "0.6010755", "0.58798695", "0.5672977", "0.56503266", "0.56472254", "0.55878645", "0.557733", "0.5573124", "0.55702484", "0.54949117", "0.5455376", "0.5433694", "0.5421352", "0.5407392", "0.5406379", "0.53991216", "0.53986245", "0.536909", "0.53584504", "0.5355301", "0.5340513", "0.5337286", "0.5337286", "0.5337286", "0.53333443", "0.53106385", "0.53098434", "0.52820116", "0.5261518", "0.5255397", "0.524949", "0.52471066", "0.5227934", "0.52244544", "0.52090055", "0.52003515", "0.51827574", "0.517412", "0.51712316", "0.5168555", "0.5154198", "0.5117352", "0.5107253", "0.51047695", "0.510137", "0.5096175", "0.5093544", "0.5075898", "0.5054047", "0.5041463", "0.5037245", "0.5037128", "0.50356567", "0.5005901", "0.5005536", "0.49952003", "0.49858886", "0.49842566", "0.497508", "0.49681547", "0.49610975", "0.49587524", "0.4957202", "0.49531987", "0.49514607", "0.49478078", "0.4930078", "0.49024045", "0.48998886", "0.48910993", "0.4872186", "0.4871754", "0.4862215", "0.48610124", "0.48524317", "0.4840898", "0.48365286", "0.48302728", "0.48261574", "0.48241138", "0.48202214", "0.4818253", "0.48182163", "0.4812063", "0.48112735", "0.48112112", "0.48025018", "0.47991928", "0.47987425", "0.47954115", "0.47937876", "0.47936732", "0.47936732", "0.47936732", "0.47810632", "0.47797933", "0.4779541", "0.47759584", "0.47705367" ]
0.5829757
3
Downloads all Tweets since from_date for a Query and saves them into txt File (in append mode)
def premium_download_save_tweets(self, file_name, max_results=100): tweets = collect_results(self.rule, max_results=max_results, result_stream_args=self.premium_search_args) # save all tweets into specified file_name with open(file_name, 'a+') as f: for i, tweet in enumerate(tweets): if i % 100 == 0: print('write tweet %s to %s' % (i, file_name)) tw = self.get_tweet_attributes(tweet) f.write(jsonpickle.encode(tw, unpicklable=False) + '\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def getTweets(self, query, start, end):\n gettweets = Twitter.GetTweets(self.rootpath, self.folderpath,\n start, end, query)\n gettweets.start_getTweets()", "def extract_tweets(secret: str, query: str, outfile: str, count: int = 0, wait: int = 300) -> None:\n logger = logging.getLogger(\"extracter\")\n logger.info(\"Authenticating with Tweepy\")\n\n logger.info(\"Reading secrets file %s\", secret)\n token_fp = open(secret, \"r\")\n auth = tweepy.OAuthHandler(token_fp.readline().strip(), token_fp.readline().strip())\n auth.set_access_token(token_fp.readline().strip(), token_fp.readline().strip())\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n token_fp.close()\n\n logger.info(\"Attempting to authenticate\")\n api.verify_credentials()\n\n logger.info(\"Authenticated! Examining outfile.\")\n if not os.path.exists(outfile):\n logger.info(\"%s doesn't exist - it will be created.\", outfile)\n file_p = open(outfile, \"w\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n tweet_writer.writerow(\n [\n \"full_text\",\n \"created_at\",\n \"source\",\n \"id\",\n \"retweet_count\",\n \"favorite_count\",\n \"user_name\",\n \"user_id_str\",\n \"user_handle\",\n \"user_location\",\n \"user_desc\",\n \"user_protected\",\n \"user_followers\",\n \"user_created\",\n \"user_verified\",\n \"user_tweet_count\",\n ]\n )\n else:\n logger.info(\"%s exists - will append.\", outfile)\n file_p = open(outfile, \"a\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n\n logger.info(\"Starting Tweet extraction for query '%s'\", query)\n\n if not count:\n logger.info(\"(executing forever)\")\n else:\n logger.info(\"(executing %s times)\", count)\n\n i = 1\n bookmark = \"1\"\n\n while True:\n # Our search query.\n #\n # q - search query. We use the -filter:retweets\n # specifier in order to prune any retweets.\n # Otherwise we'd have to prune Tweets that\n # are prefaced with 'RT'\n #\n # lang - English Tweets only\n #\n # count - 100 is the max as per the Twitter API\n #\n # tweet_mode - we use extended tweet mode in\n # order to access Tweets that are greater\n # than 140 char. 
in length this is to keep\n # legacy Twitter API applications intact\n #\n # result_type - we use recent so as to create\n # a chronological record of Tweets\n #\n # since_id - we keep track of the last Tweet\n # saved and use it as a bookmark in order\n # to only get the Tweets coming after it\n #\n for tweet in api.search(\n q=f\"{query} -filter:retweets\",\n lang=\"en\",\n count=100,\n tweet_mode=\"extended\",\n result_type=\"recent\",\n max_id=bookmark,\n ):\n # These are the features we write\n tweet_writer.writerow(\n [\n tweet.full_text,\n tweet.created_at,\n tweet.source,\n tweet.id_str,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.user.name,\n tweet.user.id_str,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.user.description,\n tweet.user.protected,\n tweet.user.followers_count,\n tweet.user.created_at,\n tweet.user.verified,\n tweet.user.statuses_count,\n ]\n )\n\n # Flush the stream every time just in case\n file_p.flush()\n\n # Set the most recent Tweet as a bookmark\n bookmark = tweet.id_str\n\n # Transparency/monitoring\n limits = api.rate_limit_status()\n rem = limits[\"resources\"][\"application\"][\"/application/rate_limit_status\"][\"remaining\"]\n logger.info(\"Tweets written to %s (%s hourly API accesses left)\", outfile, rem)\n\n # Do not loop if demo\n if i == count:\n break\n i += 1\n\n # Respect API\n time.sleep(wait)", "def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r\n for tweet in results:\r\n print(tweet.created_at, tweet.text)\r\n csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])\r\n return csvFile", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def crawl(self):\n retrievedTweets = []\n\n count = 1\n \n today = datetime.datetime.now()\n today = today.replace(hour=23, minute=59, second=59, microsecond=999999)\n gap = 1\n yesterday = today - datetime.timedelta(gap) \n nextDay = yesterday + datetime.timedelta(gap)\n \n while True:\n try:\n lst = tweepy.Cursor(self.api.search, lang='en', q=self.keyword, count=50, until=nextDay.date(), result_type='popular').items(50)\n for tweet in lst:\n self.data = [tweet.created_at, tweet.id, tweet.text,\n tweet.user._json['screen_name'], tweet.user._json['name'], \n tweet.favorite_count, tweet.retweet_count, tweet.user.location]\n self.data = tuple(self.data)\n retrievedTweets.append(self.data)\n break\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n except StopIteration: \n break\n\n return retrievedTweets", "def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = 
['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def retrieve_all_tweets(api, id_scr):\n full_tweet_list = []\n new_tweets = api.user_timeline(user_id=id_scr, count=200)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n while len(new_tweets) > 0:\n print \"getting tweets before {}\".format(oldest)\n new_tweets = api.user_timeline(user_id=id_scr, count=200, max_id=oldest)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n out_tweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), tweet.entities] for tweet in\n full_tweet_list]\n\n with open('{}_tweets.csv'.format(id_scr), 'wb') as f:\n writer = csv.writer(f)\n writer.writerow([\"id\", \"created_at\", \"text\", \"entities\"])\n writer.writerows(out_tweets)", "def save_user_tweets(user, n, auth):\r\n t = twitter.Twitter(auth=auth)\r\n print(\"Fetching %i tweets from @%s\" % (n, user))\r\n tweets = t.statuses.user_timeline(screen_name=user, count=n)\r\n print(\" (actually fetched %i)\" % len(tweets))\r\n for tweet in tweets:\r\n save_tweet(tweet, outfile)", "def process_query(api, query):\n last_tweet_id = None if 'LastTweetId' not in query else int(query['LastTweetId']['N'])\n results = api.GetSearch(result_type=\"recent\", term=query['Term']['S'],\n count=25, lang=\"en\", 
since_id=last_tweet_id)\n new_tweets = []\n if results:\n latest_tweet_id = results[0].id\n for tweet in results:\n if last_tweet_id is not None and tweet.id <= last_tweet_id:\n break\n new_tweets.append(tweet)\n store_tweets(query, new_tweets)\n update_last_tweet(query, latest_tweet_id)\n return len(new_tweets)", "def query_all_tweets(query):\n year = 2006\n month = 3\n\n limits = []\n while date(year=year, month=month, day=1) < date.today():\n nextmonth = month + 1 if month < 12 else 1\n nextyear = year + 1 if nextmonth == 1 else year\n\n limits.append(\n (date(year=year, month=month, day=1),\n date(year=year, month=month, day=10))\n )\n limits.append(\n (date(year=year, month=month, day=10),\n date(year=year, month=month, day=20))\n )\n limits.append(\n (date(year=year, month=month, day=20),\n date(year=nextyear, month=nextmonth, day=1))\n )\n year, month = nextyear, nextmonth\n\n queries = ['{} since:{} until:{}'.format(query, since, until)\n for since, until in reversed(limits)]\n\n pool = Pool(20)\n all_tweets = []\n try:\n for new_tweets in pool.imap_unordered(query_tweets_once, queries):\n all_tweets.extend(new_tweets)\n logging.info(\"Got {} tweets ({} new).\".format(\n len(all_tweets), len(new_tweets)))\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. Returning all tweets \"\n \"gathered so far.\")\n\n return sorted(all_tweets)", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def get_tweets(self, start_date, end_date):\r\n pass", "def run_queries(q, file): \n data = csv(cd(file)) # modified to point to Data dir.\n seen = set(col(0, data))\n \n for q in reversed(q):\n for t in twitter(q):\n if t.id not in seen:\n data.append((\n t.id,\n t.author,\n t.language,\n t.text,\n t.date,\n t.likes,\n ))\n seen.add(t.id)\n\n data.save()", "def 
populate_twitter_acct_tweets_by_date():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n for acct in twitter_accts:\n results = api.GetSearch(raw_query=\"l=&q=from%3AReutersUS%20since%3A2017-12-01%20until%3A2017-12-02&src=typd\")", "def scrape_all():\n for query in queries:\n for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query + 'lang:en' + 'since:2019-11-06 '\n 'until:2019-12-13').get_items()):\n tweet_data = write_tweet(tweet)\n try:\n all_writer.writerow(tweet_data)\n except UnicodeEncodeError:\n pass", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n 
oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def DownloadRingtoneDataSince(request, since):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename=ringtones.csv'\r\n\r\n writer = csv.DictWriter(response, models.Ringtone.CSV_FILEDS)\r\n # Hack. Write the header first.\r\n d = {}\r\n for k in models.Ringtone.CSV_FILEDS:\r\n d[k] = k\r\n writer.writerow(d)\r\n if since:\r\n query = models.Ringtone.all().filter('creation_time >= ',\r\n datetime.datetime.strptime(since, \"%Y-%m-%dT%H:%M:%S.%fZ\"))\r\n else:\r\n query = models.Ringtone.all()\r\n for r in query:\r\n writer.writerow(r.DumpToCSVRow())\r\n return response", "def TweetColecting(list_of_kw, startdate, enddate, exclude, outfile):\n\tlist_of_kw = [f'\"{item}\" OR ' for item in list_of_kw]\n\tkeys_to_scrap = [''.join(list_of_kw).strip(\" OR \")]\n\t#print(keys_to_scrap)\n\n\tdaterange = (pd.date_range(start=startdate, end=enddate, freq='24h'))\n\n\n\n\tprint(\"\\nCollecting tweets by key : \", key)\n\n\tfor single_date in daterange:\n\n\t\tday_after = single_date + relativedelta(days=1)\n\n\t\toutputFilePath = \"./\" + outfile + \"/\"\n\t\toutputFileName = str(single_date.strftime(\"%Y-%m-%d\")) + \".csv\"\n\n\t\tif not os.path.exists(outfile):\n\t\t\tos.makedirs(outfile)\n\n\t\tprint(\"\\nCollecting tweets between\", single_date.strftime(\"%Y-%m-%d\"), \" to \", day_after.strftime(\"%Y-%m-%d\"), \"for\", outputFilePath + outputFileName)\n\n\t\ttweetCriteria = (got.manager.TweetCriteria()\n\t\t\t\t\t\t .setQuerySearch(key)\n\t\t\t\t\t\t .setSince(single_date.strftime(\"%Y-%m-%d\"))\n\t\t\t\t\t\t .setUntil(day_after.strftime(\"%Y-%m-%d\")).setLang('en')\n\t\t\t\t\t\t .setEmoji('named')\n\t\t\t\t\t\t .setExcludeWords(exclude))\n\n\t\toutputFile = codecs.open(outputFilePath + outputFileName, \"a\", \"utf-8\")\n\n\t\tprint('Searching...\\n')\n\n\t\ttweet = got.manager.TweetManager.getTweets(tweetCriteria, receiveBuffer, outputFile)\n\t\ttime.sleep(2)", "def get_twitter_data(keyword, from_date, to_date):\r\n # Creating list to append tweet data to\r\n counts_list = []\r\n dates_list = []\r\n \r\n days = pd.date_range(start = from_date, end = to_date)\r\n \r\n for i in range(len(days)-1):\r\n \r\n # Using TwitterSearchScraper to count daily tweets\r\n daily_count = 0\r\n for item in sntwitter.TwitterSearchScraper(keyword + ' since:' + str(days[i].date()) + ' until:' + str(days[i+1].date())).get_items():\r\n daily_count = daily_count + 1\r\n \r\n print(\"Day\", str(days[i].date()), \"had:\", daily_count, \". 
Going to next day...\")\r\n \r\n dates_list.append(days[i].date())\r\n counts_list.append(daily_count)\r\n \r\n return pd.DataFrame({'date': dates_list, 'tweets': counts_list})", "def __update_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'w')\n f_tweeted = open(f'{TWEETED}', 'w')\n try:\n f_tweets.write(json.dumps(self.tweets, sort_keys=True, indent=4))\n f_tweeted.write(json.dumps(self.tweeted, sort_keys=True, indent=4))\n finally:\n f_tweets.close()\n f_tweeted.close()", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def get_tweet_data(session, analytics_account, start_time, end_time, user_agent):\n\n export_url = \"https://analytics.twitter.com/user/\" + analytics_account + \"/tweets/export.json\"\n bundle_url = \"https://analytics.twitter.com/user/\" + analytics_account + \"/tweets/bundle\"\n\n export_data = {\n 'start_time' : end_time,\n 'end_time' : start_time,\n 'lang' : 'en'\n }\n querystring = '?' + urllib.parse.urlencode(export_data)\n print('Querying Twitter...')\n\n\n status = 'Pending'\n counter = 0\n while status == 'Pending':\n attempt = session.post(export_url + querystring, headers=user_agent)\n status_dict = json.loads(attempt.text)\n status = status_dict['status']\n counter += 1\n print('Attempt:', counter, ' Response:',status)\n time.sleep(5)\n\n csv_header = {'Content-Type': 'application/csv',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'}\n\n data_req = session.get(bundle_url + querystring, headers=csv_header)\n #print(\"data_req response: \", data_req.status_code)\n print(\"Data retrieved, appending dataset.\")\n return data_req.text", "def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def scrape_from_user(acc, num, path='data/tweet_ids.txt'):\n print('Collecting tweets from {}'.format(acc[num]))\n\n tweets = []\n new_tweets = []\n\n new_tweets = _api.user_timeline(screen_name=acc[num], count=200)\n tweets.extend(new_tweets)\n\n oldest = tweets[-1].id - 1\n\n while len(new_tweets) > 0:\n new_tweets = _api.user_timeline(screen_name=acc[num], count=200,\n max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n print('{} tweets collected so far'.format(len(tweets)), end='\\r')\n\n with open(path, 'a+') as f:\n for x in range(len(tweets)):\n f.write(str(tweets[x].id_str))\n f.write('\\n')\n\n print('\\nDone.')", "def get_tweets_upload_to_bq(users, min_date, max_date, result_limit, key, secret_key, project_id, table_id, **context):\n\n if context.get(\"yesterday_ds\"):\n df = get_users_tweets(users, context['yesterday_ds'], context['yesterday_ds'], result_limit, key, secret_key)\n else: \n df = get_users_tweets(users, min_date, max_date, result_limit, 
key, secret_key)\n upload_df_to_bq(df, project_id, table_id)\n\n return 'scraped tweets and uploaded to bq'", "def Pull_Relevant(flist, DateRange, TermList, OutFile):\n\n TweetCount=0\n for Filename in flist:\n Tweetset_Current = \"Start\"\n print(Filename)\n input_file = open(Filename, 'r')\n raw_batch = islice(input_file, None)\n with open(OutFile, 'a') as f: # append the batch, and close file each time.\n for current_line in raw_batch:\n tweet_item = json.loads(current_line)\n if RelevantTweet(tweet_item, TermList, DateRange):\n f.write(json.dumps(tweet_item))\n f.write('\\n')\n TweetCount=TweetCount+1\n return(TweetCount)", "def query_tweets_once(query, limit=None, num_tweets=0):\n logging.info(\"Querying {}\".format(query))\n query = query.replace(' ', '%20').replace(\"#\", \"%23\").replace(\":\", \"%3A\")\n pos = None\n tweets = []\n try:\n while True:\n new_tweets, pos = query_single_page(\n INIT_URL.format(q=query) if pos is None\n else RELOAD_URL.format(q=query, pos=pos),\n pos is None\n )\n if len(new_tweets) == 0:\n logging.info(\"Got {} tweets for {}.\".format(\n len(tweets), query))\n return tweets\n\n logging.info(\"Got {} tweets ({} new).\".format(\n len(tweets) + num_tweets, len(new_tweets)))\n\n tweets += new_tweets\n\n if limit is not None and len(tweets) + num_tweets >= limit:\n return tweets\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. Returning tweets gathered \"\n \"so far...\")\n except BaseException:\n logging.exception(\"An unknown error occurred! Returning tweets \"\n \"gathered so far.\")\n\n return tweets", "def read_tweets(self)-> None:\n self.no_of_tweets = len(self.list_of_files)\n for i in range(0, self.no_of_tweets):\n # for i in range(0,10): # running a small loop for testing purpose\n try:\n with open(self.list_of_files[i]) as json_file:\n file = json.load(json_file)\n tweet = {'id': file['id']}\n try:\n tweet['created_time'] = file['retweeted_status']['created_at']\n tweet['text'] = file['retweeted_status']['full_text']\n except:\n tweet['created_time'] = file['created_at']\n tweet['text'] = file['full_text']\n self.tweets.append(tweet)\n except:\n print(\"Error for \",self.list_of_files[i])\n if i%1000 == 0:\n print(str(round(i/self.no_of_tweets,2)*100),\"% read\")\n print(\"All Tweets read into memory\")", "def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n # Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets", "def getTweets(self, fromDate, toDate):\n return self.session.query(Tweet.text).\\\n filter(Tweet.created_at > fromDate).\\\n filter(Tweet.created_at < toDate).all()", "def searchTweets():\n if 'api' not in globals():\n 
startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)", "def bring_records_to_file_using_threads():\n username = username_entry.get()\n password = password_entry.get()\n day = int(day_entry.get())\n month = int(month_entry.get())\n year = int(year_entry.get())\n today = datetime.date(year, month, day)\n if username in users:\n if password == users[username]:\n db = Database(database_name)\n data = db.fetch_calculations(day, month, year)\n # print(data)\n # print(today)\n save_to_file(today, data)", "def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())", "def collect_tweets(ticker):\n\n # Authenticate Tweepy credentials\n auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_SECRET_CONSUMER_KEY)\n auth.set_access_token(settings.TWITTER_TOKEN_KEY, settings.TWITTER_SECRET_TOKEN_KEY)\n api = tweepy.API(auth)\n\n stock = Stock.objects.get(ticker=ticker)\n\n # Search for recent Tweets with the specific ticker\n collected_tweets = api.search(q=ticker, result_type='recent', count=100)\n\n # Iterate over the collected Tweets and save them\n for tweet in collected_tweets:\n try:\n Tweet.objects.create(\n text=tweet.text,\n created_at=tweet.created_at,\n user_id=tweet.user.id,\n user_screen_name=tweet.user.screen_name,\n verified=tweet.user.verified,\n followers_count=tweet.user.followers_count,\n friends_count=tweet.user.friends_count,\n favourites_count=tweet.user.favourites_count,\n retweet_count=tweet.retweet_count,\n stock=stock,\n )\n except IntegrityError:\n pass", "def get_tweets(query, pages=25):\n\n logger = Logger()\n after_part = 'include_available_features=1&include_entities=1&include_new_items_bar=true'\n if query.startswith('#'):\n query = quote(query)\n url = 'https://twitter.com/i/search/timeline?f=tweets&vertical=default&q={}&src=tyah&reset_error_state=false&'.format(query)\n else:\n url = 'https://twitter.com/i/profiles/show/{}/timeline/tweets?'.format(query)\n url += after_part\n \n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Referer': 'https://twitter.com/{}'.format(query),\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'X-Twitter-Active-User': 'yes',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Accept-Language': 'en-US'\n }\n\n def gen_tweets(pages):\n logger.add(\"MSG: Sending request to url '{}'...\".format(url))\n r = session.get(url, headers=headers)\n\n 
logger.add(\"MSG: Parsing result...\".format(url))\n while pages > 0:\n try:\n html = BeautifulSoup(r.json()['items_html'], parser='html', features=\"lxml\")\n except KeyError:\n raise ValueError(\n 'Oops! Either \"{}\" does not exist or is private.'.format(query))\n\n comma = \",\"\n dot = \".\"\n tweets = []\n for tweet in html.select('.stream-item'):\n # 10~11 html elements have `.stream-item` class and also their `data-item-type` is `tweet`\n # but their content doesn't look like a tweet's content\n try:\n text = tweet.select('.tweet-text')[0].get_text()\n except IndexError: # issue #50\n continue\n\n tweet_id = tweet['data-item-id']\n\n time = datetime.fromtimestamp(int(tweet.select('._timestamp')[0]['data-time-ms']) / 1000.0)\n\n interactions = [\n x.get_text()\n for x in tweet.select('.ProfileTweet-actionCount')\n ]\n\n replies = int(\n interactions[0].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[3]\n )\n\n retweets = int(\n interactions[1].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[4]\n or interactions[5]\n )\n\n likes = int(\n interactions[2].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[6]\n or interactions[7]\n )\n\n hashtags = [\n hashtag_node.get_text()\n for hashtag_node in tweet.select('.twitter-hashtag')\n ]\n urls = [\n url_node['data-expanded-url']\n for url_node in tweet.select('a.twitter-timeline-link:not(.u-hidden)')\n ]\n photos = [\n photo_node['data-image-url']\n for photo_node in tweet.select('.AdaptiveMedia-photoContainer')\n ]\n\n is_retweet = False\n if tweet.select('.js-stream-tweet')[0].has_attr('data-retweet-id'):\n is_retweet = True\n\n is_pinned = False\n if tweet.select(\".pinned\"):\n is_pinned = True\n\n videos = []\n video_nodes = tweet.select(\".PlayableMedia-player\")\n for node in video_nodes:\n styles = node['style'].split()\n for style in styles:\n if style.startswith('background'):\n tmp = style.split('/')[-1]\n video_id = tmp[:tmp.index('.jpg')]\n videos.append({'id': video_id})\n\n tweets.append({\n 'tweetId': tweet_id,\n 'isRetweet': is_retweet,\n 'time': time,\n 'text': text,\n 'replies': replies,\n 'retweets': retweets,\n 'likes': likes,\n 'isPinned': is_pinned,\n 'entries': {\n 'hashtags': hashtags, 'urls': urls,\n 'photos': photos, 'videos': videos\n }\n })\n\n\n last_tweet = html.select('.stream-item')[-1]['data-item-id']\n\n for tweet in tweets:\n if tweet:\n tweet['text'] = re.sub(r'\\Shttp', ' http', tweet['text'], 1)\n tweet['text'] = re.sub(r'\\Spic\\.twitter', ' pic.twitter', tweet['text'], 1)\n yield tweet\n\n r = session.get(url, params={'max_position': last_tweet}, headers=headers)\n pages += -1\n yield from gen_tweets(pages)", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n 
alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... %s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n 
connection.close()\n\n return rows_added", "def collect_tweets(redis_client, twitter_client, search_term):\n search = Search(redis_client, twitter_client, search_term)\n search.get_term_state()\n search.parse_term_state()\n search.set_query_string()\n search.set_execution_time()\n search.execute_query()\n search.incr_query_counters()\n search.set_newest_id()\n search.set_oldest_id()\n search.set_scenario()\n search.set_term_state()\n search.store_results()\n search.set_score()\n search.log_state()", "def get_tweets(self, start_date, end_date):\r\n # get tweets from api\r\n config = crawler.APIConfig()\r\n config.set_api_key(\"8e1618e9-419f-4239-a2ee-c0680740a500\")\r\n config.set_end_time(end_date)\r\n config.set_filter(self.region)\r\n config.set_start_time(start_date)\r\n return crawler.FetchTweets(config).fetch()", "def exportToDB(self, tweets):\n for t in range(len(tweets)):\n for x in range(len(tweets[t])):\n doc_ref = self.fs_db.collection(u'twitter').document(str(tweets[t][1]))\n doc_ref.set({\n u'created_date': str(tweets[t][0]),\n u'id': str(tweets[t][1]),\n u'tweet': tweets[t][2],\n u'screen_name': tweets[t][3],\n u'name': tweets[t][4],\n u'likes': tweets[t][5],\n u'retweets': tweets[t][6],\n u'location': tweets[t][7]\n })", "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def load_tweets(self, max_items=10000, user=None):\n for name, info in self.users.items():\n try:\n os.mkdir(self.root + info['party'].lower().replace(' ', '_'))\n except FileExistsError:\n pass\n \n filepath = self.root + info['party'].lower().replace(' ', '_')\n filepath = filepath + '/' + name.lower().replace(' ', '')\n try:\n print(f'Reading tweets from {name}')\n user = info['screen_name']\n curs = tweepy.Cursor(self.api.user_timeline,\n screen_name=user,\n count=200,\n tweet_mode=\"extended\"\n ).items(max_items)\n\n with open(filepath + '.jsonl', 'w') as f:\n for status in curs:\n tweet = status._json\n json_dump_line(tweet, f)\n \n except tweepy.TweepError as exc:\n print(exc)\n os.remove(filepath + '.jsonl')", "def data_pull(database_file, query):\n \n user_tweets = {}\n conn = sqlite3.connect(database_file)\n conn.row_factory = sqlite3.Row\n \n for row in conn.cursor().execute(query):\n if row['text'] is not None:\n data = tweetclean.cleanup(row['text'], True, True)\n try:\n user_tweets[row['owner']].append(data)\n except KeyError:\n user_tweets[row['owner']] = []\n user_tweets[row['owner']].append(data)\n\n conn.close()\n\n return user_tweets", "def get_tweets(api, query):\n \n results = []\n for tweet in tweepy.Cursor(api.search, q=query).items(1000):\n results.append(tweet)\n \n id_list = [tweet.id for tweet in results]\n #unpack into dataframe\n data = pd.DataFrame(id_list,columns=['id'])\n \n data[\"text\"]= [tweet.text.encode('utf-8') for tweet in results]\n data[\"datetime\"]=[tweet.created_at for tweet in results]\n data[\"Location\"]=[tweet.place for tweet in results]\n \n return data", "def streamTweets(words = [], authors = [], timeLimit=120, removeRetweets=False, **kwargs):\n if 'stream' not in globals():\n global stream\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n listener = StdOutListener(removeRetweets=removeRetweets)\n auth = api.auth\n stream = tweepy.Stream(auth, listener, tweet_mode='extended')\n else:\n stream.listener.setRemoveRetweets(removeRetweets)\n stream.listener.resetTweets()\n\n words = mapToValid(words)\n 
authors = mapToValid(authors)\n if not words and not authors:\n words=[\"the\", \"i\", \"to\", \"a\", \"and\", \"'s\", \"is\", \"in\", \"it\", \"you\", \"of\", \"for\", \"on\", \"my\", \"that\", \"e\", \"with\", \"me\", \"do\", \"have\", \"ciao\", \"o\", \"u\", \"cool\", \"good\", \"nice\", \"#\", \"*\", \":\", \";\", \",\", \".\", \"?\", \"-\", \"%\", \"$\", \"€\", \"!\", \"(\", \")\", \"=\", \"'\"]\n\n #myQuery = ' OR '.join(kwargs[\"words\"])\n if authors:\n kwargs[\"follow\"]=[user.id_str for user in list(map(api.get_user,authors))]\n else:\n kwargs[\"track\"]=words\n #if removeRetweets:\n # myQuery += \" -filter:retweets\"\n\n #myQuery += ' from:'\n #myQuery += ' OR from:'.join(kwargs[\"authors\"])\n #print(myQuery)\n import signal\n # Register the signal function handler\n signal.signal(signal.SIGALRM, __streamHandler__)\n # Define a timeout for your function\n signal.alarm(timeLimit)\n try:\n __stream__(stream,**kwargs)\n except Exception:\n print(\"Streaming over after time period of\", timeLimit, \"seconds... Retrieved\", len(stream.listener.getTweets()), \"tweets.\")\n stream.disconnect()\n if authors and words:\n print(\"Filtering out tweets that don't contain the specified words...\")\n myTweets=[]\n for tweet in stream.listener.getTweets():\n if 'full_text' in tweet:\n tweet['text'] = tweet['full_text']\n del (tweet['full_text'])\n if any(containsWord(tweet['text'],word) for word in words):\n myTweets.append(tweet)\n print(\"Done. Retrieved\", len(myTweets), \"tweets written by the authors specified and containing (any of) the words specified.\")\n return myTweets\n return stream.listener.getTweets()", "def write_to_file(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.clean_unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def search_machine(ID,machine):\n\tconsumer_key = machine['consumer_key']\n\tconsumer_secret = machine['consumer_secret']\n\taccess_token = machine['access_token']\n\taccess_secret = machine['access_secret']\n\tauth = OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_token, access_secret)\n\tapi = tweepy.API(auth, wait_on_rate_limit_notify=True)\n\n\t\"\"\"Search for tweets via Twitter Search API.\"\"\"\n\tsinceId = None\n\tmax_id = ID\n\ttweetsCounts = 0\n\tfinshed_job = False\n\twith open (outfile,'w+') as f:\n\t\twhile tweetsCounts < maxTweets:\n\t\t\ttry:\n\t\t\t\tif (max_id <= 0):\n\t\t\t\t\tif (not sinceId):\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq = query,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tcount = searchLimits)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query,\n\t\t\t\t\t\t\tcount = searchLimits,\n\t\t\t\t\t\t\tgeocode=geo,\n\t\t\t\t\t\t\tsinceId = sinceId)\n\t\t\t\telse:\n\t\t\t\t\tif (not sinceId):\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query, \n\t\t\t\t\t\t\tcount=searchLimits,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tmax_id=str(max_id - 1))\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query, \n\t\t\t\t\t\t\tcount=searchLimits,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tmax_id=str(max_id - 1),\n\t\t\t\t\t\t\tsince_id=sinceId)\n\t\t\t\tif not new_tweets:\n\t\t\t\t\tprint(\"NO MORE TWEETS\")\n\t\t\t\t\tfinshed_job = True\n\t\t\t\t\tbreak\n\t\t\t\tfor tweet in new_tweets:\n\t\t\t\t\tif 
tweet.coordinates or tweet.place:\n\t\t\t\t\t\tjson.dump(tweet._json,f,ensure_ascii=False)\n\t\t\t\t\t\tf.write('\\n')\n\t\t\t\t\n\t\t\t\ttweetsCounts += len(new_tweets)\n\t\t\t\t#print(\"Downloaded {0} tweets\".format(tweetsCounts))\n\t\t\t\tmax_id = new_tweets[-1].id\n\t\t\texcept tweepy.RateLimitError as e:\n\t\t\t\tprint(machine['index'],'Time to sleep 15 mins') \n\t\t\t\tAPI_status[machine['index']] = False\n\t\t\t\tif machine['index'] == 0:\n\t\t\t\t\tAPI_status['time'] = time.time() + 901.00\n\t\t\t\treturn finshed_job,max_id\n\t\t\texcept tweepy.TweepError as e:\n\t\t\t\tlogging.error(str(e))\n\t\t\t\tbreak\n\tf.close()\n\treturn finshed_job,max_id", "def __refresh_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'r')\n f_tweeted = open(f'{TWEETED}', 'r')\n\n try:\n self.tweets = json.load(f_tweets)\n self.tweeted = json.load(f_tweeted)\n finally:\n f_tweets.close()\n f_tweeted.close()", "def get_tweets(keyword, max_tweets=200):\n\n # API keys.\n consumer_key = \"kNOG1klRMMUYbsjMuY5TKl4lE\"\n consumer_secret = \"ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v\"\n access_key = \"3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz\"\n access_secret = \"9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn\"\n\n # Initialize tweepy API object and authorize using API key.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n \"\"\" Get tweets.\"\"\"\n\n alltweets = []\n for status in tweepy.Cursor(\n api.search,\n q=keyword + \" -RT\", # the -RT flag excludes retweets.\n count=1000,\n result_type=\"recent\",\n include_entities=True,\n monitor_rate_limit=True,\n wait_on_rate_limit=True,\n lang=\"en\",\n ).items():\n\n # get text of the tweet, encoding as utf-8.\n text = str(status.text.encode(\"utf-8\"))\n\n # add to the data structure, alltweets, holding the tweets.\n alltweets.append(text)\n\n # if we've reached max_tweets, break.\n if len(alltweets) >= max_tweets:\n break\n\n return alltweets", "def get_posts(self, userid, username):\n dict_json = {}\n x = 0\n outfile_name = \"tweetsFrom\" + username + \".json\"\n posts = api.GetUserTimeline(user_id=userid, count=200)\n text_list = [p.text for p in posts]\n for text in text_list:\n dict_json[x] = text\n x += 1\n with open(outfile_name, \"w\") as outfile:\n json.dump(dict_json, outfile)\n outfile.close()", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = 
twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def display_sentiment(ticker: str, n_tweets: int, n_days_past: int, export: str = \"\"):\n # Date format string required by twitter\n dtformat = \"%Y-%m-%dT%H:%M:%SZ\"\n\n # Algorithm to extract\n dt_recent = datetime.now() - timedelta(seconds=20)\n dt_old = dt_recent - timedelta(days=n_days_past)\n print(\n f\"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n df_tweets = pd.DataFrame(\n columns=[\n \"created_at\",\n \"text\",\n \"sentiment\",\n \"positive\",\n \"negative\",\n \"neutral\",\n ]\n )\n while True:\n # Iterate until we haven't passed the old number of days\n if dt_recent < dt_old:\n break\n # Update past datetime\n dt_past = dt_recent - timedelta(minutes=60)\n\n temp = twitter_model.load_analyze_tweets(\n ticker,\n n_tweets,\n start_time=dt_past.strftime(dtformat),\n end_time=dt_recent.strftime(dtformat),\n )\n\n if temp.empty:\n return\n\n df_tweets = pd.concat([df_tweets, temp])\n\n if dt_past.day < dt_recent.day:\n print(\n f\"From {dt_past.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n # Update recent datetime\n dt_recent = dt_past\n\n # Sort tweets per date\n df_tweets.sort_index(ascending=False, inplace=True)\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n df_tweets[\"prob_sen\"] = 1\n\n # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)\n df_tweets.reset_index(inplace=True)\n df_tweets[\"Month\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(\n lambda x: x.month\n )\n df_tweets[\"Day\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(lambda x: x.day)\n df_tweets[\"date\"] = pd.to_datetime(df_tweets[\"created_at\"])\n df_tweets = df_tweets.sort_values(by=\"date\")\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n _, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI)\n ax[0].plot(\n pd.to_datetime(df_tweets[\"created_at\"]),\n df_tweets[\"cumulative_compound\"].values,\n lw=3,\n c=\"cyan\",\n )\n ax[0].set_ylabel(\"Cumulative VADER Sentiment\")\n xlocations = []\n xlabels = []\n for _, day_df in df_tweets.groupby(by=\"Day\"):\n day_df[\"time\"] = pd.to_datetime(day_df[\"created_at\"])\n day_df = day_df.sort_values(by=\"time\")\n ax[0].plot(day_df[\"time\"], day_df[\"sentiment\"].cumsum(), c=\"tab:blue\")\n xlocations.append(day_df.time.values[0])\n xlabels.append(day_df[\"time\"].apply(lambda x: x.strftime(\"%m-%d\")).values[0])\n\n ax[1].bar(df_tweets[\"date\"], df_tweets[\"positive\"], color=\"green\", width=0.02)\n ax[1].bar(df_tweets[\"date\"], -1 * df_tweets[\"negative\"], color=\"red\", width=0.02)\n ax[0].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[0].minorticks_on()\n ax[0].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[0].set_xticks(xlocations)\n ax[0].set_xticklabels(xlabels)\n\n ax[1].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[1].minorticks_on()\n ax[1].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[1].set_ylabel(\"VADER Polarity Scores\")\n 
ax[1].set_xticks(xlocations)\n ax[1].set_xticklabels(xlabels)\n plt.suptitle(\n f\"Twitter's {ticker} total compound sentiment over time is {np.sum(df_tweets['sentiment'])}\"\n )\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n print(\"\")\n export_data(\n export, os.path.dirname(os.path.abspath(__file__)), \"sentiment\", df_tweets\n )", "def request_data(self, search_query=None, app_index=0):\n tweet_obj_fields = utils.tweet_object_fields()\n tweet_fields = ','.join(tweet_obj_fields[\"twitter_fields\"])\n params = {'query': search_query, \n 'tweet.fields': tweet_fields}\n\n if search_query is None:\n raise AttributeError(\"No query parsed.\")\n\n base_url = \"https://api.twitter.com/2/tweets/search/recent?\"\n headers = self.get_bearer_header(app_index)\n response = requests.get(base_url, headers=headers, params=params)\n return response", "def query_into_file(self, query, fname=\"\", fields=None, parameters=None):\n target_url = self.build_query(query, fields=fields, parameters=parameters)\n\n with urllib.request.urlopen(target_url) as url:\n content = url.read()\n\n with open(fname, 'wb') as ofs:\n ofs.write(content)", "def get_tweets_count_times(twitter, count, query=None):\n # get id to start from\n oldest_id, newest_id = _get_oldest_id(query=query)\n newest_id = newest_id or oldest_id\n\n all_tweets = []\n i = 0\n while i < count:\n i += 1\n # use search api to request 100 tweets. Twitter returns the most recent (max_id) first\n if oldest_id <= newest_id:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, count=TWEETS_PER_SEARCH, twitter=twitter)\n else:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id, count=TWEETS_PER_SEARCH, twitter=twitter)\n rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')\n rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')\n\n if not len(tweets):\n # not rate limitted, just no tweets returned by query\n oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000\n break\n elif isinstance(tweets, dict):\n # rate limit hit, or other twython response error\n print(tweets)\n break\n\n all_tweets.extend(tweets)\n\n # determine new oldest id\n tweet_ids = {t['id'] for t in tweets}\n if oldest_id:\n tweet_ids.add(oldest_id)\n oldest_id, newest_id = min(tweet_ids), max(tweet_ids)\n if rate_limit_remaining == 1:\n time.sleep(rate_limit_reset - time.time())\n\n save_tweets(all_tweets, query=query)\n\n # set id to start from for next time\n _set_oldest_id(oldest_id, newest_id, query=query)\n\n if len(all_tweets) == 0:\n os.remove(make_oldest_id_path(query))\n\n return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')", "def searchByKeyword(self, keyword, until=\"\", since=\"\", count=None, result_type=\"recent\"):\n if count is None:\n tweets = tweepy.Cursor(self.api.search, q=keyword, until=until, since=since, result_type=result_type,\n full_text=True, tweet_mode=\"extended\", lang=\"en\").items()\n else:\n tweets = tweepy.Cursor(self.api.search, q=keyword, until=until, since=since, result_type=result_type,\n full_text=True, tweet_mode=\"extended\", lang=\"en\").items(count)\n\n for status in tweets:\n createdDate = parser.parse(str(status._json[\"created_at\"]).strip())\n createdDate = createdDate.replace(\n tzinfo=pytz.utc) - createdDate.utcoffset()\n status_refined = {\n 'keyword': keyword,\n '_id': status._json[\"id\"],\n 'created_at': createdDate,\n 'tweetText': status._json[\"full_text\"],\n 'hashtags': 
status._json[\"entities\"][\"hashtags\"],\n 'userLoc': status._json[\"user\"][\"location\"],\n 'tweetGeo': status._json[\"geo\"],\n 'tweetCoordinates': status._json[\"coordinates\"],\n 'tweetPlace': status._json[\"place\"],\n 'retweet': {},\n }\n if hasattr(status, \"retweeted_status\"):\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"full_text\"]\n status_refined['retweet'] = {\n 'original_retweet_id': status._json[\"retweeted_status\"][\"id\"],\n 'origUserLoc': status._json[\"retweeted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"retweeted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"retweeted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"retweeted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"retweeted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"retweet_count\"],\n }\n self.tweets.append(status_refined)\n return self.tweets", "def get_tweets(self, output_path, tweets_ids):\n\n\t\tloading = 0\n\n\t\tapp = TwitterApp.get_twitter_app_instance(self)\n\n\t\ttweets_content = []\n\n\t\tnew_tweets_ids = []\n\n\t\tqty_tweets = len(tweets_ids)\n\n\t\tlast_index = 0\n\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\n\t\t\t\tresponse = app.GetStatuses(tweets_ids[last_index:last_index+100], map=True)\n\t\t\t\t\n\t\t\texcept Exception as e:\n\n\t\t\t\t# save the available posts to this time\n\t\t\t\tdataset = pd.DataFrame({'tweet_id':new_tweets_ids, 'post_content':tweets_content})\n\t\t\t\twrite_tweets(output_path, dataset)\n\n\t\t\t\tlogging.info(''.join(['Error on request ', str(loading)]))\n\n\t\t\t\tprint(\"ERROR:\", e)\n\n\t\t\t\t'''\n\t\t\t\tUsually, the rate limit allowed by Twitter API is exceeded (in this case GET statuses/lookup is 900 for user auth and 300 for the app auth for every 15 minutes), one way to deal with it is sleeping the code for approximately 15 minutes to continue after.\n\t\t\t\t'''\n\t\t\t\ttime.sleep(950)\n\n\t\t\t\ttry:\n\n\t\t\t\t\tresponse = app.GetStatuses(tweets_ids[last_index:last_index+100], map=True)\n\t\t\t\t\n\t\t\t\texcept Exception as e:\n\n\t\t\t\t\tprint(e)\n\t\t\t\t\texit(1)\n\n\n\t\t\tfor id_value, text in response.items():\t\t\t\n\n\t\t\t\t# This means that the post is not available now.\n\t\t\t\tif (text == None):\n\t\t\t\t\tcontinue\n\n\t\t\t\telse:\n\n\t\t\t\t\tnew_tweets_ids.append(id_value)\n\t\t\t\t\ttweets_content.append(text.text)\n\n\t\t\t# Each request gets 100 posts\n\t\t\tlast_index = last_index + 100\n\n\t\t\t# There is no more IDs\n\t\t\tif (last_index > qty_tweets):\n\t\t\t\tbreak\t\n\t\t\n\t\t# save all tweets\n\t\tdataset = pd.DataFrame({'tweet_id':new_tweets_ids, 'post_content':tweets_content})\n\t\twrite_tweets(output_path, dataset)", "def save_user_archive_to_file(user):\n filename = str(user).__add__('-tweets.json')\n with open(filename, \"w\") as f:\n archive_generator = rest.fetch_user_archive(user)\n for page in archive_generator:\n for tweet in page:\n f.write(json.dumps(tweet) + \"\\n\")\n logging.warning(u\"Wrote tweets from the user\")", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n 
api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)", "def user_scrape(users: List, outfile: str, limit: int, since: str) -> None:\n assert(len(users)>0)\n\n # put params into configuration object\n c = twint.Config()\n c.Hide_output = True\n c.Limit = limit\n c.Language = \"en\"\n c.Output = os.path.join(data_dir, outfile)\n c.Store_csv = True\n c.Since = since\n\n for u in tqdm(users, total=293):\n # and run the search for each username\n sleep(2.5)\n try:\n #print(\"scanning tweets from user {}\".format(u))\n c.Username = u\n twint.run.Search(c)\n except:\n continue", "def searchByKeywordPro(self, query, since=\"\", until=\"\", maxResults=None):\n\n tweetsList = []\n if(not maxResults):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n while(next_token):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n else:\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n while(next_token and maxResults > 0):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n for status in tweetsList:\n createdDate = parser.parse(str(status._json[\"created_at\"]).strip())\n createdDate = createdDate.replace(\n tzinfo=pytz.utc) - createdDate.utcoffset()\n status_refined = {\n 'keyword': query,\n '_id': status._json[\"id\"],\n 'created_at': createdDate,\n 'tweetText': status._json[\"text\"],\n 'hashtags': status._json[\"entities\"][\"hashtags\"],\n 'userLoc': status._json[\"user\"][\"location\"],\n 'tweetGeo': status._json[\"geo\"],\n 'tweetCoordinates': status._json[\"coordinates\"],\n 'tweetPlace': status._json[\"place\"],\n 'retweet': {},\n 'quote': {},\n }\n if hasattr(status, \"quoted_status\"):\n if \"extended_tweet\" in status._json[\"quoted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"text\"]\n status_refined['quote'] = {\n 'original_retweet_id': status._json[\"quoted_status\"][\"id\"],\n 'origUserLoc': status._json[\"quoted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"quoted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"quoted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"quoted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"quoted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"quote_count\"],\n }\n elif hasattr(status, \"retweeted_status\"):\n print(status._json[\"retweeted_status\"])\n if \"extended_tweet\" in status._json[\"retweeted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"text\"]\n status_refined['retweet'] = {\n 'original_retweet_id': status._json[\"retweeted_status\"][\"id\"],\n 'origUserLoc': status._json[\"retweeted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': 
status._json[\"retweeted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"retweeted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"retweeted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"retweeted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"retweet_count\"],\n }\n elif hasattr(status, \"extended_tweet\"):\n if \"extended_tweet\" in status._json.keys():\n status_refined['tweetText'] = status._json[\"extended_tweet\"][\"full_text\"]\n self.tweets.append(status_refined)\n return self.tweets", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def get_live_tweets_from_twitter_stream(auth, terms, num_tweets):\n listener = TwitterListener()\n listener._max_tweets = num_tweets\n twitter_stream = Stream(auth, listener)\n twitter_stream.filter(track=terms, languages=['en'])\n listener.store_live_tweets()", "def on_status(self, status):\n try:\n \n time = status.created_at\n text = str(status.text)\n \n if text.startswith('RT'):\n text = text.split('RT')[1].replace(',','')\n print(text)\n print(time)\n \n line = str(text + ',' + str(time) + '\\n')\n output = open('tweets.txt','a')\n output.write(line)\n output.close() \n else:\n text = text.split('RT')[0].replace(',','')\n print(text)\n \n line = str(text + ',' + str(time) + '\\n')\n output = open('tweets.txt','a')\n output.write(line)\n output.close()\n\n # count\n self.counter += 1\n print(self.counter)\n \n if self.counter < self.limit:\n return True\n else:\n self.counter ==0\n twitterStream.disconnect()\n \n \n except BaseException as e:\n print('failed on_status,',str(e))", "def download_report(self, keywords, date='all', geo='all', geor='all', graph = 'all_csv', sort=0, scale=0, sa='N'):\n params = urllib.urlencode({\n 'q': \",\".join(keywords),\n 'date': date,\n 'graph': graph,\n 'geo': geo,\n 'geor': geor,\n 'sort': str(sort),\n 'scale': str(scale),\n 'sa': sa\n }) \n self.raw_data = self.opener.open('http://www.google.com/trends/viz?' 
+ params).read()[2::2]\n self._build_header_dictionary()", "def populate_twitter_acct_tweets(retrieve_until_dt=datetime.now(tz=timezone.utc) - timedelta(days=60)):\n spinner = itertools.cycle(['|', '/', '-', '\\\\'])\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n while 1:\n for acct in twitter_accts:\n # acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct).first()\n acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct, created_datetime__gte=date(2018, 2, 7)).first()\n\n max_id = None\n if acct_oldest_tweet is not None:\n max_id = acct_oldest_tweet.feedid - 1\n\n # do api call 15 for each account times due to twitter rate limit\n for _ in range(15):\n feed_created_dt = None\n try:\n statuses = api.GetUserTimeline(screen_name=acct.screen_name, include_rts=False, max_id=max_id)\n for s in statuses:\n write_and_restart_line(next(spinner))\n created_feed = USTwitterNewsFeed.objects.create(posted_by=acct,\n created_datetime=datetime.strptime(s.created_at, '%a %b %d %X %z %Y'),\n text=s.text,\n feedid=s.id)\n max_id = created_feed.feedid - 1\n feed_created_dt = created_feed.created_datetime\n except TwitterError as e:\n print(e.message)\n except IntegrityError as e:\n print('integrity error')\n break\n\n # only retrieve until last status created datetime earlier than retrieve until\n # if (feed_created_dt is None) or (feed_created_dt < retrieve_until_dt):\n # break", "def get_followers(twitter,screen_name,filename,count):\n url = 'https://api.twitter.com/1.1/followers/ids.json?&screen_name=@'+screen_name+'&skip_status=true&include_user_entities=false&count='+str(count) \n consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\n access = oauth.Token(key=access_token, secret=access_token_secret)\n client = oauth.Client(consumer, access)\n try:\n response,data = client.request(url)\n dataStr = data.decode('utf-8') \n if('Rate limit exceeded' in dataStr ):\n print('rate limit exceeded error.. 
sleep for 15 min')\n time.sleep(61 * 15)\n response,data = client.request(url)\n \n jsonid = json.loads(dataStr)\n li = list(jsonid['ids'])\n output = open(filename, 'wb')\n pickle.dump(li, output)\n output.close()\n except:\n pass\n \n return li", "def search_tweets(q, count=100, result_type=\"recent\"):\n\n return t.search.tweets(q=q, result_type=result_type, count=count)", "def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return", "def trendingTweets():\n api = twitter.Api()\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n topicSearchTerm = topic.name\n trending_tweets = api.GetSearch(topicSearchTerm)\n for tweet in trending_tweets:\n util.safe_print(tweet.GetText())\n # pass", "def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)", "def getTweetsPerUser(self, fromDate, toDate, number):\n return self.session.query(func.count(User.id), User.screen_name).\\\n join(Tweet).group_by(User.id).\\\n order_by(desc(func.count(User.id))).\\\n filter(Tweet.created_at > fromDate).\\\n filter(Tweet.created_at < toDate)[0: number]", "def load_all_tweets(self, count):\n\n for influencer in tqdm(self.influencers.allInfluencers, desc='Gathering Tweets'):\n self.get_tweets(influencer, count)", "def queryTerm2Twitter(term): \n statusList = api.GetSearch(term, count=100, result_type='recent')\n timeStampOfStatus = [datetime.fromtimestamp(i.created_at_in_seconds) for i in statusList]\n timeStampOfStatus.sort() \n return timeStampOfStatus[0]", "def get_tweets_in_date_range(start, end, screen_name):\n start, end = convert_string_to_datetime(start), convert_string_to_datetime(end)\n culled_tweets = []\n first_date, max_id = start, None\n errors = 0\n while first_date >= start:\n try:\n tweets = get_tweets(max_id=max_id, screen_name=screen_name)\n except TwitterException as e:\n errors += 1\n with 
open('twitter_errors.txt', 'a') as f:\n f.write(e.message + ',' + screen_name + '\\n')\n if errors != 5:\n time.sleep(1)\n continue\n else:\n if not culled_tweets:\n return False\n break\n if max_id is not None and (tweets and tweets[0]['id_str'] == max_id):\n tweets.pop(0)\n oldest_tweet, newest_tweet = tweets[-1], tweets[0]\n first_date = convert_time_string(oldest_tweet['created_at'])\n last_date = convert_time_string(newest_tweet['created_at'])\n max_id = oldest_tweet['id_str']\n if first_date <= start or last_date >= end:\n tweets = [t for t in tweets\n if convert_time_string(t['created_at']) <= end\n and convert_time_string(t['created_at']) >= start]\n culled_tweets.extend(tweets)\n\n return culled_tweets", "def crawlAccount(target):\n\n\t# connect Twitter api\n\ttwitter = connectTwitter()\t\n\ttry:\n\t\tuser_timeline = twitter.get_user_timeline(screen_name=target, count=200, include_rts=False, exclude_replies=False)\n\texcept TwythonError:\n\t\tsys.exit('Received 404 for %s. Account does not exist or is banned.' % target)\n\t\n\tuser_timeline = twitter.get_user_timeline(screen_name=target, count=200, include_rts=True, exclude_replies=False)\t\n\ttweets = []\n\tids = []\n\n\n\t# stop this loop\n\twhile len(ids) < user[0]['statuses_count']:\n\t\tif len(user_timeline) == 0:\n\t\t\tprint '[!] No more tweets available. Ending scraper.\\n'\n\t\t\tbreak\n\n\t\tfor tweet in user_timeline:\n\t\t\tids.append(tweet['id'])\t\t\t\n\t\t\ttweets.append(tweet)\n\n\t\t\twith open('../Raw data/tweets/%s.json' % screen_name, 'a') as json_out:\n\t\t\t\tjson.dump(tweet, json_out)\n\t\t\t\tjson_out.write('\\n')\n\n\t\tprint '\\t[i] Found %i tweets so far.' % (len(ids))\n\t\t\n\t\ttime.sleep(5)\n\t\tuser_timeline = twitter.get_user_timeline(screen_name=screen_name, count=200, max_id=min(ids) - 1, include_rts=True, exclude_replies=False)\t\n\t\t\n\telse:\n\t\tprint '[!] All tweets scraped. 
Ending scraper.\\n'\n\t\treturn", "def recoverTweets(authors=[], words=[], removeRetweets=False, sortBy='newest',**kwargs):\n authors = mapToValid(authors)\n words = mapToValid(words)\n\n def getTopNTweets(retrievedTweets, numberOfTweets):\n \"\"\"Sort the retrievedTweets by sortBy specified and returns the top-N Tweets\"\"\"\n if sortBy=='newest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]\n\n def getTweetsByUser(username, maxTweets=1000):\n \"\"\"Returns a list of (json) objects representing the tweets for a specified Twitter username.\n If any words is queried, it will filter out every tweet that doesn't contain any of those words.\"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)\n\n def searchTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for a specified query.\n It doesn't work if any authors is specified.\n Then, startingDate and endingDate cannot be older than one week ago because of Twitter restrictions for standardAPI\n :reference: https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets\n \"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n 
tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)\n\n\n def getTwitterscraperTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for the specified inputs.\n It's very useful to avoid restrictions such as number of requests or dates not older than 7 days ago for twitterAPI (and tweepy).\n It will call the recoverTweets.sh script to properly query the API by twitterscraper.\n :reference: https://github.com/taspinar/twitterscraper\n \"\"\"\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets\n\n\n if \"maxTweets\" in kwargs:\n maxTweets=kwargs['maxTweets']\n else:\n maxTweets=1000\n\n if len(authors)==0 and len(words)==0:\n return(\"qua\") ###call sample function with maxTweets and (if any) dates\n if 'startingDate' in kwargs or 'endingDate' in kwargs:\n return getTwitterscraperTweets()\n\n if len(authors)!=0:\n tweets, splits, i = [], splitIntegerIntoIntegers(maxTweets,len(authors)), 0\n for author in authors:\n tweets.extend(getTweetsByUser(username=author, maxTweets=splits[i]))\n i+=1\n return tweets\n return getTweets()", "def collect_tweets(search_id, search_term, number_of_tweets):\n\n tweets = []\n for tweet in api_collector.collect(search_term, number_of_tweets):\n tweets.append((tweet.id_str, tweet.created_at, tweet.full_text))\n if len(tweets) == 0:\n search = Search.objects.get(pk=search_id)\n search.empty = True\n search.save()\n notify_searchers.delay(search_id)\n else:\n classify_tweets.delay(search_id, tweets)", "def gatherData():\n\n # connect to database, set up the tweepy API object, and find the next date to search\n\n cnx = sqlite3.connect(DB_FILE)\n api = generateAPI(wait_on_rate_limit=True, wait_on_rate_limit_notify=True, **CREDENTIALS)\n\n nextdate = findNextDate(cnx, FIRSTDATE)\n year = nextdate[:4]\n\n # attempt to scrape box office data\n\n bodata = getTopMovies(BO_ENDPOINT, nextdate, CNT_MOVIES)\n\n if not bodata.empty:\n bodata.to_sql('boxoffice', ENGINE, if_exists='append', index=False)\n print(\"Box Office Data for [{0}] Written to Database\".format(nextdate))\n else:\n raise BOError(\"Error Scraping/Writing Box Office Data for [{0}]\".format(nextdate))\n\n # attempt to collect tweet data\n\n for movie in bodata.title:\n try:\n tweets = searchMovie(api, movie, nextdate, MAX_TWEETS)\n if not tweets.empty:\n tweets.to_sql('tweets', ENGINE, if_exists='append', index=False)\n print(\"Tweets for [{0}] Written to Database\".format(movie))\n else:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n except tweepy.error.TweepError:\n raise TweetError(\"Error Fetching/Writing 
Tweets for [{0}]\".format(movie))\n\n # attempt to collect movie metadata\n\n for movie in bodata.title:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), year)\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), str(int(year)-1))\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n print(\"Movie: [{0}] Could Not be Found via OMDB\".format(movie))\n\n # commit changes and close DB connection\n\n cnx.commit()\n cnx.close()\n\n print(\"\\nAll Data for {0} Successfully Added to the Database!\\n\".format(nextdate))\n return nextdate", "def getTweetsByUser(username, maxTweets=1000):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)", "def pushTweets(tweets,user,cacheKey=False):\n \n tweetDump = filterTweets(tweets) # Extract mentions, URLs, replies hashtags etc...\n\n pushRenderedTweets2Neo.delay(user,tweetDump) \n pushRenderedTweets2Cass.delay(user,tweetDump)\n pushRenderedTweets2Solr.delay(tweetDump['tweets']+tweetDump['retweets'])\n\n if cacheKey: # These are the last Tweets, tell the scaper we're done.\n cache.set(cacheKey,'done')\n print '*** '+user+': DONE WITH TWEETS ***' \n \n #return True", "def get_tweets(n=1):\n tweets = list(collection.find())[-n:]\n return tweets", "def get_trend_urls():\n\n trends = []\n twitter = 'http://search.twitter.com/'\n tmp = 'tmp' + str(random.randint(0,1000))\n os.system('wget %s --output-document=%s' % (twitter, tmp))\n with open(tmp) as f:\n for line in f:\n if 'a href' in line and 'search?q' in line:\n trends.append(twitter\n + line.split('a href=\\\"/')[1].split('\\\"')[0])\n os.system('rm %s' % tmp)\n return trends", "def get_tweets(api):\n return api.user_timeline()", "def get_users_tweets(users, min_date, max_date, result_limit, key, secret_key):\n \n auth = tweepy.OAuthHandler(key, secret_key)\n max_datetime = datetime.datetime.strptime(max_date, '%Y-%m-%d').date()\n min_datetime = datetime.datetime.strptime(min_date, '%Y-%m-%d').date()\n \n #initialize variables\n max_id = None\n min_id = None\n mydata = []\n\n for user in users:\n my_api = tweepy.API(auth)\n\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than 
min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break\n\n while min_id == None:\n start_id = item.id\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n max_id=start_id,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break \n #get another 25 starting with the max... \n # if min_id is None... then call again... using the bottom of mydata as max_id...\n\n df = pd.DataFrame(mydata).loc[:,'tweet_id':'favourite_count']\n return df", "def handle(self, *args, **options):\n past_two_days = timezone.now() - datetime.timedelta(days=2)\n top_bookmarks = Bookmark.objects.filter(click__pub_date__gte=past_two_days)\\\n .annotate(click_count=Count(\"click\")).order_by(\"click_count\")\n\n with open('top_bookmarks.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['Bookmark', ' Clicks in last two days'])\n for bookmark in top_bookmarks:\n writer.writerow([bookmark.title, bookmark.bookmark_clicks])", "def get_tweets(self):\n\t\ttweets = ''\n\t\tfor each in self.tweets_posted:\n\t\t\ttweets += each.timeline_format() + '\\n'\n\t\ttweets = tweets.strip('\\n')\n\t\treturn tweets", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "def requestFromSite(self, link_s):\n \n with open('dividend_history.csv', 'wb') as handle:\n request = requests.get(link_s, stream=True)\n \n for block in request.iter_content(1024):\n if not block:\n break\n \n handle.write(block)", "def getFeed(self, fromDate, toDate=None, includeDir=False):\n # fromDate and toDate will be string arguments\n fromDate = self.convertGMTToInteger(fromDate)\n if toDate is not None:\n toDate = 
self.convertGMTToInteger(toDate)\n rows = self.__controller.getRecordsFromDate(fromDate, toDate)\n if includeDir==False:\n # filter out directory entries\n rows = [[f, t, n] for f, t, n, isDir in rows if isDir==False]\n else:\n rows = [list(r) for r in rows]\n for row in rows:\n file = row[0]\n if file.startswith(\"/\"):\n file = \"file://\" + file\n else:\n file = \"file:///\" + file\n row[0] = file\n #row[1] = int(row[1]) # To convert from Int64(IronPython) to just long\n row[1] = self.formatDateTime(row[1])\n return rows" ]
[ "0.62766623", "0.62647164", "0.6243787", "0.62299496", "0.6025572", "0.5915222", "0.5898549", "0.5892832", "0.5863716", "0.5862079", "0.5858822", "0.5833427", "0.58319485", "0.582733", "0.5794519", "0.57683766", "0.57517594", "0.5748663", "0.5743377", "0.57433707", "0.57396716", "0.5679409", "0.5676976", "0.5664916", "0.56414086", "0.56296575", "0.5613331", "0.5602387", "0.55617076", "0.5558305", "0.5548353", "0.5536406", "0.55330026", "0.54481834", "0.54211605", "0.5390841", "0.5348307", "0.5338548", "0.5335516", "0.5332878", "0.5311156", "0.5310178", "0.53035676", "0.5301449", "0.5280179", "0.5270267", "0.5255522", "0.52551496", "0.5246078", "0.5237762", "0.5235405", "0.52343935", "0.5176081", "0.5174046", "0.51737297", "0.51726943", "0.51725996", "0.5172313", "0.5166368", "0.51523715", "0.5144414", "0.51387435", "0.5134687", "0.51338494", "0.51323116", "0.5128701", "0.5127147", "0.51190054", "0.5110379", "0.51008016", "0.5084053", "0.50796753", "0.5078297", "0.50713027", "0.5061354", "0.50557154", "0.5049878", "0.5036579", "0.50342125", "0.50222456", "0.5019252", "0.50162864", "0.5007858", "0.49987525", "0.49669775", "0.49594373", "0.49390736", "0.49338728", "0.49282435", "0.4926391", "0.4922768", "0.49170077", "0.49145782", "0.49107128", "0.49063382", "0.49023342", "0.49005786", "0.48982793", "0.48962006", "0.4885399" ]
0.5302306
43
Add all graphs for displaying sensor data.
def CreateGraphs(self, sensorid):
    for i in range(len(sensorid)):
        # Add a graph plot to list
        self.plotter[sensorid[i]] = plot.PlotCanvas(self.panel)
        self.plotter[sensorid[i]].SetEnableLegend(True)
        #self.plotter[sensorid[i]].SetEnableGrid(True)
        #self.plotter.append((sensorid[i], plot.PlotCanvas(self.panel)))
        # Add the above graph plot to the Box
        self.box.Add(self.plotter[sensorid[i]], 1, wx.EXPAND)
    # Add the Box to the panel
    self.panel.SetSizer(self.box)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def update_graph(self):\n parameters = []\n dtype = {'Timestamp': 'str'}\n for header in self.headers:\n if self.top_plot.current_param in header or self.bottom_plot.current_param in header:\n parameters.append(header)\n dtype[header] = 'float'\n data = pd.read_csv(self.reactor.file,\n dtype=dtype,\n parse_dates=['Timestamp'], usecols=['Timestamp'] + parameters, low_memory=False,\n na_filter=False)\n start_time = data['Timestamp'][0]\n data.insert(loc=2, column='EFT', value=(data['Timestamp'] - start_time) / np.timedelta64(1, 'h'))\n\n for label, content in data.iteritems():\n if label == 'Timestamp' or label == 'EFT':\n continue\n elif self.top_plot.current_param in label:\n self.top_plot.clear()\n self.top_plot.plot(data['EFT'], content)\n else:\n self.bottom_plot.clear()\n self.bottom_plot.plot(data['EFT'], content)", "def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()", "def _initialize_graphs(self):\n # Add in teh cleared widgets array\n self.widgets['curve'] = []\n self.widgets['legend'] = [get_legend_from_graphics_view(legend) for legend in self.widgets['legend']]\n\n # Create curves\n # Power\n self.widgets['curve'].append(self.widgets['graph'][0].plot(\n pen=pg.mkPen(color=self.gui.COLOR_LIST[0])\n ))\n add_to_legend(\n legend=self.widgets['legend'][0],\n curve=self.widgets['curve'][0],\n curve_name=\"Power\"\n )\n\n # Setpoint\n self.widgets['curve'].append(self.widgets['graph'][0].plot(\n pen=pg.mkPen(color=self.gui.COLOR_LIST[1])\n ))\n add_to_legend(\n legend=self.widgets['legend'][0],\n curve=self.widgets['curve'][1],\n curve_name=\"Setpoint\"\n )\n\n # Voltage\n self.widgets['curve'].append(self.widgets['graph'][1].plot(\n pen=pg.mkPen(color=self.gui.COLOR_LIST[0])\n ))\n add_to_legend(\n legend=self.widgets['legend'][1],\n curve=self.widgets['curve'][2],\n curve_name=\"Voltage\"\n )\n\n # Error\n self.widgets['curve'].append(self.widgets['graph'][1].plot(\n pen=pg.mkPen(color=self.gui.COLOR_LIST[1])\n ))\n add_to_legend(\n legend=self.widgets['legend'][1],\n curve=self.widgets['curve'][3],\n curve_name=\"Error\"\n )\n\n self._clear_data_plots(5000)", "def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()", "def result(self):\n\n chart_series = [] # will hold all the series 
created\n\n # determine the sensor to plot from the sensor selected by the user.\n the_sensor = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor'])\n\n # get the requested averaging interval in hours\n averaging_hours = float(self.request_params['averaging_time'])\n\n # determine the start time for selecting records\n st_ts, end_ts = self.get_ts_range()\n\n # get the database records\n df = self.reading_db.dataframeForOneID(the_sensor.sensor_id, st_ts, end_ts, pytz.timezone(self.timezone))\n\n if not df.empty:\n\n # info needed to create each series (selection list, series name, visible)\n if self.schedule:\n occupied_times = df.ts.apply(self.schedule.is_occupied)\n unoccupied_times = -occupied_times\n\n series_info = [(None, 'All Data', True),\n (occupied_times, 'Occupied Periods', False),\n (unoccupied_times, 'Unoccupied Periods', False)]\n else:\n # no schedule, so just return the 'All Data' series\n series_info = [(None, 'All Data', True)]\n\n for mask, series_name, visibility in series_info:\n if mask is None:\n select_df = df\n else:\n select_df = df[mask]\n\n if averaging_hours:\n select_df = bmsapp.data_util.resample_timeseries(select_df, averaging_hours)\n\n histogram_series = bmsapp.data_util.histogram_from_series(select_df.val)\n\n chart_series.append({'x': [x for x,y in histogram_series],\n 'y': [y for x,y in histogram_series],\n 'type': 'scatter',\n 'mode': 'lines', \n 'name': series_name, \n 'visible': 'true' if visibility else 'legendonly'\n })\n\n opt = self.get_chart_options('plotly')\n opt['data'] = chart_series\n opt['layout']['title'] = the_sensor.title + ' Histogram: ' + self.building.title\n opt['layout']['xaxis']['title'] = the_sensor.unit.label\n opt['layout']['xaxis']['type'] = 'linear'\n opt['layout']['yaxis']['title'] = '% of Readings'\n opt['layout']['yaxis']['rangemode'] = 'tozero'\n\n html = basechart.chart_config.chart_container_html(opt['layout']['title'])\n\n return {'html': html, 'objects': [('plotly', opt)]}", "def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()", "def plot_graph(self) -> None:", "def graphs_ajax(request, sensor_id):\n # Update sensor before viewing\n if not updatesensor(sensor_id):\n messages.info(request, 'Unable to update sensor')\n\n # Get sensors from database\n Session = app.get_persistent_store_database('sensor_db', as_sessionmaker=True)\n session = Session()\n sensor = session.query(Sensor).get(int(sensor_id))\n\n if sensor.temperature_graph:\n temperature_graph_plot = create_temperature_graph(sensor.temperature_graph.id, height='300px')\n else:\n temperature_graph_plot = None\n\n if sensor.ozone_graph:\n ozone_graph_plot = create_ozone_graph(sensor.ozone_graph.id, height='300px')\n else:\n 
ozone_graph_plot = None\n\n if sensor.no2_graph:\n no2_graph_plot = create_no2_graph(sensor.no2_graph.id, height='300px')\n else:\n no2_graph_plot = None\n\n if sensor.h2s_graph:\n h2s_graph_plot = create_h2s_graph(sensor.h2s_graph.id, height='300px')\n else:\n h2s_graph_plot = None\n\n if sensor.so2_graph:\n so2_graph_plot = create_so2_graph(sensor.so2_graph.id, height='300px')\n else:\n so2_graph_plot = None\n\n context = {\n 'temperature_graph_plot': temperature_graph_plot,\n 'ozone_graph_plot': ozone_graph_plot,\n 'no2_graph_plot': no2_graph_plot,\n 'h2s_graph_plot': h2s_graph_plot,\n 'so2_graph_plot': so2_graph_plot,\n }\n\n session.close()\n return render(request, 'open_air/graphs_ajax.html', context)", "def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()", "def custom_graphs(self) -> List[Component]:\n graphs = []\n # TODO: Figure this out\n for i, go_data in enumerate(self.config.overview_graphs):\n groupby = go_data.pop('groupby', None)\n agg = go_data.pop('agg', None)\n if groupby and agg:\n data = getattr(self.summary.groupby(groupby), agg)()\n else:\n data = self.summary\n graphs.append(\n dbc.Row(\n dbc.Col(\n dcc.Graph(\n id=f'graph_{i}',\n figure=self.graph(data, go_data.pop('graph_type'), **go_data)\n )\n )\n )\n )\n return graphs", "def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()", "def plot_data(self):", "def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)", "def __init__(self, data):\n self.root = Tk()\n \n titleFont = (\"Arial\", 16, \"bold\")\n \n \n # sensor table\n self.sensorTable = Frame(master=self.root)\n self.sensorNameColumn = LabelFrame(self.sensorTable, text=\"Name\", font=titleFont)\n self.sensorNameColumn.grid(row = 0, column = 0)\n self.sensorValueColumn = LabelFrame(self.sensorTable, text=\"Value\", font=titleFont)\n self.sensorValueColumn.grid(row = 0, column = 1)\n self.sensorButtonColumn = LabelFrame(self.sensorTable, text=\"Button\", font=titleFont)\n self.sensorButtonColumn.grid(row = 0, column = 2)\n self.sensorRows = {} # do we really need to keep track of this?\n self.sensorTable.pack()\n \n self.figures = {}\n self.axes = {}\n self.lines = {}\n \n self.data = data\n \n self.sensorDisplayButtonCallback = self.openGraph\n \n quit_button = Button(self.root, text=\"Quit\", command=self.quit)\n quit_button.pack(side=BOTTOM)\n \n plt.ion()", "def plot_singlefig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots graphs for each sensor on 1 figure\n plt.figure(1)\n for i in range(0,NO_SENSORS):\n # The figure is seperated into subplots using the parameter. 
231 means 2 rows, 3 columns, subplot 1\n plt.subplot(231 + i)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')\n plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])\n plt.xlabel('Time/s')\n plt.ylabel('Acceleration/g')\n plt.legend()\n plt.show()", "def plot_multifig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots a seperate graph for each sensor\n for i in range(0,NO_SENSORS):\n plt.figure(i + 1)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')\n plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])\n plt.xlabel('Time/s')\n plt.ylabel('Acceleration/g')\n plt.legend()\n plt.show()", "def graph_data(self, timeframe):\n logging.info(\"Graphing Data\")\n pprog = self.prog_logs\n cursor = pprog.find({})\n data = {\n \"emotional\": [],\n \"physical\": [],\n \"cognitive\": []\n }\n comp = self.get_timeframe(timeframe)\n for doc in cursor:\n date = list(doc.keys())[1]\n try:\n datecomp = datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M\")\n except:\n datecomp = datetime.datetime.today()\n if datecomp > datetime.datetime.combine(comp, datetime.time.min):\n for key in data.keys():\n rating = int(doc[date][\"data\"][key][\"rating\"])\n data[key].append(rating)\n plt.ylabel('Level')\n plt.xlabel('Number of Logs - Ordered By Date')\n for key in data.keys():\n plt.plot(data[key])\n plt.legend(['Emotional', 'Physical', 'Cognitive'], loc='upper left')\n plt.show()", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def charts(self, charts):\n\n self.container['charts'] = charts", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def graphs():\n return render_template(\"graphs.html\")", "def graph_many_really ( self, db_device_adapters, db_start, db_end, min_points ):\n db_file_name = AppGlobal.gui.get_db_file_name()\n if not( os.path.isfile( db_file_name )):\n print( f\"db file does not exist: {db_file_name}\" )\n return\n\n self.line_style.reset()\n\n # prep data\n for i_device_adapter in db_device_adapters:\n #time_data, inst_pw_data, total_power_data, = self._prep_data( i_device_adapter, db_start, db_end, min_points )\n i_device_adapter.retrived_data_cache = self._prep_data( i_device_adapter, db_start, db_end, min_points )\n\n# min_time_data = time_data[0]\n# max_time_data = time_data[-1]\n# device_name = db_device_adapter.name\n # because of Nones, firguring this out is a mess leave for later\n# data = i_device_adapter.retrived_data_cache[ 0 ][0]\n# if min_time_data = None:\n# min_time_data 
=\n\n # ------------------- now plot new ------\n # may be that this is way of multiple plots on one canvas -- but here works for one plot\n #how to set figure size ?/\n\n #plt.figure( plt.figure( figsize = ( self.parameters.graph_x_size , self.parameters.graph_y_size ) )) ) us this get two graphs\n\n fig, ax1 = plt.subplots( figsize=( self.parameters.graph_x_size , self.parameters.graph_y_size ) )\n color = 'tab:red'\n\n ax1.set_title( f\"Power and Energy for Device SmartPlugs\" );\n ax1.set_xlabel( f\"Time in {self.parameters.graph_time_units} from {self.parameters.graph_time_zero}\")\n\n ax1.set_ylabel( \"Power (Watts)\", color=color ) # done in next line seems not to work\n\n for i_device_adapter in db_device_adapters:\n self.line_style.get_next_style()\n time_data, inst_pw_data, total_power_data, = i_device_adapter.retrived_data_cache\n if ( ( time_data is None ) or len( time_data ) == 0 ):\n print( f\"no data for {i_device_adapter.name}\" )\n continue\n ax1.plot( time_data, inst_pw_data, linestyle = self.line_style.line_style,\n marker = self.line_style.marker_style,\n color = self.line_style.color_style,\n label = \"label1\" ) # label= \"Power (Watts)\" )\n\n ax1.tick_params( axis= 'y', labelcolor=color)\n\n #ax1.set_ylim( self.parameters.graph_inst_power_min, self.parameters.graph_inst_power_max )\n\n# inst_pw_data = [ ( x/2 ) for x in inst_pw_data ]\n# ax1.plot( time_data, inst_pw_data, linestyle='--', marker='.', color='green', label = \"label2\" ) # label= \"Power (Watts)\" )\n\n# ax1.legend(['Power']) # having trouble with this\n# ax1.legend( loc = 2 ) # what mean location\n\n ax1.legend( loc = 2 )\n\n # ------ second graph energy\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n\n ax2.set_ylabel( \"Energy (watt * hr)\", color=color) # we already handled the x-label with ax1\n ax2.set_ylim( self.parameters.graph_total_power_min, self.parameters.graph_total_power_max )\n\n for i_device_adapter in db_device_adapters:\n self.line_style.get_next_style()\n time_data, inst_pw_data, total_power_data, = i_device_adapter.retrived_data_cache\n if ( ( time_data is None ) or len( time_data ) == 0 ):\n print( f\"no data for {i_device_adapter.name}\" )\n continue\n ax2.plot( time_data, total_power_data, linestyle = self.line_style.line_style,\n marker = self.line_style.marker_style,\n color = self.line_style.color_style,\n label = \"Energy (Watts*hr)\") # label= \"Power (Watts)\" )\n\n ax2.tick_params( axis='y', labelcolor=color )\n\n ax2.legend(['ax2 Total Energy legend'])\n ax2.legend( loc = 1 )\n\n #fig.tight_layout() # otherwise the right y-label is slightly clipped\n #plt.figure( figsize = ( self.parameters.graph_x_size , self.parameters.graph_y_size ) ) gives second plot empty\n\n plt.show()\n msg = \"... 
Graph done.\"\n AppGlobal.gui.display_info_string( msg )", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def data_graph():\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n # station_data = station_data.replace(\" \", \"+\")\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n # Get optional parameters\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n # plot pic\n magic_trick= data.station_graph(result_station.stationName, time_from, time_to)\n # img_stream = io.BytesIO(img)\n # img = Image.open(img_stream)\n # imgByteArr = io.BytesIO()\n # img.save(imgByteArr,format='PNG')\n # imgByteArr = imgByteArr.getvalue()\n # return send_file(io.BytesIO(imgByteArr),\n # mimetype = 'image/png',\n # as_attachment = True,\n # attachment_filename = 'tmp.png')\n image_data = open(\"tmp.png\", \"rb\").read()\n response = make_response(image_data)\n response.headers['Content-Type'] = 'image/png'\n return response", "def plot(self):\n pass", "def _update_plots(self):\n #Adding in new data to plots\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage)\n self.measured_powers = np.append(self.measured_powers[1:], np.mean(currSignal))\n self.out_voltages = np.append(self.out_voltages[1:], self._curr_output_voltage)\n self.errors = np.append(self.errors[1:], (currSignal[-1] - self.voltageSetpoint))\n self.sp_data = np.append(self.sp_data[1:], self.voltageSetpoint)\n #Update power plots\n self.widgets['curve'][0].setData(self.measured_powers*self.gain)\n #Update setpoint plots\n self.widgets['curve'][1].setData(self.sp_data*self.gain)\n\n # Now update voltage polots\n self.widgets['curve'][2].setData(self.out_voltages)\n self.widgets['curve'][3].setData(self.errors*self.gain)", "def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], 
linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def clean_graph(self):\n #self.time = 0#\n \n # values of microcontroller\n #if self.graf_t.buffer_info()[1] != 0:\n for a in range(self.graf_t.buffer_info()[1]):\n self.graf_t.pop()\n \n for a in range(self.graf_r.buffer_info()[1]):\n self.graf_r.pop()\n\n for a in range(self.graf_x0.buffer_info()[1]):\n self.graf_x0.pop()\n\n for a in range(self.graf_x1.buffer_info()[1]):\n self.graf_x1.pop()\n\n for a in 
range(self.graf_u.buffer_info()[1]):\n self.graf_u.pop()\n \n self.referenceLine.set_data(self.graf_t, self.graf_r)\n self.x0Line.set_data(self.graf_t, self.graf_x0)\n self.x1Line.set_data(self.graf_t, self.graf_x1)\n self.uLine.set_data(self.graf_t, self.graf_u)\n \n try:\n #Draw the lines\n if self.checkBox_R.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.referenceLine)\n if self.checkBox_x0.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x0Line)\n if self.checkBox_U.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.uLine)\n if self.checkBox_x1.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x1Line)\n except AssertionError:\n pass\n try:\n self.mplWidget.canvas.blit(self.mplWidget.canvas.ax.bbox)\n except AttributeError:\n pass\n \n # force an image redraw\n self.mplWidget.canvas.draw()", "def on_new_data(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update plot data\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.plotted_data[i].setData(self.data_indices, self.measurements_list[i])", "def on_worker_started(self):\n\n # Old backend:\n #\n # self.measurements_list = [QLineSeries() for x in range(8)]\n #\n # Add a legend to each chart, and connect data (series) to charts\n # for i, series in enumerate(self.measurements_list):\n # self.chart_list[i].chart().legend().setVisible(False)\n # self.chart_list[i].chart().addSeries(series)\n #\n # Add axes to each chart\n # self.xaxis_list = [QValueAxis() for x in range(8)]\n # self.yaxis_list = [QValueAxis() for x in range(8)]\n\n # for i, series in enumerate(self.measurements_list):\n # series.attachAxis(self.xaxis_list[i])\n # series.attachAxis(self.yaxis_list[i])\n\n #\n # Prepare EMG visualization\n #\n for i, series in enumerate(self.measurements_list):\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.chart_list[i].chart().addAxis(self.yaxis_list[i], Qt.AlignLeft)\n # self.xaxis_list[i].setRange(0, NUM_GUI_SAMPLES)\n # self.yaxis_list[i].setRange(-128, 127) # EMG values are signed 8-bit\n # self.chart_list[i].setXRange(0, NUM_GUI_SAMPLES)\n\n # Generate an initial, empty plot --> update data later\n self.plotted_data[i] = self.chart_list[i].plot(self.data_indices, self.measurements_list[i],\n pen=pg.functions.mkPen(\"08E\", width=2),\n symbol='o', symbolSize=SYMBOL_SIZE)\n\n # Update states\n self.enable_text.setText(\"Disable: \")\n self.connected = True\n self.enable_box.setEnabled(True)", "async def plot_device_data(self, axes, name) -> []:\n pass", "def plot_data(self):\n # plot every log image\n for log_img in self.log_img_map.itervalues():\n log_img.plot()", "def update_graph(self, data_list):\n #log.debug(\"render graph\")\n x_axis = range(len(data_list))\n\n mcd = self.main_curve_dialog\n mcd.curve.set_data(x_axis, data_list)\n\n if self.auto_scale:\n mcd.get_plot().do_autoscale()\n else:\n mcd.get_plot().replot()", "async def plot(self, new=False) -> None:\n self._logger.debug(\"running\")\n self.figure.clear()\n self.figure.set_tight_layout(True)\n num_plots = len(self._plots)\n axes = None\n for i in range(num_plots):\n plot = self._plots[i]\n name = plot[0]\n active = plot[2]\n if active:\n if i == 0:\n axes = self.figure.add_subplot(1, 1, 1)\n axes.tick_params(axis='x', labelrotation=30)\n axes.set_ylabel(name, color='#1f77b4')\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(axes, name))\n else:\n alt_axes = axes.twinx()\n alt_axes.set_ylabel(name, color='#ff7f0e')\n 
alt_axes.tick_params(axis='y', labelcolor='#ff7f0e')\n alt_axes.set_yticks(np.arange(0, 6, step=1))\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(alt_axes, name))\n\n if not new:\n self.add_vert_lines()\n await sleep(.001)\n self.figure.canvas.draw()\n self._logger.debug(\"done\")", "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def plot(accessToken, collection):\n \n plt.xlabel('Date/Time')\n plt.ylabel('Sensor Value')\n plt.title(\"Sensors Monitor\")\n \n # to save png files\n i = 0\n \n # set interactive mode on\n plt.ion()\n \n # set figure to full screen\n mng = plt.get_current_fig_manager()\n mng.full_screen_toggle()\n\n while True:\n jsondata = getJsonData(accessToken)\n if jsondata:\n #limit date string\n jsondata[DATE] = jsondata[DATE][8:13]\n appendJsonData(jsondata, collection)\n \n # clear figure\n plt.clf()\n \n # limit samples to be viewed\n if (len(collection[DATE]) > SAMPLE_SIZE_LIMIT):\n plt.xticks(range(SAMPLE_SIZE_LIMIT), collection[DATE][-SAMPLE_SIZE_LIMIT:])\n plt.plot(collection[SENSOR1][-SAMPLE_SIZE_LIMIT:], 'k', label='sensor 1')\n plt.plot(collection[SENSOR2][-SAMPLE_SIZE_LIMIT:], 'b', label='sensor 2')\n plt.plot(collection[SENSOR3][-SAMPLE_SIZE_LIMIT:], 'g', label='sensor 3')\n plt.plot(collection[SENSOR4][-SAMPLE_SIZE_LIMIT:], 'r', label='sensor 4')\n else:\n plt.xticks(range(len(collection[DATE])), collection[DATE])\n plt.plot(collection[SENSOR1], 'k', label='sensor 1')\n plt.plot(collection[SENSOR2], 'b', label='sensor 2')\n plt.plot(collection[SENSOR3], 'g', label='sensor 3')\n plt.plot(collection[SENSOR4], 'r', label='sensor 4')\n \n plt.legend(loc='upper left')\n plt.show()\n \n # Take a screenshot on Gnome desktop\n if os.environ.get(\"XDG_MENU_PREFIX\").startswith(\"gnome\"):\n os.system(\"gnome-screenshot -f screenshot{}.png\".format(i))\n i = i+1\n \n #plt.pause(1)\n plt.pause(60*60) # one hour\n else:\n print(str(datetime.datetime.now()) + \" Empty json data\")", "def graph():\n # Try to get params request\n params = extract_variables(['start_time', 'end_time', 'sensor_id'], request)\n # Fetch data from database\n results = query_climate_range(**params)\n\n # Turn it in to lists which can be graphed\n dates = []\n humids = []\n temps = []\n pressures = []\n for result in results:\n dates.append(datetime.datetime.fromtimestamp(result['time']))\n humids.append(result['humid'])\n temps.append(result['temp'])\n pressures.append(result['pressure'])\n\n # Graph it\n fig = Figure()\n # First y axis (temp and humid)\n axis = fig.add_subplot(1, 1, 1)\n # Plot humidity and temp on the same scale\n axis.plot_date(dates, humids, '-', color=COLORS['blue'])\n axis.plot_date(dates, temps, '-', color=COLORS['red'])\n 
axis.xaxis.set_major_formatter(DateFormatter('%d/%m/%y %H:%M'))\n axis.set_ylabel('Humidity in % & Temps in C')\n axis.set_xlabel('Time')\n # Second y axis (pressure)\n axis_pressure = axis.twinx()\n # Plot pressure\n axis_pressure.plot_date(dates, pressures, '-', color=COLORS['green'])\n axis_pressure.xaxis.set_major_formatter(DateFormatter('%d/%m/%y %H:%M'))\n axis_pressure.set_ylabel('Pressure in mbar')\n # Configure the figure\n fig.autofmt_xdate()\n fig.legend(['Humidity', 'Temperature', 'Pressure'], loc='lower right')\n fig.set_tight_layout(True)\n canvas = FigureCanvas(fig)\n # Save output\n png_output = BytesIO()\n canvas.print_png(png_output)\n\n # Create the response and send it\n response = make_response(png_output.getvalue())\n response.headers['Content-Type'] = 'image/png'\n return response", "def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')", "def create_chart(conf, entries):\r\n serie_index = 0\r\n for serie in conf['series']:\r\n data = []\r\n for entry in entries:\r\n if entry is not None:\r\n data.append(entry.datatolist(str(serie['db'])))\r\n conf['series'][serie_index]['data'] = data\r\n serie_index += 1\r\n \r\n \"\"\" Add PlotBands \"\"\" \r\n plotBands = []\r\n last_entry = len(entries)-1\r\n n = 1\r\n while n < last_entry and\\\r\n entries[n].phase is not None and\\\r\n entries[n] is not None and\\\r\n entries[n].next().phase is not None:\r\n begin = entries[n].dt\r\n phase = entries[n].phase\r\n n += 1\r\n while entries[n] is not None and\\\r\n entries[n].phase is not None and\\\r\n entries[n].phase == phase and\\\r\n n < last_entry:\r\n n += 1\r\n end = entries[n].dt\r\n plotBand = {\r\n 'color': PhaseColor[phase],\r\n 'from': datetime_to_timestamp(begin),\r\n 'to': datetime_to_timestamp(end)\r\n }\r\n plotBands.append(plotBand)\r\n conf['xAxis']['plotBands'] = plotBands\r\n \r\n \"\"\" Add Labels \"\"\" \r\n condition_flag_allumage = '((prec.phase is not None) and (prec.phase is not PHASE_ALLUMAGE))'\r\n condition_next_is_not_maintien = '((next.phase is not None) and (next.phase is not PHASE_MAINTIEN))'\r\n labels = json.loads(json.dumps(ChartLabel)) #make a copy of original object\r\n labels['name'] = 'Labels'\r\n for entry in entries:\r\n if entry is not None and entry.phase is not None:\r\n #Label Allumage \r\n if entry.event is not None:\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Allumage'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n # Label Combustion \r\n if entry.phase == PHASE_COMBUSTION and\\\r\n entry.prec() is not None and\\\r\n entry.prec().phase is not PHASE_COMBUSTION and\\\r\n entry.all_next_verify_condition(5, condition_next_is_not_maintien):\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Combustion'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n conf['series'].append(labels)\r\n\r\n \"\"\" Add Subtitle (plotbands legend) \"\"\"\r\n #conf[\"subtitle\"] = ChartLegend\r\n\r\n \"\"\" Add 
Title (date begin date end) \"\"\"\r\n if len(entries) > 3:\r\n begin = pretty_date(entries[0].dt)\r\n end = pretty_date(entries[len(entries)-1].dt)\r\n #conf[\"title\"][\"text\"] = 'Monitoring Chaudière du {0} au {1}'.format(begin, end)\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudière'\r\n conf[\"subtitle\"][\"text\"] = ' du {0} au {1}'.format(begin, end)\r\n\r\n else:\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudière'\r\n\r\n \"\"\" Return new conf \"\"\"\r\n return conf", "def show_custom_graph(self):\n pass", "def charts():\n\n global show_gaps\n global timespan\n\n form = ChartForm(\n request.form,\n graph_type=timespans.index(timespan),\n graph_gaps=show_gaps\n )\n\n if request.method == 'POST':\n if form.submit_button.data:\n timespan = timespans[int(form.graph_type.data)]\n show_gaps = form.graph_gaps.data\n else:\n flash('Unknown Event', 'error')\n\n chart = Chart(app)\n data_values1, data_values2, data_values3, data_labels = \\\n chart.get_data(timespan, show_gaps)\n\n if len(data_values3) > 0:\n cb = np.array(data_values3)\n peaks = peakutils.indexes(cb, thres=0.02 / max(cb), min_dist=5)\n\n starts_total = len(peaks)\n starts_per_h = int(round(float(starts_total) / \\\n float(hourtable[timespan]), 0))\n else:\n starts_total = 0\n starts_per_h = 0\n\n return render_template(\n 'charts.html',\n form=form,\n user=current_user,\n values1=data_values1,\n values2=data_values2,\n values3=data_values3,\n labels=data_labels,\n burner_total=starts_total,\n burner_ph=starts_per_h,\n )", "def result(self):\n\n # determine the X and Y sensors to plot from those sensors selected by the user.\n sensorX = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor_x'])\n sensorY = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor_y'])\n\n # determine the averaging time\n averaging_hours = float(self.request_params['averaging_time_xy'])\n\n # get the building's timezone\n tz = pytz.timezone(self.timezone)\n\n # determine the start and end time for selecting records\n st_ts, end_ts = self.get_ts_range()\n\n # get the dividing date, if there is one\n div_datestring = self.request_params['div_date']\n div_dt = tz.localize(parser.parse(div_datestring)) if len(div_datestring) else None\n\n\n # The list that will hold each series\n series = []\n\n # get the X and Y sensor records and perform the requested averaging\n dfX = self.reading_db.dataframeForOneID(sensorX.sensor_id, st_ts, end_ts, tz)\n dfY = self.reading_db.dataframeForOneID(sensorY.sensor_id, st_ts, end_ts, tz)\n\n if not dfX.empty and not dfY.empty: # both sensors have some data, so proceed to average the data points\n \n dfX = bmsapp.data_util.resample_timeseries(dfX,averaging_hours)\n dfX.rename(columns = {'val':'X'}, inplace = True)\n\n dfY = bmsapp.data_util.resample_timeseries(dfY,averaging_hours)\n dfY.rename(columns = {'val':'Y','ts':'tsY'}, inplace = True)\n\n # Join the X and Y values for the overlapping time intervals and make\n # a list of points.\n df_all = dfX.join(dfY, how='inner') # inner join does intersection of timestamps\n\n # make sure there are matched records before continuing\n if len(df_all):\n\n # add a point name column to be used in the tooltip.\n df_all['name'] = df_all.index.strftime('%a %m/%d/%y %H:%M')\n\n # add a column identifying whether point is in occupied or unoccupied period.\n resolution = self.occupied_resolution()\n if (self.schedule is None) or (resolution is None):\n # no schedule or data doesn't lend itself to classifying\n # consider all points to be 
occupied\n df_all['occupied'] = 1\n else:\n df_all['occupied'] = [self.schedule.is_occupied(ts, resolution=resolution) for ts in df_all.ts]\n\n # Set up the parameters for the different series of data\n # Required Info is (starting datetime, ending datetime, occupied status (0 or 1), series name, \n # series color, series symbol, series radius, series zindex).\n now_dt = datetime.now()\n if div_dt:\n # A dividing date was provided by the user.\n div_dt = div_dt.replace(tzinfo=None) # needs to be naive\n ser_params = ( (datetime(1970,1,1), div_dt, 1, 'Prior to %s' % div_datestring, '#2f7ed8', 'circle', 4.5),\n (datetime(1970,1,1), div_dt, 0, 'Prior to %s, Unoccupied' % div_datestring, '#2f7ed8', 'triangle-up', 3),\n (div_dt, now_dt, 1, '%s and beyond' % div_datestring, '#FF0000', 'circle', 4.5),\n (div_dt, now_dt, 0, '%s and beyond, Unoccupied' % div_datestring, '#FF0000', 'triangle-up', 3) )\n else:\n # Divide data by how recent it is.\n ser_params = ( (now_dt - timedelta(days=1), now_dt, 1, 'Last 24 Hours', '#FF0000', 'circle', 4.5),\n (now_dt - timedelta(days=1), now_dt, 0, 'Last 24 Hours, Unoccupied', '#FF0000', 'triangle-up', 3),\n (now_dt - timedelta(days=7), now_dt - timedelta(days=1), 1, 'Last 7 Days', '#00CC00', 'circle', 4.5),\n (now_dt - timedelta(days=7), now_dt - timedelta(days=1), 0, 'Last 7 Days, Unoccupied', '#00CC00', 'triangle-up', 3),\n (datetime(1970,1,1), now_dt - timedelta(days=7), 1, '7+ Days Old', '#2f7ed8', 'circle', 4.5),\n (datetime(1970,1,1), now_dt - timedelta(days=7), 0, '7+ Days Old, Unoccupied', '#2f7ed8', 'triangle-up', 3),\n )\n\n for t_start, t_end, occup, ser_name, ser_color, ser_symbol, radius in reversed(ser_params):\n mask = (df_all.index >= t_start) & (df_all.index < t_end) & (df_all.occupied==occup)\n if mask.max():\n series.append( {'x': np.char.mod('%.4g',df_all[mask].X.values).astype(float).tolist(),\n 'y': np.char.mod('%.4g',df_all[mask].Y.values).astype(float).tolist(),\n 'text': df_all[mask].name.values.tolist(),\n 'type': 'scatter',\n 'mode': 'markers', \n 'name': ser_name,\n 'marker': { 'color': ser_color,\n 'symbol': ser_symbol,\n 'size': radius * 2\n }\n } )\n\n # create the X and Y axis labels and the series\n x_label = '%s, %s' % (sensorX.title, sensorX.unit.label)\n y_label = '%s, %s' % (sensorY.title, sensorY.unit.label)\n\n opt = self.get_chart_options('plotly')\n opt['data'] = series\n opt['layout']['title'] = sensorY.title + \" vs. 
\" + sensorX.title\n opt['layout']['xaxis']['title'] = x_label\n opt['layout']['yaxis']['title'] = y_label\n opt['layout']['legend']['traceorder'] = 'reversed'\n\n html = basechart.chart_config.chart_container_html(opt['layout']['title'])\n\n return {'html': html, 'objects': [('plotly', opt)]}", "def __show_all(self):\n print(\"\\nEvents:\\n\")\n self.__show_all_events()\n print(\"\\nMetrics:\\n\")\n self.__show_all_metrics()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()", "def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()", "def update_graph(self, data):\n if (self.type == 'matplotlib'):\n pass\n else:\n pass", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def plot(self):\n # get data without totals\n data = self.woe_report[self.woe_report.index != 'total']\n # setup panel\n fig, axs = plt.subplots(1, 3, figsize=(12, 3))\n plt.subplots_adjust(wspace=0.3)\n # first chart\n data['P(Hi|A)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n data['P(Hi|Ā)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n axs[0].set_title('Probability distribution')\n axs[0].set_xlabel(data.index.name)\n axs[0].set_ylabel('probability')\n axs[0].legend(['P(Hi|A)', 'P(Hi|Ā)'])\n # second chart\n data['weight-of-evidence'].plot(ax=axs[1], linewidth=3, alpha=0.7)\n axs[1].set_title('WoE')\n axs[1].set_xlabel(data.index.name)\n axs[1].set_ylabel('WoE')\n # third chart\n data['information-value'].plot(ax=axs[2], linewidth=3, alpha=0.7)\n axs[2].set_title('Information value')\n axs[2].set_ylabel('IV')", "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 
273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # 
plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")", "def generate(self):\n\n # Load the required datapoints into memory.\n self._load_results()\n\n # Calculate datapoints statistics, like min. and max. values.\n self._calc_stats()\n\n # Generate the plots.\n self._generate_scatter_plots()\n self._generate_histograms()\n\n # Put together the final HTML report.\n self._generate_report()", "def generate_plots():\n\n hmp = homemonitor_plot()\n hmp.load_data()\n hmp.plot_day()\n hmp.plot_hist()", "def index():\n graphs = [\n message_genre_bar_chart(df),\n category_bar_chart(df),\n top_words_bar_chart(df)\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)", "def add_graph(self):\n \n self.cd_sampling = None\n \n if \"CD\" in self.algorithm:\n\n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples()\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0: \n \n self.add_mf_updates()\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0:\n \n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples() \n \n self.add_objective()\n\n self.add_grad_updates() \n \n if self.report_p_tilda:\n \n self.add_p_tilda()\n \n self.add_pseudo_cost_measure()\n\n self.optimize = self.optimization_step()", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def DrawTimeSeriesGraph(connection, table_name, y_axis_field, time,\n arrangement):\n def GetListFromDB(time, category, y_axis_field, connection, table_name):\n condition = (\"where time_of_entry >= \\\"{}\\\" and\"\n \" category=\\\"{}\\\" Group By RunID \"\n \"Order By time_of_entry\").format(\n time, category)\n single_list = db_utils.GetFieldFromTable(\n connection, table_name,\n field=\"AVG({}), STDDEV({}), time_of_entry, RunID\".format(\n y_axis_field, y_axis_field),\n cond=condition)\n if not single_list:\n print(\"Values are not found in table for category {}.\".format(\n category))\n return None\n\n return single_list\n\n direct_list = GetListFromDB(time, \"direct-{}\".format(arrangement),\n y_axis_field, connection, table_name)\n envoy_list = GetListFromDB(time, \"envoy-{}\".format(arrangement),\n y_axis_field, connection, table_name)\n\n if direct_list:\n direct_means, direct_std = zip(*direct_list)[:2]\n 
direct_times = [v[2].time().strftime(\"%H:%M\") if not i % 2 else \"\"\n for i, v in enumerate(direct_list)]\n else:\n raise ShowGraphError(\"Direct's data not found for time-series graph.\")\n\n if envoy_list:\n envoy_means, envoy_std = zip(*envoy_list)[:2]\n # time is not needed again but if needed, it can be taken from here\n # envoy_times = [v[2] for v in envoy_list]\n else:\n raise ShowGraphError(\"Envoy's data not found for time-series graph.\")\n\n ind = np.arange(len(direct_times))\n fig, ax = plt.subplots()\n rects1 = ax.errorbar(ind, direct_means, color=\"r\", yerr=direct_std)\n rects2 = ax.errorbar(ind, envoy_means, color=\"y\", yerr=envoy_std)\n\n ax.set_ylabel(y_axis_field)\n ax.set_xlabel(\"time\")\n ax.set_xticks(ind)\n ax.set_xticklabels(direct_times, rotation=\"vertical\", fontsize=8)\n ax.legend((rects1[0], rects2[0]), (\"Direct\", \"Envoy\"),\n loc=\"center left\", bbox_to_anchor=(1, 0.5))\n\n # Helper function to put standard deviation as labels inside the graph\n # data points\n def PutStdDevOnGraph(ax, rects, stddev):\n for i, num in enumerate(rects[0].get_xydata()):\n ax.text(num[0], 1.05*num[1],\n \"%d%%\" % int(100.0*stddev[i]/(1.0*num[1])),\n ha=\"center\", va=\"bottom\", fontsize=8)\n\n PutStdDevOnGraph(ax, rects1, direct_std)\n PutStdDevOnGraph(ax, rects2, envoy_std)\n\n fig.savefig(\"Time-{}-{}.png\".format(time, arrangement),\n bbox_inches=\"tight\")", "def temperature_graph(request, temperature_graph_id):\n\n # Update sensor before viewing\n if not updatesensor(sensor_id):\n messages.info(request, 'Unable to update sensor')\n\n temperature_graph_plot = create_temperature_graph(temperature_graph_id)\n\n context = {\n 'temperature_graph_plot': temperature_graph_plot,\n }\n return render(request, 'open_air/graphs.html', context)", "def update_visualization(self) -> None:\n pass", "def addDataPoints(self):\n pass", "def initGraphs(self):\n \n self.graph = ConjunctiveGraph()\n # Create a separate graph for annotations\n self.annotationGraph = ConjunctiveGraph()\n \n self.log.debug('Adding namespaces to graphs')\n # Bind namespaces to graphs\n for namespace in self.namespaces:\n self.graph.namespace_manager.bind(namespace, self.namespaces[namespace])\n\n # Same for annotation graph\n for namespace in self.annotationNamespaces:\n self.annotationGraph.namespace_manager.bind(namespace, self.annotationNamespaces[namespace])\n \n # Add schema information\n self.log.debug('Adding some schema information (dimension and measure properties) ')\n self.addDataCellProperty()\n\n # Add dimensions \n self.graph.add((self.namespaces['tablink']['dimension'], RDF.type, self.namespaces['qb']['DimensionProperty']))\n \n #self.graph.add((self.namespaces['tablink']['label'], RDF.type, RDF['Property']))", "def plot(self):\n\n fig, ax = plt.subplots()\n\n for run in self.runs:\n # Load datasets\n data_measure = run.get_dataset(\"stats-collect_link_congestion-raw-*.csv\")\n data_sp = run.get_dataset(\"stats-collect_link_congestion-sp-*.csv\")\n\n # Extract link congestion information\n data_measure = data_measure['msgs']\n data_sp = data_sp['msgs']\n\n # Compute ECDF and plot it\n ecdf_measure = sm.distributions.ECDF(data_measure)\n ecdf_sp = sm.distributions.ECDF(data_sp)\n\n variable_label = \"\"\n size = run.orig.settings.get('size', None)\n if size is not None:\n variable_label = \" (n=%d)\" % size\n\n ax.plot(ecdf_measure.x, ecdf_measure.y, drawstyle='steps', linewidth=2,\n label=\"U-Sphere%s\" % variable_label)\n ax.plot(ecdf_sp.x, ecdf_sp.y, drawstyle='steps', linewidth=2,\n 
label=u\"Klasični usmerjevalni protokol%s\" % variable_label)\n\n ax.set_xlabel('Obremenjenost povezave')\n ax.set_ylabel('Kumulativna verjetnost')\n ax.grid()\n ax.axis((28, None, 0.99, 1.0005))\n self.convert_axes_to_bw(ax)\n\n legend = ax.legend(loc='lower right')\n if self.settings.GRAPH_TRANSPARENCY:\n legend.get_frame().set_alpha(0.8)\n fig.savefig(self.get_figure_filename())", "def plot_data(self, data, backup_frame):\n title = self.filename.split('-')\n final_titles = title[2].split('.')\n self.final_title_sub = final_titles[0].lower()\n\n # Accounts for the three types of graph required\n # date for archival purposes\n # web for the web server and\n # log for the logarithmic graphs\n graph_list = ['date', 'web', 'log']\n for mode in graph_list:\n for column in data.columns:\n data['Rest of the World'] = \\\n backup_frame['Global_Cases'] - data[column]\n x_axis = data.index.values\n\n fig, axes = plt.subplots()\n axes.plot(x_axis, data[column], marker='o',\n label=column)\n axes.plot(x_axis, data['Rest of the World'], marker='s',\n label='Rest of the World')\n fig.autofmt_xdate()\n\n every_nth = 4\n for number, label in enumerate(axes.xaxis.get_ticklabels()):\n if number % every_nth != 0:\n label.set_visible(False)\n\n axes.set(xlabel='Date', ylabel='Cases',\n title=f'Covid-19 {self.final_title_sub} '\n f'cases for {column} - data from '\n f'John Hopkins CSSE')\n axes.grid()\n axes.legend()\n\n # Setting the y-axis\n if mode == 'log':\n axes.set_yscale('log')\n else:\n data_max = data.max(axis=1)\n max_number = data_max[-1]\n rounded_max = self.round_up(max_number, -3)\n rounded_max += 2000\n axes.set_ylim([0, rounded_max])\n\n # -----------------------------------------------------\n # Adds Labels to annotate the last data point for each\n # plot\n y_axis1 = data[column][-1]\n y_axis2 = data['Rest of the World'][-1]\n\n plt.annotate(y_axis1, (x_axis[-1], y_axis1 + 500),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=12)\n plt.annotate(y_axis2, (x_axis[-1], y_axis2 + 500),\n bbox=dict(facecolor='red', alpha=0.5),\n fontsize=12)\n # -----------------------------------------------------\n\n # Required in order to stop the column from summing\n # the total of each run through the loop\n # otherwise this leads to Rest of World values in the\n # millions\n data = data.drop('Rest of the World', axis=1)\n\n if mode == 'log':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'log_' \\\n f'{self.final_title_sub}_for_' \\\n f'{column}.png'\n elif mode == 'date':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{x_axis[-1]}-2020-' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n elif mode == 'web':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n else:\n print('error')\n\n fig.savefig(dir_name, transparent=False, dpi=300,\n bbox_inches=\"tight\")\n\n if os.path.exists(dir_name):\n logging.debug('File saved at: %s', {dir_name})\n print(f'Files saved at:\\n'\n f'{dir_name}\\n')\n else:\n logging.debug('Failed to save')\n logging.debug(os.getcwd())\n plt.close()\n return data", "def setDrawing(self):\n self.graph_drawing=[]", "def graphplot(self):\n if self.binned:\n self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()", "def get_graphs(self):\n\n try:\n from keras.utils 
import plot_model\n from keras.utils.vis_utils import model_to_dot\n\n # from IPython.display import SVG\n\n plot_model(self.model, to_file=\"model.png\")\n plot_model(\n self.latent_to_states_model, to_file=\"latent_to_states_model.png\"\n )\n plot_model(self.batch_model, to_file=\"batch_model.png\")\n if self.mol_to_latent_model is not None:\n plot_model(self.mol_to_latent_model, to_file=\"mol_to_latent_model.png\")\n\n print(\"Models exported to png files.\")\n\n except:\n print(\"Check pydot and graphviz installation.\")", "def _update_plots(self):\n for dock in self.plotDocks:\n for widget in dock.widgets:\n if not self.dataList.findItems(dock.name(), QtCore.Qt.MatchExactly):\n # no data for this plot -> reset it\n widget.getPlotItem().clear()\n # TODO remove tab from dock and del instance\n else:\n widget.getPlotItem().clear()\n x_data = self.currentDataset[\"results\"][\"time\"]\n y_data = self._get_data_by_name(dock.name())\n widget.getPlotItem().plot(x=x_data, y=y_data)", "def graph10():\r\n sheet = workbook.sheet_by_index(4)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(4, 5):\r\n list_data[0].append(round((data[i][1]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[1].append(round((data[i][2]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[2].append(round((data[i][3]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[3].append(round((data[i][4]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[4].append(round((data[i][5]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n\r\n gauge = pygal.SolidGauge(inner_radius=0.70, title=u'รอยละของประชากรอายุ 15 ปขึ้นไปที่ดื่มสุราหรือเครื่องดื่มมึนเมา จําแนกตามความถี่ในการดื่มสุราหรือเครื่องดื่มมึนเมา ปี 2557')\r\n percent_formatter = lambda x: '{:.10g}%'.format(x)\r\n gauge.value_formatter = percent_formatter\r\n for i in range(5):\r\n gauge.add(data_name[i], list_data[i])\r\n gauge.render_to_file('10Classified by frequency of drinking in 2557.svg')", "def create_dashboard(h, t, k, p):\n plt.style.use('seaborn')\n # Initialize the dashboard\n fig = plt.figure(figsize=(20, 8))\n ax1 = fig.add_subplot(2, 2, 1)\n ax2 = fig.add_subplot(2, 2, 2)\n ax3 = fig.add_subplot(2, 2, 3)\n ax4 = fig.add_subplot(2, 2, 4)\n\n # Create individual graphs\n dt_line, = ax1.plot(h, lw=3, c='k')\n total_line, = ax2.plot(t, lw=3, c='#d62728')\n k_line, = ax3.plot(k, lw=3, c='#1f77b4')\n p_line = ax4.plot(p, lw=3, c='#2ca02c')\n\n ax1.set_title(r'Variation in $\\Delta t$')\n ax1.set_ylabel(r'$\\Delta t$')\n ax2.set_title(r'Total Energy over Time')\n ax2.set_ylabel('Total Energy')\n ax3.set_title('Kinetic Energy over Time')\n ax3.set_ylabel('Kinetic Energy')\n ax3.set_xlabel('Time Steps')\n ax4.set_title('Potential Energy over Time')\n ax4.set_ylabel('Potential Energy')\n ax4.set_xlabel('Time Steps')\n\n plt.show()\n\n \"\"\"im = ax[0, 0].imshow(model.lattice, cmap='Greys', vmin=-1, vmax=1)\n energy_line, = ax[0, 1].plot([], [], lw=3)\n mag_line, = ax[1, 0].plot([], [], lw=3)\n heat_line, = ax[1, 1].plot([], [], lw=3)\n susceptibility_line, = ax[2, 0].plot([], [], lw=3)\n acceptance_line, = ax[2, 1].plot([], [], lw=3)\"\"\"", "def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n 
pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')", "def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))", "def plotGraph(self, dayArray, commentsArray, upvotesArray, retweetsArray, likesArray):\n self.canvas.get_tk_widget().place(relx=0.219, rely=0.519, relheight=0.389, relwidth=0.352)\n\n # Clears graph before plotting to prevent appending two graphs at once\n self.figure.clear()\n # self.figure.\n plt = self.figure.add_subplot(1, 1, 1)\n x = []\n max_log_size = 5000\n for i in dayArray:\n i = ''.join(i.split())\n i = i[:-5]\n x.append(i)\n\n # now there's 3 sets of points\n yCO = commentsArray\n yUV = upvotesArray\n yRT = retweetsArray\n yLK = likesArray\n\n if max(yCO)>=max_log_size or max(yUV)>=max_log_size or max(yRT)>=max_log_size or max(yLK)>=max_log_size:\n plt.set(yscale=\"log\")\n plt.plot(x, yCO, label='Comments', marker='o', color='red')\n plt.plot(x, yUV, label='Upvotes', marker='o', color='#fa93b0')\n plt.plot(x, yRT, label='Retweets', marker='o', color='#2374f7')\n plt.plot(x, yLK, label='Likes', marker='o', color='#accafa')\n\n plt.legend()\n self.figure.canvas.draw()", "def draw(self):\r\n scalex,scaley = self.getScale()\r\n try:\r\n self.clear()\r\n # Draw Graph Background\r\n self.drawLayout()\r\n if self.app.data == None:# If no data, break\r\n return\r\n # How much each pixel represents\r\n if scalex[1]-scalex[0] == 0:\r\n return\r\n step = (scalex[1]-scalex[0])/self.w# Draw lines at pixel level resolution\r\n self.fitYScale()\r\n sens_index = [0]# If one sensor displayed in this data player\r\n if len(self.sensor_ids) == 2:# If two sensors displayed in this data player\r\n sens_index = [1,0]# Draw order blue then red to make blue line on top\r\n for s in sens_index:\r\n i = scalex[0]\r\n x = 0\r\n trackcol = self.app.getSensorCol(self.sensors[self.sensor_ids[s]])\r\n while i < scalex[1]:\r\n i += step# i Is data\r\n x += 1# x is iteration/pixel-coordinate\r\n if i<0:# Skip data for t<0\r\n continue\r\n try:\r\n # Data retrieved from xml\r\n y = float(self.app.data[int(i)][self.sensor_ids[s]].text)\r\n y2 = float(self.app.data[int(i+step)][self.sensor_ids[s]].text)\r\n # Normalize into range 0 to 1 and multiply by height\r\n y = ((y-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n y2 = ((y2-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n except IndexError:# Missing data is skipped\r\n continue\r\n self.c.create_line(x,-y+self.h,x+1,-y2+self.h,fill=trackcol,width=1)\r\n self.drawScrubber()\r\n self.drawPeekScrubber()\r\n self.c.update()\r\n except tk.TclError:# If canvas destroyed, cancel draw operation\r\n return", "def set_figure_variables(self):\n #self.fig.canvas.manager.full_screen_toggle()\n self.gs 
= self.fig.add_gridspec(2, 3)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[0, 1])\n self.ax3 = self.fig.add_subplot(self.gs[0, 2])\n self.ax4 = self.fig.add_subplot(self.gs[1, 0])\n self.ax5 = self.fig.add_subplot(self.gs[1, 1])\n self.ax6 = self.fig.add_subplot(self.gs[1, 2])\n # histogram with indicator scoring\n self.ax1.set_xlabel(\"indicators\")\n self.ax1.set_ylabel(\"score (%)\")\n # graph with flood safety levels\n self.ax2.set_xlabel(\"dike section\")\n self.ax2.set_ylabel(\"chance of flooding occurrence\")\n # graph with water levels vs dike height\n self.ax3.set_xlabel(\"river length (meters)\")\n self.ax3.set_ylabel(\"height (meters)\")\n # graph with overall costs made\n self.ax6.set_ylabel(\"million Euros\")\n \n self.ax1.set_ylim([0, 100])\n self.ax2.set_ylim([0, 100])\n self.ax3.set_ylim([14, 18])\n self.ax6.set_ylim([0, 25000000])\n \n self.ax1.set_title(\"Overall score on indicators\")\n self.ax2.set_title(\"Flood safety levels\")\n self.ax3.set_title(\"Normative water levels vs dike crest height\")\n self.ax6.set_title(\"Budget spent\")\n \n self.x_pos = np.arange(len(self.indicators))\n self.ax1.set_xticks(self.x_pos)\n self.ax1.set_xticklabels(self.indicators)\n \n flood_safety_levels = [100, 200, 400, 600, 800, 1000, 1250]\n self.ax2.set_yticks(flood_safety_levels)\n self.ax2.set_yticklabels([\"1/\"+str(value) for value in flood_safety_levels])\n \n self.plot1 = None\n self.plot2 = None\n self.plot3 = None\n self.plot4 = None\n self.plot5 = None\n self.plot6 = None\n return", "def update_plot():\n pass", "def charts(self, charts):\n\n self._charts = charts", "def generate_statistics_plots(graph_name, graph_steps):\n df_final_situation = pd.DataFrame(columns=[\"type\", \"value\"])\n df_step = pd.DataFrame(columns=[\"type\", \"step\", \"value\"])\n df_exposed = pd.DataFrame(columns=[\"step\", \"type\", \"value\"])\n\n st.markdown(\"\")\n\n for i in range(graph_steps):\n # read graph and print stats\n graph_result_path = \"./data/output/\"\n G = nx.read_gexf(f\"{graph_result_path}G_{graph_name}_step{i}.gexf\")\n print_stats(G, i, graph_name)\n\n # LINE CHART (append informations into dataframe)\n df_step = df_step.append(\n {\"type\": \"not_exposed\", \"step\": i, \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"exposed\", \"step\": i, \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"infected\", \"step\": i, \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n line_chart = px.line(\n df_step,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Infection overall: {graph_name} step: {i}\",\n )\n\n # BAR CHART (append informations into dataframe)\n df_exposed = df_exposed.append(\n {\n \"step\": i,\n \"type\": \"opinion_leader\",\n \"value\": cn.count_exposed_opinion_leader(G),\n },\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"bot\", \"value\": cn.count_exposed_bot(G)},\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"user\", \"value\": cn.count_exposed_user(G)},\n ignore_index=True,\n )\n bar_chart = px.bar(\n df_exposed,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Type of agents exposed: {graph_name} step: {i}\",\n )\n\n # PIE CHART (append informations into dataframe)\n if i == 4:\n df_final_situation = df_final_situation.append(\n {\"type\": \"not_exposed\", \"value\": cn.count_not_exposed(G)},\n 
ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"exposed\", \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"infected\", \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n #### CREATE THE PLOTS\n ##Uncomment plot(..) to save the plots to disk in html format\n\n plot_folder = \"./data/plots/\"\n\n # Plotly Line Plot\n # plot(line_chart, filename=f\"{plot_folder}steps_{graph_name}.html\")\n st.plotly_chart(line_chart, use_container_width=True)\n\n # Plotly bar plot\n # plot(bar_chart, filename=f\"{plot_folder}exposed_type_{graph_name}.html\")\n st.plotly_chart(bar_chart, use_container_width=True)\n\n # Plotly final pie chart\n final_pie_chart = px.pie(\n df_final_situation, values=\"value\", names=\"type\", title=f\"Final situation plot of: {graph_name}\"\n )\n # plot(final_pie_chart, filename=f\"{plot_folder}final_situation.html\")\n st.plotly_chart(final_pie_chart, use_container_width=True)\n\n print(\"\\nStatistics calculated succesfully\")\n\n return True", "def populate_graph(self):", "def update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n quads[group].data_source.data['right'] = edges[1:]", "def make_charts(self):\n\n def _insert_pie_chart(wbook, wsheet, title, cell_pos, series):\n piechart = wbook.add_chart({\"type\": \"pie\"})\n piechart.set_title({\"name\": title})\n piechart.set_style(10)\n piechart.add_series(series)\n wsheet.insert_chart(cell_pos, piechart, {\"x_offset\": 25, \"y_offset\": 10})\n\n def _data_frame_days_to_excel(writer, sheet_name, data_frame_days):\n data_frame_days.to_excel(writer, sheet_name=sheet_name, startrow=1, header=False)\n self._set_workbook_layout(writer.book, (writer.sheets[sheet_name]), data_frame_days)\n\n with pd.ExcelWriter(\"Hive Metrics.xlsx\", engine=\"xlsxwriter\", options={\"strings_to_urls\": False}) as writer:\n workbook = writer.book\n worksheet = workbook.add_worksheet(\"Summary Charts\")\n worksheet.hide_gridlines(2)\n\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"New vs. Closed Cases\",\n cell_pos=\"D2\",\n series={\n \"name\": \"Open vs. 
Closed Cases Last 30\",\n \"categories\": \"=Tracking!$B$1:$C$1\",\n \"values\": \"=Tracking!$B$2:$C$2\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Ownership\",\n cell_pos=\"M19\",\n series={\n \"name\": \"Case Ownership Last 30\",\n \"categories\": \"=Tracking!$A$3:$A$9\",\n \"values\": \"=Tracking!$D$3:$D$9\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Resolution\",\n cell_pos=\"D19\",\n series={\n \"name\": \"Case Resolution Last 30\",\n \"categories\": \"=Tracking!$A$10:$A$12\",\n \"values\": \"=Tracking!$E$10:$E$12\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Severities\",\n cell_pos=\"M2\",\n series={\n \"name\": \"Severity Last 30\",\n \"categories\": \"=Tracking!$A$13:$A$15\",\n \"values\": \"=Tracking!$F$13:$F$15\",\n },\n )\n\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases newer than 30 Days\", data_frame_days=self._data_frame_30days,\n )\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases older than 60 days\", data_frame_days=self._data_frame_60days,\n )\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases newer than 90 Days\", data_frame_days=self._data_frame_90days,\n )\n\n self._data_frame_counts.to_excel(writer, sheet_name=\"Tracking\")\n writer.save()", "def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])", "def on_plot(self, event=None):\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=False)\n self.enable_remove_plot()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def run():\n #Initialise variables\n data = build_station_list()\n update_water_levels(data)\n ls = []\n ID = []\n \n #Number of days in past taken data from\n dt = 7\n #How many graphs per window\n limit = 4\n #How many stations\n number = 6\n \n #Create list of measuring_id's sorted by water level\n for station in data:\n if station.typical_range_consistent() == True and station.relative_water_level() != None:\n ls.append((station, station.relative_water_level()))\n\n ls = sorted_by_key(ls, 1)\n \n for station in ls:\n ID.append(station[0])\n \n s = count_inconsistent_sets(ID[:number], dt)\n \n ID = ID[:number+s]\n\n plot_water_levels(ID, dt, limit, s)", "def graph9():\r\n sheet = workbook.sheet_by_index(4)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(4, 5):\r\n list_data[0].append(round((data[i][1]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[1].append(round((data[i][2]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[2].append(round((data[i][3]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n 
list_data[3].append(round((data[i][4]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[4].append(round((data[i][5]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n\r\n gauge = pygal.SolidGauge(inner_radius=0.70, title=u'รอยละของประชากรอายุ 15 ปขึ้นไปที่ดื่มสุราหรือเครื่องดื่มมึนเมา จําแนกตามความถี่ในการดื่มสุราหรือเครื่องดื่มมึนเมา ปี 2556')\r\n percent_formatter = lambda x: '{:.10g}%'.format(x)\r\n gauge.value_formatter = percent_formatter\r\n for i in range(5):\r\n gauge.add(data_name[i], list_data[i])\r\n gauge.render_to_file('9Classified by frequency of drinking in 2556.svg')", "def add_nodes(self):\n for node_id in self.nodes:\n x = self.nodes[node_id][0]\n y = self.nodes[node_id][1]\n if node_id == 0:\n self.G.add_node(\"Source\", x=x, y=y, demand=0)\n self.G.add_node(\"Sink\", x=x, y=y, demand=0)\n else:\n self.G.add_node(node_id, x=x, y=y, demand=0)", "def plot_all(self) -> None:\n self.__plot_si_cf_plane()\n self.__plot_convex_hull()\n self.__plot_fixed_radius()\n self.__plot_delaunay()", "def plot_graph(self, dataset):\n data = self.data\n diagrams = []\n\n for time_stamp, data_tag in dataset:\n data_x, data_y = [], []\n for item in data:\n data_x.append(item[time_stamp])\n data_y.append(item[data_tag])\n diagrams.append(Scatter(x=data_x, y=data_y, mode='markers'))\n\n layout = plotly.graph_objs.Layout(yaxis=dict(autorange='reversed'))\n data = Data(diagrams)\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n plotly.plotly.plot(fig, filename='exo-line')", "def testing():\n\n\n valueDumpFile1 = open(\"datasets/bbc/politics.pickle\",\"rb\")\n #pickle.dump(plot1,valueDumpFile1)\n plot1=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/business.pickle\",\"rb\")\n #pickle.dump(plot2,valueDumpFile1)\n plot2=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/tech.pickle\",\"rb\")\n #pickle.dump(plot3,valueDumpFile1)\n plot3=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/entertainment.pickle\",\"rb\")\n #pickle.dump(plot4,valueDumpFile1)\n plot4=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/sport.pickle\", \"rb\")\n #pickle.dump(plot5, valueDumpFile1)\n plot5=pickle.load(valueDumpFile1)\n total = []\n for d in plot1:\n total.append(d)\n for d in plot2:\n total.append(d)\n\n for d in plot3:\n total.append(d)\n for d in plot4:\n total.append(d)\n\n for d in plot5:\n total.append(d)\n fig = plt.figure()\n ax = plt.subplot(111)\n\n #ax.scatter(plot1[:,0],plot1[:,1],s=5,linewidths=5)\n line1=ax.plot(plot1[:,0],plot1[:,1],\"bo\",label=\"Politics\")\n line2=ax.plot(plot2[:,0],plot2[:,1],\"ro\",label='Business')\n line3=ax.plot(plot3[:,0],plot3[:,1],\"go\",label=\"Tech\")\n line4=plt.plot(plot4[:,0],plot4[:,1],\"yo\",label=\"Entertainment\")\n line4 = plt.plot(plot5[:, 0], plot5[:, 1], \"ko\", label=\"sport\")\n ax.legend()\n fig.add_subplot(ax)\n #fig.add_subplot(aq)\n print(total)\n plt.show()\n\n\n \"\"\"\n temp=open(\"datasets/bbc/002.txt\",\"r\")\n plotValue,correspondingWord=im.plotDocumentWords(temp)\n plotValue= np.array(plotValue)\n \"\"\"\n colors = 100*[\"r\",\"g\",\"b\",\"c\",\"k\",\"l\",\"p\"]\n (classifications,centroids)= kMeans.execute_kmeans(total, k=5, showPlot=True, plotRef=plt)\n x=[]\n y=[]\n \"\"\"\n count = 0\n for centroid in centroids:\n plt.scatter(centroids[centroid][0], centroids[centroid][1], marker=\"o\", color=colors[count], s=100,\n linewidths=5)\n count = count + 1\n \n for classification in classifications:\n color = colors[classification]\n if 
len(classifications[classification]) > 0:\n for featureSet in classifications[classification]:\n plt.scatter(featureSet[0], featureSet[1], marker=\"x\", color=color, s=100, linewidths=5)\n \n \n for k in plotValue:\n x.append(k[0])\n y.append(k[1])\n #plt.scatter(x,y,linewidths=2,s=5)\n for i in range(len(correspondingWord)):\n xy=(x[i],y[i])\n plt.annotate(correspondingWord[i],xy)\n \"\"\"\n plt.show()", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def init_plot(self, num_axes):\r\n self.i = []\r\n self.val = []\r\n plt.ion()\r\n self.axes = plt.gca()\r\n self.lines =[]\r\n\r\n for i in range(num_axes):\r\n self.val.append([])\r\n self.lines.append([])\r\n self.lines[i], = self.axes.plot([], self.val[0], '-', c=[random.random() for _ in range(3)], linewidth=1.5, markersize=4)", "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes" ]
[ "0.6879036", "0.6579829", "0.65368325", "0.6452261", "0.6339766", "0.6261383", "0.62553847", "0.6229404", "0.62257975", "0.62213117", "0.6219536", "0.61914164", "0.6187895", "0.6159931", "0.6148075", "0.61305416", "0.6126133", "0.6087059", "0.60675865", "0.6028357", "0.6000444", "0.5973141", "0.59224564", "0.591784", "0.59059864", "0.5902614", "0.5901958", "0.58846337", "0.5872278", "0.5872278", "0.5872278", "0.5872278", "0.5872278", "0.5854392", "0.5840738", "0.58305854", "0.5819496", "0.58155495", "0.5814316", "0.5810924", "0.58053076", "0.5793129", "0.5774321", "0.57641286", "0.57577515", "0.573725", "0.5734907", "0.5724312", "0.5712359", "0.570086", "0.569778", "0.56904304", "0.56806755", "0.5673331", "0.5662566", "0.56560135", "0.5647187", "0.56439334", "0.56323445", "0.5631093", "0.56226", "0.5617223", "0.5608197", "0.5608019", "0.560592", "0.5603151", "0.5598758", "0.55949306", "0.5580216", "0.5561015", "0.55603456", "0.55583185", "0.55533946", "0.5552426", "0.55502206", "0.55475605", "0.55420583", "0.55408305", "0.55401164", "0.55387914", "0.55362743", "0.5523817", "0.5519794", "0.55174446", "0.5512261", "0.5509307", "0.5496366", "0.54959273", "0.549485", "0.54895633", "0.54872245", "0.54870814", "0.5486605", "0.5485769", "0.54854", "0.5477563", "0.5472454", "0.5456288", "0.54556835", "0.5454475" ]
0.6922832
0
Set window size of current display
def SetWindowSize(self, size): self.WINDOW_SIZE = size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setwinsize(self, rows, cols):", "def setWindowSize(width,height):\n dislin.winsiz(width,height)", "def set_screen(self, size):\r\n self.screen = size", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def resize_display(self, (w, h)):\n self.surface = pygame.display.set_mode((w, h), pygame.RESIZABLE)", "def set_igv_window_size(self, width=800, height=600):\n self.set_igv_window_width(width)\n self.set_igv_window_height(height)", "def set_resolution(self, width, height):\n self.driver.set_window_size(width, height, self.driver.window_handles[0])", "def set_screen_size(self, width, height, width_mm, height_mm):\n # FIXME: setting framebuffer size doesn't work for some reason\n # self.window.xrandr_set_screen_size(width, height, 310+550, 310)\n # so I am going to use this dirty hack for the time being\n import subprocess\n\n subprocess.call([\"xrandr\", \"--fb\", \"%dx%d\" % (width, height)])", "def setWindowSize(self, width, height, windowHandle='current'):\n cmdId = self.executeCommand(Command.SET_WINDOW_SIZE, {'width': int(width), 'height': int(height), \n \"windowHandle\": windowHandle})\n return cmdId", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def resize(self, win, width:int, height:int):\r\n\r\n\t\tglViewport(0, 0, width, height)", "def setWindowGeometry(x,y,width,height):\n dislin.window(x,y,width,height)", "def size_with_window(self, size_with_window):\n\n self.container['size_with_window'] = size_with_window", "def setWindowSize(self, value):\n return self._set(windowSize=value)", "def update_dimensions(self):\r\n # stores the old screen height for cleaning the screen\r\n old_w_height = self.w_height\r\n\r\n self.w_width, self.w_height = get_terminal_size()\r\n # see __init__\r\n self.w_width -= self.w_width % 2\r\n self.w_height -= self.w_height % 2\r\n\r\n # no need to clear screen if window size hasn't changed\r\n if old_w_height != self.w_height:\r\n self.clear_screen(old_w_height)", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def init_window(self, size, screen=None):\n # enforce minimum size\n (mw, mh), (w, h) = config.minsize, size\n if w < mw or h < mh:\n size = mw, mh\n\n # init view surface and pass it to screen\n self.view = pygame.display.set_mode(size, pygame.RESIZABLE)\n self.view.fill((0, 0, 0))\n if screen is not None:\n screen.resize_view()", "def updatesize(frame):\n winwid, winhgt = frame.winfo_width(), frame.winfo_height()\n scrwid, scrhgt = frame.winfo_screenwidth(), frame.winfo_screenheight()\n newx, newy = math.floor(scrwid * 0.99) - winwid, math.floor(scrhgt * 0.01)\n frame.master.geometry(\"{}x{}+{}+{}\".format(winwid, winhgt, newx, newy))", "def adjust_screen_size(self) -> None:\n if self.screen:\n max_row, max_cols = self.screen.getmaxyx()\n if max_row < MIN_SIZE + len(self.all_items):\n self.screen.resize(self.menu_height, max_cols)\n self.draw()", "def setKnownConsoleSize(self, width, height):\n # Local import to avoid win32 issues.\n import tty\n class FakeFcntl(object):\n def ioctl(self, fd, opt, mutate):\n if opt != tty.TIOCGWINSZ:\n self.fail(\"Only window-size queries supported.\")\n return struct.pack(\"4H\", height, width, 0, 0)\n self.patch(cftp, \"fcntl\", FakeFcntl())", "def window_size(self, window_size):\n\n self._window_size = window_size", "def defaultWindowSize(self):\n self.resize(self.defaultWindowWidth, self.defaultWindowHeight)", "def getwinsize(self):", 
"def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def on_size(self, window, width, height):\n viewport = glfw.get_framebuffer_size(window)\n GL.glViewport(0, 0, *viewport)\n self.camera.viewport = viewport", "def save_my_size(self):\n if not settings.get_bool('maximized', False):\n width, height = self.get_size()\n settings.set('width', width)\n settings.set('height', height)", "def resize(self, yx=None):\n if yx == None:\n yx = self.screen.getmaxyx()\n self.screen.clear()\n curses.resizeterm(yx[0], yx[1])\n self.setup_windows(resize = True)\n self.screen.refresh()", "def set_size(self, width, height):\n cairo.cairo_xcb_surface_set_size(self._pointer, width, height)\n self._check_status()", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def __ev_resize(self, event):\n\n new_size = event.dict['size']\n surface_size = self.__screen.get_size()\n old_center = self.__screen.get_rect().center\n if new_size != surface_size:\n self.__screen = pygame.display.set_mode(new_size,\n self.__screen.get_flags(),\n self.__screen.get_bitsize())\n self.init(offset=vect_diff(self.__screen.get_rect().center,\n old_center))\n self.__screen_width, self.__screen_height = self.__screen.get_size()", "def set_window_width(self, width):\n self.device.set_window_width(int(width))\n return \"OK\"", "def resize(self, width, height):\n\n\t\tself._window.resize(width, height)", "def _save_size(self):\n if self.width_key is not None:\n (width, height) = self.window.get_size()\n config.set(self.width_key, width)\n config.set(self.height_key, height)\n config.save()", "def SetWindow(self, w):\r\n\r\n self.window = w", "def resize_to(self, width, height):\n\n self.driver.resize_window_to(self.handle, width, height)", "def setSize(self, width, height):\n frameWidth = width\n frameHeight = height\n repaint()", "def SizeWindows(self):\n self._SizeWindows()", "def screensize(self, canvwidth=None, canvheight=None, bg=None):\n return self._resize(canvwidth, canvheight, bg)", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def _set_resolution( self ):\r\n offset = 0\r\n # if current and skinned resolutions differ and skinned resolution is not\r\n # 1080i or 720p (they have no 4:3), calculate widescreen offset\r\n if ( ( not ( self.currentResolution == self.resolution ) ) and self.resolution > 1 ):\r\n # check if current resolution is 16x9\r\n if ( self.currentResolution == 0 or self.currentResolution % 2 ): iCur16x9 = 1\r\n else: iCur16x9 = 0\r\n # check if skinned resolution is 16x9\r\n if ( self.resolution % 2 ): i16x9 = 1\r\n else: i16x9 = 0\r\n # calculate widescreen offset\r\n offset = iCur16x9 - i16x9\r\n self.win.setCoordinateResolution( self.resolution + offset )", "def resize(self):\r\n del self.win\r\n self.__create_win()", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen 
size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def reshape(w, h):\n global win_width, win_height\n win_width = w\n win_height = h\n glutPostRedisplay() # May need to call a redraw...", "def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")", "def set_ui_scale():\n # TODO test on other OS and resolutions\n moniter_h = QtWidgets.QDesktopWidget().screenGeometry(-1).height()\n if sys.platform == 'win32':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.0\n else:\n scale = 1.0\n elif sys.platform == 'linux':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.23\n else:\n scale = 1.4\n elif sys.platform == 'darwin':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.25\n else:\n scale = 1.55\n return scale", "def set_igv_window_width(self, width=800):\n self.igv_window_width = int(width)", "def set_size(self, w, h):\n\t\tpass", "def eval_screen_size():\n center_x = 32 // 2 * app_manager.get_map_width()\n center_y = 32 // 2 * app_manager.get_map_height()\n\n loc1_le = EPD(0x58DC60)\n loc1_te = EPD(0x58DC60 + 4)\n loc1_re = EPD(0x58DC60 + 8)\n loc1_be = EPD(0x58DC60 + 12)\n\n # screen position and location\n loc1_lv = f_dwread_epd(loc1_le)\n loc1_tv = f_dwread_epd(loc1_te)\n loc1_rv = f_dwread_epd(loc1_re)\n loc1_bv = f_dwread_epd(loc1_be)\n prev_sx = f_dwread_epd(EPD(0x0062848C))\n prev_sy = f_dwread_epd(EPD(0x006284A8))\n\n # centerview and update x, y\n SeqCompute([\n (loc1_le, SetTo, center_x),\n (loc1_te, SetTo, center_y),\n (loc1_re, SetTo, center_x),\n (loc1_be, SetTo, center_y)])\n f_dwwrite_epd(loc1_le, center_x)\n f_dwwrite_epd(loc1_te, center_y)\n f_dwwrite_epd(loc1_re, center_x)\n f_dwwrite_epd(loc1_be, center_y)\n DoActions(CenterView(1))\n cur_sx = f_dwread_epd(EPD(0x0062848C))\n cur_sy = f_dwread_epd(EPD(0x006284A8))\n\n # get size\n dx = center_x - cur_sx\n dy = center_y - cur_sy\n\n # restore screen\n screen_x = prev_sx + dx\n screen_y = prev_sy + dy\n SeqCompute([\n (loc1_le, SetTo, screen_x),\n (loc1_te, SetTo, screen_y),\n (loc1_re, SetTo, screen_x),\n (loc1_be, SetTo, screen_y)])\n DoActions(CenterView(1))\n\n # restore location\n SeqCompute([\n (loc1_le, SetTo, loc1_lv),\n (loc1_te, SetTo, loc1_tv),\n (loc1_re, SetTo, loc1_rv),\n (loc1_be, SetTo, loc1_bv)])\n\n EUDReturn([dx*2, dy*2])", "def set_size(self, width, height):\n # Combine the height and width to single string to be passed to root\n set_str = '{}x{}'.format(str(width), str(height))\n self.root.geometry(set_str)", "def set_geometry(self, width, height, fullscreen=False):\n self.root.tk.call(\"tk\", \"scaling\", self.scaling_factor)\n if fullscreen:\n initial_dimensions = (self.root.winfo_screenwidth(), self.root.winfo_screenheight())\n else:\n initial_dimensions = (round(width * self.scaling_factor),\n round(height * self.scaling_factor))\n\n if fullscreen and sys.platform == \"win32\":\n self.root.state('zoomed')\n elif fullscreen:\n self.root.attributes('-zoomed', True)\n else:\n self.root.geometry(\"{}x{}+80+80\".format(str(initial_dimensions[0]),\n str(initial_dimensions[1])))\n logger.debug(\"Geometry: %sx%s\", *initial_dimensions)", "def change_window_size(self, size):\n value = 0\n try:\n value = int(size)\n except ValueError:\n raise ValueError(\"Please type in a valid number.\")\n\n if value 
>= 0:\n self.__window_size = value\n else:\n raise ValueError(\"Please type in a valid positive number.\")", "def set_2d_size(self, w=None, h=None, x=0, y=0):\r\n from pi3d.Display import Display\r\n if w == None:\r\n w = Display.INSTANCE.width\r\n if h == None:\r\n h = Display.INSTANCE.height\r\n self.unif[42:44] = [x, y]\r\n self.unif[45:48] = [w, h, Display.INSTANCE.height]", "def on_resize(self, _: int = 0) -> None:\n assert CursesMenu.stdscr is not None\n screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()\n curses.resizeterm(screen_rows, screen_cols)\n self.draw()", "def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:", "def get_curr_screen_size():\n root = tk.Tk()\n root.update_idletasks()\n root.attributes('-fullscreen', True)\n root.state('iconic')\n size = (root.winfo_width(), root.winfo_height(),)\n root.destroy()\n return size", "def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - (geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def WriteWindowSize(self, width, height, win_name):\n if win_name == \"main\":\n height_str = \"window_height\"\n width_str = \"window_width\"\n else:\n height_str = \"pref_height\"\n width_str = \"pref_width\"\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n if config.has_section(\"Settings\"):\n config.set(\"Settings\", width_str, width)\n config.set(\"Settings\", height_str, height)\n config.write(open(self.app_conf, \"w\"))", "def set_viewport_size(self, width, height):\n\n if self.display:\n self.webview.resize(QSize(width, height))\n self.page.setViewportSize(QSize(width, height))", "def setSurfaceSize(xmin, xmax, ymin, ymax):\n dislin.sursze(xmin, xmax, ymin, ymax)", "def get_window_size(self):\n raise NotImplementedError", "def set_canvas_size(self, width, height):\n self.canvas.config(width = int(width), height = int(height))", "def create_screen(self, width, height):", "def resizeGL(self, width, height):\n self._sceneviewer.setViewportSize(width, height)\n # resizeGL end", "def resizeGL(self, width, height):\n self.width, self.height = width, height\n gl.glViewport(0, 0, width, height)\n gl.glMatrixMode(gl.GL_PROJECTION)\n gl.glLoadIdentity()\n gl.glOrtho(0, 1, 0, 1, 0, 1)", "def resize_child_window(self):\n s = struct.pack('HHHH', 0, 0, 0, 0)\n x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)\n fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)", "def setMaxSize(self,width,height):\n assert (type(width) == int), \"width %s is not an int\" % `width`\n assert (width > 0), \"width %s is negative\" % `width`\n assert (type(height) == int), \"height %s is not an int\" % `height`\n assert (height > 0), \"height %s is negative\" % `height`\n 
self._frame._root.maxsize(width,height)", "def __window_resizeBy(self, xDelta, yDelta):\n pass", "def _update_dimensions(self):\n _, self.width = self.window.getmaxyx()\n self.spacing = self.width // self.total_columns", "def _set_window(self, x0, y0, x1, y1):\n self._set_columns(x0, x1)\n self._set_rows(y0, y1)\n self._write(ST7789_RAMWR)", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def maximize(self):\n lib.SDL_MaximizeWindow(self._ptr)", "def DoSetViewport(self):\n size = self.size = self.GetClientSize()\n self.SetCurrent(self.context)\n glViewport(0, 0, size.width, size.height)", "def _SetSize(self, pixels = None):\n if not pixels:\n pixels = self.GetClientSize()\n self.canvas.SetSize(pixels)\n self.figure.set_size_inches(pixels[0]/self.figure.get_dpi(),\n pixels[1]/self.figure.get_dpi())", "def set_igv_window_height(self, height=600):\n self.igv_window_height = int(height)", "def _resize_image(self, event):\n self.window_width = event.width\n self.window_height = event.height", "def get_window_size(self):\n return self.__window_size", "def resize(w, h):\n global width, height, scale\n\n r = radius\n glViewport(0, 0, w, h)\n width = w\n height = h\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n if w > h:\n glOrtho(-w/h*r, w/h*r, -r, r, -r, r)\n scale = 2.0 * r / h \n else:\n glOrtho(-r, r, -h/w * r, h/w * r, -r, r)\n scale = 2.0 * r / w", "def SetUniformBitmapSize(self, size):\r\n\r\n self._requested_bmp_size = wx.Size(*size)\r\n\r\n # if window is already initialized, recalculate the tab height\r\n if self._dummy_wnd:\r\n self.UpdateTabCtrlHeight()", "def on_resize(self, *args):\n\n self.page_current.width = terminal.width # Give page new terminal width\n self.render_buffer = []\n\n self.render() # Re-render buffer", "def change_size(self, width, height):\n oldw = float(self.size().width())\n oldh = float(self.size().height())\n\n if self.indicator_type == 'session':\n neww = int(oldw + oldw * (width / 100.0))\n if neww > 0:\n self.setFixedSize(neww, oldh)\n elif self.indicator_type == 'unit':\n newh = int(oldh + oldh * (height / 100.0))\n if newh > 0:\n self.setFixedSize(oldw, newh)\n\n self.set_font_size()", "def set_mode(self, size, *args, **kwargs):\n if env.japplet:\n self.jframe = env.japplet\n else:\n self.jframe = Frame(self.caption, size)\n if self.icon:\n self.jframe.setIconImage(self.icon)\n env.jframe = self.jframe\n self.jpanel = self.jframe.jpanel\n self.surface = self.jpanel.surface\n self.surface._display = self\n self._surfaceRect = self.surface.get_rect()\n self._surface_rect = [self._surfaceRect]\n self._rect_list = None\n self.jframe.setLocationRelativeTo(None)\n self.jframe.setVisible(True)\n self._warmup()\n return self.surface", "def resize(self, x=0, y=0, w=0, h=0):\r\n if w <= 0:\r\n w = self.max_width\r\n if h <= 0:\r\n h = self.max_height\r\n self.width = w\r\n self.height = h\r\n\r\n self.left = x\r\n self.top = y\r\n self.right = x + w\r\n self.bottom = y + h\r\n self.opengl.resize(x, y, w, h)", "def set_viewport_size(driver, device):\n if device == \"laptop\":\n window_size = driver.execute_script(\"\"\"return [window.outerWidth - window.innerWidth + arguments[0], \n window.outerHeight - window.innerHeight + 
arguments[1]];\"\"\", 1200, 700)\n driver.set_window_size(*window_size)\n elif device == \"tablet\":\n window_size = driver.execute_script(\"\"\"return [window.outerWidth - window.innerWidth + arguments[0], \n window.outerHeight - window.innerHeight + arguments[1]];\"\"\", 768, 700)\n driver.set_window_size(*window_size)\n elif device == \"mobile\":\n window_size = driver.execute_script(\"\"\"return [window.outerWidth - window.innerWidth + arguments[0], \n window.outerHeight - window.innerHeight + arguments[1]];\"\"\", 500, 700)\n driver.set_window_size(*window_size)\n else:\n raise Exception(\"The device is not supported.\")", "def set_window_rect(self, value: bool):\n self._caps['setWindowRect'] = value", "def sets_window_size():\n test_str = make_random(596)\n server = start_server(reference=True)\n client = start_client(flags=[\"-w\", str(8)])\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n\n return segments[0].window == 8 * MAX_SEG_DATA_SIZE", "def set_canvas_size(self, width_npix, height_npix):\n\n self.variables.canvas_width = width_npix\n self.variables.canvas_height = height_npix\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.canvas_nx = width_npix\n self.variables.canvas_image_object.canvas_ny = height_npix\n self.config(width=width_npix, height=height_npix)", "def resize(self):\n\t\tself.win.erase()\n\t\tfor c in self.components:\n\t\t\tc.resize()\n\t\tself.draw(True)", "def set_window(self, handle):\n pass", "def set_max_size(self, width: int, height: int):\n self.tk_ref.maxsize(width=width, height=height)", "def OnSize(self, event):\r\n\r\n self.Layout()", "def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)", "def setSize(self, width, height):\n dw = (width - self.width()) / 2.0\n dh = (height - self.height()) / 2.0\n rect = self.sceneRect()\n rect.adjust(-dw, -dh, dw, dh)\n self.setSceneRect(rect)", "def on_resize(event):\n gloo.set_viewport(0, 0, *event.physical_size)", "def resize(self, *args):\n if self.parent is None: # when deleted\n return\n if self.parent.render_window is None: # BasePlotter\n return\n\n if self._prior_window_size != self.parent.window_size:\n self._prior_window_size = self.parent.window_size\n\n actor = self._actors['background']\n image_data = actor.GetInput()\n origin = image_data.GetOrigin()\n extent = image_data.GetExtent()\n spacing = image_data.GetSpacing()\n xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]\n yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]\n yd = (extent[3] - extent[2] + 1) * spacing[1]\n dist = self.camera.distance\n\n # make the longest dimensions match the plotting window\n img_dim = np.array(image_data.dimensions[:2])\n self.camera.focus = np.array([xc, yc, 0.0])\n self.camera.position = np.array([xc, yc, dist])\n\n ratio = img_dim / np.array(self.parent.window_size)\n scale_value = 1\n if ratio.max() > 1:\n # images are not scaled if larger than the window\n scale_value = ratio.max()\n\n if self._scale is not None:\n scale_value /= self._scale\n\n self.camera.parallel_scale = 0.5 * yd / self._scale", "def setupWindow(self, framerate, bgColor=\"black\", fgColor=\"green\"):\r\n self.framerate = framerate\r\n self.clock = pygame.time.Clock()\r\n \r\n pygame.init()\r\n self.screen = pygame.display.set_mode((self.screen_Width, self.screen_Height))\r\n\r\n self.bgColor = pygame.Color(bgColor)\r\n 
self.fgColor = pygame.Color(fgColor)", "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def _get_screen_size():\n import PySide.QtGui\n rect = PySide.QtGui.QDesktopWidget().screenGeometry(-1)\n return [rect.width(), rect.height()]", "def resize (self):\n return self._arrange_displays()" ]
[ "0.79207087", "0.78689796", "0.7652138", "0.7652046", "0.75910395", "0.75640345", "0.756109", "0.7339863", "0.7235763", "0.7189826", "0.71311337", "0.7078242", "0.7067841", "0.69597495", "0.6903613", "0.68661195", "0.68347853", "0.6817149", "0.68012214", "0.67621917", "0.6740218", "0.67143506", "0.66864294", "0.6634352", "0.6614251", "0.6608719", "0.6601682", "0.6601495", "0.6589191", "0.6564029", "0.65294677", "0.65170723", "0.6496931", "0.64939356", "0.64889586", "0.64389837", "0.64329433", "0.64131576", "0.6396674", "0.63886106", "0.6367436", "0.6358327", "0.6349191", "0.63455516", "0.63213956", "0.6317099", "0.63017017", "0.6294881", "0.6290925", "0.62892336", "0.62839967", "0.6246597", "0.6225952", "0.6217659", "0.62089676", "0.6170626", "0.6154787", "0.6149024", "0.6122027", "0.6061767", "0.60578775", "0.60540783", "0.60166687", "0.60051274", "0.6005007", "0.5995081", "0.598538", "0.5975834", "0.596666", "0.59593", "0.5957186", "0.59501034", "0.5946967", "0.59450865", "0.59447956", "0.5927293", "0.5922821", "0.59101874", "0.5900517", "0.5899502", "0.58936495", "0.5873039", "0.5869942", "0.5855526", "0.5854512", "0.5852209", "0.58489895", "0.5848983", "0.5843524", "0.58428437", "0.58388174", "0.5829903", "0.5826764", "0.5799301", "0.5789663", "0.5782888", "0.57796407", "0.5766806", "0.57620025", "0.5760686" ]
0.76013273
4
Set sample rate of sensor data
def SetSampleRate(self, rate): self.SAMPLE_RATE = rate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_sample_rate(self, rate):\n self.check_validity()\n\n rate = int(rate)\n\n self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_SAMPLE_RATE, (rate,), 'B', 0, '')", "def set_sample_rate(self, sample_rate):\n self.sample_rate = int(sample_rate)", "def input_data_sample_rate(self, value):\n self._input_data_sample_rate = value", "def set_channel_sampling_rate(self , srate:float):\n self.__sampling_rate = srate", "def set_samp_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_samp_rate(self, *args, **kwargs)", "def set_samp_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_samp_rate(self, *args, **kwargs)", "def set_samp_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_samp_rate(self, *args, **kwargs)", "def set_samp_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_samp_rate(self, *args, **kwargs)", "def set_rate(self, rate = 1e4, count = 1000, clk_source = 'ao/SampleClock', finite = True):\n if finite:\n ctr_mode = mx.int32(mx.DAQmx_Val_FiniteSamps)\n else:\n ctr_mode = mx.int32(mx.DAQmx_Val_ContSamps)\n ctr_rate = mx.float64(rate) #override python type\n ctr_count = mx.uInt64(int(count))\n self._clock_source = clk_source\n \n self.stop() #make sure task not running, \n # CfgSampClkTiming ( const char source[], float64 rate, int32 activeEdge, \n # int32 sampleMode, uInt64 sampsPerChan );\n # default clock source is subsystem acquisition clock\n try: \n self.task.CfgSampClkTiming(clk_source, ctr_rate, mx.DAQmx_Val_Rising, ctr_mode, ctr_count) \n #exact rate depends on hardware timer properties, may be slightly different from requested rate\n ctr_rate.value = 0\n self.task.GetSampClkRate(mx.byref(ctr_rate));\n self._rate = ctr_rate.value\n self._count = count\n #self._mode = 'buffered'\n except mx.DAQError as err:\n self.error(err)\n self._rate = 0", "def setDataRate(self, DataRate):\n \n self.DataRate = DataRate", "def _set_rate(self):\r\n interval = self.data.iloc[2, 0] - self.data.iloc[1, 0]\r\n self.rate = int(1 / interval)", "def update_rate(self):\n self._rate = (\n (self._received - self._samples[0]) / float(self.sample_size)\n )\n self._samples.append(self._received)", "def update_timing(self, sample_rate, samples_per_chan):\n self.sample_rate = sample_rate\n self.samples_per_channel = samples_per_chan\n self.task.timing.cfg_samp_clk_timing(sample_rate, samps_per_chan=samples_per_chan)", "def set_sampling_rate(self, sampling_rate):\n\n possible_sampling_rates = SAMPLING_RATE_DICT.keys()\n if sampling_rate not in possible_sampling_rates:\n self.hd.log.error(f\"AWG {self.index}: Invalid sampling rate '{sampling_rate}', possible choices are {list(possible_sampling_rates)}\")\n return\n\n sampling_rate_index = SAMPLING_RATE_DICT[sampling_rate]\n\n self.hd.seti(f'awgs/{self.index}/time', sampling_rate_index)\n self.hd.log.info(\n f\"AWG {self.index}: Changed sampling rate to {sampling_rate}.\"\n )", "def set_microphone_sample_rate_to_16khz():\n\n return _update_device_state_bit(_16khz_bit, 1)", "def sample_rate(self, sample_rate):\n if sample_rate is None:\n raise ValueError(\"Invalid value for `sample_rate`, must not be `None`\")\n\n self._sample_rate = sample_rate", "def test_sample_rate(self):\n test_sample_rate = 48000\n self.encoder._sample_rate = test_sample_rate\n self.assertEqual(self.encoder._sample_rate, test_sample_rate)", "def setSampleTime(self, sample_time):\r\n self.sample_time = sample_time", "def set_microphone_sample_rate_to_22khz():\n\n return 
_update_device_state_bit(_16khz_bit, 0)", "def set_samplerate(self, samplerate):\n\t\tnew_samplerate = _PM_UPDATE_RATE/min(max(1,samplerate),200)\n\t\tshift = min(math.ceil(math.log(new_samplerate,2)),16)\n\t\tself.output_decimation = 2**shift\n\t\tself.output_shift = shift\n\n\t\tprint \"Output decimation: %f, Shift: %f, Samplerate: %f\" % (self.output_decimation, shift, _PM_UPDATE_RATE/self.output_decimation)", "def rate(self, rate):\n\n self._rate = rate", "def rate(self, rate):\n\n self._rate = rate", "def _do_set_rate(self, rate):\n self.set_remote_status(1)\n if rate == 0:\n self.set_to_slow()\n elif rate == 1:\n self.set_to_fast()\n self.set_remote_status(3)\n print(self._do_get_rate())", "def setSampleTime(self, sample_time):\n\t\tself.sample_time = sample_time", "def sample_rate(self):\r\n return self.config.sample_rate", "def Config_Sample_Clock(self,samples_per_sec=1000.0,num_samps_per_ch=1000):\n self.samples_per_sec = samples_per_sec\n self.num_samples_per_ch = num_samps_per_ch", "def set_custom_sample_rate(self, sample_rate, audio_bits=24, channels=2):\n if (channels < 1 or channels > 2):\n raise I2SError (\"Channels can only be 1 or 2 at this time\")\n\n clock_rate = self.get_clock_rate()\n divisor = clock_rate / ((sample_rate * audio_bits * channels) + 1)\n self.set_clock_divisor(divisor)", "def samples_per_frame(self, value):\n self._samples_per_frame = value", "def setSampleTime(self, sample_time):\n self.sample_time = sample_time", "def set_current_rate(self, rate_to_set):\n pass", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([500, 610, 780, 1580.] ),\n np.array([590, 680, 890, 1750.] ) )", "def set_framerate(self, framerate):\n self._framerate = int(framerate)", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_clock_rate(self, *args, **kwargs)", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_clock_rate(self, *args, **kwargs)", "def set_update_rate(self, delay_ms):\n self._log_msg_start(\"Setting NMEA message update rate\")\n self._ubx.send(\"CFG-RATE\", measRate=delay_ms, navRate=1, timeRef=1)", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_clock_rate(self, *args, **kwargs)", "def setValues(\n self,\n frameRate: int = None,\n timeScale: int = None,\n vpDecorations: Boolean = ON,\n vpBackground: Boolean = OFF,\n compass: Boolean = OFF,\n ):\n pass", "def producer_set(self, sample):\r\n sample = ChannelSource._type_remap_and_check(self, self.type, sample)\r\n self.__rlock.acquire()\r\n try:\r\n if sample.timestamp == 0 and self.options & DPROP_OPT_AUTOTIMESTAMP:\r\n sample.timestamp = digitime.time()\r\n self.__sample = copy(sample)\r\n finally:\r\n self.__rlock.release()", "def _set_sample(self, sample, PB_X, t):\n for sensor in PB_X.keys():\n sample.set(sensor, np.array(PB_X[sensor]), t=t+1)", "def set_learning_rate(self, rate):\n self.SGD.set_learning_rate(rate)", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([450, 520, 630, 770., 1550, 2090.] ),\n np.array([ 520, 600, 690, 900., 1750., 2350.] ) )", "def get_sample_rate(self):\n return 1", "def SetSampleParameters(self, data):\n self._SetParameters(data, 'SetSampleParameters')", "def generate_sound(self, data, rate):\n\n # If the data are not in an integer format (if they are e.g. 
\"float\"), convert\n # them to integer and scale them to a reasonable amplitude\n if not np.issubdtype(data.dtype, np.integer):\n defaultAmp = 2**13\n # Watch out with integer artefacts!\n data = np.int16(data * (defaultAmp / np.max(data)))\n \n self.data = data\n self.rate = rate\n self.source = None\n self._setInfo()", "def sample_rate(self, sr=None):\n return self._sample_rate", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_clock_rate(self, *args, **kwargs)", "def set_frequency(self, new_freq):\n self.freq = new_freq\n self.ts_resample()", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def sample_rate(self):\n return self._sample_rate", "def set_limit_per_second(self, rate_limit_per_second):\n pass", "def setSamplingTime(self, time):\n return self._AWG.setSamplingTime_ns(time)", "def set_max_rate(self, rate=100):\n if not isinstance(rate, int):\n raise TypeError('rate must be an integer')\n if rate <= 0:\n raise ValueError('rate must be positive')\n if self.connected:\n self.producer.send(\"RATE:\"+str(rate))", "def samples(self, value):\n self.data = numpy.array([0]*int(value), numpy.float)\n self.n_samples = int(value)\n self.pos = 0", "def setScheduleRate(self, rate, unit='hz'):\n DPxSetDinSchedRate(rate, unit)", "def set_pwm_freq(self, freq_hz):\n prescaleval = 25000000.0 # 25MHz\n prescaleval /= 4096.0 # 12-bit\n prescaleval /= float(freq_hz)\n prescaleval -= 1.0\n prescale = int(math.floor(prescaleval + 0.5))\n oldmode = self.i2cBus.read_byte_data(self.address, MODE1)\n newmode = (oldmode & 0x7F) | 0x10 # sleep\n self.i2cBus.write_byte_data(self.address, MODE1, newmode) # go to sleep\n self.i2cBus.write_byte_data(self.address, PRESCALE, prescale)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode)\n time.sleep(0.005)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode | 0x80)", "def rate_per_unit(self, rate_per_unit):\n\n self._rate_per_unit = rate_per_unit", "def sample_rate(self):\n return self._sample_rate", "def sample_rate(self):\n return self._sample_rate", "def cfg_samp_clk_timing(\r\n self, rate, source=\"\", active_edge=Edge.RISING,\n sample_mode=AcquisitionType.FINITE, samps_per_chan=1000):\r\n cfunc = lib_importer.windll.DAQmxCfgSampClkTiming\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [\r\n lib_importer.task_handle, ctypes_byte_str,\n ctypes.c_double, ctypes.c_int, ctypes.c_int,\n ctypes.c_ulonglong]\r\n\r\n error_code = cfunc(\r\n self._handle, source, rate, active_edge.value, sample_mode.value,\n samps_per_chan)\r\n check_for_error(error_code)", "def set_scan_rate(self, scan_rate_selector):\n raise NotImplementedError", "def sample(self):\n self.dev.write(1, 'S')", "def set_pixel_rate(self, rate=None):\n rates=self.get_available_pixel_rates()\n if rate is None:\n rate=rates[-1]\n else:\n rate=sorted(rates,key=lambda r: abs(r-rate))[0]\n lib.is_PixelClock_dt(self.hcam,6,ctypes.c_uint,int(np.round(rate/1E6)))\n return self.get_pixel_rate()", "def resample(data, resample_rate=16000):\n for sample in data:\n assert \"sample_rate\" in sample\n assert \"wav\" in sample\n sample_rate = sample[\"sample_rate\"]\n waveform = sample[\"wav\"]\n if sample_rate != resample_rate:\n sample[\"sample_rate\"] = resample_rate\n sample[\"wav\"] = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=resample_rate)(waveform)\n yield sample", "def get_samp_rate(self):\n return _uhd_swig.usrp_sink_sptr_get_samp_rate(self)", "def 
set_custom_speed(self, bytes_per_second):\n self._custom_speed = bytes_per_second", "def setInitialRate(self, rate):\n return self._set(initialRate=rate)", "def rate(self, rate):\n # Get the sign of the rates before calculating\n x_sign = copysign(1, self.x_rate)\n y_sign = copysign(1, self.y_rate)\n self._rate = rate\n # Multiply by the original sign to retain direction\n self.x_rate = x_sign * fabs(rate * cos(self._angle))\n self.y_rate = y_sign * fabs(rate * sin(self._angle))", "def get_samp_rate(self):\n return _uhd_swig.usrp_source_sptr_get_samp_rate(self)", "def input_data_sample_rate(self):\n return self._input_data_sample_rate", "def set_freq_hz(self, freq=None):\n if freq is None:\n freq = 1000000 * self.def_freq\n self.instr.write('F1 ' + str(freq) + ' H')\n time.sleep(self.sleep_time)", "def set_sensor_configuration(self, data_rate, air_pressure_low_pass_filter):\n data_rate = int(data_rate)\n air_pressure_low_pass_filter = int(air_pressure_low_pass_filter)\n\n self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_SENSOR_CONFIGURATION, (data_rate, air_pressure_low_pass_filter), 'B B', '')", "def d_rate(self, d_rate):\n\n self._d_rate = d_rate", "def get_samplerate(self):\n\t\treturn _PM_UPDATE_RATE / self.output_decimation", "def get_samp_rate(self):\n return _uhd_swig.usrp_sink_get_samp_rate(self)", "def read(self):\n beats, interval_ms = self.read_raw()\n if 0 < interval_ms < 2500:\n rate = 60000.0 / interval_ms\n else:\n raise RuntimeError(\"Value out of range or device not connected.\")\n return rate", "def set_accel(self, accel):\n \"\"\" Accel is pixel per second second \"\"\"\n self.accel = accel", "def set_default_refresh_rate(self, rate: int) -> None:\n self._update_thread.update_global_refresh_rate(rate)", "def samples(self, samples):\n\n self._samples = samples", "def __init__(self, data_rate: Union[int, float], processing_window: int = None):\n super().__init__()\n self.data_rate = data_rate\n self.processing_window = processing_window if processing_window else data_rate\n self.raw_data_buffer = []\n self.processed_data_buffer = []\n self._is_one = None", "def a_rate(self, a_rate):\n\n self._a_rate = a_rate", "async def set_sampling_interval(self, interval):\n data = [interval & 0x7f, (interval >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.SAMPLING_INTERVAL, data)", "async def set_sampling_interval(self, interval):\n data = [interval & 0x7f, (interval >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.SAMPLING_INTERVAL, data)", "def sample_rate(self):\n return self.query_float('ENTER Current Sample Rate (Sa/s)')", "def set_speed(self, ratio):\n self._speed = ratio", "def setSamplingFrequency(self, sampleFrequency, noSamples, oversample=0,\n segmentIndex=0):\n # TODO: make me more like the functions above\n # at least in terms of what I return\n sampleInterval = 1.0 / sampleFrequency\n duration = noSamples * sampleInterval\n self.setSamplingInterval(sampleInterval, duration, oversample,\n segmentIndex)\n return (self.sampleRate, self.maxSamples)", "def setPSampling(self, period):\n if (period < 30 or period > 300):\n return\n self.pSampling = period", "def set_bandwidth(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_bandwidth(self, *args, **kwargs)", "def set_fan_speed(self, value):\n self.parent.fancoolers.set_speed(value)", "def _setInfo(self):\n\n if len(self.data.shape)==1:\n self.numChannels = 1\n self.totalSamples = len(self.data)\n else:\n self.numChannels = self.data.shape[1]\n self.totalSamples = self.data.shape[0]\n 
\n self.duration = float(self.totalSamples)/self.rate # [sec]\n self.dataType = str(self.data.dtype)", "def get_samp_rate(self):\n return _uhd_swig.usrp_source_get_samp_rate(self)", "def samp_rate(self):\n return self._samp_rate", "def set_speed(self,speed):\n self.speed = speed", "def set_rx_freq(self, rx_freq):\n\t\ttry:\n\t\t\tfloat(rx_freq)\n\t\t\tself._rx_freq = rx_freq\n\t\texcept ValueError:\n\t\t\tsys.stderr.write(\"\\nERROR : %s rx_freq must be a float so it can't be %s !\\n\" % (self._target_id, rx_freq))\n\t\t\tsys.exit(1)", "def change_Focus(self, rate):\n self.speed = int(rate)\n print(\"Setting Focus Rate to: \" + str(rate))", "def set_sampwidth(self, sampwidth):\n self._sampwidth = int(sampwidth)", "def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_speed(self,speed):\n self.speed_p = speed", "def sampling_rate(self):\n return self.track.sampling_rate", "def sample_interval(self):\n\n if self.sample_rate != 0:\n return 1.0 / self.sample_rate\n return 0.0", "def update_volt_rate(self):\r\n self.cmd = None\r\n if self.connected:\r\n self.cmd = (\"SYST:COMM:SER:SEND ':SENS:VOLT:NPLC \"\r\n + (str(self.DeltaRate.value()) if self.current_tab\r\n else str(self.dIdVRate.value())) + \"'\")\r\n self.voltmeter_rate = (str(self.DeltaRate.value())\r\n if self.current_tab\r\n else str(self.dIdVRate.value()))\r\n self.I_source.write(self.cmd)" ]
[ "0.78305626", "0.7814654", "0.7746749", "0.75562114", "0.7241588", "0.72325975", "0.72177637", "0.7163793", "0.71605283", "0.69814014", "0.69683295", "0.68970877", "0.6854266", "0.6808533", "0.6751939", "0.6750906", "0.66985595", "0.6612969", "0.6603941", "0.658892", "0.6561272", "0.6561272", "0.6481994", "0.6475641", "0.64749885", "0.6442063", "0.642223", "0.637945", "0.63479286", "0.6326366", "0.62325317", "0.6226898", "0.61679", "0.6138832", "0.61264366", "0.6115653", "0.6115242", "0.61118394", "0.6085416", "0.60782784", "0.6068502", "0.6068317", "0.6063405", "0.6049163", "0.6047574", "0.60130835", "0.60128504", "0.6001366", "0.5999048", "0.5997657", "0.599522", "0.5981439", "0.59716916", "0.5957276", "0.5950393", "0.59463894", "0.5927653", "0.5927653", "0.59243554", "0.5922625", "0.5911959", "0.5906731", "0.5902535", "0.59020144", "0.5895144", "0.5893808", "0.58924484", "0.5892448", "0.5886753", "0.5886655", "0.58863914", "0.5885496", "0.58853626", "0.5867119", "0.5864024", "0.58257776", "0.58091164", "0.5805773", "0.578049", "0.5760283", "0.5756932", "0.5756932", "0.5754757", "0.57527626", "0.57525253", "0.5746871", "0.5743607", "0.5727202", "0.57135075", "0.57075447", "0.5703651", "0.56998086", "0.5693519", "0.5688623", "0.56883377", "0.5684889", "0.5681889", "0.5679528", "0.5663454", "0.5655268" ]
0.77738357
2
Set the frequency of clock used for timestamps
def SetTimeClockSource(self, source): self.TIME_CLOCK_SOURCE = source
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_freq_hz(self, freq=None):\n if freq is None:\n freq = 1000000 * self.def_freq\n self.instr.write('F1 ' + str(freq) + ' H')\n time.sleep(self.sleep_time)", "def frequency(self, freq):\n self.set_frequency(f'{freq}' if self._is_min_max(freq) else f'{freq}HZ')", "def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_frequency(self, new_freq):\n self.freq = new_freq\n self.ts_resample()", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_clock_rate(self, *args, **kwargs)", "def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_write_cycle_time(self, osc_freq=32000000):\n self.SPItrans([0xac, 0x5d, 0x00, int((0.000025 * osc_freq) / 64)])\n self._wrt_defined = True", "def set_frequency(self, f=1e9):\r\n self.f = f", "def set_frequency(self, f=1e9):\r\n self.write('FREQ '+str(f))", "def set_times(self, p, f):\n self._dot_print_time = p\n self._dot_feed_time = f", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_clock_rate(self, *args, **kwargs)", "def clock_speed(self, clock_speed):\n\n self._clock_speed = clock_speed", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_clock_rate(self, *args, **kwargs)", "def set_frequency(self, f=1e9):\r\n return self._api.set_frequency(f)", "def set_freq_mhz(self, freq=None):\n if freq is None:\n freq = self.def_freq\n return\n self.instr.write('F1 ' + str(freq) + ' MH')", "def setfrequency(self, value):\n self.instrument.write('FREQ {0}'.format(value))", "def set_frequency(self, newval):\n rest_val = str(int(round(newval * 65536.0, 1)))\n return self._setAttr(\"frequency\", rest_val)", "def set_refclock(self, frequency):\n\n self.refclock_freq = frequency\n self.clock_freq = self.freqmult*self.refclock_freq\n if (self.clock_freq < 99.999e6 or self.clock_freq > 500.001e6):\n warn('Clock frequency out of range. 
Use set_freqmult to set clock \\\n frequency between 100MHz and 500MHz')\n print ('Refclock =', \"{:.2e}\".format(frequency), 'Hz \\nFreqmult =', self.freqmult,\n '\\nClock Frequency =', \"{:.2e}\".format(self.clock_freq), 'Hz')", "def set_frequency(self, pin, frequency):\n raise NotImplementedError", "def set_frequency(miner: Miner, login, frequency):\n #default for S9 is 550\n #\"bitmain-freq\" : \"550\",\n commands = get_changeconfigcommands(getminerfilename(miner), 'bitmain-freq', frequency)\n sendcommands_and_restart(miner, login, commands)", "def frequency(self, frequency: int):\n self._freq = freq", "def UpdateFrequency(self, newfreq):\n\n if self.strategy:\n setattr(self.strategy, managers.UTICK, newfreq)", "def set_clock_config(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_clock_config(self, *args, **kwargs)", "def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg", "def __init__(self, val, frequency=1):\n global _clock_num\n self._frequency = frequency\n self._timescale = self.timescale\n self._period = 1/frequency\n self._set_hticks()\n self.clock_num = _clock_num\n _clock_num += 1\n super(Clock, self).__init__(bool(val))\n ClockList.append(self)", "def valkkafsmanager_set_time_cb(self, t):\n self.signals.set_time.emit(t)", "def set_clock_divide_ratio_frequency(ratio, frequency):\n send_command(0xD5)\n send_command(frequency << 4 | ratio)", "def frequency(self, frequency: int):\n\n self._frequency = frequency", "def set_clock():\n import package\n package.install(\"ntpdate\")\n sudo(\"ntpdate 0.fi.pool.ntp.org 1.fi.pool.ntp.org 2.fi.pool.ntp.org\")", "def set_clock_rate(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_clock_rate(self, *args, **kwargs)", "def set_frequency(self, frequency):\n\n if frequency == 1:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 2:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 3:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n if frequency == 4:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return", "def set_time(self, timestamp):\n\n\t\tdata = pack(\"!bL\", 2, timestamp)\n\t\tself._send_message(\"TIME\", data)", "def set_running_time(self, t):\n util.write_to_file(self.running_time_file, str(int(t)))", "def set_Freq(self,freq):\n super(self.__class__, self).setFreq(self, freq)", "def change_frequency(self, frequency):\n self.frequency = frequency\n self.change_backlog(self.backlog)", "def set_time(self, value: float):\n super().set_time(value)\n self.music.set_time(value)", "def frequency(self, frequency):\n\n self._frequency = frequency", "def set_clock_config(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_clock_config(self, *args, **kwargs)", "def setFreq(self,newfreq):\n\t\tself.freq = newfreq;", "def set_clock_config(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_clock_config(self, *args, **kwargs)", "def set_system_time(cls, dispatcher, timestamp): # pragma: no cover\n\n if not cls.is_system_openwrt():\n return\n\n dispatcher.update_all_timers(timestamp - time.time())\n with 
open(os.devnull, \"w\") as dev_null:\n cls.logger.info(\"Setting system time to %s\",\n datetime.utcfromtimestamp(timestamp).strftime('%Y %b %d %H:%M:%S'))\n try:\n call([\"date\", \"+%s\", \"-s\", \"@\" + str(timestamp)],\n stdout=dev_null)\n except OSError:\n cls.logger.exception(\"Failed to set system time\")", "def set_report_freq(self, freq):\n if freq < 0:\n freq = 0\n self.report_freq = freq", "def set_frequency(self):\n\t\t\"\"\"For Frequency Prescalar-0\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC0, PCA9530_2C_1_PSC0_USERDEFINED)\n\t\t\n\t\t\"\"\"For Frequency Prescalar-1\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC1, PCA9530_2C_1_PSC1_USERDEFINED)", "def set_ticks_per_second(self, ticks_per_second: Optional[int] = None) -> None:\n if ticks_per_second is not None:\n self.ticks_per_second = ticks_per_second\n self.clock = pygame.time.Clock()\n else:\n self.ticks_per_second = None\n self.clock = None", "def set_time(self, value: float):\n raise NotImplementedError()", "def set_time(self, sec):\n self.set_timed(round(sec * 10.0))", "def set_sg_freq():\n freq = request.params.get(\"freq\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenFreq(output, ctypes.c_float(freq))\n if retval != 0:\n LOG.error(\"Failed to set signal generator frequency. Error code: %s\", ERROR_CODES[retval])", "def _configure_frequencies(self) -> None:\n i = 3\n while i < len(self._lora_frequencies):\n self.set_ch_parameters(i, self._lora_frequencies[i], 0, 5, True)\n i += 1\n self.set_ch_parameters(i, 868800000, 7, 7, True)", "def change_frequency(self):\n if not self.ftext.text():\n return\n frequency = float(self.ftext.text())\n if frequency > 6.0:\n frequency = 6.0\n self.qbpm.change_frequency(frequency)\n self.ftext.setText(str(self.qbpm.frequency))", "def set_pwm_freq(self, freq_hz):\n prescaleval = 25000000.0 # 25MHz\n prescaleval /= 4096.0 # 12-bit\n prescaleval /= float(freq_hz)\n prescaleval -= 1.0\n prescale = int(math.floor(prescaleval + 0.5))\n oldmode = self.i2cBus.read_byte_data(self.address, MODE1)\n newmode = (oldmode & 0x7F) | 0x10 # sleep\n self.i2cBus.write_byte_data(self.address, MODE1, newmode) # go to sleep\n self.i2cBus.write_byte_data(self.address, PRESCALE, prescale)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode)\n time.sleep(0.005)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode | 0x80)", "def set_start_time(self, timestamp):\n self.start_day = int(timestamp[8:10])\n hour = int(timestamp[11:13])\n minute = int(timestamp[14:16])\n second = int(timestamp[17:19])\n usecond = float(int(timestamp[21:])) / 1000000\n self.start_time = float(hour * 3600 + minute * 60 + second) + usecond", "def set_time(self, time):\n self._time = time", "def set_clock_config(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_clock_config(self, *args, **kwargs)", "def change_stopwatch(timez):\r\n\r\n m = timez // 60\r\n s2 = timez % 60\r\n s1 = 0 if s2 < 10 else \"\"\r\n now = f\"{m}:{s1}{s2}\"\r\n stopwatch.configure(text=now)", "def tick(self):\n self.times.append(timeit.default_timer())", "def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg", "def configure_freq(self, channel):\n if 0 < channel <= 3:\n self.write(\":CONF:FREQ (@{0})\".format(channel))", "def Config_Sample_Clock(self,samples_per_sec=1000.0,num_samps_per_ch=1000):\n self.samples_per_sec = samples_per_sec\n self.num_samples_per_ch = num_samps_per_ch", "def 
settimepattern(self, pattern):\n self._apachetimepattern = pattern", "def start_clock(self):\n pass", "def plot_frequency(self):\n canvas = xboa.common.make_root_canvas(\"frequency vs time\")\n canvas.Draw()\n freq_list = [freq for freq in self.freq_list]\n hist, graph = xboa.common.make_root_graph(\"frequency vs time\",\n self.time_list, \"time [ns]\",\n freq_list, \"f [GHz]\")\n hist.Draw()\n graph.Draw(\"sameL\")\n fit = ROOT.TF1(\"fit\", \"pol4\", 0, 20*1e6)\n fit.FixParameter(0, freq_list[0])\n graph.Fit(fit)\n canvas.Update()", "def set_output_frequency(self, frequency):\n self.output_frequency = frequency", "def freq_minutes(self):\n return 5", "def set_ga_timestamp(self, time: int):\n for cl in self:\n cl.tga = time", "def setTimepoint(self, tp):\n\t\tpass", "def set_time(self, set_time):\n\n self._set_time = set_time", "def set_frequency(self):\n def f():\n freq = float(self.freq_edit.get())\n duty = float(self.duty_edit.get())\n if duty == 0:\n duty = 1\n if duty > 1:\n duty = duty / 100\n self.parent.update_frequency(freq, duty, self.model.upper())\n return f", "def set_freq(self, freq):\n\n return self._service.exposed_set_freq(freq)", "def set_rate(self, rate = 1e4, count = 1000, clk_source = 'ao/SampleClock', finite = True):\n if finite:\n ctr_mode = mx.int32(mx.DAQmx_Val_FiniteSamps)\n else:\n ctr_mode = mx.int32(mx.DAQmx_Val_ContSamps)\n ctr_rate = mx.float64(rate) #override python type\n ctr_count = mx.uInt64(int(count))\n self._clock_source = clk_source\n \n self.stop() #make sure task not running, \n # CfgSampClkTiming ( const char source[], float64 rate, int32 activeEdge, \n # int32 sampleMode, uInt64 sampsPerChan );\n # default clock source is subsystem acquisition clock\n try: \n self.task.CfgSampClkTiming(clk_source, ctr_rate, mx.DAQmx_Val_Rising, ctr_mode, ctr_count) \n #exact rate depends on hardware timer properties, may be slightly different from requested rate\n ctr_rate.value = 0\n self.task.GetSampClkRate(mx.byref(ctr_rate));\n self._rate = ctr_rate.value\n self._count = count\n #self._mode = 'buffered'\n except mx.DAQError as err:\n self.error(err)\n self._rate = 0", "def set_frequency(self):\r\n def move_synth(delta_f_synth):\r\n sign_delta_f_synth = int(delta_f_synth/abs(delta_f_synth))\r\n stepsize_Hz = int(10)\r\n num_steps = int(abs(delta_f_synth)/stepsize_Hz)\r\n remainder_Hz = round(abs(delta_f_synth)%stepsize_Hz,1)\r\n self.synth.set_incr(stepsize_Hz, 'Hz')\r\n for nn in range(num_steps): # slowly move the synth by delta_f_synth in stepsize steps\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n self.synth.set_incr(remainder_Hz, 'Hz')\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n \r\n def get_delta_f_synth():\r\n #get latest f_rep,f_0\r\n self.get_frequency() \r\n #calculate required f_rep to get desired PA_freq. 
switches n and frep in above eq.\r\n f_rep_goal = (self.setfrequency - self.sign_lock * self.f_lock - self.sign_0 * self.f_0) / self.n\r\n # print 'f_rep_goal = %.0f Hz'%f_rep_goal\r\n # lock uses 3rd harmonic so synth must be set to *3\r\n delta_f_synth = (f_rep_goal - self.f_rep)*3 \r\n delta_f_synth = round(delta_f_synth,1)\r\n # print 'delta_f_synth = %.1f Hz'%delta_f_synth\r\n return delta_f_synth\r\n \r\n iteration = 0\r\n delta_f_synth = get_delta_f_synth()\r\n while abs(delta_f_synth) > self.synth_tol:\r\n move_synth(delta_f_synth)\r\n delta_f_synth = get_delta_f_synth()\r\n iteration += 1\r\n if iteration > self.max_iteration:\r\n # print 'REACHED MAX ITERATION: delta_f_synth = %.1f'%delta_f_synth\r\n break", "def _set_timestamp(self):\n d = datetime.now()\n self._time_stamp = \"{:>2} {} {} {:>2}:{:>02}\".format(\n d.day, MONTH_ABBREV[d.month], d.year, d.hour, d.minute)", "def updateTimeStamp(self, ts):\n self.ga_timestamp = ts", "def setTime(self,time):\n self.time = time", "def SetTimestampLogging(new_timestamp=True):\n global _log_time\n _log_time = new_timestamp", "def tick(self):\n\n if self.seconds != 59:\n self.seconds += 1\n else:\n self.seconds = 0\n\n if self.minutes != 59:\n self.minutes += 1\n else:\n self.minutes = 0\n\n if self.hours != 23:\n self.hours += 1\n else:\n self.hours = 0", "def set_clock(self, value):\n \"\"\"while not self.sem.locked():\n sleep(0.1)\"\"\"\n self.clock = value", "def tick(self):\r\n new_time = time.strftime('%H:%M:%S')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.config(text=self.time)\r\n self.after(200, self.tick)", "def update_timing(self, sample_rate, samples_per_chan):\n self.sample_rate = sample_rate\n self.samples_per_channel = samples_per_chan\n self.task.timing.cfg_samp_clk_timing(sample_rate, samps_per_chan=samples_per_chan)", "def test_fixed_freq(self):\n plot_index = pd.date_range(start=\"2000-1-1\", freq=\"D\", periods=10000)\n tl = formatter.TimestampLocator(plot_index, 'MS')\n xticks = tl._process(0, 30*3)\n assert len(xticks) == 3\n\n tl = formatter.TimestampLocator(plot_index, 'MS')\n xticks = tl._process(0, 30*6)\n assert len(xticks) == 6\n\n tl = formatter.TimestampLocator(plot_index, 'W')\n xticks = tl._process(0, 10*7)\n assert len(xticks) == 10\n\n tl = formatter.TimestampLocator(plot_index, 'AS')\n xticks = tl._process(0, 10 * 365)\n assert len(xticks) == 10", "def freq(self, freq=None):\n if freq is not None:\n self.cmd(':AC:SETB:FREQ %0.2f\\n' % freq)\n self.freq_param = freq\n\n return freq", "def __set_time_data(self, tdata):\n assert tdata.shape[-1] == self._nt\n self._in_time = tdata\n self._in_freq = None", "def set_time_override(override_time=datetime.datetime.utcnow()):\r\n utcnow.override_time = override_time", "def timing(self, timing):\n\n self._timing = timing", "def time_of_day(self, value):\n self.time_of_day_value = value", "def __init__(self, clock=proctime):\n self._clock = clock", "def frequency_trigger(self, frequency_trigger):\n\n self._frequency_trigger = frequency_trigger", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def set_startTime(self, startTime):\n self.startTime = mktime(startTime)", "def write_timed(\n self, data: AnyWritableBuf, freq: int | Timer, /, *, mode: int = NORMAL\n ) -> None:", "def time_interval_prop(self, time_step, nsteps):\n world.time = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting time\")", "def set_start_time():\n __start = current_time_milli()", "def cpu_freq(self):\n 
self.monitoring_object['cpu_freq'] = \\\n psutil.cpu_freq(percpu=True)", "def __init__(self):\n super(FakeTime, self).__init__()\n # Note that time.time() and divmod return floating point values.\n timestamp, fraction_of_second = divmod(time.time(), 1)\n self._microseconds = int(fraction_of_second * 1000000)\n self._number_of_seconds = int(timestamp)\n self.precision = definitions.PRECISION_1_MICROSECOND", "def setIntegrationTime(self, timeInMs):\n self.sendCommand(cmdBytes = b'\\x02',\n payloadBytes = pack('<L',int(timeInMs*self.timeScale)))", "def dt_freq(self):\n return DateTimeDefault.register(pandas.Series.dt.freq)(self)", "def setIntegrationTime(self,t_int):\n \n acc_len = self._adcClock*1e6*t_int/(1024.0) \n if acc_len > 65536:\n raise(\"Integration time is too long:\",t_int)\n self._t_int = t_int\n \n #acc_len = 2048 # hardwire for now to known working condition\n period = acc_len*16384\n \n self.regwrite(\"cs/vacc/acc_len\",acc_len-1)\n self.regwrite(\"period1\",period-2)\n self._write_info({'IntegrationTime': t_int})", "def setCurTime(self):\n\t\tself.config.SET_CUT_TIME = True", "def timefreq(self, period, frequency):\n\t\ttf = self.timefactor(period)\n\t\treturn tf * self.unitsize(frequency.unit())", "def startClock(self, day, hour, rate):\n\n if self.clock != None:\n self.clock.stop()\n\n self.clock.setTime(hour, rate)\n self.clock.start()" ]
[ "0.7045347", "0.6800048", "0.66994375", "0.66994375", "0.6680092", "0.6667348", "0.6635364", "0.6589021", "0.6579017", "0.6578679", "0.657218", "0.6483262", "0.6451029", "0.6443337", "0.6431313", "0.6403015", "0.634181", "0.63294667", "0.6321863", "0.6309367", "0.62990135", "0.6291961", "0.6280398", "0.6260426", "0.6250416", "0.624154", "0.6238868", "0.62345695", "0.62336636", "0.62234575", "0.62164205", "0.6214212", "0.6163228", "0.6125764", "0.61231613", "0.6122445", "0.6120334", "0.6107059", "0.6096817", "0.609486", "0.6085022", "0.6071637", "0.6035065", "0.60144264", "0.60074675", "0.60036236", "0.5994565", "0.59837854", "0.5980078", "0.59787613", "0.5974475", "0.59468436", "0.592125", "0.58940387", "0.5893895", "0.58775574", "0.5859357", "0.58564425", "0.5843337", "0.5837648", "0.5835912", "0.5811815", "0.5809396", "0.5795412", "0.5789623", "0.57841927", "0.57830065", "0.5773937", "0.57727426", "0.5761326", "0.5751501", "0.5745348", "0.57420576", "0.5739448", "0.5730022", "0.57286096", "0.57259446", "0.57122445", "0.570768", "0.5696985", "0.56861496", "0.56775427", "0.5676349", "0.567195", "0.5671435", "0.5659156", "0.56575716", "0.56529474", "0.56524175", "0.5649809", "0.5644335", "0.5640249", "0.5640235", "0.5637146", "0.5633936", "0.5628562", "0.5627487", "0.56257844", "0.5614012", "0.5605039" ]
0.607602
41
Set the range of Yaxis for display
def SetYAxisRange(self, lower, upper): self.Y_AXIS_RANGE = (lower, upper)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __draw_yaxis(self):\n self.ax.set_ylim(self.ylims)\n # set y ticks\n yticks = [*range(0, self.ymax, 5)]\n fs = self.settings.rcParams[\"axes.labelsize\"] if self.settings.otherParams[\n \"ylabel.fontsize\"] is None else self.settings.otherParams[\"ylabel.fontsize\"]\n color = self.settings.rcParams[\"axes.labelcolor\"] if self.settings.otherParams[\n \"ylabel.color\"] is None else self.settings.otherParams[\"ylabel.color\"]\n self.ax.set_yticks(yticks)\n self.ax.set_ylabel(self.yaxis_label, fontsize=fs, color=color)\n self.ax.yaxis.set_label_coords(\n *self.settings.otherParams[\"ylabel.position\"])\n self.ax.invert_yaxis()", "def format_y_axis(self, y_tick, y_limits):\n self._fig.update_layout(\n yaxis=dict(\n range=y_limits,\n dtick=y_tick,\n ),\n )", "def reverse_y():\n plt.ylim(plt.ylim()[::-1])", "def setScaleY(self,starty,endy):\r\n if starty == endy:# Prevent /0 errors when scaling\r\n endy += 0.1\r\n self.scaleLock.acquire()\r\n self.scaley = [starty,endy]\r\n self.scaleLock.release()", "def adjust_ylimits(self, ylim1, ylim2):\n self.axplot.set_ylim(ylim1, ylim2)\n self.fig.canvas.draw()\n return", "def yaxis ( self ) :\n return self.__yaxis", "def yaxis ( self ) :\n return self.__yaxis", "def set_axis_y(self, new_axis_point):\r\n self.__y_axis = new_axis_point", "def set_y(self, y: float):\n self.y = y", "def _verticalLimit_changed(self):\n self.masterContainer.range2d.y_range.high = self.verticalLimit", "def setYUnits(self, units): \n self.__y_units__ = units", "def yaxis(self,label,units):\n if units != \"\": label = label + \" (\" + units + \")\"\n self.subplot.set_ylabel(label)\n pass", "def setY(self, value):\n self.components[1] = value", "def setY(self, value):\n self.components[1] = value", "def setY(self, y):\r\n\t\tself._y=y", "def set_y(self, y):\n self.scene.set_y_loc(y)\n self.redraw()", "def set_y(self, value: int) -> None:\n assert -self.__max_value <= value and value <= self.__max_value\n\n should_sync = self.__y != value\n self.__y = value\n if should_sync:\n self.__sync_y()", "def setRange(self, x_range, y_range):\n self._visualiser._plt.setRange(xRange=x_range, yRange=y_range)", "def setY(self, y):\n self.y = y\n pass", "def setY(self, *args):\n return _libsbml.BoundingBox_setY(self, *args)", "def set_y(self, y):\n self._y = y", "def set_y(self, new_y):\r\n self.y = new_y", "def add_yaxis(self, name, y_range=None, location=\"right\", label=None, color=None):\n\n if y_range is not None and not isinstance(y_range, Range1d):\n y_min, y_max = y_range\n y_range = Range1d(start=y_min, end=y_max)\n elif y_range is None:\n y_range = DataRange1d()\n\n self.figure.extra_y_ranges[name] = y_range\n\n axis = LinearAxis(y_range_name=name, axis_label=label)\n self.figure.add_layout(axis, location)\n\n if color is not None:\n self.color_axis(name, color)", "def setY(self, value):\n self.position[1] = value", "def getYAxis(self, x_range, style):\n\n function = self.function\n style = dict(style)\n style.update(self.style)\n errors = style.get(\"errors\", False)\n log_scale = style.get(\"y_axis_log_scale\", False)\n num_samples = style.get(\"number_of_samples\", 250)\n\n # Find the range of y values over the specified x range.\n lo, hi = hep.hist.function.getRange(function, x_range, num_samples)\n # Make an unbinned axis with this range.\n y_axis = hep.hist.Axis(function.axis.type, range=(lo, hi))\n\n # Set the axis label.\n if \"y_axis_label\" in style:\n y_axis.label = style[\"y_axis_label\"]\n if hasattr(function, \"units\"):\n y_axis.units = function.units\n \n return 
y_axis", "def ylim(self, bottom=None, top=None):\r\n for ax in self._subaxes:\r\n ax.set_ylim(bottom, top)\r\n self.figure.canvas.draw()", "def ylim(bottom=None, top=None):\n impl.ylim(**locals())", "def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None", "def y_formatter_cb(self, ax):\n # y_vals should be the y-location of the labels.\n labels = getattr( self, 'labels', [] )\n labels = list(labels); #labels.reverse()\n y_vals = numpy.arange(.5,len(labels)+.5,1)\n\n # Locations should be fixed.\n fl = FixedLocator( y_vals )\n # Make the formatter for the y-axis\n ff = FixedFormatter( labels )\n ax.yaxis.set_major_formatter( ff )\n ax.yaxis.set_major_locator( fl )", "def Y(self, value):\n self._Y = value", "def secondaryYaxis(low,high,first,step,length,name,direction,x,y,log=0):\n if log:\n dislin.yaxlg(low,high,first,step,length,name,direction,x,y)\n else:\n dislin.yaxis(low,high,first,step,length,name,direction,x,y)", "def SetY(self, y):\r\n\r\n self._y = y", "def yaxis(self):\n return self._yaxis", "def yaxis(self,label,units):\r\n if units != \"\": label = label + \" (\" + units + \")\"\r\n self.ybox.set_text(r\"$%s$\" % (label))\r\n pass", "def setY(self, *args):\n return _libsbml.Point_setY(self, *args)", "def get_axis_y(self):\r\n return self.__y_axis", "def get_ylim(self):\n if isinstance(self._frame, root.TH1F):\n return (self._frame.GetMinimum(), self._frame.GetMaximum())\n else:\n return (self._frame.GetYaxis().GetXmin(), self._frame.GetYaxis().GetXmax())", "def getYAxis(self, x_range, style):\n\n style = dict(style)\n style.update(self.style)\n\n histogram = self.histogram\n axis = histogram.axis\n overflows = style[\"y_axis_overflows\"]\n errors = style[\"errors\"]\n log_scale = style[\"y_axis_log_scale\"]\n bin_width = self._getNormalBinWidth(style)\n\n # Compute the range of y values corresponding to the x range.\n bin_numbers = hep.hist.AxesIterator(\n histogram.axes, range=(x_range, ), overflows=overflows)\n lo, hi = getBinRange(\n histogram, bin_numbers, errors, log_scale, bin_width)\n # Expand the range a bit.\n lo -= 0.01 * abs(lo)\n hi += 0.01 * abs(hi)\n\n # Make an unbinned axis with this range.\n axis = hep.hist.Axis(histogram.bin_type, range=(lo, hi))\n\n # Set the axis label.\n if \"y_axis_label\" in style:\n axis.label = style[\"y_axis_label\"]\n elif bin_width is not None:\n bin_units = getattr(histogram, \"units\", \"entries\")\n axis_units = getattr(histogram.axis, \"units\", \"\")\n lo, hi = histogram.axis.range\n axis.label = \"%s / %s %s\" \\\n % (bin_units, formatNumber(bin_width), axis_units)\n else:\n bin_units = getattr(histogram, \"units\", \"entries\")\n axis.label = \"%s / bin\" % bin_units\n \n return axis", "def setRange(self, x_range, y_range):\n pass", "def set_axis2_limits(self, start, end):\n if start > end:\n raise ValueError(\"Start point over end for this view.\")\n\n self.axis2_limits = start, end", "def auto_ylim(self, xlim=None, yscale='linear'):\r\n electrodes = [col for col in self.data.columns\r\n if col in ELECTRODES]\r\n if xlim is None:\r\n data = self.data.ix[:, electrodes]\r\n else:\r\n indices = ((self.data.index >= xlim[0]) &\r\n (self.data.index <= xlim[1]))\r\n data 
= self.data.ix[indices, electrodes]\r\n min_data = data.min().min()\r\n max_data = data.max().max()\r\n abs_max = max(abs(min_data), max_data)\r\n if yscale == 'linear' or yscale == 'symlog':\r\n if min_data >= 0:\r\n ylim = 0, max_data\r\n else:\r\n ylim = -abs_max, abs_max\r\n elif yscale == 'log':\r\n if min_data > 0:\r\n ylim = min_data, max_data\r\n else:\r\n pseudo_zero = abs_max * 10 ** -5\r\n ylim = pseudo_zero, abs_max\r\n else:\r\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\r\n return ylim", "def y(self, value):\n self.data_validator(\"y\", value)\n self.__y = value", "def set_yscale(self, value):\n if value in [\"linear\", \"lin\"]:\n self._pad.SetLogy(0)\n self._logy = False\n\n elif value in [\"log\", \"logy\"]:\n bottom, top = self.get_ylim()\n if top <= 0:\n warnings.warn(\n \"Current frame has no positive values, and therefore cannot \"\n \"be log-scaled. Try running ax.set_ylim() first.\"\n )\n elif bottom <= 0:\n # Arbitrarily set bottom to 0.1 (or 0.1*top if top < 0.1)\n # so that the frame can be displayed\n if top <= 0.1:\n self.set_ylim(bottom=0.1 * top)\n else:\n self.set_ylim(bottom=0.1)\n\n self._pad.cd()\n self._pad.SetLogy(1)\n self._pad.Modified()\n self._logy = True", "def yscale(self, value='linear'):\r\n for ax in self._subaxes:\r\n ax.set_yscale(value)\r\n self.figure.canvas.draw()", "def getYLimit(self):\n return self.axes.get_ylim()", "def yax(self):\n return self.__yax", "def y(self, value):\n if not isinstance(value, int):\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value", "def y(self, value):\n self.validate_input(y=value)\n self.__y = value", "def y(self, value):\n if isinstance(value, int) is False:\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value", "def set_ylim(self, bottom=None, top=None):\n if top is None and np.iterable(bottom):\n bottom, top = bottom\n\n if bottom is None or top is None:\n old_bottom, old_top = self.get_ylim()\n if bottom is None:\n bottom = old_bottom\n if top is None:\n top = old_top\n\n if bottom == top:\n warnings.warn(\n \"Attempting to set identical bottom == top == {} y-axis limits\".format(\n bottom\n ),\n stacklevel=2,\n )\n\n if bottom > top:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if top <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive top ylim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n top = self.get_ylim()[1]\n\n elif bottom <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive bottom ylim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n bottom = self.get_ylim()[0]\n\n if isinstance(self._frame, root.TH1F):\n self._frame.SetMinimum(bottom)\n self._frame.SetMaximum(top)\n else:\n self._frame.GetYaxis().SetRangeUser(bottom, top)\n\n self._pad.Modified() # Draw the updated axes\n\n return (bottom, top)", "def hide_y_ticks():\n ax = plt.gca()\n ax.axes.get_yaxis().set_ticks([])", "def setYLabel(self, label):\n self.__y_label__ = label", "def set_y(self, state_value):\n val = state_value / self.space_subdivisions + self.unit\n epsilon = 1e-6\n if not self.unit <= val <= 1.0 - self.unit + epsilon:\n raise AttributeError(\"Value out of bounds\")\n self.pos_y = val", "def minor_yvals(self):\n raise NotImplementedError(\"Derived class must implement this.\")", "def autoHistogramRange(self):\n 
self.vb.enableAutoRange(self.vb.XAxis, True)\n self.vb.enableAutoRange(self.vb.YAxis, True)\n # self.range = None\n # self.updateRange()\n # self.vb.setMouseEnabled(False, False)\n\n # def updateRange(self):\n # self.vb.autoRange()\n # if self.range is not None:\n # self.vb.setYRange(*self.range)\n # vr = self.vb.viewRect()\n\n # self.region.setBounds([vr.top(), vr.bottom()])", "def setY(self, y):\n self.position.setY(y)", "def setYPos(self,newYPos):\n self.yPos=newYPos", "def initPlotY(self):\n\n self.plotFineY = [np.array([]) for i in range(len(self.plotFineX))]", "def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)", "def y(self, value):\n if not (0 < value < SCREEN_HEIGHT - self.height):\n self.dir_y = -self.dir_y\n self._y += abs(self._y - value) * self.dir_y", "def setY(ax1: Union[object, List], ax2: Union[object, List]):\n if type(ax1) is list:\n print(\"PlotHelpers: cannot use list as source to set Y axis\")\n return\n ax2 = _ax_tolist(ax2)\n # if type(ax2) is not list:\n # ax2 = [ax2]\n refy = ax1.get_ylim()\n for ax in ax2:\n ax.set_ylim(refy)", "def setYOffset(self, *args):\n return _libsbml.Point_setYOffset(self, *args)", "def format_y_axis(self, text=None, positionx=None, positiony=None, color=None, fontsize=None):\n if text is not None:\n self.xaxis_label = text\n\n x, y = self.settings.otherParams[\"ylabel.position\"]\n if positionx is not None:\n x = positionx\n if positiony is not None:\n y = positiony\n self.settings.otherParams[\"ylabel.position\"] = (x, y)\n\n if color is not None:\n self.settings.otherParams[\"ylabel.color\"] = color\n\n if fontsize is not None:\n self.settings.otherParams[\"ylabel.fontsize\"] = fontsize", "def set_yunits(self, units, include_brackets):\n if include_brackets:\n plt.ylabel(\n \"y (\" + self.yunits_from_units(units=units) + \")\", fontsize=self.ysize\n )\n else:\n plt.ylabel(self.yunits_from_units(units=units), fontsize=self.ysize)", "def yscale(value):\n impl.yscale(**locals())", "def set_delta_y(self, *args: str, delta_y: Sequence[float] | float = 0.0) -> None:\n self.set_delta('y', *args, delta=delta_y)", "def minor_yvals(self):\n return list(range(self.nMinorRows))", "def setHistogramRange(self, mn, mx, padding=0.1):\n self.vb.enableAutoRange(self.vb.YAxis, False)\n if self.orientation == 'horizontal':\n self.vb.setXRange(mn, mx, padding)\n elif self.orientation == 'vertical':\n self.vb.setYrange(mn, mx, padding)\n # mn -= d*padding\n # mx += d*padding\n # self.range = [mn,mx]\n # self.updateRange()\n # self.vb.setMouseEnabled(False, True)\n # self.region.setBounds([mn,mx])", "def CalibrateY(self):\r\n print(\"Calibrating axis Y, please do not move sensor...\")\r\n buff = []\r\n for t in range(20):\r\n while self.Get_AxisDataAvailable_Value()[1] == 0:\r\n time.sleep(0.0001)\r\n buff.append(self.Get_RawOutY_Value())\r\n self.meanY = numpy.mean(buff) \r\n self.maxY = max(buff)\r\n self.minY = min(buff)\r\n print(\"Done: (min={0};mean={1};max={2})\".format(self.minY, self.meanY, self.maxY))", "def y(self, y):\n if type(y) is not int:\n raise TypeError(\"y must be an integer\")\n if y < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = y", "def y(self, y):\n if type(y) is not int:\n raise TypeError(\"y must be an integer\")\n if y < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = y", "def format_axes():\n\n plt.axes(frameon=False)\n plt.axvline(0, PlotParameter.y_axis_bot_lim, PlotParameter.y_axis_top_lim, color='k')\n plt.tick_params(which='both', bottom='off', top='off', right='off', 
labelbottom='off')\n plt.xlim(0, PlotParameter.x_axis_right_lim)\n plt.ylim(PlotParameter.y_axis_bot_lim, PlotParameter.y_axis_top_lim)\n plt.ylabel(PlotParameter.y_axis_label)", "def make_YAxis(yaxis_title):\n yaxis = graph_objs.YAxis(title=yaxis_title,\n showticklabels=True,\n autorange=True,\n ticklen=4,\n showline=True,\n zeroline=False,\n showgrid=True,\n mirror=False)\n return yaxis", "def set_y(self,Y):\n self.posY = Y", "def _cast_y_axis_extrema(y_axis_extrema):\r\n try:\r\n y_axis_extrema = float(y_axis_extrema)\r\n except ValueError:\r\n if y_axis_extrema == 'auto':\r\n y_axis_extrema = None\r\n else:\r\n raise ValueError(\"The min and max y-axis values must be numbers \"\r\n \"or 'auto'. Couldn't handle the value %r.\" %\r\n y_axis_extrema)\r\n return y_axis_extrema", "def _dualy_overrides(self):\n arg = self._dualy_arg\n if arg is None:\n return\n scale = self.yaxis._scale\n olim = self.get_ylim()\n if (scale, *olim) == self._dualy_cache:\n return\n child = self._alty_child\n funcscale = axistools.Scale(\n 'function', arg, invert=True, parent_scale=scale,\n )\n child.yaxis._scale = funcscale\n child._update_transScale()\n funcscale.set_default_locators_and_formatters(\n child.yaxis, only_if_default=True)\n nlim = list(map(funcscale.functions[1], np.array(olim)))\n if np.sign(np.diff(olim)) != np.sign(np.diff(nlim)):\n nlim = nlim[::-1]\n child.set_ylim(nlim, emit=False)\n self._dualy_cache = (scale, *olim)", "def y(self, y):\n if type(y) is not int:\n raise TypeError(\"y must be an integer\")\n elif y < 0:\n raise ValueError(\"y must be >= 0\")\n else:\n self.__y = y", "def yvals(self):\n raise NotImplementedError(\"Derived class must implement this.\")", "def setRange(self, x_range, y_range):\n self._pipe.send(\"range,%f,%f,%f,%f\" % (x_range + y_range))", "def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()", "def draw_plot(yscale='linear'):\n plt.yscale(yscale)\n plt.xticks(list(range(0, 101, 5)))\n plt.xlabel('percentile [%]')\n plt.grid(True)\n plt.ylabel('operation time [ns]')\n plt.legend()\n plt.show()", "def y_size(self):\n pass", "def _set_y_size(self):\n self._level_gen.size = (self._level_gen.size[X],\n self._level_size_y_spinbox.value(),\n self._level_gen.size[Z])\n self._refresh_view()", "def set_ycenter(self, ycenter):\n self.delta_y = self.delta_x\n self.ymin = ycenter - (self.rows / 2) * self.delta_y\n self.ymax = self.ymin + (self.rows -1) * self.delta_y", "def yminmax ( self ) :\n return self.yvar.minmax()", "def ylim(self):\r\n lim = [ax.get_ylim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim", "def y(self, number):\n self.validate_int(\"y\", number)\n if number < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = number", "def pos_y(self, *args, **kwargs) -> Any:\n pass", "def ybar(self):\n return np.squeeze(self._ybar)", "def align_yaxis(axes):\n axes = np.array(axes)\n extrema = np.array([ax.get_ylim() for ax in axes])\n\n # reset for divide by zero issues\n for i in range(len(extrema)):\n if np.isclose(extrema[i, 0], 0.0):\n extrema[i, 0] = -1\n if np.isclose(extrema[i, 1], 0.0):\n extrema[i, 1] = 1\n\n # upper and lower limits\n lowers = extrema[:, 0].min()\n uppers = extrema[:, 1].max()\n\n extrema[:,0] = lowers\n extrema[:,1] = uppers\n # bump by 10% for a margin\n extrema[i, 
0] *= 1.05\n extrema[i, 1] *= 1.05\n \n # set axes limits\n [axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))]", "def __set_y__(self,y):\n\n # Input vaidation\n try:\n y = int(y)\n except:\n raise ValueError('H Bridge direction is not valid')\n \n if(y != 0 and y != 1 and y != -1):\n raise ValueError('H Bridge direction is not valid')\n \n self.direction['y'] = y\n self.HBridges['y'].SetDirection(y)", "def y0(self, level):\n resolution = self.resolution(level)\n return np.arange(0, (self.y_extent + resolution - 1) // resolution, 64)", "def _cast_y_axis_extrema(y_axis_extrema):\n try:\n y_axis_extrema = float(y_axis_extrema)\n except ValueError:\n if y_axis_extrema == 'auto':\n y_axis_extrema = None\n else:\n raise ValueError(\"The min and max y-axis values must be numbers \"\n \"or 'auto'. Couldn't handle the value %r.\" %\n y_axis_extrema)\n return y_axis_extrema", "def setColorBarRange(start=1,end=254):\n dislin.colran(start,end)", "def y_offsets(self, **kwargs):\n reserved = ['minmax', '0max']\n special = None\n y = self.y(**kwargs)\n offset = self.attr('offset', None)\n if offset is not None:\n o = offset[1] if isinstance(offset, list) else offset\n if isinstance(o, str):\n if o in reserved:\n special = o\n o = 0\n else:\n o = self._fractionToFloat(o)\n y = y + o\n muloffset = self.attr('muloffset', None)\n if muloffset is not None:\n o = muloffset[1] if isinstance(muloffset, list) else muloffset\n if isinstance(o, str):\n if o.replace(' ', '') in reserved:\n special = o\n o = 1\n else:\n o = self._fractionToFloat(o)\n y = y * o\n if special is not None:\n m, M = np.min(y), np.max(y)\n if special == 'minmax':\n y = (y - m) / (M - m)\n elif special == '0max':\n y = y / M\n return y", "def test_y_range():\n for _ in range(100):\n val1 = random.random() - 3.0*random.random()\n val2 = random.random() + 2.0*random.random()\n lower_bound = min(val1, val2)\n upper_bound = max(val1, val2)\n rnn = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"lstm\", 25]],\n hidden_activations=\"relu\", y_range=(lower_bound, upper_bound),\n initialiser=\"xavier\", input_dim=22)\n random_data = torch.randn((10, 11, 22))\n out = rnn.forward(random_data)\n out = out.reshape(1, -1).squeeze()\n assert torch.sum(out > lower_bound).item() == 25*10, \"lower {} vs. {} \".format(lower_bound, out)\n assert torch.sum(out < upper_bound).item() == 25*10, \"upper {} vs. {} \".format(upper_bound, out)", "def normalize_wrt_y(self):\n\n x_min = min(self.x)\n y_min = min(self.y)\n y_max = max(self.y)\n\n y_range = y_max - y_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(y_range)\n y = y / float(y_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def hogg_lim_and_label():\n plt.xlim(-20., 1020.)\n plt.xlabel(\"time (d)\")\n plt.ylim(-20., 20.)\n plt.ylabel(\"radial velocity (m\\,s$^{-1}$)\")\n return None", "def _nice_axes(self, ax):\n ax.ticklabel_format(axis='y', style='sci', scilimits=(-2, 3))\n pstyle.set_xaxis_label(ax)\n try:\n pstyle.set_xLimits(self.twiss_df.SEQUENCE, ax)\n except pstyle.ArgumentError:\n pass\n if self._ip_pos is not None and len(self._ip_pos) > 0:\n pstyle.show_ir(self._ip_pos, ax)", "def tick_values(self, vmin, vmax):\n raise NotImplementedError('Derived must override')" ]
[ "0.7353007", "0.72912204", "0.7056452", "0.7000773", "0.69802177", "0.69701964", "0.69701964", "0.6961962", "0.6933012", "0.6850009", "0.6794499", "0.67578864", "0.67419934", "0.67419934", "0.67362744", "0.67272717", "0.67264444", "0.666699", "0.66562355", "0.6655635", "0.66232735", "0.6617262", "0.6616948", "0.66149676", "0.6612967", "0.65858835", "0.65660286", "0.6541642", "0.65309274", "0.6526762", "0.65254015", "0.65231204", "0.6499158", "0.6499055", "0.6499035", "0.6475339", "0.6470053", "0.64590734", "0.64379877", "0.64346904", "0.6430373", "0.6430181", "0.6425013", "0.6423413", "0.63951725", "0.63570786", "0.63552344", "0.63388085", "0.6325586", "0.6296702", "0.6292324", "0.6289036", "0.62756556", "0.62741333", "0.627079", "0.6263932", "0.62562025", "0.6235975", "0.62212545", "0.61731255", "0.6148588", "0.6145458", "0.6103607", "0.60846096", "0.6066814", "0.6054738", "0.6035597", "0.6032744", "0.60207796", "0.60032946", "0.60032946", "0.6002621", "0.6001829", "0.5976707", "0.5959246", "0.59537774", "0.59534776", "0.5931503", "0.59311736", "0.592823", "0.59142226", "0.59133", "0.59103066", "0.5897789", "0.5889277", "0.5876273", "0.5874428", "0.5870176", "0.5868165", "0.58681333", "0.5866958", "0.58649033", "0.5860736", "0.58599705", "0.5859811", "0.5855913", "0.5848337", "0.5842721", "0.58419335", "0.5829963" ]
0.8293696
0
Plot the graph for corresponding sensor data
def _PlotGraph(self, event): self._rcvLock.acquire() for j in event.data[0].keys(): data = event.data[0][j] #print data line = [] for k in data.keys(): if k in COLORS.keys(): c = COLORS[k] else: c = 'black' line.append(plot.PolyLine(data[k], colour=c, width=1, legend="Node %d"%(k,))) # To draw markers: default colour = black, size = 2 # shapes = 'circle', 'cross', 'square', 'dot', 'plus' #marker = plot.PolyMarker(event.data[1], marker='triangle') # set up text, axis and draw if j == ERRORPLOT: t = "Synchronization Error" xa = "Time [s]" ya = "Error [ms]" elif j == TEMPPLOT: t = "Temperature Index" xa = "Time [s]" ya = "Index" elif j == SKEWPLOT: t = "Frequency Error" xa = "Time [s]" ya = "Frequency Error [ppm]" gc = plot.PlotGraphics(line, t, xa, ya) # Draw graphs for each plot self.plotter[j].Draw(gc, xAxis=(self._x_lower, self._x_upper), yAxis=(float(self._y_lower[j]), float(self._y_upper[j]))) self._rcvLock.release()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_data(self):", "async def plot_device_data(self, axes, name) -> []:\n pass", "def plot_graph(self) -> None:", "def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])", "def plot(self):\n pass", "def plot_multifig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots a seperate graph for each sensor\n for i in range(0,NO_SENSORS):\n plt.figure(i + 1)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')\n plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])\n plt.xlabel('Time/s')\n plt.ylabel('Acceleration/g')\n plt.legend()\n plt.show()", "def plotOfSingleSensor(self,index,plot='all'): #name='LFS01_S1'\n\t\tp1=_plot.plot(yLabel='V',xLabel='time [ms]',\n\t\t\t\t\t subtitle=self.sensorNames[index],title=self.title,\n\t\t\t\t\t shotno=self.shotno)\n\t\tif plot=='all' or plot=='raw':\n\t\t\tp1.addTrace(yData=self.solDataRaw[index],xData=self.time*1000,\n\t\t\t\t\t\tyLegendLabel=self.sensorNames[index]+' Raw')\n\t\tif plot=='all' or plot=='fit': \n\t\t\tp1.addTrace(yData=self.solDataFit[index],xData=self.time*1000,\n\t\t\t\t\t\tyLegendLabel=self.sensorNames[index]+' Fit') \n\t\tif plot=='all' or plot=='smoothed' or plot=='smoothedOnly': \n\t\t\tp1.addTrace(yData=self.solData[index],xData=self.time*1000,\n\t\t\t\t\t\tyLegendLabel=self.sensorNames[index]+' Without Offset') \n\t\treturn p1", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # 
plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def plot(self, *args, **kwargs):\n pass", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot_data(self):\n # plot every log image\n for log_img in self.log_img_map.itervalues():\n log_img.plot()", "def __init__(self, data):\n self.root = Tk()\n \n titleFont = (\"Arial\", 16, \"bold\")\n \n \n # sensor table\n self.sensorTable = Frame(master=self.root)\n self.sensorNameColumn = LabelFrame(self.sensorTable, text=\"Name\", font=titleFont)\n self.sensorNameColumn.grid(row = 0, column = 0)\n self.sensorValueColumn = LabelFrame(self.sensorTable, text=\"Value\", font=titleFont)\n self.sensorValueColumn.grid(row = 0, column = 1)\n self.sensorButtonColumn = LabelFrame(self.sensorTable, text=\"Button\", font=titleFont)\n self.sensorButtonColumn.grid(row = 0, column = 2)\n self.sensorRows = {} # do we really need to keep track of this?\n self.sensorTable.pack()\n \n self.figures = {}\n self.axes = {}\n self.lines = {}\n \n self.data = data\n \n self.sensorDisplayButtonCallback = self.openGraph\n \n quit_button = Button(self.root, text=\"Quit\", command=self.quit)\n quit_button.pack(side=BOTTOM)\n \n plt.ion()", "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def plot_temp():\r\n work_book = xlrd.open_workbook(\"Temp.xls\")\r\n sheet1 = work_book.sheet_by_name(\"Temperature\")\r\n time_x = sheet1.col_values(1)\r\n temp_y = sheet1.col_values(0)\r\n plt.title(\"Time\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Temperature\")\r\n plt.plot(time_x, temp_y)\r\n plt.show()", "def plot_singlefig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots graphs for each sensor on 1 figure\n plt.figure(1)\n for i in range(0,NO_SENSORS):\n # The figure is seperated into subplots using the parameter. 
231 means 2 rows, 3 columns, subplot 1\n plt.subplot(231 + i)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')\n plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])\n plt.xlabel('Time/s')\n plt.ylabel('Acceleration/g')\n plt.legend()\n plt.show()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def plot(self, x_values=None, y_values=None,\n x_experiment_values=None, y_experiment_values=None) -> None:\n\n # TODO: make the plot at the beginning to say something, like \"Press RUN\"\n self.temperature_subplot.cla()\n self.temperature_subplot.set_title('Temperature plot')\n self.temperature_subplot.set_xlabel(\"Time [s]\")\n self.temperature_subplot.set_ylabel(\"Temperature [°C]\")\n\n # In both cases of plotting we have to make sure we plot the\n # data with the same dimensions, therefore we first determine\n # she shortest array and plot just that data\n\n if x_values is not None and y_values is not None:\n min_length = min(len(x_values), len(y_values))\n self.temperature_subplot.plot(x_values[:min_length],\n y_values[:min_length],\n label='Calculated Data',\n color=\"blue\")\n\n if x_experiment_values is not None and y_experiment_values is not None:\n min_length = min(len(x_experiment_values), len(y_experiment_values))\n self.temperature_subplot.plot(x_experiment_values[:min_length],\n y_experiment_values[:min_length],\n label='Experiment Data',\n color=\"orange\")\n\n self.temperature_subplot.legend()\n self.draw()", "def _plot(self):\r\n fig = plt.figure()\r\n\r\n # Take out second component of intensity if needed\r\n # if self._vna.isTwoComponents():\r\n # intensitySimplified = []\r\n # for i in range(len(self._intensity)):\r\n # tempSet = []\r\n # for j in range(len(self._intensity[i])):\r\n # if (j%2) == 0:\r\n # tempSet.append(self._intensity[i][j])\r\n # intensitySimplified.append(tempSet)\r\n # for i in range(len(self._frequency)):\r\n # plt.plot(self._frequency[i],intensitySimplified[i],label=('%sv' % self._voltages[i][0]))\r\n # else:\r\n for i in range(len(self._frequency)):\r\n plt.plot(self._frequency[i],self._intensity[i],label=('%sv' % self._voltages[i][0]))\r\n plt.legend(loc='upper left')\r\n fig.suptitle('Intensity-Frequency with non-Constant Voltage', 
fontsize=18)\r\n plt.xlabel('Frequency (Hz)', fontsize=18)\r\n plt.ylabel('Intensity (dBm)', fontsize=16)\r\n\r\n # Save plot\r\n self._saveFig()", "def draw_sensors(self,renderer):\n pass", "def graph(df):\n df.plot()\n plt.show()", "def graph():\n # Try to get params request\n params = extract_variables(['start_time', 'end_time', 'sensor_id'], request)\n # Fetch data from database\n results = query_climate_range(**params)\n\n # Turn it in to lists which can be graphed\n dates = []\n humids = []\n temps = []\n pressures = []\n for result in results:\n dates.append(datetime.datetime.fromtimestamp(result['time']))\n humids.append(result['humid'])\n temps.append(result['temp'])\n pressures.append(result['pressure'])\n\n # Graph it\n fig = Figure()\n # First y axis (temp and humid)\n axis = fig.add_subplot(1, 1, 1)\n # Plot humidity and temp on the same scale\n axis.plot_date(dates, humids, '-', color=COLORS['blue'])\n axis.plot_date(dates, temps, '-', color=COLORS['red'])\n axis.xaxis.set_major_formatter(DateFormatter('%d/%m/%y %H:%M'))\n axis.set_ylabel('Humidity in % & Temps in C')\n axis.set_xlabel('Time')\n # Second y axis (pressure)\n axis_pressure = axis.twinx()\n # Plot pressure\n axis_pressure.plot_date(dates, pressures, '-', color=COLORS['green'])\n axis_pressure.xaxis.set_major_formatter(DateFormatter('%d/%m/%y %H:%M'))\n axis_pressure.set_ylabel('Pressure in mbar')\n # Configure the figure\n fig.autofmt_xdate()\n fig.legend(['Humidity', 'Temperature', 'Pressure'], loc='lower right')\n fig.set_tight_layout(True)\n canvas = FigureCanvas(fig)\n # Save output\n png_output = BytesIO()\n canvas.print_png(png_output)\n\n # Create the response and send it\n response = make_response(png_output.getvalue())\n response.headers['Content-Type'] = 'image/png'\n return response", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plotOfOneChannel(self, i=0):\n\t\tp1=_plot.plot(xLabel='time [ms]',yLabel=r'a.u.',title=self.title,\n\t\t\t\t\t shotno=[self.shotno],subtitle=self.sensorNames[i]);\n\t\t\n\t\t# smoothed data\n\t\tp1.addTrace(yData=self.data[i],xData=self.time*1000,\n\t\t\t\t\tyLegendLabel=self.sensorNames[i]) \n\t\t\t\n\t\treturn p1", "def plot(self):\n\t\tself.plotOfSpect()", "def plot(self):\n x = np.arange(5)\n # labels = ['temp', 'humi', 'mais', 'o2', 'co2']\n plt.bar(x - 0.35/2, self.data, 0.35, label='actual')\n plt.bar(x + 0.35/2, self.desired_values, 0.35, label='desired')\n plt.ylim(-5, 80)\n plt.legend()\n\n plt.draw()\n plt.pause(0.000001)\n plt.clf()", "def plot_energies(self):\n plt.plot(self.energies[0], self.energies[1])\n plt.xlabel('Time (s)')\n plt.ylabel('Energy (J)')\n plt.show()", "def plot_humidity(timestamps,timelabels,humidities):\n\n #into x,y data and 2nd column as the x-axis tick\n TOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select,lasso_select\"\n p = plt.figure(title=\"Christchurch Humidity\", tools=TOOLS,\n x_axis_label='Record Time', y_axis_label='Humidity(%)')\n\n # add a line renderer with legend and line thickness\n\n p.xaxis.ticker = timestamps\n p.xaxis.major_label_overrides=(dict(zip(timestamps,timelabels)))\n p.xaxis.major_label_orientation = pi/2\n p.xaxis.ticker.desired_num_ticks = 1\n\n p.line(timestamps,humidities, legend_label=\"Humidity\", line_width=2)\n\n from bokeh.resources import CDN\n from bokeh.embed import components\n script, div = components(p)\n \n return get_bokeh_plot_head(), script, div", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n 
self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def plot_graph(self, dataset):\n data = self.data\n diagrams = []\n\n for time_stamp, data_tag in dataset:\n data_x, data_y = [], []\n for item in data:\n data_x.append(item[time_stamp])\n data_y.append(item[data_tag])\n diagrams.append(Scatter(x=data_x, y=data_y, mode='markers'))\n\n layout = plotly.graph_objs.Layout(yaxis=dict(autorange='reversed'))\n data = Data(diagrams)\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n plotly.plotly.plot(fig, filename='exo-line')", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def display_plot(self, parameter):\n values = list(self.dataframe[parameter])\n #Begining and ending date of the dataset\n beg = self.beg\n end = self.end\n #Settings of the plot\n if parameter == 'temperature':\n #Differienciate the color of the points according to temeprature rule\n import matplotlib as mpl\n cmap = mpl.colors.ListedColormap(['blue', 'yellow', 'orange', 'red'])\n c_norm = mpl.colors.BoundaryNorm(boundaries=[-30,0,15,25,45], ncolors=4)\n plt.scatter(time_range, values, s=0.3, c=values, cmap=cmap, norm=c_norm)\n plt.colorbar()\n else:\n plt.plot(time_range, values, linewidth=0.2)\n plt.xlabel('from {} to {}'.format(beg, end))\n plt.ylabel(parameter)\n plt.title('Weather historical data')\n plt.grid(True)", "def plot_observed(self):\n \n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], self.observed_data[k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n\n fig = plt.figure(figsize=(16,4))\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.observed_data.keys():\n 
plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def plot(self):\n plot_spectrum(self.data, self.fig, self.ax_e, self.ax_s, title = \"Solar spectrum\")", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in 
numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not 
self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def plotXY(xName,xDataRaw,yName, yDataRaw):\n scanFileHolder = getScanFileHolderXY(xName,xDataRaw,yName, yDataRaw) \n scanFileHolder.plot(xName, yName)\n return scanFileHolder", "def plot_data(self):\n if hasattr(self,'data'):\n plt.scatter(*self.data.T)\n plt.show()\n else:\n raise Exception('No 2d data of the instance has been loaded')", "def plot(self):\n\t\tself.plotOfXray().plot()", "def peek(self, **kwargs):\n\n plt.figure()\n axes = plt.gca()\n data_lab=self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]\n axes.plot(self.data.index,self.data,label=data_lab)\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-4,1)\n axes.set_title('Nobeyama Radioheliograph')\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))\n axes.set_ylabel('Correlation')\n axes.legend()\n plt.show()", "def plot():\n pass", "def plot_values(self, plot_widget, data, x_range, y_range):\r\n\r\n self.widget = plot_widget\r\n self.data = data\r\n self.x_range = x_range\r\n self.y_range = y_range\r\n\r\n self.widget.setXRange(0, self.x_range)\r\n self.widget.setYRange(0, self.y_range)\r\n self.widget.showGrid(x=True, y=True)\r\n self.widget.addLegend()\r\n # self.widget.setLabel('left', 'Value', units='y')\r\n self.widget.setLabel('bottom', 'Frames')\r\n self.widget.clear()\r\n\r\n for item in self.data.items():\r\n line = self.widget.plot(np.insert(item[1], 0, item[1][0]), pen=self.get_color(item[0]),\r\n symbolPen=self.get_color(item[0]), symbol='o', symbolSize=1, name=item[0])\r\n self.marker(self.widget)", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot(accessToken, collection):\n \n plt.xlabel('Date/Time')\n plt.ylabel('Sensor Value')\n plt.title(\"Sensors Monitor\")\n \n # to save png files\n i = 0\n \n # set interactive mode on\n plt.ion()\n \n # set figure to full screen\n mng = plt.get_current_fig_manager()\n mng.full_screen_toggle()\n\n while True:\n jsondata = getJsonData(accessToken)\n if jsondata:\n #limit date string\n jsondata[DATE] = jsondata[DATE][8:13]\n appendJsonData(jsondata, collection)\n \n # clear figure\n plt.clf()\n \n # limit samples to be viewed\n if (len(collection[DATE]) > SAMPLE_SIZE_LIMIT):\n plt.xticks(range(SAMPLE_SIZE_LIMIT), collection[DATE][-SAMPLE_SIZE_LIMIT:])\n plt.plot(collection[SENSOR1][-SAMPLE_SIZE_LIMIT:], 'k', label='sensor 1')\n plt.plot(collection[SENSOR2][-SAMPLE_SIZE_LIMIT:], 'b', label='sensor 2')\n plt.plot(collection[SENSOR3][-SAMPLE_SIZE_LIMIT:], 'g', label='sensor 3')\n plt.plot(collection[SENSOR4][-SAMPLE_SIZE_LIMIT:], 'r', label='sensor 4')\n else:\n plt.xticks(range(len(collection[DATE])), collection[DATE])\n plt.plot(collection[SENSOR1], 'k', label='sensor 1')\n plt.plot(collection[SENSOR2], 'b', label='sensor 2')\n plt.plot(collection[SENSOR3], 'g', label='sensor 3')\n plt.plot(collection[SENSOR4], 'r', label='sensor 4')\n \n plt.legend(loc='upper left')\n plt.show()\n \n # Take a screenshot on Gnome desktop\n if 
os.environ.get(\"XDG_MENU_PREFIX\").startswith(\"gnome\"):\n os.system(\"gnome-screenshot -f screenshot{}.png\".format(i))\n i = i+1\n \n #plt.pause(1)\n plt.pause(60*60) # one hour\n else:\n print(str(datetime.datetime.now()) + \" Empty json data\")", "def plot(self, *args, **kwargs):\n raise NotImplementedError", "def plot_data(heart_filt, pace_filt):\n\n plt.figure(1)\n plt.plot(heart_filt, pace_filt)\n plt.show()", "def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top', extent=(0., 360., -90., 90.))\n ax.set_title('Driscoll Healy Grid')\n ax.set_xlabel('longitude')\n ax.set_ylabel('latitude')\n fig.tight_layout(pad=0.5)\n return fig,ax", "def plot_acc(acc_watch, x_acc_df):\n\tfig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\tplt.xlabel('Time (ms)')\n\tplt.ylabel('acc. value')\n\tax1.set_title('Acceleration Data from ECG')\n\tax2.set_title('Acceleration Data from Watch')\n\n\t# ecg data\n\tax1.plot(x_acc_df['timestamp'], x_acc_df['x_acc'] )\n\t# ppg data\n\tax2.plot(acc_watch['timestamp'], acc_watch['v0'])\n\n\tplt.show()", "def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()", "def make_plot(x,y):", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plot(self):\n\n import matplotlib.pyplot as plt\n plt.matshow(self.event_roll.T, cmap=plt.cm.gray, interpolation='nearest', aspect='auto')\n plt.show()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def plot_motion(x, y):\n plt.xlabel(\"X Position (m)\")\n plt.ylabel(\"Y Position (m)\")\n plt.plot(x, y)\n plt.show()", "def plot_lines(self):\n self.plot(3)", "def plot(self,plot='smoothedOnly',includeBP=True):\n\n\t\tif plot=='all':\n\t\t\tfor j in range(0,20):\n\t\t\t\t\tp1=self.plotOfSingleSensor(j,'all').plot()\n \n\t\telse:\n\t\t\tfor j in range(0,8):\n\t\t\t\tif j==0:\n\t\t\t\t\tp1=self.plotOfSingleSensor(j,plot) \n\t\t\t\t\tp3=self.plotOfSingleSensor(12+j,plot) \n\t\t\t\t\tif j<4:\n\t\t\t\t\t\tp2=self.plotOfSingleSensor(8+j,plot) \n\t\t\t\telse:\n\t\t\t\t\tp1.mergePlots(self.plotOfSingleSensor(j,plot))\n\t\t\t\t\tp3.mergePlots(self.plotOfSingleSensor(12+j,plot))\n\t\t\t\t\tif j<4:\n\t\t\t\t\t\tp2.mergePlots(self.plotOfSingleSensor(8+j,plot)) \t\n\t\t\tp1.subtitle='Section 1 SOL Sensors'\t\n\t\t\tp2.subtitle='Section 4 SOL Sensors'\t\n\t\t\tp3.subtitle='Section 8 SOL Sensors'\t\t\t\n\t\t\treturn _plot.subPlot([p1,p2,p3],plot=True)", "def __plot(name, x, y):\n import matplotlib.pyplot as plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))", "def plot_raw_eeg_data(time_data, eeg_data):\n plt.plot(time_data, eeg_data, 'g-')\n plt.xlabel(\"time [secs]\")\n plt.ylabel(\"raw EEG values\")\n plt.title(\"EEG Data\")\n # plt.xlim(min(x_data) - 1, max(x_data) + 1)\n # plt.ylim(min(y_data) - 1, max(y_data) + 1)\n plt.show()", "def graphplot(self):\n if self.binned:\n 
self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def plot(self, show=True, save=True):\n x = numpy.vstack([therm.X for therm in self._thermals])\n plt.scatter(x[:,1] / 1000.0, x[:,0] / 1000.0, s=5, edgecolors='none')\n if save:\n f = plt.gcf()\n f.savefig('thermal_field.png', format='png', dpi=1000)\n if show:\n plt.show()", "def data_graph():\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n # station_data = station_data.replace(\" \", \"+\")\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n # Get optional parameters\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n # plot pic\n magic_trick= data.station_graph(result_station.stationName, time_from, time_to)\n # img_stream = io.BytesIO(img)\n # img = Image.open(img_stream)\n # imgByteArr = io.BytesIO()\n # img.save(imgByteArr,format='PNG')\n # imgByteArr = imgByteArr.getvalue()\n # return send_file(io.BytesIO(imgByteArr),\n # mimetype = 'image/png',\n # as_attachment = True,\n # attachment_filename = 'tmp.png')\n image_data = open(\"tmp.png\", \"rb\").read()\n response = make_response(image_data)\n response.headers['Content-Type'] = 'image/png'\n return response", "def plot_data():\n \n [X_train, X_dev, X_test, Y_train, Y_dev, Y_test, numOutputNodes] = load_data('regression') \n \n traindev = np.concatenate((Y_train, Y_dev), 1)\n traindevtest = np.concatenate((traindev, Y_test), 1)\n tdt = traindevtest.reshape(traindevtest.shape[1],)\n\n Y_train = Y_train.reshape(Y_train.shape[1],)\n Y_dev = Y_dev.reshape(Y_dev.shape[1],)\n Y_test = Y_test.reshape(Y_test.shape[1],)\n\n sigma = np.round(np.std(tdt), 3)\n mu = np.round(np.mean(tdt), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(1)\n plt.hist(tdt)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt.size, mu, sigma))\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(2)\n plt.hist([Y_train, Y_dev, Y_test], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n # below is graphing for the charge data, as opposed to the averaged spectrum data\n [X_train1, X_dev1, X_test1, _, _, _, Y_train1, Y_dev1, Y_test1, numOutputNodes1] = load_data('multi_task')\n traindev1 = np.concatenate((Y_train1, Y_dev1), 1)\n traindevtest1 = np.concatenate((traindev1, Y_test1), 1)\n tdt1 = 
traindevtest1.reshape(traindevtest1.shape[1],)\n\n Y_train1 = Y_train1.reshape(Y_train1.shape[1],)\n Y_dev1 = Y_dev1.reshape(Y_dev1.shape[1],)\n Y_test1 = Y_test1.reshape(Y_test1.shape[1],)\n\n sigma = np.round(np.std(tdt1), 3)\n mu = np.round(np.mean(tdt1), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(3)\n plt.hist(tdt1)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt1.size, mu, sigma))\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(4)\n plt.hist([Y_train1, Y_dev1, Y_test1], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n return None", "def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()", "def visualisation(self):\n plt.plot(self.x, self.y, 'o', label = 'Example data')\n plt.plot(self.x, np.dot(self.w, self.X), label = 'Model')\n plt.xlim([-1,1])\n plt.ylim([-1,1])", "def plot_coords(coords: List[int], labels: List[str], sample_density=100):\n global graph_data\n size = len(coords)\n if not graph_data:\n graph_data = [[0] * 100] * size\n for i in range(size):\n graph_data[i][-1] = coords[i]\n\n live_plotter(np.linspace(result_count - sample_density, result_count, sample_density), graph_data,\n identifier='Sensor Values',\n labels=labels)\n for i in range(size):\n graph_data[i] = np.append(graph_data[i][1:], 0.0)", "def test_2d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def plot_data(self, filepath=None, time_min=None, time_max=None, title=None,\n electrode=None):\n\n # normalizes the samples x electrodes array containing the EEG data and\n # adds 1 to each row so that the y-axis value corresponds to electrode\n # location in the MNI coordinate (x,y,z) by electrode df containing\n # electrode locations\n\n if self.get_data().shape[0] == 1:\n nii = self.to_nii()\n nii.plot_glass_brain(pdfpath=filepath)\n elif self.get_data().empty:\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()\n else:\n Y = _normalize_Y(self.data) # self.get_data()) this allows us to plot all the electrodes even the recon ones\n\n if electrode is not None:\n Y = Y.loc[:, electrode]\n if len(Y.shape) > 1:\n for i, column in enumerate(Y):\n Y[column] = Y[column] - int(column) + i\n\n # divide index by sample rate so that index corresponds to time\n if self.sample_rate:\n Y.index = np.divide(Y.index,np.mean(self.sample_rate))\n\n # if a time window is designated index data in that window\n if all([time_min, 
time_max]):\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y = Y[mask]\n\n # if a time window is not designated, default to the first 500 seconds\n else:\n time_min = 0\n time_max = 10\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y= Y[mask]\n \n if electrode:\n if len(Y.shape) > 1:\n ax = Y.plot(title=title, lw=.6)\n else:\n ax = Y.plot(title=title, lw=.6, color='k')\n else:\n ax = Y.plot(legend=False, title=title, color='k', lw=.6)\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()", "def __plot(data, days: int = None):\n if days is not None:\n points = days * 144\n else:\n points = len(data)\n\n temp = data[-points:, 1]\n\n plt.plot(range(points), temp)\n plt.grid()\n plt.show()", "def clean_graph(self):\n #self.time = 0#\n \n # values of microcontroller\n #if self.graf_t.buffer_info()[1] != 0:\n for a in range(self.graf_t.buffer_info()[1]):\n self.graf_t.pop()\n \n for a in range(self.graf_r.buffer_info()[1]):\n self.graf_r.pop()\n\n for a in range(self.graf_x0.buffer_info()[1]):\n self.graf_x0.pop()\n\n for a in range(self.graf_x1.buffer_info()[1]):\n self.graf_x1.pop()\n\n for a in range(self.graf_u.buffer_info()[1]):\n self.graf_u.pop()\n \n self.referenceLine.set_data(self.graf_t, self.graf_r)\n self.x0Line.set_data(self.graf_t, self.graf_x0)\n self.x1Line.set_data(self.graf_t, self.graf_x1)\n self.uLine.set_data(self.graf_t, self.graf_u)\n \n try:\n #Draw the lines\n if self.checkBox_R.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.referenceLine)\n if self.checkBox_x0.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x0Line)\n if self.checkBox_U.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.uLine)\n if self.checkBox_x1.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x1Line)\n except AssertionError:\n pass\n try:\n self.mplWidget.canvas.blit(self.mplWidget.canvas.ax.bbox)\n except AttributeError:\n pass\n \n # force an image redraw\n self.mplWidget.canvas.draw()", "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n 
plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")", "def plot(self):\n\t\tclf()\n\n\t\t# Plot rate for flow 1\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\tmaxY = None\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data1:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 
0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 2\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data2:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 3\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data3:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 4\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data4:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 1\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data5:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\txlabel('Time (seconds)')\n\t\tylabel('Rate (Mbps)')\n\t\tylim([0,maxY])\n\t\tsavefig(self.output_file + '.png')", "def __plot(self, x: list, y:list):\r\n # clear the figure\r\n self.figure.clear()\r\n # create an axis\r\n self.canvas.axes = self.figure.add_subplot(111)\r\n # plot data\r\n self.canvas.axes.plot(x, y, self.primaryColor, label=self.inputFunction)\r\n # refresh canvas\r\n self.canvas.draw()", "def plot_data(f_name):\n # The load_data_from_csv function is a utility function that will dump our\n # csv data into an array called data.\n x, data = load_data_from_csv(f_name)\n # plt.subplots is a way of initializing matplotlib so you can plot\n fig, ax = plt.subplots()\n # ax.errorbar is the main plotting function call.\n # The `fmt`, `capsize`, `elinewidth`, `color` and `label` keyword\n # arguments are there to style the plot -- they are not instrumental.\n std_err = np.std(data,axis=1)/np.sqrt(data.shape[1])\n ax.errorbar(x,np.mean(data, 
axis=1),yerr=std_err,\n fmt='o',capsize=3, elinewidth=1, color='green',\n label=\"Some description of data\")\n # Set the text on the x axis.\n ax.set_xlabel(\"Simulated Independent Variable (units)\")\n # Set the text on the y axis.\n ax.set_ylabel(\"Simulated Dependent Variable, (units)\")\n # Set the text for the title.\n ax.set_title(\"Some Noisy Data with a linear trend\")\n # Turn on the legend box that appears on the plot figure.\n ax.legend()\n # Turn on grid lines\n ax.grid(True)\n # Create a window with the plot. You can click the save icon to\n # save it to file. Alternatively, you can uncomment the\n # `fig.savefig(\"sample_data_plot.png\")` line to save directly.\n # plt.show()\n fig.savefig(\"sample_data_plot.png\")", "def graph(self, ax=None, logax=False, el=None):\n if ax == None:\n fig, ax = plt.subplots()\n\n ax.cla()\n ax.clear()\n # if element is defined, plot only one element, otherwise all\n if el:\n self.data.plot(ax=ax, y=el, kind='line', legend=False)\n else:\n self.data.plot(ax=ax, kind='line', legend=False)\n\n if logax:\n ax.set_yscale('log')\n\n if self.starts and self.ends:\n # create lines for start and end of each ablation\n for i in range(0, len(self.starts)):\n ax.axvline(x=self.time[self.starts[i]],\n color='blue', linewidth=2)\n for i in range(0, len(self.ends)):\n ax.axvline(x=self.time[self.ends[i]],\n color='blue', linewidth=2)\n\n if self.laser_off:\n # higlights bacground\n for off in self.laser_off:\n #print(self.time[off[0]], self.time[off[1]])\n try:\n ax.axvspan(\n self.time[off[0]], self.time[off[1]], alpha=0.2, color='red')\n except:\n warnings.warn('something is wrong')\n\n if self.laser_on:\n # higlihts ablation\n for on in self.laser_on:\n ax.axvspan(self.time[on[0]], self.time[on[1]],\n alpha=0.2, color='green')\n\n plt.show()", "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def plot(self, **kwargs):\n\n from ..plot import Plot\n\n p = Plot(1, 1, 1, **kwargs)\n\n p.axes[0].plot(self.dispersion.value, self.flux.value,\n drawstyle='steps-mid')\n\n if self.flux.uncertainty is not None:\n p.axes[0].plot(self.dispersion.value, self.flux.uncertainty.value,\n drawstyle='steps-mid')\n\n p.tidy()\n p.display()", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def plot(self):\n R = self.length\n\n plt.figure()\n for ii, car in enumerate(self.cars):\n theta = self.positions[ii] + car.position\n x = R * np.cos(theta)\n y = R * np.sin(theta)\n if ii == 0:\n plt.scatter(x, y, marker='x')\n else:\n plt.scatter(x, y)\n\n plt.axis('scaled')\n lim = (-1.2 * R, 1.2 * R)\n plt.ylim(lim)\n plt.xlim(lim)\n plt.savefig('traffic_{:d}.png'.format(self.time))\n plt.close()", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def simple_plot(self):\n for i in np.arange(len(self.e2)):\n self.ax.plot(self.e2[i], 'o', label=self.labels[i])", "def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()", "def plot(self): \n\t\txandy = sep_xy(self.start, 
self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)", "def show_track(name_of_track):\n\n df = pd.read_csv(name_of_track, index_col=0)\n\n x = df[\"x\"][:] # simple x and y position of the track\n y = df[\"y\"][:]\n\n plt.plot(x[:2500], y[:2500], \"r.\")\n plt.plot(x[2501:5000], y[2501:5000], \"g.\")\n plt.plot(x[5001:7500], y[5001:7500], \"b.\")\n plt.plot(x[7501:10000], y[7501:10000], \"y.\")\n plt.show()", "def update_graph(self):\n parameters = []\n dtype = {'Timestamp': 'str'}\n for header in self.headers:\n if self.top_plot.current_param in header or self.bottom_plot.current_param in header:\n parameters.append(header)\n dtype[header] = 'float'\n data = pd.read_csv(self.reactor.file,\n dtype=dtype,\n parse_dates=['Timestamp'], usecols=['Timestamp'] + parameters, low_memory=False,\n na_filter=False)\n start_time = data['Timestamp'][0]\n data.insert(loc=2, column='EFT', value=(data['Timestamp'] - start_time) / np.timedelta64(1, 'h'))\n\n for label, content in data.iteritems():\n if label == 'Timestamp' or label == 'EFT':\n continue\n elif self.top_plot.current_param in label:\n self.top_plot.clear()\n self.top_plot.plot(data['EFT'], content)\n else:\n self.bottom_plot.clear()\n self.bottom_plot.plot(data['EFT'], content)", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def plot (self):\n \n plt.stem(self.nTs, self._signal)", "def plot(self):\n y = self.projection\n mpl.scatter(y[:, 0], y[:, 1], c=self.data_class)\n mpl.show()" ]
[ "0.75636965", "0.70755386", "0.70508736", "0.7003091", "0.6793981", "0.670657", "0.6649556", "0.6627259", "0.6556057", "0.65330535", "0.64661133", "0.64386946", "0.6424212", "0.6413959", "0.6405251", "0.6394528", "0.6362279", "0.63578933", "0.63299096", "0.6308484", "0.630025", "0.62945217", "0.62909204", "0.6285159", "0.6268585", "0.62468517", "0.62442183", "0.6236838", "0.6236838", "0.6236838", "0.6236838", "0.6236838", "0.6226533", "0.62234706", "0.6221129", "0.62144214", "0.6211902", "0.61958414", "0.6179331", "0.61620337", "0.6152692", "0.6149163", "0.6148495", "0.6128618", "0.61238116", "0.6121699", "0.6111991", "0.61009365", "0.6099232", "0.60944456", "0.608287", "0.6078746", "0.6077164", "0.607295", "0.6071393", "0.6061216", "0.60607624", "0.6042396", "0.603946", "0.60256165", "0.6015723", "0.6015712", "0.60150754", "0.6006252", "0.60049105", "0.6001912", "0.59972346", "0.5994073", "0.5994045", "0.597634", "0.59758", "0.59745014", "0.59728384", "0.59692144", "0.5966506", "0.59583217", "0.5948734", "0.5946704", "0.5940122", "0.5930455", "0.59280723", "0.59279794", "0.59275013", "0.5904763", "0.5903676", "0.59018844", "0.5892558", "0.5890649", "0.5886699", "0.5881118", "0.58802736", "0.5877696", "0.5876", "0.58670753", "0.58664185", "0.58657384", "0.58651066", "0.5863915", "0.58627295", "0.585628" ]
0.6922587
4
raises astropy.units.UnitsError if not a distance unit
def _convert_to_and_validate_distance_unit(unit):
    if unit is not None:
        unit = u.Unit(unit)
        if not unit.is_equivalent(u.kpc):
            raise u.UnitsError(six.u('Unit "{0}" is not a distance').format(unit))
    return unit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDistanceUnits(self) -> Unit:\n ...", "def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01", "def testAbsDist():\n units = unitsystem.UnitSystem()\n assert units.absorption_distance(25000, 3) == 0.13377926628219666\n assert units.absorption_distance(25000, 2) == 0.07525083728373562\n assert units.absorption_distance(25000, 3) / units.absorption_distance(12500, 3) == 2.", "def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0", "def distance_sensor(unit):\n\n\tsensor_name = \"baseBoard\"\n\treg_addr = 24\n\tdata_len = 56\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Range\", Range, 2)\n\tdistance = data.range\n\t# transfer sensor data to target unit\n\tif unit == \"cm\":\n\t\tresult = distance / 10.0\n\telse:\n\t\tresult = distance\n\n\tdelete_sensor(sensor_name)\n\treturn result", "def test_06_valid_distance(self):\n distance_record = SwimRecord(distance=20)\n try:\n distance_record.full_clean()\n except ValidationError as e:\n self.assertTrue(\"Ensure this value is greater than or equal to 50.\" in e.message_dict['distance'])", "def testatomcoords_units(self):\r\n min_carbon_dist = get_minimum_carbon_separation(self.data)\r\n dev = abs(min_carbon_dist - 1.34)\r\n assert dev < 0.15, f\"Minimum carbon dist is {min_carbon_dist:.2f} (not 1.34)\"", "def test_length_unknown_unit(self):\n with self.assertRaises(ValueError):\n METRIC_SYSTEM.length(5, 'fr')", "def distance():\n return str(us.get_distance())", "def distance_traveled():\n user_distance = raw_input(\"How far is the ship going? Include unit label (m, km, au, or mi). \")\n distance = \"\"\n unit = \"\"\n for ch in user_distance:\n if ch in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n distance = distance + str(ch)\n elif ch == \".\":\n distance = distance + \".\"\n elif ch == \",\":\n distance = distance + \".\"\n else:\n unit = unit + str(ch)\n unit = unit.strip(\" \")\n if distance[-1] == \".\":\n distance = distance.strip(\".\")\n while True:\n try:\n float(distance)\n except ValueError:\n distance = raw_input(\"Invalid input. Please enter a positive number with no other non-decimal characters. \")\n continue\n else:\n break\n while float(distance) <= 0:\n distance = raw_input(\"Invalid input. Please enter a positive number. \")\n try:\n float(distance)\n except ValueError:\n distance = raw_input(\"Invalid input. Please enter a positive number with no other non-decimal characters. \")\n continue\n else:\n break\n distance = float(distance)\n unit = unit.lower()\n expected_units_m = [\"m\", \"meters\"]\n expected_units_km = [\"km\", \"kilometers\"]\n expected_units_mi = [\"mi\", \"miles\"]\n expected_units_au = [\"au\", \"ua\", \"astronomical units\", \"astronomical unit\", \"astronomic units\", \"astronomic unit\"]\n while unit not in expected_units_m and unit not in expected_units_km and unit not in expected_units_au and unit \\\n not in expected_units_mi:\n unit = raw_input(\"Unexpected unit type or label. Valid units are meters (m), kilometers (km), \"\n \"astronomical units (au), or miles (mi). 
\")\n if unit in expected_units_m:\n distance = distance\n elif unit in expected_units_km:\n distance = distance * 1000\n elif unit in expected_units_au:\n distance = distance * 149597870700\n elif unit in expected_units_mi:\n distance = round((distance * 1609.344), 0)\n return distance", "def test_distance_adc(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('ADC'), '13')", "def distance(\n dts: NDArrayFloat, gts: NDArrayFloat, metric: DistanceType\n) -> NDArrayFloat:\n if metric == DistanceType.TRANSLATION:\n translation_errors: NDArrayFloat = np.linalg.norm(dts - gts, axis=1)\n return translation_errors\n elif metric == DistanceType.SCALE:\n scale_errors: NDArrayFloat = 1 - iou_3d_axis_aligned(dts, gts)\n return scale_errors\n elif metric == DistanceType.ORIENTATION:\n yaws_dts: NDArrayFloat = mat_to_xyz(quat_to_mat(dts))[..., 2]\n yaws_gts: NDArrayFloat = mat_to_xyz(quat_to_mat(gts))[..., 2]\n orientation_errors = wrap_angles(yaws_dts - yaws_gts)\n return orientation_errors\n else:\n raise NotImplementedError(\"This distance metric is not implemented!\")", "def dist(x1, x2, distance):\n if distance == 'l2':\n return np.sqrt(np.sum(np.square(x1 - x2)))\n elif distance == 'squared_l2':\n return np.sum(np.square(x1 - x2))\n else:\n raise Exception(\"The distance '%s' is not supported.\" % distance)", "def test_sense_distance(self):\n\n\t\tmeasurements = [29, 29, 28]\n\t\tself.driver.us_dist.side_effect = lambda x: measurements.pop()\n\t\texpected_measurement = int(ultrasonic_sensor_error(29))\n\n\t\tself.assertEqual(self.s.sense_distance(60), expected_measurement)\n\t\tself.mount.move.assert_called_once_with(x=60)", "def distance2m(d, unit):\n if unit == UOM_M:\n d_m = d\n elif unit == UOM_KM:\n d_m = d * 1000\n elif unit == UOM_F:\n d_m = d * F_FEET2M\n elif unit == UOM_SM:\n d_m = d * F_SM2M\n elif unit == UOM_NM:\n d_m = d * F_NM2M\n \n return d_m", "def setDistanceUnits(self, units: Unit) -> None:\n self.units = ...", "def distmeter_err(self):\n from astropy import units\n return self.distmpc_err * units.Mpc.in_units(\"m\")", "def get_distance_metres(aLocation1, aLocation2):\n [dNorth, dEast, dDown] = get_position_error(aLocation1, aLocation2)\n \n return math.sqrt((dNorth*dNorth) + (dEast*dEast))", "def test_get_distance(self):\n meters = location_util.distance(COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1])\n self.assertAlmostEqual(meters / 1000, DISTANCE_KM, places=2)", "def length(self, length: float | None, from_unit: str) -> float:\n if not isinstance(length, Number):\n raise TypeError(f\"{length!s} is not a numeric value.\")\n\n # type ignore: https://github.com/python/mypy/issues/7207\n return DistanceConverter.convert( # type: ignore[unreachable]\n length, from_unit, self.length_unit\n )", "def get_mds_units(node):\n try:\n units=str(node.units)\n except:\n units=node.units_of()\n if not type(units)==type(\"\"):\n try:\n units=units.value_of()\n except:\n units=\"-\"\n return units", "def test_distance_aed(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AED'), 'NO SUCH ROUTE')", "def set_distance_units(value=np.NaN, from_units='mm', to_units='cm'):\n if from_units == to_units:\n return value\n\n coeff = 1\n if from_units == 'cm':\n if to_units == 'mm':\n coeff = 10\n elif to_units == 'm':\n coeff = 0.01\n else:\n raise ValueError(\"to_units not supported ['cm','m','mm']!\")\n elif from_units == 'mm':\n if to_units == 'cm':\n coeff = 0.1\n elif to_units == 'm':\n coeff = 
0.001\n else:\n raise ValueError(\"to_units not supported ['cm','m','mm']!\")\n elif from_units == 'm':\n if to_units == 'mm':\n coeff = 1000\n elif to_units == 'cm':\n coeff = 100\n else:\n raise ValueError(\"to_units not supported ['cm','m','mm']!\")\n else:\n raise ValueError(\"to_units not supported ['cm','m','mm']!\")\n\n return coeff * value", "def test_distance_ad(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AD'), '5')", "def test_point_negative_distance(self):\n\n with self.assertRaises(ValueError) as err:\n Point(1, 7).distance('foo')\n self.assertEqual(err.args[0], \"Wrong type of argument. Distance is calculated between two Point objects.\",\n \"Test of Point(1, 7).distance('foo') failed, no ValueError was raised.\")", "def to_meters(d, d_unit):\n if d_unit == UOM_M:\n dm = d\n elif d_unit == UOM_KM:\n dm = d * 1000\n elif d_unit == UOM_FEET:\n dm = feet2m(d)\n elif d_unit == UOM_SM:\n dm = SM2m(d)\n elif d_unit == UOM_NM:\n dm = NM2m(d)\n return dm", "def test_distance(self):\n for emb_vals, point, dist_gt in self.DISTANCE_EXAMPLES:\n print(emb_vals, point, dist_gt)\n emb = to_emb(emb_vals)\n dist = emb.distance(point)\n assert np.allclose(dist, dist_gt), \\\n (\"Wrong distance for point {}: expected {} but was {};\"\n \"\\nembedding:\\n{}\").format(point, dist_gt, dist, str(emb))", "def test_convert_incompatible_units(self):\n self.assertRaises(ValueError, convert_units, self.arr, 'm')", "def test_not_int(self):\n invalid_args = [\"random string\", \"123\", 123.5]\n for arg in invalid_args:\n assert meters_to_km(arg) is arg", "def test_distance(self):\n self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))\n self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)", "def distance_to_radians(distance, units=\"kilometers\"):\n # type: (Number, str) -> Number\n factor = factors[units]\n\n if factor is None:\n raise ValueError(\"Invalid unit\")\n\n return distance / factor", "def calc_error_dist(self):\n pass", "def _check_dimensions(self, a, b):\n units_a = self._get_units(a)\n units_b = self._get_units(b)\n dim_a = units_a.dimensions\n dim_b = units_b.dimensions\n if dim_a != dim_b:\n raise UnitConversionError(units_a, dim_a, units_b, dim_b)", "def test_distance_aebcd(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AEBCD'), '22')", "def test_dewpoint_weird_units():\n assert_almost_equal(dewpoint(15825.6 * units('g * mbar / kg')),\n 13.8564 * units.degC, 4)", "def radians_to_distance(radians, units=\"kilometers\"):\n # type: (Number, str) -> Number\n\n factor = factors[units]\n\n if factor is None:\n raise ValueError('Invalid unit')\n\n return radians * factor", "def is_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n\n # TODO\n raise NotImplementedError", "def test_distance_source(self):\n s1 = Source([[10, 10], [10, 20]], values=[1.0, 2.0])\n s2 = Source([[20, 20], [20, 30]], values=[1.0, 2.0])\n assert(s1.distance(s2) == sqrt(200))", "def distance(self):\n return Distance(length_of(self.position.au))", "def distance(x1, y1, x2, y2):\n for num in (x1, x2, y1, y2):\n if type(num) is str:\n raise ValueError\n\n return math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))", "def distance(point_1, point_2, units=1):\n\n distance = (((point_2[0]-point_1[0])*units)**2.0\n + ((point_2[1]-point_1[1])*units)**2.0\n + ((point_2[2]-point_1[2])*units)**2.0)**0.5\n \n return distance", "def 
test_length_to_imperial(self):\n self.assertEqual(\n 100,\n IMPERIAL_SYSTEM.length(100,\n IMPERIAL_SYSTEM.length_unit)\n )\n self.assertEqual(\n 3.106855,\n IMPERIAL_SYSTEM.length(5, METRIC_SYSTEM.length_unit)\n )", "def dist(unit, tile):\n return abs(tile['x'] - unit['x']) + abs(tile['y'] - unit['y'])", "def test_length_to_metric(self):\n self.assertEqual(\n 100,\n METRIC_SYSTEM.length(100, METRIC_SYSTEM.length_unit)\n )\n self.assertEqual(\n 8.04672,\n METRIC_SYSTEM.length(5, IMPERIAL_SYSTEM.length_unit)\n )", "def test_not_units(self):\n with self.assertRaises(AssertionError):\n _unit_map(\"WiB\")", "def test_distance_aba(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('ABC'), '9')", "def test_distance():\n t0 = time.time()\n c1 = coord.CelestialCoord(0.234 * coord.radians, 0.342 * coord.radians)\n c2 = coord.CelestialCoord(0.234 * coord.radians, -1.093 * coord.radians)\n c3 = coord.CelestialCoord((pi + 0.234) * coord.radians, -0.342 * coord.radians)\n c4 = coord.CelestialCoord((pi + 0.234) * coord.radians, 0.832 * coord.radians)\n c5 = coord.CelestialCoord(1.832 * coord.radians, -0.723 * coord.radians)\n c6 = coord.CelestialCoord((0.234 + 2.3e-9) * coord.radians, (0.342 + 1.2e-9) * coord.radians)\n t1 = time.time()\n\n a1 = astropy.coordinates.SkyCoord(0.234 * units.radian, 0.342 * units.radian)\n a2 = astropy.coordinates.SkyCoord(0.234 * units.radian, -1.093 * units.radian)\n a3 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, -0.342 * units.radian)\n a4 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, 0.832 * units.radian)\n a5 = astropy.coordinates.SkyCoord(1.832 * units.radian, -0.723 * units.radian)\n a6 = astropy.coordinates.SkyCoord(0.234 + 2.3e-9, 0.342 + 1.2e-9, unit=units.radian)\n t2 = time.time()\n\n coord_dist = [c1.distanceTo(c).rad for c in [c2,c3,c4,c5,c6]]\n t3 = time.time()\n astropy_dist = [a1.separation(a).rad for a in [a2,a3,a4,a5,a6]]\n t4 = time.time()\n\n np.testing.assert_almost_equal(coord_dist, astropy_dist, decimal=12)\n # For the last one, the distance is rather small in radians, so test in arcsec\n np.testing.assert_almost_equal(coord_dist[-1] * (coord.radians/coord.arcsec),\n astropy_dist[-1] * (coord.radians/coord.arcsec), decimal=10)\n\n print('Compare times for distance calculations:')\n print(' Make CelestialCoords: t = ',t1-t0)\n print(' Make SkyCoords: t = ',t2-t1)\n print(' Calculate distances with Coord: t = ',t3-t2)\n print(' Calculate distances with Astropy: t = ',t4-t3)", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def find_distance_in_same_type(self):\n pass", "def getDistMetric(self):\r\n\r\n def noDistMetric():\r\n \"\"\"\r\n Raises an error if the Feature type is not Continuous or Categorical\r\n \"\"\"\r\n raise NotImplementedError(\"Distance metric is not supported on feature type\")\r\n return noDistMetric", "def _determine_unit(self, unit, units, abbrev_units):\n determined_unit = unit \n if unit in abbrev_units:\n determined_unit = abbrev_units[unit].lower()\n \n if determined_unit not in units:\n raise TableException('%s is an invalid unit' % unit)\n return determined_unit", "def get_distance(latitude, longitude, del_latitude, del_longitude):\n coord = (latitude, longitude)\n del_coord = (del_latitude, 
del_longitude)\n return distance.geodesic(coord, del_coord).km", "def ensure_unit(arg, unit):\n if not isinstance(arg, u.Quantity):\n arg = arg * unit\n return arg.to(unit)", "def test_detector_distance(i07_nexus: I07Nexus, detector_distance):\n assert i07_nexus.detector_distance == detector_distance", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def get_distance_meters(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def noDistMetric():\r\n raise NotImplementedError(\"Distance metric is not supported on feature type\")\r\n return noDistMetric", "def distance(self, source, target):\r\n raise NotImplementedError('Distance calculation not implemented yet')", "def get_norm_distance(length: int, distance: float) -> float:\n return distance/(length*2)", "def distance_YMW16(self, source, DM):\n\n if not isinstance(DM, astropy.units.quantity.Quantity):\n # assume DM unit\n DM=DM*u.pc/u.cm**3\n if (len(DM.shape)>0 and DM.value.any() <= 0) or (len(DM.shape)==0 and DM.value < 0):\n raise ValueError('DM must be > 0')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n if len(source.l.shape)==0:\n \n results=ymw16.dmdtau_c(source.l.value,\n source.b.value, \n DM.to(u.pc/u.cm**3).value,\n 1,\n self.datadir)\n distance=results*u.pc\n return distance,None\n else:\n distance=np.zeros_like(source.l.value)\n it = np.nditer(source.l, flags=['multi_index'])\n dm=DM.to(u.pc/u.cm**3).value\n if not (len(dm.shape)==0 or dm.shape==source.l.shape):\n raise IndexError('Shape of DM must be scalar or the same as shape of coordinates')\n while not it.finished:\n if len(dm.shape)==0:\n dm_touse=dm\n else:\n dm_touse=dm[it.multi_index]\n results=ymw16.dmdtau_c(source[it.multi_index].l.value,\n source[it.multi_index].b.value, \n dm_touse,\n 1,\n self.datadir)\n distance[it.multi_index]=results\n it.iternext()\n return distance*u.pc,None", "def get_distance_meters(location1, location2):\n dlat = location2.lat - location1.lat\n dlong = location2.lon - location1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def set_dist(self, dist):\n dist = u.Quantity(dist, unit=u.AU)\n if dist.value < 0:\n warnings.warn(\"distance cannot be negative. Using absolute value.\")\n self.dist = np.absolute(dist.value)", "def distmeter(self):\n return self._distance.to(\"m\").value", "def test_dist_itslef(self):\n X = [[0, 10], [4, 2]] # Just some points. 
I've no idea where on globe.\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))\n\n X = [[34.0522, 118.2437], # Lon Angeles\n [37.7749, 122.4194]] # San Francisco\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))", "def validate_distance_input(X):\n\n \"\"\"The distance array should be square and rank 2\"\"\"\n msg='Distance Array should be 2-dimensional, got {} dimensions'\n assert(np.ndim(X) == 2), msg.format(np.ndim(X))\n msg='Distance Array should be square, got {} shape'\n assert(X.shape[0] == X.shape[1]), msg.format(X.shape)\n\n \"\"\"The array should be uppper triangular with zeros in the bottom\n diagonal AND along the diagonal\"\"\"\n lower_indicies = np.tril_indices(X.shape[0], 0)\n if not np.all(X[lower_indicies] == 0):\n msg=('All Lower Triangular elements of the distance array should be' +\n 'Zero. got {} Non-Zero Indicies')\n non_zero = np.where(X[lower_indicies] != 0)\n raise ValueError(msg.format(X[non_zero]))\n\n return None", "def distance_tolerance(distance: float) -> float:\n ret = 10.0\n if distance < 0:\n ret += distance * (100 - ret) / -2500.0\n return ret", "def get_distance(point1: tuple, point2: tuple, location_type: str = \"latlon\", distance_formula: str = \"great_circle\", unit:str = \"meters\") -> float:\n if distance_formula == \"great_circle\":\n return great_circle(point1, point2).meters\n elif distance_formula == \"vincenty\":\n return vincenty(point1, point2).meters\n else:\n raise Exception('undefined distance_formula')", "def distance_in_meters(coord1, coord2):\n return vincenty(coord1, coord2).meters", "def calc_distance(user_loc, space):\n geocode_result = gmaps.geocode(space['_location'])\n dest_loc = geocode_result[0]['geometry']['location']\n direction = gmaps.distance_matrix(user_loc, dest_loc, mode=\"walking\")\n distance = direction['rows'][0]['elements'][0]['distance']['value']\n # convert to mile\n distance = distance * 0.000621371\n return distance", "def distance_to_degrees(distance, units=\"kilometers\"):\n # type: (Number, str) -> Number\n factor = factors[units]\n\n if factor is None:\n raise ValueError(\"Invalid unit\")\n\n return (distance / factor) * 57.2958", "def useUnits():", "def test_typeerror_with_annotations(self):\n\n dispatcher = {\n \"distance\": distance,\n }\n\n req = '{\"jsonrpc\": \"2.0\", \"method\": \"distance\", \"params\": [], \"id\": 3}' # noqa\n result = JSONRPCResponseManager.handle(req, dispatcher)\n\n # Make sure this returns JSONRPCInvalidParams rather than raising\n # UnboundLocalError\n self.assertEqual(result.error['code'], -32602)", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat * dlat) + (dlong * dlong)) * 1.113195e5", "def test_mantel_test_invalid_distance_matrix(self):\r\n # Single asymmetric, non-hollow distance matrix.\r\n self.assertRaises(ValueError, mantel_test, array([[1, 2], [3, 4]]),\r\n array([[0, 0], [0, 0]]), 999)\r\n\r\n # Two asymmetric distance matrices.\r\n self.assertRaises(ValueError, mantel_test, array([[0, 2], [3, 0]]),\r\n array([[0, 1], [0, 0]]), 999)", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + 
(dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def _drop_units(q):\n try:\n return q.magnitude\n except:\n return q", "def test_DistanceMatrices_setter_too_small(self):\r\n self.assertRaises(ValueError, setattr, self.size_dms,\r\n 'DistanceMatrices', [self.single_ele_dm, self.single_ele_dm])", "def test_inches_invalid_input(self):\n result = inch_to_cm(\"--\")\n self.assertIsNone(result)", "def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)", "def test_distance_between_points_near_0_longitude(self) -> None:\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=-0.1, elevation_2=0, haversine=True)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=-0.1, elevation_2=0, haversine=False)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=360-0.1, elevation_2=0, haversine=True)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=360-0.1, elevation_2=0, haversine=False)\n print(distance)\n self.assertTrue(distance < 230000)", "def test_findDirection_bad(self):\n startCoordinate = coordinate.Coordinate(4, 4)\n self.assertRaises(ValueError,\n rules.findDirection,\n startCoordinate,\n startCoordinate)", "def test_invalid_units(self):\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, INVALID_UNIT, LENGTH_METERS, VOLUME_LITERS,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, INVALID_UNIT, VOLUME_LITERS,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, LENGTH_METERS, INVALID_UNIT,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, LENGTH_METERS, VOLUME_LITERS,\n INVALID_UNIT)", "def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm", "def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm", "def distcon():\n\n amount = int(input(\"How far?: \"))\n input_from = input(\"what unit of measure? \")\n input_to = input(\"what unit to convert to? 
\")\n meter_ft = float(amount) * 3.28084\n meter_km = amount * 1000\n meter_mi = amount * .000621371\n\n if input_from == \"mi\":\n return amount == amount * meter_mi\n elif input_from == \"m\":\n return amount\n elif input_from == \"km\":\n return amount == amount * meter_km\n elif input_from == \"ft\":\n return amount == amount * meter_ft\n # TODO give up all hope\n\n if input_to == \"mi\":\n return output == meter_mi / amount\n elif input_to == \"m\":\n return output == amount\n elif input_to == \"km\":\n return output == meter_km / amount\n elif input_to == \"ft\":\n return output == meter_ft / amount\n\n print(\"{} {} is {} in {}.\".format(amount, input_from, output, input_to))", "def get_distance_metres(aLocation1, aLocation2):\n \n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def test_distance_function(self):\n if connection.ops.oracle:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n elif connection.ops.spatialite:\n if connection.ops.spatial_version < (5,):\n # SpatiaLite < 5 returns non-zero distance for polygons and points\n # covered by that polygon.\n ref_dists = [326.61, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4891.20, 8071.64, 9123.95]\n htown = City.objects.get(name=\"Houston\")\n qs = Zipcode.objects.annotate(\n distance=Distance(\"poly\", htown.point),\n distance2=Distance(htown.point, \"poly\"),\n )\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance.m, ref, 2)\n\n if connection.ops.postgis:\n # PostGIS casts geography to geometry when distance2 is calculated.\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance2.m, ref, 2)\n\n if not connection.ops.spatialite:\n # Distance function combined with a lookup.\n hzip = Zipcode.objects.get(code=\"77002\")\n self.assertEqual(qs.get(distance__lte=0), hzip)", "def distance(self) -> int:\n return 0", "def distance(self, location):\n return numpy.linalg.norm(self.vector_to(location))", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def hasUnit(val):\n return hasattr(val, 'unit') or hasattr(val, 'units')", "def getDiameter(self):\n\n hdr = self.header\n if \"cd1_1\" in hdr:\n self.D = abs(hdr[\"cd1_1\"]) * hdr[\"naxis1\"]\n elif \"cdelt1\" in hdr:\n self.D = abs(hdr[\"cdelt1\"]) * hdr[\"naxis1\"]\n else:\n print(\"Warning: no coordinate information found in input header;\")\n print(\" pupil width assumed to be 6.5 meters\")\n self.D = 6.5", "def test_meters_validate_list(self):\n meter = inches_to.meters([1.0, 2.0, 3.0, 4.0])\n comparison = np.array([0.0254, 2*0.0254, 3*0.0254, 4*0.0254])\n\n try:\n for i in range(len(comparison)):\n self.assertTrue(math.isclose(meter[i], comparison[i], rel_tol=self.accepted_error))\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.passed))\n except AssertionError:\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.failed))", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def test_distances(self):\n for p1, p2, distance in DISTANCES:\n calculated = p1.approximate_distance_meters(p2)\n self.assertAlmostEqual(distance, calculated, delta=5)" ]
[ "0.68959516", "0.6531648", "0.61516035", "0.6054251", "0.6039936", "0.6039793", "0.60213655", "0.6009243", "0.59936506", "0.5992886", "0.5975478", "0.5958747", "0.5953431", "0.5900235", "0.589641", "0.5890246", "0.5827913", "0.58258593", "0.5817387", "0.5809252", "0.58037347", "0.57984966", "0.5754445", "0.5746012", "0.5742133", "0.5712109", "0.57061327", "0.56925386", "0.5655968", "0.56408906", "0.56406355", "0.5611592", "0.56068635", "0.55859506", "0.55827445", "0.55393916", "0.5539073", "0.5537572", "0.5518951", "0.55178684", "0.55107784", "0.54966754", "0.5487682", "0.5484482", "0.54606", "0.5449274", "0.54461175", "0.5439708", "0.5426469", "0.5394359", "0.5389154", "0.5385805", "0.5377512", "0.5362425", "0.5355676", "0.53545463", "0.53463733", "0.5344294", "0.53429806", "0.53356254", "0.5334129", "0.53283143", "0.5326396", "0.5325201", "0.53216046", "0.53200513", "0.53158796", "0.5309744", "0.53040105", "0.5295492", "0.52894413", "0.52880776", "0.5284959", "0.52703226", "0.52634996", "0.52608263", "0.52608263", "0.52608263", "0.52608263", "0.52608263", "0.52581793", "0.525772", "0.52546036", "0.52492434", "0.5238589", "0.52295965", "0.5216917", "0.52152836", "0.52152836", "0.5214488", "0.521053", "0.5210311", "0.52076733", "0.5207069", "0.5206256", "0.5205519", "0.5201156", "0.51874334", "0.51843345", "0.51811826" ]
0.7658923
0
The redshift for this distance assuming its physical distance is a luminosity distance.
def compute_z(self, cosmology=None):
    from ..cosmology import luminosity_distance
    from scipy import optimize

    # FIXME: array: need to make this calculation more vector-friendly
    f = lambda z, d, cos: (luminosity_distance(z, cos).value - d) ** 2
    return optimize.brent(f, (self.Mpc, cosmology))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def luminosity_distance(self, z):\n return self.proper_distance(z) * (1 + z)", "def luminosity_distance(self, z):\n da = self.angular_diameter_distance(z)\n dl = da*(1.+z)**2.\n return(dl)", "def get_luminosity(self):\n\n if self.no_dist is False and self.no_flux is False:\n\n dist = self.distance\n snu = self.snu_at_1GHz\n lum = lumin(dist, snu)\n\n self.lum = lum\n else:\n self.lum = -1 # use -1 to indicate unknown luminosity\n return self.lum", "def luminosity_function(abs_mag, redshift):\n\n # L/L_*(z) = 10**(0.4 * (M_*(z) - M))\n L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))\n\n # Phi*(z) = 10**(log(Phi*(z))\n phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3\n\n # QLF slopes\n alpha1 = -3.35 # alpha in Table 2\n alpha2 = -0.37 # beta in Table 2\n\n Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1\n\n return Phi", "def lidar_relative(self):\n return self.distance", "def luminance(self):\n \n return (self.r + self.g + self.b) // 3", "def doppler_redshift():\n rv_unit = si.km / si.s\n C_KMS = _si.c.to_value(rv_unit)\n\n def convert_z_to_rv(z):\n zponesq = (1 + z) ** 2\n return C_KMS * (zponesq - 1) / (zponesq + 1)\n\n def convert_rv_to_z(rv):\n beta = rv / C_KMS\n return np.sqrt((1 + beta) / (1 - beta)) - 1\n\n return Equivalency(\n [(dimensionless_unscaled, rv_unit, convert_z_to_rv, convert_rv_to_z)],\n \"doppler_redshift\",\n )", "def Luminosity_Distance(self, z):\n dl = (1 + z) * self.Comoving_Distance(z)\n return dl", "def get_luminosity(self):\n\n h, l, s = colorsys.rgb_to_hls(self.r, self.g, self.b)\n return l", "def norm(self):\n return self._color_mapper", "def Luminosity_Distance_dimless(self, z):\n dl = (1 + z) * self.Comoving_Distance(z) / (2. * self.Cd1)\n return dl", "def LuminosityDistance_to_z(self, ld):\n return self.Ld2z(ld)", "def diffuse_light(self):\n return self._diffuse_light", "def compute_effective_redshift(cat):\n # the total weight\n total_weight = cat['Weight']*cat['FKPWeight']\n\n # effective redshift\n zeff = (total_weight*cat['Z']).sum() / total_weight.sum()\n\n return cat.compute(zeff)", "def get_palace_diagonal_red(self):\n\n return self._palace_diagonal_red", "def redshift(self, a = 1.):\n return 1./a-1.", "def redshift(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesRedshiftArgs']]:\n return pulumi.get(self, \"redshift\")", "def redshift_type(line, RA, DEC, un):\n try:\n result_table = Ned.get_table(line[\"Object Name\"], table=\"redshifts\")\n except:\n return None\n if any(result_table[\"Published Redshift Uncertainty\"] < un):\n return line[RA, DEC, \"Redshift\"]\n else:\n return None", "def distmod(self):\n if self._distmod is not None:\n return self._distmod\n dm = ztodm(self.redshift, self.cosmo)\n self._distmod = np.asarray(dm, dtype=FTYPE)\n return self._distmod", "def lidar_absolute(self):\n return np.roll(self.distance, self.heading_idx)", "def get_drainage_data(self):\n im = self.result\n sizes = sp.unique(im)\n R = []\n Snwp = []\n Vp = sp.sum(im > 0)\n for r in sizes[1:]:\n R.append(r)\n Snwp.append(sp.sum(im >= r))\n Snwp = [s/Vp for s in Snwp]\n data = namedtuple('xy_data', ('radius', 'saturation'))\n return data(R, Snwp)", "def r_d(self, tl):\n\t return self.RD0*exp(self.HKR/(R*self.TO)*(1. 
- self.TO/tl))", "def whatsgreen2(image):\n green = image.hueDistance(color= Color('green'), minvalue=40).binarize()\n return green", "def average_luminosity(self, delta=1e-10):\n cumsum = 0.0\n for pix in self.pixels:\n cumsum += math.log10(delta + pix.luminosity())\n\n return math.pow(10, cumsum / len(self.pixels))", "def scale_factor(redshift):\n\n a = (1 + redshift)**-1.0\n return a", "def r(self):\n return self.tlH / (self.spanW / 2)", "def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1", "def get_lthr_raw_data(self):\n temp = self._data.copy()\n temp[temp < self._view_min] = 0\n return np.rot90(temp, 3)", "def lla(self):\n return self._lla_shape", "def rond(self):\n return self._rond.get_waarde()", "def get_on_resistance(self):\n is_nchannel = True\n stack = 4\n is_cell = False\n return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)", "def get_R(self):\n return self.R_min * tf.exp(self.R_ * self.log_R_range)", "def scaling_factor_between_redshifts_from(\r\n self, redshift_0: float, redshift_1: float, redshift_final: float\r\n ) -> float:\r\n angular_diameter_distance_between_redshifts_0_and_1 = (\r\n self.angular_diameter_distance_z1z2(z1=redshift_0, z2=redshift_1)\r\n .to(\"kpc\")\r\n .value\r\n )\r\n\r\n angular_diameter_distance_to_redshift_final = (\r\n self.angular_diameter_distance(z=redshift_final).to(\"kpc\").value\r\n )\r\n\r\n angular_diameter_distance_of_redshift_1_to_earth = (\r\n self.angular_diameter_distance(z=redshift_1).to(\"kpc\").value\r\n )\r\n\r\n angular_diameter_distance_between_redshift_1_and_final = (\r\n self.angular_diameter_distance_z1z2(z1=redshift_0, z2=redshift_final)\r\n .to(\"kpc\")\r\n .value\r\n )\r\n\r\n return (\r\n angular_diameter_distance_between_redshifts_0_and_1\r\n * angular_diameter_distance_to_redshift_final\r\n ) / (\r\n angular_diameter_distance_of_redshift_1_to_earth\r\n * angular_diameter_distance_between_redshift_1_and_final\r\n )", "def lsd(self):\n return self._lsd", "def color_temp(self):\n return kelvin_to_mired(self._color_temp)", "def get_wavelet_radiant(row, wavelet_df):\n\twavelet_row = wavelet_df[wavelet_df.solar == row['solar']//1]\n\tif len(wavelet_row.index):\n\t\trow['radiant_ll0'] = wavelet_row.ll0.values[0]\n\t\trow['radiant_beta'] = wavelet_row.beta.values[0]\n\t\treturn row\n\telse:\n\t\tpeak_row = wavelet_df.iloc[wavelet_df.xsig.idxmax()]\n\t\trow['radiant_ll0'] = peak_row.ll0\n\t\trow['radiant_beta'] = peak_row.beta\n\t\treturn row", "def _ul_lr(self):\n ulx, xres, xskew, uly, yskew, yres = self.geotransform\n # Index from the end - GDal usually orders bands-first:\n lrx = ulx + (self.array.shape[-2] * xres)\n lry = uly + (self.array.shape[-1] * yres)\n return ulx, uly, lrx, lry", "def l1_rel_metric(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, roi=None, max_distance=None):\n depth_prediction, depth_gt = preprocess_roi(depth_prediction, depth_gt, roi)\n depth_prediction, depth_gt = get_positive_depth(depth_prediction, depth_gt)\n depth_prediction, depth_gt = get_absolute_depth(depth_prediction, depth_gt, max_distance)\n\n return torch.mean(torch.abs(depth_prediction - depth_gt) / depth_gt)", "def vel2redshift(cat, col):\n try:\n cat[col].unit.to(u.m / u.s)\n cat.replace_column(col, cat[col] / const.c.to(cat[col].unit))\n cat[col].info.description = (\n cat[col].info.description + \", converted to redshift\"\n )\n cat[col].unit = u.dimensionless_unscaled\n except:\n pass\n return cat", "def r_dc(self, phi, tl):\n\t return self.r_d(tl)*(1. 
- exp(-phi))", "def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetLuminance(self)", "def get_bandpass(self):\n return self.sum(axis=1)", "def rsdl(self):\n\n if self.opt['Monotone'] and self.k > 0:\n return np.linalg.norm((self.X - self.Y).ravel())\n return np.linalg.norm((self.X - self.Yprv).ravel())", "def GetRed(self):\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetRed(self)", "def randomize_redshift(self):\n new_table = self.copy()\n random.shuffle(new_table['cdf_z'])\n\n return new_table", "def crd(self):\r\n return self.__trajectory[0]", "def get_scaled_distance(self, r):\n return (1. - torch.exp(-self.kappa * r))/self.kappa", "def to_tlwh(self):\n ret = self.mean[:4].copy()\n ret[2] -= ret[0]\n ret[3] -= ret[1]\n return ret", "def r_dc(self, phi, tl):\n\t # return self.r_d(tl)*(1. - exp(-phi))\n\t return 0.", "def get_luminosity(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n return all_data[name][3]\n except KeyError:\n raise KeyError(\"No sensor with that name\")", "def _value_as_luminance(self):\n return round(float(self._value), 1)", "def calculate_redshift_related_params(max_redshift=10.0, max_redshift_detection=1.0, redshift_step=0.001, z_first_SF = 10.0):\n # create a list of redshifts and record lengths\n redshifts = np.arange(0, max_redshift + redshift_step, redshift_step)\n n_redshifts_detection = int(max_redshift_detection / redshift_step)\n\n # convert redshifts to times and ensure all times are in Myr\n times = cosmology.age(redshifts).to(u.Myr).value\n\n # and time of first Sf\n time_first_SF = cosmology.age(z_first_SF).to(u.Myr).value\n\n # convert redshifts to distances and ensure all distances are in Mpc (also avoid D=0 because division by 0)\n distances = cosmology.luminosity_distance(redshifts).to(u.Mpc).value\n distances[0] = 0.001\n\n # convert redshifts to volumnes and ensure all volumes are in Gpc^3\n volumes = cosmology.comoving_volume(redshifts).to(u.Gpc**3).value\n\n # split volumes into shells and duplicate last shell to keep same length\n shell_volumes = np.diff(volumes)\n shell_volumes = np.append(shell_volumes, shell_volumes[-1])\n\n return redshifts, n_redshifts_detection, times, time_first_SF, distances, shell_volumes", "def compute_wall_distance(self):\n phi = sp.ones(self.image_red.shape)\n if (len(self.mask_id[0])>0):\n phi[self.mask_id] = 0\n self.wall_distance = skfmm.distance(phi, dx=self.pixel_size)\n grad = sp.gradient(self.wall_distance,edge_order=2)\n grad_X = grad[1]/self.pixel_size\n grad_Y = grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.wall_grad_X = grad_X/norm\n self.wall_grad_Y = grad_Y/norm\n else:\n self.wall_distance = 1.0e99*sp.ones(self.image_red.shape)", "def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetLuminance(self)", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)", "def getLightSensor() -> int:\n pass", "def lightness(self):\n min_component = min(self.red, self.green, self.blue)\n max_component = max(self.red, self.green, self.blue)\n avg = (max_component + min_component) / 2\n light = avg / 255\n return light", "def luminance(self, color):\n return 0.2426 * color[2] + 0.7152 * color[1] + 0.0722 * color[0]", "def __repr__(self):\n return (\n f'GalaxyCluster {self.unique_id}: '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self.galcat)} source galaxies'\n )", "def GetRed(self):\n 
return _itkRGBAPixelPython.itkRGBAPixelUC_GetRed(self)", "def specular_light(self):\n return self._specular_light", "def asRM(self):\n return self.asDCM().T", "def distance_modulus(self, z):\n return(5.0*np.log10(self.luminosity_distance(z))+25.0)", "def determine_direction(self):\n # TODO: Implement when the format from the sensob is ready\n content = self.sensobs[0].content\n size = len(content)\n redCount = [0,0]\n for i in range(size):\n if i<=size/2:\n if content[i] == \"Red\":\n redCount[0]+=1\n if i>size/2:\n if content[i] == \"Red\":\n redCount[1]+=1\n if redCount[0]> redCount[1]:\n return self.LEFT\n elif redCount[0] < redCount[1]:\n return self.RIGHT\n else:\n #Same amount of red on both sides\n self.match_degree = 0.1\n return self.LEFT", "def wind_shear(self):\n return self.flow_field.wind_shear", "def line_lum(line_flux, dist): \n line_lum = 4 * pi * (dist*u.pc)**2 * line_flux * u.erg / (u.s * (u.cm)**2)\n line_lum = line_lum.decompose().to(u.W)\n return line_lum/u.W", "def generate_lut(self):\n colormap = self.get_colormap()\n\n if self.test:\n self.print_colormap(self.name, colormap)\n\n if self.centered:\n return self.generate_spi3d_from_colormap(colormap, centered=True)\n else:\n return self.generate_spi3d_from_colormap(colormap, centered=False)", "def to_index(self, r):\n lower = self.trainset.rating_scale[0]\n return int(r) - lower", "def lphot(self):\n return self._get_mean_and_samples_attribute('lphot')", "def llh(self):\n return Station._ellipsoid.geodetic(self.xyz())", "def red(self) -> float:\n return self._red", "def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)", "def diffuse(self) -> float:\n return self.GetDiffuse()", "def tlwh(self):\n if self.mean is None: # no kalman filter\n return self._tlwh.copy()\n else: # with a kalman filter\n ret = self.mean[:4].copy() # cx, cy, w/h, h\n ret[2] *= ret[3] # cx, cy, w, h\n ret[:2] -= ret[2:] / 2 # x1, y1, w, h\n return ret", "def r(self) -> float:\n return self._ohms.real", "def clamped_rgb_r(self):\r\n\r\n return self._clamp_rgb_coordinate(self.rgb_r)", "def get_hue(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n r = all_data[name][0]\n g = all_data[name][1]\n b = all_data[name][2]\n denom = max(r,g,b) - min(r,g,b)\n if r > g and r > b:\n return (g - b)/denom\n elif g > r and g > b:\n return 2.0 + (b - r)/denom\n else:\n return 4.0 + (r - g)/denom\n except KeyError:\n raise KeyError(\"No Sensor with that name\")", "def distance_image(self):\n return exclusion_distance(self.mask)", "def get_der_scaled_distance(self, r, dr):\n return dr * torch.exp(-self.kappa * r.unsqueeze(1))", "def dloglam(self):\n # This number was determined using the resolution and sampling quoted on the FIRE website\n R = 6000.0 * 2.7\n dloglam = 1.0 / R / np.log(10.0)\n return dloglam", "def single_slit_diffraction_intensity(slit_width, wavelength, screen_distance, X):\n return ((np.sin((np.pi * slit_width * X) / (wavelength * screen_distance))) / (\n (np.pi * slit_width * X) / (wavelength * screen_distance))) ** 2", "def power(self):\n return irradiance_on_plane(self.vnorm, self.h,\n self.date, self.lat) * self.s * self.eff", "def to_tlwh(self):\n ret = self.mean[:4].copy()\n ret[2] *= ret[3]\n ret[:2] -= ret[2:] / 2\n return ret", "def potential_color(self):\n\n return (1., 1., 0.)", "def lam(self):\n return self.get_ratings().sum(axis=2)", "def colorDistance(self, color = (0, 0, 0)):\n return 
spsd.cdist(self.meanColor(), [color])[:,0]", "def rrint(self):\n if len(self.data.peaks):\n return (np.diff(self.data._masked) / self.data.fs).compressed()", "def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red", "def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance", "def calcDist(self):\n rhoOp = self.rhoOp\n s = np.array([[1,0,0],[0,-1,0],[0,0,1]])\n sAdj = s.conj().T \n symRhoOp = np.dot(s,np.dot(rhoOp,sAdj))\n self.dist = Node.S1(rhoOp, symRhoOp)", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def luminosity(r,T,autoDebug=True):\n\t#-----------BEGIN ERROR CHECKING----------\n\tif autoDebug:\n\t\tsam.type_check(r, sam.TYPES_math, \"r\")\n\t\tsam.type_check(T, sam.TYPES_math, \"T\")\n\t\tsam.value_check(r,.0,\">\",\"r\")\n\t\tsam.value_check(T,.0,\">\",\"T\")\n\t#-----------END ERROR CHECKING----------\n\n\tL = 4 * sam.CONSTANT_pi * r**2 * sam.CONSTANT_SB* T**4\n\treturn L", "def mrdc(self) -> str:\n return self._device_info[\"MRDC\"]", "def diffuse_coefficient(self):\n return self._diffuse_coefficient", "def add_lego_colors(df, color_df):\n # Can't use uint8 for variance, numbers become too large. \n df[['R', 'G', 'B']] = df[['R', 'G', 'B']].astype('int')\n\n # Determine which lego color is closest (Euclidean distance) to the image color.\n cmask = ((color_df.c_Palette2016==True) & (color_df.c_Transparent==False) \n & (color_df.c_Glow==False) & (color_df.c_Metallic==False))\n for index, row in color_df[cmask].iterrows():\n if index == color_df.index[0]:\n df['cvar_min'] = (df.R-row.R)**2 + (df.G-row.G)**2 + (df.B-row.B)**2\n df['R_lego'] = row.R\n df['G_lego'] = row.G\n df['B_lego'] = row.B\n df['color'] = row.Color\n else:\n df['cvar'] = (df.R-row.R)**2 + (df.G-row.G)**2 + (df.B-row.B)**2\n mask = df.cvar < df.cvar_min\n df.loc[mask, 'cvar_min'] = df.loc[mask, 'cvar']\n df.loc[mask, 'R_lego'] = row.R\n df.loc[mask, 'G_lego'] = row.G\n df.loc[mask, 'B_lego'] = row.B\n df.loc[mask, 'color'] = row.Color\n\n # Drop helper columns we no longer need\n df.drop(columns=['cvar', 'cvar_min'], inplace=True)\n return df", "def rms_db(self):\n mean_square = np.mean(self._samples ** 2, axis=0)\n return 10 * np.log10(mean_square)", "def compute_luminosity(red, green, blue):\r\n return (0.299 * red) + (0.587 * green) + (0.114 * blue)", "def get_saturation(self):\n cycles = 256 - self.read_byte_data(APDS_9960.ALS_ATIME_REG_ADDRESS)\n return min(65535, cycles * 1025)", "def all_distances(self):\n points = self.color_lookup_table_points\n\n red = np.repeat(np.expand_dims(points[0], axis=0), points[0].size, axis=0)\n green = np.repeat(np.expand_dims(points[1], axis=0), points[1].size, axis=0)\n blue = np.repeat(np.expand_dims(points[2], axis=0), points[2].size, axis=0)\n\n self.distances = np.sqrt(\n np.square(red - red.transpose())\n + np.square(green - green.transpose())\n + np.square(blue - blue.transpose()))", "def reduce_recurrents(h_prev):\n return a_reduce_recurrents(conv2d(h_prev, self.W_red_rec))", "def _get_hdr_dist_for_crispor(row):\n if 'hdr_dist' not in row.dtype.names:\n return None\n else:\n return int(row['hdr_dist']) * (\n 1 if row['target_loc'].strand == '+' else -1)" ]
[ "0.5890186", "0.58678305", "0.5807113", "0.5798565", "0.56901914", "0.5677453", "0.5615042", "0.5581174", "0.5569656", "0.5483461", "0.5445089", "0.52822375", "0.5251608", "0.5222585", "0.5203338", "0.5182899", "0.5171239", "0.5140096", "0.5124801", "0.51096433", "0.510625", "0.51029944", "0.5087992", "0.5087818", "0.5074232", "0.50504804", "0.50445956", "0.5027613", "0.50231045", "0.5017681", "0.50164145", "0.49950892", "0.49788114", "0.49098498", "0.4903835", "0.48942167", "0.48906112", "0.48592177", "0.48582166", "0.4851053", "0.4848842", "0.48455578", "0.48438588", "0.48406217", "0.48398632", "0.48245868", "0.48207164", "0.47972718", "0.47894686", "0.47832555", "0.4779168", "0.4771723", "0.47619987", "0.47551763", "0.47484148", "0.4741946", "0.47407982", "0.47325048", "0.47269326", "0.47212315", "0.47185174", "0.47151628", "0.47124612", "0.46979147", "0.4696633", "0.469654", "0.46949205", "0.469391", "0.46916693", "0.46873632", "0.46859753", "0.46813107", "0.4673621", "0.4673094", "0.46712503", "0.46692136", "0.4669125", "0.46651074", "0.46639922", "0.46632788", "0.46622783", "0.46620694", "0.46587884", "0.4658326", "0.46564546", "0.46558464", "0.4654359", "0.46533284", "0.46520647", "0.4650026", "0.46496993", "0.4645218", "0.46368203", "0.46350512", "0.46321923", "0.46317652", "0.462903", "0.4625041", "0.4622425", "0.46216193", "0.46202108" ]
0.0
-1
The distance modulus of this distance as a Quantity
def distmod(self):
    val = 5. * np.log10(self.to(u.pc).value) - 5.
    return u.Quantity(val, u.mag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distmod(self):\n val = 5.0 * np.log10(self.to_value(u.pc)) - 5.0\n return u.Quantity(val, u.mag, copy=False)", "def modulus(self):\n return math.sqrt(self._reNum ** 2 + self._imNum ** 2)", "def modulus(self):\n return self._n", "def distance_modulus(d):\n dmag = 5 * log(d) - 5\n return dmag", "def distance_modulus(self):\n return 5*np.log10(self.parallax.to(u.pc, u.parallax())/10*u.pc)", "def distance(self):\n return self.value * len(self.alignment.query)", "def get_cosmologicalDistanceModulus(self):\n return numpy.zeros(len(self.column_by_name('galid')))", "def mod(self):\n p = self.end - self.start\n return p.mod()", "def bulk_modulus():\n\n return 10000.0", "def chem_pot_shift(self) -> float:\n return (self.trans_free_energy + self.rot_free_energy +\n self.vib_free_energy + self.spin_free_energy) / self.n_atoms", "def distance_modulus(self, z):\n return(5.0*np.log10(self.luminosity_distance(z))+25.0)", "def __pow__(self, exponent):\n return Quantity(pow(self._value, exponent), pow(self.unit, exponent))", "def __pos__(self):\n return Quantity(+(self._value), self.unit)", "def multiplier(self) -> global___Expression:", "def __mod__(self, other):\n return MyCustomNumber(self.value % other.value)", "def __divmod__(self, other):\r\n other = self._coerce(other)\r\n if other is NotImplemented:\r\n return NotImplemented\r\n\r\n r = runtime.mod(self, other)\r\n q = (self - r) * runtime.reciprocal(other)\r\n return q * 2**self.frac_length, r", "def mod(self, *_) -> 'PFElement':\n return self.zero", "def __mod__(self, other):\n return (self - other) + (other - self)", "def distmod(self):\n if self._distmod is not None:\n return self._distmod\n dm = ztodm(self.redshift, self.cosmo)\n self._distmod = np.asarray(dm, dtype=FTYPE)\n return self._distmod", "def _get_mutation_amount(self):\n return self._get_sign() * self._get_number()", "def distmeter(self):\n return self._distance.to(\"m\").value", "def inner_rad(self) -> Quantity:\n return self._inner_rad", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def degree(self):\n return sum(self)", "def d_i(self,q):\n di = np.roll(q,-1,axis=-1) - q\n return di", "def d_i(self,q):\n di = q.shift(x=-1) - q\n return di", "def testCatalogDistanceModulus(self):\n dbObj = myTestGals(database=self.dbName)\n cosmoCat = cosmologicalGalaxyCatalog(dbObj)\n controlCat = absoluteGalaxyCatalog(dbObj)\n cosmoIter = cosmoCat.iter_catalog(chunk_size=self.dbSize)\n controlIter = controlCat.iter_catalog(chunk_size=self.dbSize)\n\n cosmology = CosmologyObject()\n\n for (cosmoRow, controlRow) in zip(cosmoIter, controlIter):\n modulus = cosmology.distanceModulus(controlRow[25])\n self.assertEqual(cosmoRow[0], controlRow[0])\n self.assertEqual(cosmoRow[25], controlRow[25])\n self.assertEqual(cosmoRow[26], modulus)\n for i in range(1,25):\n self.assertAlmostEqual(cosmoRow[i], controlRow[i] + modulus, 6)", "def initial_shear_modulus(self):\n return self.c1 * self.c2", "def numerator(self):\n return +self", "def mass(self):\n\t\treturn self.volume*self.density", "def distance(self) -> int:\n return 0", "def distmpc(self):\n return self._distance.to(\"Mpc\").value", "def math(self):\n return self.__math", "def energy_shift(self) -> float:\n return (self.chem_pot_shift +\n self.zero_point_vibrational_energy / self.n_atoms)", "def __truediv__(self, other):\n if is_unit(other):\n # print \"quantity / unit\"\n return self * pow(other, -1.0)\n # unit = self.unit / other\n # return Quantity(self._value, unit).reduce_unit(self.unit)\n elif is_quantity(other):\n # 
print \"quantity / quantity\"\n # Delegate quantity/quantity to (quantity/scalar)/unit\n return (self/other._value) / other.unit\n else:\n # print \"quantity / scalar\"\n return self * pow(other, -1.0)\n # return Quantity(self._value / other, self.unit)", "def __divmod__(self, other: 'SInt') -> 'SInt':\r\n if type(self) != type(other):\r\n raise TypeError(\"Wrong type or length for other\")\r\n\r\n size = max(self.nbBytes, other.nbBytes)\r\n Divid, Divis = abs(self).cast(size), abs(other).cast(size)\r\n Quotient = SInt(size)\r\n one = SInt(size)\r\n one.binaire = '0' * (size * 8 - 1) + '1'\r\n Quotient.binaire = '0' * 8 * size\r\n while Divis < Divid or Divis == Divid:\r\n Quotient += one\r\n Divid -= Divis\r\n # Here, the remain is the dividende\r\n Remainer = Divid\r\n if self.signe != other.signe: # Problems occur only with different signs\r\n if Remainer.valeur() == 0: # When abs(a) % abs(b) == 0, there is specific instructions\r\n Quotient = -Quotient\r\n else:\r\n Quotient = -(Quotient + one)\r\n # ---------------------------------------------------------------------------------\r\n Remainer = ((self.cast(2 * size) - (Quotient * other)) << size * 8).cast(size)\r\n # ---------------------------------------------------------------------------------\r\n if self.signe == other.signe == '1':\r\n Remainer = - Remainer\r\n return Quotient, Remainer", "def __mul__(self, other):\n if isinstance(other, EncryptedNumber):\n raise NotImplementedError('Good luck with that...')\n if other < 0:\n other = other + self.public_key.n\n product = self._raw_mul(other)\n\n return EncryptedNumber(self.public_key, product)", "def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab", "def initial_shear_modulus(self):\n return 2.0 * (self.c1 + self.c2)", "def __mod__(self, i):\n return asarray(mod(self, i))", "def distance(self):\n return self._distance", "def __abs__(self):\n return Quantity(abs(self._value), self.unit)", "def get_k(self, modulus):\n\n\t\treturn (np.pi*self.z_thick*np.power(self.thick, 3.0)*modulus) \\\n\t\t\t\t/(6.0*self.length)", "def attraction(self, other: Body) -> Vector:\n dist = self.position - other.position\n dist_modsq = dist.lensq\n dist_unit = dist / math.sqrt(dist_modsq) # Unit vector\n G = 6.674384e-11\n force_mod = G * self.mass * other.mass / dist_modsq\n return dist_unit * force_mod", "def __itruediv__(self, scalar):\n return self.div_(scalar)", "def _qod_func(self, q):\n if self.qodulus is None:\n return q\n else:\n return q % self.qodulus", "def __mod__(self, other: 'SInt') -> 'SInt':\r\n return self.__divmod__(other)[1]", "def sommerfeld_number(self):\n modified_s = self.modified_sommerfeld_number()\n return (modified_s / np.pi) * (self.radius_stator * 2 / self.length) ** 2", "def distance(self):\n return Distance(length_of(self.position.au))", "def __mul__(self, dist):\n return CombinedDistribution(self, dist, mul)", "def modulation(minima, contrast, distance):\n \n numerator = contrast - minima\n denominator = contrast + minima\n \n return numerator / denominator", "def multiplier(self) :\n\t\ttry :\n\t\t\treturn self._multiplier\n\t\texcept Exception as e:\n\t\t\traise e", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def length(self):\r\n\r\n return math.sqrt(self*self)", "def Dvec(self):\n return 
vec(-self.distance)", "def __mod__(self, other):\r\n T = type(other)\r\n # vec4%scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x%other, self.y%other, self.z%other, self.w%other)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for %\"", "def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])", "def __pow__(self, other):\n return MyCustomNumber(self.value ** other.value)", "def lidar_relative(self):\n return self.distance", "def mod(p):\n return (p[0]**2 + p[1]**2 + p[2]**2)**0.5", "def distance_factor(self):\n return self._distancefactor", "def modulus(vect):\n return np.sqrt(vect[0]**2 + vect[1]**2 + vect[2]**2)", "def __pow__(self, n): \n\n if n > 0:\n pow = self.clone()\n for i in range(1, n):\n pow *= self\n elif n == 0:\n return moeb_id\n else:\n pow = self.clone().inv()\n inv = self.inv().clone()\n for i in range(1, - n):\n pow *= inv\n\n return pow", "def __pow__(self, ???):", "def value(self):\n return self._adj_per_deg * self.temp * self.n_atoms", "def dist(self):\n return self._dist", "def sym_distance(cls, q0, q1):\n q = Quaternion.sym_log_map(q0, q1)\n return q.norm", "def __mod__(self, other):\r\n T = type(other)\r\n # mat4%scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x%other, self.mlist))\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for %\"", "def multiply(self):\n return self._do_calc(self.multiplier)", "def multiply(self):\n return self._do_calc(self.multiplier)", "def multiply(self):\n return self._do_calc(self.multiplier)", "def proper_annulus_centres(self) -> Quantity:\n return self._proper_ann_centres", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def n(self):\n return self._nx * self._ny", "def __div__(self, other):\n return self.__mul__(1 / other)", "def __div__(self, other):\n return self.__mul__(1 / other)", "def __mod__( self, value ):\r\n\t\tif ( type( value ) == type( self ) ):\r\n\t\t\treturnvalue = fraction( self )\r\n\t\t\tif ( returnvalue < 0 ):\r\n\t\t\t\twhile ( returnvalue < -value ): returnvalue += value\r\n\t\t\telse:\r\n\t\t\t\twhile ( returnvalue > value ): returnvlaue -= value\r\n\t\t\treturn returnvalue\r\n\t\telif ( type( value ) in ( types.IntType, types.LongType ) ):\r\n\t\t\treturn fraction( self.numerator % ( value * self.denominator ), self.denominator )\r\n\t\telif ( type ( value ) == types.FloatType ):\r\n\t\t\treturn float( self ) % value\r\n\t\telse: return NotImplemented", "def _getDistribution(self, dosage, slots = 4):\n dist = []\n base = dosage / slots\n remainder = dosage % slots\n\n for slot in range(slots):\n dist.append(base)\n if slot <= remainder:\n dist[slot] += 1\n\n return dist", "def getDistanceUnits(self) -> Unit:\n ...", "def __abs__(self):\r\n return math.sqrt(self*self)", "def product(self):\n raise NotImplementedError", "def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])", "def __mul__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Multiply, value)\n return out", "def __mul__(self,other):\n if(self.denominator*other.denominator<0):\n resultnumerator = -1*self.numerator*other.numerator\n resultdenominator = abs(self.denominator*other.denominator) \n else:\n resultnumerator = self.numerator*other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues", "def 
diffpow(self, x, rot=0):\r\n N = len(x)\r\n if rot:\r\n x = rotate(x)\r\n return sum(np.abs(x)**(2.+4.*np.arange(N)/(N-1.)))**0.5", "def __imod__(self, d_value: float):\n self.set_value(self.get_value() % d_value)\n return self", "def __mul__(self, other):\n\n return self._mul_div(other, div=False)", "def getValue(self):\n return self.left.getValue() ** self.right.getValue()", "def distance(self, *args):\n return _osgAnimation.SwigPyIterator_distance(self, *args)", "def mu(self):\n return self.mass * G", "def quantity(self):\n return self._quantity", "def get_torque(self, theta, modulus):\n\n\t\treturn self.get_k(modulus)*theta", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2", "def distance(self,a):\n return \"((self.x ** 2) + (self.y ** 2)) ** 0.5 \" + a", "def commutator(self, other) -> 'MultiVector':\n\n return ((self * other) - (other * self)) / 2", "def __pow__(self, other) -> 'MultiVector':\n\n if not isinstance(other, (int, float)):\n raise ValueError(\"exponent must be a Python int or float\")\n\n if abs(round(other) - other) > _eps:\n raise ValueError(\"exponent must have no fractional part\")\n\n other = int(round(other))\n\n if other == 0:\n unit_out = self._newMV(dtype=self.value.dtype) + 1\n return unit_out\n\n newMV = self._newMV(np.array(self.value)) # copy\n\n for i in range(1, other):\n newMV = newMV * self\n\n return newMV", "def __ipow__(self, d_value: float):\n self.set_value(self.get_value() ** d_value)\n return self", "def denom(self, a):\n return self.one", "def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)", "def __rdivmod__(self, other):\r\n return NotImplemented" ]
[ "0.7152383", "0.66799486", "0.65983427", "0.6582863", "0.6300369", "0.6256586", "0.6187268", "0.61243665", "0.6103785", "0.606291", "0.5966888", "0.58380646", "0.58076847", "0.57429963", "0.5727226", "0.56864965", "0.5662907", "0.565712", "0.56445813", "0.56345314", "0.56187904", "0.5613681", "0.55986375", "0.55853045", "0.556383", "0.55382466", "0.55359143", "0.55311215", "0.552916", "0.5490716", "0.548507", "0.5450974", "0.5444551", "0.54145706", "0.5406513", "0.5396656", "0.5396498", "0.5384588", "0.53817534", "0.5377274", "0.53589064", "0.5353454", "0.5331204", "0.53285015", "0.53249586", "0.5323691", "0.5318678", "0.5318128", "0.5311746", "0.53021705", "0.5295381", "0.5278856", "0.5270035", "0.5266159", "0.52650946", "0.5264247", "0.5253996", "0.52478886", "0.52435535", "0.5233868", "0.52299047", "0.5224613", "0.5223928", "0.52189404", "0.5203116", "0.51941025", "0.51924396", "0.5192138", "0.5189093", "0.5189093", "0.5189093", "0.5157267", "0.5148904", "0.51478165", "0.51471597", "0.51471597", "0.5146217", "0.51402617", "0.5137236", "0.5134295", "0.5132748", "0.51320446", "0.51318306", "0.5126131", "0.5125078", "0.5123783", "0.5122409", "0.51163673", "0.5111777", "0.5110904", "0.51075727", "0.51052815", "0.51049083", "0.5103781", "0.5101319", "0.5100573", "0.50802034", "0.50780046", "0.5077817", "0.506755" ]
0.69861215
1
Converts to the spherical representation of this point. Returns
def to_spherical(self): return cartesian_to_spherical(self.x, self.y, self.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cartesian_to_spherical(self, v):\n x = Vector.x(v)\n y = Vector.y(v)\n z = Vector.z(v)\n r = Vector.length(v)\n phi = atan2(y, x)\n theta = acos(z / r)\n \n return [r, phi, theta]", "def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius", "def spherical_differential(self):\n r, theta, phi, v_r, v_t, v_p = self.convert_spherical()\n return SphericalDifferential(\n r * u.m,\n theta * u.rad,\n phi * u.rad,\n v_r * u.m / u.s,\n v_t * u.rad / u.s,\n v_p * u.rad / u.s,\n )", "def spherical_differential(self):\n r, theta, phi, v_r, v_t, v_p = self.convert_spherical()\n return SphericalDifferential(\n r * u.m,\n theta * u.rad,\n phi * u.rad,\n v_r * u.m / u.s,\n v_t * u.rad / u.s,\n v_p * u.rad / u.s,\n )", "def cartesian_to_spherical(x, y, z):\n import math\n\n xsq = x ** 2\n ysq = y ** 2\n zsq = z ** 2\n\n r = (xsq + ysq + zsq) ** 0.5\n s = (xsq + ysq) ** 0.5\n\n if np.isscalar(x) and np.isscalar(y) and np.isscalar(z):\n lon = math.atan2(y, x)\n lat = math.atan2(z, s)\n else:\n lon = np.arctan2(y, x)\n lat = np.arctan2(z, s)\n\n return r, lat, lon", "def spherical_function(j, x, y, z):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n return angular_function(j, theta, phi)", "def coords_on_spherical_earth(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['coord_x_earth'] = 6371.009 * self.df_attributes['coord_x']\n self.df_attributes['coord_y_earth'] = 6371.009 * self.df_attributes['coord_y']\n self.df_attributes['coord_z_earth'] = 6371.009 * self.df_attributes['coord_z']", "def project_to_sphere(points):\n # for uv, the sphere: r=1, azimuth(phi): 2*pi*u, elevation(theta): 2*pi*v\n # theta is elevation, phi is azimuth\n r, theta, phi = cs.cart2sp(x=points[:, 0], y=points[:, 1], z=points[:, 2])\n # logger.info(f\"number of zero points in r: {np.sum(r==0)}\")\n assert np.sum(r == 0) == 0, \"points contains zeros\"\n points_sphere = points / r.reshape(-1, 1)\n return points_sphere, r, theta, phi\n\n # r, theta, phi = cs.cart2sp(x=1, y=1, z=1)\n\n # # spherical to cartesian\n # x, y, z = cs.sp2cart(r=1, theta=np.pi/4, phi=np.pi/4)\n\n # # cartesian to cylindrical\n # r, phi, z = cs.cart2cyl(x=1, y=1, z=1)", "def __cartesian2spherical(x: float, y: float, z: float) -> Tuple[float, float]:\n if x == 0 and y == 0:\n return 0, np.degrees(np.pi * 0.5 * np.sign(z))\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n return np.degrees(lon), np.degrees(lat)", "def xyz_to_spherical(self, xyz: np.ndarray, directions: bool = False) -> np.ndarray:\n if not directions:\n xyz = xyz - self.xyz\n r = np.sqrt(np.sum(xyz ** 2, axis=1))\n azimuth_iso = np.arctan2(xyz[:, 1], xyz[:, 0])\n altitude_iso = np.arccos(xyz[:, 2] / 
r)\n angles = np.column_stack(\n (\n (90 - (azimuth_iso * 180 / np.pi)) % 360,\n 90 - (altitude_iso * 180 / np.pi),\n )\n )\n if not directions:\n angles = np.column_stack((angles, r))\n return angles", "def unit_to_sphere(v):\n return (math.acos(v[2]), math.atan2(v[1], v[0]))", "def cartesian2spherical(v):\n theta = np.arcsin(v[2]) \n phi = np.arctan2(v[1], v[0])\n \n return [theta, phi]", "def CartesianToSpherical(Cartesian):\n\n # x,y,z -> r,theta,phi\n x = Cartesian[:,0]\n y = Cartesian[:,1]\n z = Cartesian[:,2]\n r = np.sqrt(x*x + y*y + z*z)\n projR = np.sqrt(x*x + y*y)\n theta = np.arccos(z/r)\n phi = np.arctan2(y,x)\n theta[theta<0.] +=2.*np.pi\n \n if (len(Cartesian[0,:])==3):\n Spherical = np.column_stack((r,theta,phi))\n return Spherical\n else:\n # vx,vy,vz -> vr,vtheta,vphi\n vx = Cartesian[:,3]\n vy = Cartesian[:,4]\n vz = Cartesian[:,5]\n vr = (x*vx + y*vy + z*vz)/r\n vt = (z*vr - r*vz)/projR\n vp = r*np.sin(theta)*(vy*x-y*vx)/(projR*projR) \n Spherical = np.column_stack((r,theta,phi,vr,vt,vp))\n return Spherical", "def spherical_to_cartesian(self, r, phi, theta):\n x = r*cos(phi)*sin(theta)\n y = r*sin(phi)*sin(theta)\n z = r*cos(theta)\n \n return Vector(float(x), float(y), float(z))", "def spherical2cartesian(v):\n \n x = np.cos(v[0]) * np.cos(v[1]) \n y = np.cos(v[0]) * np.sin(v[1]) \n z = np.sin(v[0]) \n \n return [x,y,z]", "def cartesian2spherical(vector: tuple[float, float, float]) -> tuple[float, float, float]:\n x, y, z = vector\n r = m.sqrt(x**2 + y**2 + z**2)\n # acos returns the angle in radians between 0 and pi\n theta = m.degrees(m.acos(z / r))\n # atan2 returns the angle in radians between -pi and pi\n phi = m.degrees(m.atan2(y, x))\n # lets ensure the angle in degrees is always between 0 and 360, as SHIELD-HIT12A requires\n if phi < 0.:\n phi += 360.\n return theta, phi, r", "def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z", "def cartesian2spherical(coords):\n sphere = np.zeros(coords.shape)\n xy_sq = coords[:, 0]**2 + coords[:, 1]**2\n sphere[:, 0] = np.sqrt(xy_sq + coords[:, 2]**2)\n sphere[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n sphere[:, 2] = np.arctan2(np.sqrt(xy_sq), coords[:, 2])\n return sphere", "def cartesian2spherical(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y, z = cartesian\n distance = np.linalg.norm(cartesian)\n azimuth = np.arccos(z / distance)\n elevation = np.arctan2(y, x) # Use arctan2 instead of arctan to get proper sign!\n return np.array([distance, azimuth, elevation])", "def cart2spher(x: np.ndarray, y: np.ndarray,\n z: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n indexes = np.where((x == 0) & (y == 0))[0]\n if indexes.size:\n x[indexes] = np.nan\n y[indexes] = np.nan\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n if indexes.size:\n lon[indexes] = 0\n lat[indexes] = np.pi * 0.5 * np.sign(z[indexes])\n return np.degrees(lon), np.degrees(lat)", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def 
spherical2cartesian(spherical):\n spherical = np.array(spherical).squeeze()\n distance, azimuth, elevation = spherical\n x = distance * np.sin(azimuth) * np.cos(elevation)\n y = distance * np.sin(azimuth) * np.sin(elevation)\n z = distance * np.cos(azimuth)\n return np.array([x, y, z])", "def cart2spheric(x, y, z):\n # doesn't compute r because chosen egal to 1\n with np.errstate(all='ignore'):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n\n return theta, phi", "def spherical_to_xyz(self, angles: np.ndarray) -> np.ndarray:\n # https://en.wikipedia.org/wiki/Spherical_coordinate_system\n azimuth_iso = (np.pi / 2 - angles[:, 0] * np.pi / 180) % (2 * np.pi)\n altitude_iso = (np.pi / 2 - angles[:, 1] * np.pi / 180) % (2 * np.pi)\n xyz = np.column_stack(\n (\n np.sin(altitude_iso) * np.cos(azimuth_iso),\n np.sin(altitude_iso) * np.sin(azimuth_iso),\n np.cos(altitude_iso),\n )\n )\n if angles.shape[1] > 2:\n xyz *= angles[:, 2:3]\n xyz += self.xyz\n return xyz", "def aspheresurface(self):\n\t\tR = self.coefficients[0]\n\t\ttheta = np.linspace(0, 2*np.pi, 100)\n\t\trho = np.linspace(0, R, 100)\n\t\t[u,r] = np.meshgrid(theta,rho)\n\t\tX = r*cos(u)\n\t\tY = r*sin(u)\n\t\tZ = aspherepolar(self.coefficients,r)\n\t\tfig = plt.figure(figsize=(12, 8), dpi=80)\n\t\tax = fig.gca(projection='3d')\n\t\tsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.RdYlGn,\n\t linewidth=0, antialiased=False, alpha = 0.6)\n\t\tplt.show()\n\t\treturn 0", "def toJSON(self):\n (latitude, longitude, altitude_msl) = self.getPosition()\n data = {\n 'latitude': latitude,\n 'longitude': longitude,\n 'altitude_msl': altitude_msl,\n 'sphere_radius': self.sphere_radius\n }\n return data", "def spherical_distances(x, y):\n # Compute the norms of all points, we do NOT check they actually all lie on\n # the same sphere (that's the caller's responsibility).\n \n xn = np.sqrt((x**2).sum(axis=1))\n yn = np.sqrt((y**2).sum(axis=1))\n ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])\n # Protect against numerical noise giving us cosine values outside the -1,1\n # range, where arccos would return nans.\n ang_cos = np.clip(ang_cos, -1, 1)\n\n return xn[:, None]*np.arccos(ang_cos)", "def polySphericalProjection(*args, imageCenter: Union[List[float, float], bool]=None,\n imageCenterX: Union[float, bool]=0.5, imageCenterY: Union[float,\n bool]=0.5, imageScale: Union[List[float, float], bool]=None,\n imageScaleU: Union[float, bool]=1.0, imageScaleV: Union[float,\n bool]=1.0, projectionCenter: Union[List[float, float, float],\n bool]=None, projectionCenterX: Union[float, bool]=0.0,\n projectionCenterY: Union[float, bool]=0.0, projectionCenterZ:\n Union[float, bool]=0.0, projectionHorizontalSweep: Union[float,\n bool]=0.0, projectionScale: Union[List[float, float], bool]=None,\n projectionScaleU: Union[float, bool]=180.0, projectionScaleV:\n Union[float, bool]=90.0, radius: Union[float, bool]=0.0, rotate:\n Union[List[float, float, float], bool]=None, rotateX: Union[float,\n bool]=0.0, rotateY: Union[float, bool]=0.0, rotateZ: Union[float,\n bool]=0.0, rotationAngle: Union[float, bool]=10.0, seamCorrect:\n bool=True, caching: bool=True, constructionHistory: bool=True,\n createNewMap: bool=True, insertBeforeDeformers: bool=True,\n keepImageRatio: bool=True, mapDirection: AnyStr=\"\", name: AnyStr=\"\",\n nodeState: Union[int, bool]=0, perInstance: bool=True, smartFit:\n bool=True, worldSpace: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def spherical_project(x, y, cos_lat, sin_lat,\n 
celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n right_angle = np.pi / 2\n\n d_lon = x - celestial_pole_x\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n phi = native_pole_x + d_lon + np.pi\n theta = y\n else:\n phi = native_pole_x - d_lon\n theta = -y\n else:\n cos_d_lon = np.cos(d_lon)\n\n phi = native_pole_x + np.arctan2(\n -cos_lat * np.sin(d_lon),\n (sin_lat * celestial_cos_lat)\n - (cos_lat * celestial_sin_lat * cos_d_lon))\n\n theta = asin(\n (sin_lat * celestial_sin_lat)\n + (cos_lat * celestial_cos_lat * cos_d_lon))\n\n phi = np.fmod(phi, two_pi)\n\n return theta, phi", "def getS(self):\n\t\tsValue = math.sqrt((math.pow(self.x,2)) + (math.pow(self.y,2)))/self.radius\n\t\treturn sValue", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def sphericalFlip(points, center, param):\n n = len(points) # total n points\n points[:, 1] *= -1\n points[:, 2] *= -1\n points = points - np.repeat(center, n, axis=0) # Move C to the origin\n normPoints = np.linalg.norm(points, axis=1) # Normed points\n R = np.repeat(max(normPoints) * np.power(30, param), n, axis=0) # Radius of Sphere\n\n flippedPointsTemp = 2 * np.multiply(np.repeat((R - normPoints).reshape(n, 1), len(points[0]), axis=1), points)\n flippedPoints = np.divide(\n flippedPointsTemp, np.repeat(normPoints.reshape(n, 1), len(points[0]), axis=1)\n ) # Apply Equation to get Flipped Points\n flippedPoints += points\n\n return flippedPoints", "def SphericalToCartesian(Spherical):\n\n # r,theta,phi -> x,y,z\n r = Spherical[:,0]\n st = np.sin(Spherical[:,1])\n sp = np.sin(Spherical[:,2])\n ct = np.cos(Spherical[:,1])\n cp = np.cos(Spherical[:,2])\n x = r*st*cp\n y = r*st*sp\n z = r*ct\n\n if (len(Spherical[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n return Cartesian\n else:\n # vr,vtheta,vphi -> vx,vy,vz\n vr = Spherical[:,3]\n vt = Spherical[:,4]\n vp = Spherical[:,5]\n vx = vr*st*cp - vt*ct*cp - vp*sp\n vy = vr*st*sp + vt*ct*sp + vp*cp\n vz = vr*ct - vt*st\n Cartesian= np.column_stack((x,y,z,vx,vy,vz))\n return Cartesian", "def distance_sphere(self, other):\n if not self.crs == getattr(other, \"crs\", \"EPSG:4326\") == \"EPSG:4326\":\n raise ValueError(\"Only can calculate spherical distance with 'EPSG:4326' crs.\")\n return _binary_op(arctern.ST_DistanceSphere, self, other)", "def get_vertex(self):\n V = circumcenter(self.Cents)\n return V", "def _position_cartesian2spherical(pos):\n\n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(x**2+y**2+z**2) #radius position of each particle\n\n #define theta and take care of r=0 case\n theta=np.zeros(np.size(x))\n ind_zero=(r == 0.) 
#is there any point where radius is 0 ?\n theta= np.arccos(z/r) \n theta[ind_zero]=0.\n\n phi=np.arctan2(y,x)\n\n return np.dstack((r,theta,phi))[0]", "def clone(self):\n return _osgAnimation.QuatSphericalLinearChannel_clone(self)", "def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z", "def CG(self):\n return Sphere(radius=.25,\n position=Point(0, 0, self.cg),\n color='Red')", "def _convert_sky_coords(self):\n parsed_angles = [(x, y)\n for x, y in zip(self.coord[:-1:2], self.coord[1::2])\n if (isinstance(x, coordinates.Angle) and isinstance(y, coordinates.Angle))\n ]\n frame = coordinates.frame_transform_graph.lookup_name(self.coordsys)\n\n lon, lat = zip(*parsed_angles)\n if hasattr(lon, '__len__') and hasattr(lat, '__len__') and len(lon) == 1 and len(lat) == 1:\n # force entries to be scalar if they are length-1\n lon, lat = u.Quantity(lon[0]), u.Quantity(lat[0])\n else:\n # otherwise, they are vector quantities\n lon, lat = u.Quantity(lon), u.Quantity(lat)\n sphcoords = coordinates.UnitSphericalRepresentation(lon, lat)\n coords = [SkyCoord(frame(sphcoords))]\n\n if self.region_type != 'polygon':\n coords += self.coord[len(coords * 2):]\n\n return coords", "def galactic_latlon(self):\n vector = _GALACTIC.dot(self.position.au)\n d, lat, lon = to_polar(vector)\n return (Angle(radians=lat, signed=True),\n Angle(radians=lon),\n Distance(au=d))", "def sphere_to_unit(v):\n sin_theta = math.sin(v[0])\n cos_theta = math.cos(v[0])\n return (sin_theta * math.cos(v[1]),\n sin_theta * math.sin(v[1]),\n cos_theta)", "def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)", "def _random_spherical_position(u):\n n = u.size\n nhalf = n // 2\n cos_t = 2 * u[:nhalf] - 1\n phi = 2 * np.pi * u[nhalf:]\n\n sin_t = np.sqrt((1.0 - cos_t * cos_t))\n\n x = sin_t * np.cos(phi)\n y = sin_t * np.sin(phi)\n z = cos_t\n\n return x, y, z", "def hemisphere(self):\n return self._hemisphere", "def projection(self, point):\n return gs.copy(point)", "def spherical2cartesian(sphere):\n cart = np.zeros(sphere.shape, dtype=np.float64)\n sine_phi = np.sin(sphere[:, 2])\n\n cart[:, 0] = sphere[:, 0] * np.cos(sphere[:, 1]) * sine_phi\n cart[:, 1] = sphere[:, 0] * np.sin(sphere[:, 1]) * sine_phi\n cart[:, 2] = sphere[:, 0] * np.cos(sphere[:, 2])\n return cart", "def coords(self):\n return coord.SkyCoord(ra=self.ra, dec=self.dec,\n distance=self.get_distance(lutz_kelker=lutz_kelker))", "def get_spherical_coordinates(xyz: numpy.array) -> Tuple[float, float, float]:\n r = numpy.linalg.norm(xyz)\n if 0 == r:\n return (0, 0, 0)\n azimuth = _get_azimuth(xyz[0], xyz[1])\n polar_angle = numpy.arccos(xyz[2] / r)\n\n return (r, azimuth, polar_angle)", "def cartesianToSpherical(xComp, yComp, zComp, negateMagnitude=False, \r\n tolerance=1E-10):\r\n ans = None\r\n mag = math.sqrt(xComp*xComp + yComp*yComp + zComp*zComp)\r\n if mag < tolerance:\r\n ans = [0.0, 0.0, 0.0]\r\n\r\n proj2 = xComp*xComp + yComp*yComp\r\n if ans is None and proj2 < tolerance:\r\n ans = [mag, 0.0, 0.0]\r\n elif abs(zComp) < tolerance:\r\n if abs(xComp) < tolerance:\r\n ans = [mag, 90.0, 90.0]\r\n if abs(yComp) < tolerance:\r\n ans = [mag, 90.0, 0.0]\r\n else:\r\n ans = [mag, 90.0, math.acos(xComp/mag)*_CONV]\r\n else:\r\n azimuth = math.acos(zComp/mag)\r\n ans = [mag, azimuth*_CONV, \r\n 
math.acos(xComp/(mag*math.sin(azimuth)))*_CONV]\r\n \r\n if negateMagnitude:\r\n ans = [-1*ans[0], 180+ans[1], ans[2]]\r\n return ans", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def cylindrical2spherical(cyl):\n sph = np.zeros(cyl.shape)\n sph[:, 0] = np.sqrt(cyl[:, 0]**2 + cyl[:, 2]**2)\n sph[:, 1] = cyl[:, 1]\n sph[:, 2] = np.arctan2(cyl[:, 0], cyl[:, 2])\n return sph", "def convertStoichiometryMath(self):\n return _libsbml.Model_convertStoichiometryMath(self)", "def spherical_distance(coord_pair, radius=MEAN_EARTH_RADIUS_M):\n\n return spherical_distance_haversine(np.array([coord_pair]), radius)[0]", "def on_sphere():\n vec = np.random.standard_normal(3)\n return vec / np.linalg.norm(vec)", "def center(self):\n return self.map_.geom.center_skydir", "def output(self):\n if self.real != 1:\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n else:\n angle = 0.\n xyz = self.pure\n result = \"%g %g %g %g %g %g %g\" % (self.trans[0], self.trans[1], self.trans[2],\n xyz[0], xyz[1], xyz[2], angle)\n return result", "def spatial(self):\n return self._spatial", "def phi(self):\n return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z)", "def sphere_centers(r_x, r_y, r_z):\n a_ccs_p_trans_m = hom_translation_matrix(\n t_x=0.265, t_y=0, t_z=0.014)\n a_ccs_p_rot_m = hom_rotation(x_axis_rotation_matrix(r_x) @\n y_axis_rotation_matrix(r_y) @\n z_axis_rotation_matrix(r_z))\n a_p_sph_1_2 = hom_translation_matrix(\n t_x=0.015, t_y=0.029, t_z=-0.0965)\n a_p_sph_2_2 = hom_translation_matrix(\n t_x=0.015, t_y=-0.029, t_z=-0.0965)\n\n a_ccs_ = a_ccs_p_trans_m @ a_ccs_p_rot_m\n a_c1 = a_ccs_ @ a_p_sph_1_2\n a_c2 = a_ccs_ @ a_p_sph_2_2\n\n return get_translation(a_c1), get_translation(a_c2)", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def sga(self) -> float:\n sgx = self.true[1:] - self.true[:self.true.size - 1]\n sgy = self.predicted[1:] - self.predicted[:self.predicted.size - 1]\n a = np.dot(sgx, sgy)\n b = np.linalg.norm(sgx) * np.linalg.norm(sgy)\n return float(np.arccos(a / b))", "def __call__(self, x):\n v = vector(RDF,x)\n if v.is_zero():\n raise ValueError, \"The origin must not be a vertex.\"\n v = v/norm(v) # normalize vertices to unit sphere\n v = self.house*v # reflect so self.projection_dir is at \"north pole\"\n denom = self.height-v[self.dim-1]\n if denom.is_zero():\n raise ValueError, 'Point cannot coincide with ' \\\n 'coordinate singularity at ' + repr(x)\n return vector(RDF, [ v[i]/denom for i in range(self.dim-1) ])", "def __init__(self, angle = 'deg'):\n \n name = \"Spherical\"\n Qstr = [\"r\", \"theta\", \"phi\"]\n Xstr = [\"x\", \"y\", \"z\"]\n \n super().__init__(self._csSpherical_q2x, nQ = 3,\n nX = 3, name = name, \n Qstr = Qstr, Xstr = Xstr,\n maxderiv = None, isatomic = False,\n zlevel = None)\n \n if angle == 'deg' or angle == 'rad':\n self.angle = angle # 'deg' or 'rad'\n else:\n raise ValueError('angle must be ''deg'' or ''rad''.')", "def to_cartes(self):\n if self.__coordsys in (Polar, PhySpherical, MathSpherical):\n self.__coordsys = Cartesian if self.__coordsys == Polar 
else Cartesian_3\n self.update_coord(vct.rec(self.list_repr()))", "def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]", "def st(self):\n # sidereal time polynomial coefficients in arcseconds\n sidereal_time = np.array([0.014506, 4612.156534, 1.3915817, -4.4e-7,\n -2.9956e-05, -3.68e-08])\n ST = self.polynomial_sum(sidereal_time, self.T)\n # get earth rotation angle and convert to arcseconds\n return np.mod(ST + self.era*self.deg2asec, self.turnasec)/self.turnasec", "def getValue(self, *args):\n return _osgAnimation.QuatSphericalLinearInterpolator_getValue(self, *args)", "def convert_coords_cart_sphere(coords_cart):\n shape = coords_cart.shape\n coords = coords_cart.reshape(3,-1)\n\n lat, lon, alt = np.zeros_like(coords)\n for i in range(coords.shape[1]):\n p_rec = [coords[0, i], coords[1, i], coords[2, i]]\n p_lat = sp.spiceypy.reclat(p_rec)\n alt[i], lon[i], lat[i] = p_lat\n \n lat = lat*180/np.pi\n lon = lon*180/np.pi\n alt = alt - mars_r \n\n coords_sphere = np.array([lat, lon, alt]).reshape(shape)\n return coords_sphere", "def Center(self, *args):\n return _Bnd.Bnd_Sphere_Center(self, *args)", "def sa(self) -> float:\n a = np.dot(self.predicted, self.true)\n b = np.linalg.norm(self.predicted) * np.linalg.norm(self.true)\n return float(np.arccos(a / b))", "def cart2sph(x: float, y: float, z: float) -> typing.Tuple[float, float, float]:\n hxy = hypot(x, y)\n r = hypot(hxy, z)\n el = atan2(z, hxy)\n az = atan2(y, x)\n return az, el, r", "def getCentroid(self) -> Vec3:\n return self.centroid()", "def getSphereRadius(self):\n return 1.5", "def __poprzeczna_s(self, sph_func, R):\n q = self.omega / self.c\n r = q * R.r\n a = self.l * ( self.l + 1 )\n return (-a * sph_func(self.l, r) *\n vsh1(self.m, self.l, R.theta, R.phi) / r -\n (sph_func(self.l, r, derivative=True) +\n sph_func(self.l, r) / r\n ) * vsh2(self.m, self.l, R.theta, R.phi)\n ) / np.sqrt(a) / q", "def spherical_project_array(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n x = np.atleast_1d(np.asarray(x))\n y = np.atleast_1d(np.asarray(y))\n cos_lat = np.atleast_1d(np.asarray(cos_lat))\n sin_lat = np.atleast_1d(np.asarray(sin_lat))\n celestial_pole_x = np.atleast_1d(np.asarray(celestial_pole_x))\n celestial_pole_y = np.atleast_1d(np.asarray(celestial_pole_y))\n celestial_cos_lat = np.atleast_1d(np.asarray(celestial_cos_lat))\n celestial_sin_lat = np.atleast_1d(np.asarray(celestial_sin_lat))\n native_pole_x = np.atleast_1d(np.asarray(native_pole_x))\n\n sizes = np.array([x.size, celestial_pole_x.size, native_pole_x.size])\n max_array = np.argmax(sizes)\n if max_array == 0:\n theta = np.empty_like(x, dtype=nb.float64)\n phi = np.empty_like(x, dtype=nb.float64)\n n = x.size\n else:\n theta = np.empty_like(celestial_pole_x, dtype=nb.float64)\n phi = np.empty_like(celestial_pole_x, dtype=nb.float64)\n n = celestial_pole_x.size\n\n singular_celestial = celestial_pole_x.size == 1\n singular_coordinate = x.size == 1\n singular_native = native_pole_x.size == 1\n\n for i in range(n):\n coord_i = 0 if singular_coordinate else i\n celes_i = 0 if singular_celestial else i\n nativ_i = 0 if 
singular_native else i\n\n theta[i], phi[i] = spherical_project(\n x=x[coord_i],\n y=y[coord_i],\n cos_lat=cos_lat[coord_i],\n sin_lat=sin_lat[coord_i],\n celestial_pole_x=celestial_pole_x[celes_i],\n celestial_pole_y=celestial_pole_y[celes_i],\n celestial_cos_lat=celestial_cos_lat[celes_i],\n celestial_sin_lat=celestial_sin_lat[celes_i],\n native_pole_x=native_pole_x[nativ_i])\n\n return theta, phi", "def sphericalToCartesian(magnitude, azimuthal, polar):\r\n azimuthal = azimuthal*math.pi/180.0\r\n polar = polar*math.pi/180.0\r\n xval = magnitude * math.sin(azimuthal) * math.cos(polar)\r\n yval = magnitude * math.sin(azimuthal) * math.sin(polar)\r\n zval = magnitude * math.cos(azimuthal)\r\n return [xval, yval, zval]", "def to_shapely(self):\n import shapely.geometry as sg\n return sg.Point(self.flat_values)", "def SphereFromVector(vector):\n xyproj = vector.x*vector.x + vector.y*vector.y\n dist = math.sqrt(xyproj + vector.z*vector.z)\n if xyproj == 0.0:\n if vector.z == 0.0:\n raise Exception('Zero-length vector not allowed.')\n lon = 0.0\n if vector.z < 0.0:\n lat = -90.0\n else:\n lat = +90.0\n else:\n lon = math.degrees(math.atan2(vector.y, vector.x))\n if lon < 0.0:\n lon += 360.0\n lat = math.degrees(math.atan2(vector.z, math.sqrt(xyproj)))\n return Spherical(lat, lon, dist)", "def _velocity_cartesian2spherical(pos,vel):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n #save cartesian velocities\n vx=vel[:,0]\n vy=vel[:,1]\n vz=vel[:,2]\n\n #convert to spherical coordinates\n pos_sph=_position_cartesian2spherical(pos) #spherical coordinates\n r=pos_sph[:,0]\n theta=pos_sph[:,1]\n phi=pos_sph[:,2]\n\n\n #compute spherical velocities\n vr = vx*np.sin(theta)*np.cos(phi) + vy*np.sin(theta)*np.sin(phi) + vz*np.cos(theta)\n vtheta = vx*np.cos(theta)*np.cos(phi) + vy*np.cos(theta)*np.sin(phi) - vz*np.sin(theta)\n vphi = -vx*np.sin(phi) + vy*np.cos(phi)\n\n if np.sum(r==0)!=0: #if some points are at the origin\n warnings.warn(\"Spherical velocity is not defined at origin. 
Returning 0.\")\n vr[r==0]=0\n vtheta[r==0]=0\n vphi[r==0]=0\n\n\n return np.dstack((vr,vtheta,vphi))[0]", "def toJSON(self):\n if self.gps_position is None:\n latitude = 0\n longitude = 0\n else:\n latitude = self.gps_position.latitude\n longitude = self.gps_position.longitude\n data = {\n 'latitude': latitude,\n 'longitude': longitude,\n 'cylinder_radius': self.cylinder_radius,\n 'cylinder_height': self.cylinder_height\n }\n return data", "def llh(self):\n return Station._ellipsoid.geodetic(self.xyz())", "def platform_to_spherical(plat_coordinates, light_dir, source_center,\n source_tilt, platform_tilt, height):\n source_pos, light_dir = platform_to_cartesian(plat_coordinates, light_dir,\n source_center, source_tilt, platform_tilt, height)\n x, y, z = source_pos\n rho = np.linalg.norm(source_pos)\n theta = np.arctan(y/x)\n phi = np.arccos(z/rho)\n return tidy(np.degrees(theta)), tidy(np.degrees(phi)), tidy(rho)", "def get_vertex_periodic(self):\n V = circumcenter_periodic(self.Cents,self.L)\n return V", "def line_sphere(l, s):\n o = l.o\n l = l.d\n c = s.o\n r = s.r\n LO = dot(l,o)\n LC = dot(l,c)\n OC = dot(o,c)\n A = LO - LC\n AA = A*A\n\n LL = dot(l,l)\n OO = dot(o,o)\n CC = dot(c,c)\n RR = r*r\n\n B = OO + CC - RR - 2*OC\n\n C = LL\n\n tsqr = AA - C*B\n\n if tsqr < 0:\n return tuple()\n\n tsqr = sqrt(tsqr)\n k1 = (-A + tsqr)/LL\n k2 = (-A - tsqr)/LL\n\n return (l*k1+o, l*k2+o)", "def s(self, position: Vector) -> float:\n return self.local_coordinates(position)[0]", "def create_pseudo_epsg4326_coordinates(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['lat'] = 180*(pi/2 - np.arccos(self.df_attributes['coord_z']))/pi\n self.df_attributes['lon'] = 180*np.arctan2(self.df_attributes['coord_y'], self.df_attributes['coord_x'])/pi", "def get_center(self,lonlat=False):\n lon, lat = np.asarray(self.rotator.rots[0][0:2])*180/pi\n if lonlat: return lon,lat\n else: return pi/2.-lat*dtor, lon*dtor", "def to_compas(self):\n return sphere_to_compas(self.geometry)", "def skycoord(self):\n return SkyCoord(self['raj'], self['decj'], unit=(uu.hour, uu.degree))", "def cosines_to_global(self):\n r = Rotation.from_matrix(self.R2global())\n a, b, g = r.as_euler('xyz', degrees=False)\n return np.cos(a), np.cos(b), np.cos(g)", "def cartesian_To_Center(self, x, y, z):\n\n if x > 0.0 and -self.L_cap <= y <= 0.0:\n s = self.L_cap + y\n xc = x - self.rb\n yc = z\n else:\n theta = full_arctan2(y, x)\n if theta <= self.ang:\n s = theta * self.rb + self.L_cap\n xc = np.sqrt(x ** 2 + y ** 2) - self.rb\n yc = z\n elif self.ang < theta <= 2 * np.pi: # i'm being lazy here and not limiting the real end\n x0, y0 = np.cos(self.ang) * self.rb, np.sin(self.ang) * self.rb\n thetaEndPerp = np.pi - np.arctan(-1 / np.tan(self.ang))\n x, y = x - x0, y - y0\n deltaS, xc = np.cos(thetaEndPerp) * x + np.sin(-thetaEndPerp) * y, np.sin(thetaEndPerp) * x + np.cos(\n thetaEndPerp) * y\n yc = z\n xc = -xc\n s = (self.ang * self.rb + self.L_cap) + deltaS\n else:\n raise ValueError\n return s, xc, yc", "def sqrt(self):\r\n getcontext().prec += 2\r\n mod = abs(self).sqrt()\r\n try:\r\n arg = atan2(self._imag, self._real) / 2\r\n except InvalidOperationError:\r\n arg = 0\r\n val = self.__class__.from_polar(mod, arg)\r\n getcontext().prec -= 2\r\n return (+val, -val)", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", 
"def V(self, *args):\n return _Bnd.Bnd_Sphere_V(self, *args)", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def spatial(self):\n return self.spatial_x, self.spatial_y, self.spatial_data", "def cartesian_2_spherical(grid, vec=None):\n return cartesian_to_spherical(grid, vec)" ]
[ "0.7045606", "0.66927147", "0.6692273", "0.668019", "0.65737593", "0.65737593", "0.6492899", "0.6417639", "0.6404603", "0.6351715", "0.6329752", "0.6121212", "0.61188513", "0.6074496", "0.6060194", "0.6014923", "0.60014564", "0.5973966", "0.5957356", "0.5955025", "0.594932", "0.59456056", "0.5937532", "0.5905478", "0.5904932", "0.5897196", "0.58738714", "0.58499986", "0.58445084", "0.5829656", "0.58244574", "0.58023137", "0.57898223", "0.5787399", "0.57764846", "0.57653934", "0.57252306", "0.57236296", "0.57180655", "0.5648473", "0.5647906", "0.5644585", "0.56275815", "0.56089854", "0.5607407", "0.5597173", "0.5582999", "0.55657107", "0.556112", "0.55423063", "0.55378485", "0.5516967", "0.5480533", "0.5478475", "0.54732406", "0.5470079", "0.5469925", "0.54690796", "0.54652494", "0.54637593", "0.54624724", "0.545371", "0.54505986", "0.54446083", "0.54442555", "0.5444149", "0.5436865", "0.54079986", "0.5399667", "0.5390575", "0.53623414", "0.53546727", "0.534413", "0.5342842", "0.53390604", "0.5333492", "0.53295594", "0.53221655", "0.5321076", "0.5315659", "0.529995", "0.52932286", "0.52846104", "0.52820504", "0.52817136", "0.52801317", "0.5277665", "0.5273381", "0.5271351", "0.52690667", "0.52683234", "0.52563405", "0.5251177", "0.52506906", "0.5249884", "0.524987", "0.5247463", "0.5245366", "0.52413887", "0.5234218" ]
0.81549674
0
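A minimal sketch of how one row of this dump could be consumed, based only on the structure visible in the record above: the metadata declares a triplet objective over the "query", "document", and "negatives" fields, so a row naturally expands into (anchor, positive, negative) triplets. The loader shape, the max_negatives cap, and the "records.jsonl" path are assumptions for illustration, not part of the dataset itself.

import json

def triplets_from_record(record, max_negatives=4):
    """Yield (anchor, positive, negative) triplets from one dataset row.

    Field names ("query", "document", "negatives") mirror the triplet
    objective named in each record's metadata; everything else here is
    an assumption made for this sketch.
    """
    anchor = record["query"]
    positive = record["document"]
    for negative in record["negatives"][:max_negatives]:
        yield anchor, positive, negative

# Hypothetical usage, assuming the dump is stored as one JSON object per line.
if __name__ == "__main__":
    with open("records.jsonl") as fh:
        for line in fh:
            for anchor, positive, negative in triplets_from_record(json.loads(line)):
                pass  # hand the triplet to whatever contrastive trainer is in use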
Converts 3D rectangular cartesian coordinates to spherical polar coordinates. Note that the resulting angles are latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole.
def cartesian_to_spherical(x, y, z): import math xsq = x ** 2 ysq = y ** 2 zsq = z ** 2 r = (xsq + ysq + zsq) ** 0.5 s = (xsq + ysq) ** 0.5 if np.isscalar(x) and np.isscalar(y) and np.isscalar(z): lon = math.atan2(y, x) lat = math.atan2(z, s) else: lon = np.arctan2(y, x) lat = np.arctan2(z, s) return r, lat, lon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])", "def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius", "def SphericalToCartesian(Spherical):\n\n # r,theta,phi -> x,y,z\n r = Spherical[:,0]\n st = np.sin(Spherical[:,1])\n sp = np.sin(Spherical[:,2])\n ct = np.cos(Spherical[:,1])\n cp = np.cos(Spherical[:,2])\n x = r*st*cp\n y = r*st*sp\n z = r*ct\n\n if (len(Spherical[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n return Cartesian\n else:\n # vr,vtheta,vphi -> vx,vy,vz\n vr = Spherical[:,3]\n vt = Spherical[:,4]\n vp = Spherical[:,5]\n vx = vr*st*cp - vt*ct*cp - vp*sp\n vy = vr*st*sp + vt*ct*sp + vp*cp\n vz = vr*ct - vt*st\n Cartesian= np.column_stack((x,y,z,vx,vy,vz))\n return Cartesian", "def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z", "def to_polar(self, physics=False):\n if self.__coordsys in (Cartesian, Cartesian_3):\n self.__coordsys = Polar if self.__coordsys == Cartesian \\\n else PhySpherical if physics else MathSpherical\n self.update_coord(vct.pol(self.list_repr()))", "def sphericalToCartesian(magnitude, azimuthal, polar):\r\n azimuthal = azimuthal*math.pi/180.0\r\n polar = polar*math.pi/180.0\r\n xval = magnitude * math.sin(azimuthal) * math.cos(polar)\r\n yval = magnitude * math.sin(azimuthal) * math.sin(polar)\r\n zval = magnitude * math.cos(azimuthal)\r\n return [xval, yval, zval]", "def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])", "def CartesianToSpherical(Cartesian):\n\n # x,y,z -> r,theta,phi\n x = Cartesian[:,0]\n y = Cartesian[:,1]\n z = Cartesian[:,2]\n r = np.sqrt(x*x + y*y + z*z)\n projR = np.sqrt(x*x + y*y)\n theta = np.arccos(z/r)\n phi = np.arctan2(y,x)\n theta[theta<0.] +=2.*np.pi\n \n if (len(Cartesian[0,:])==3):\n Spherical = np.column_stack((r,theta,phi))\n return Spherical\n else:\n # vx,vy,vz -> vr,vtheta,vphi\n vx = Cartesian[:,3]\n vy = Cartesian[:,4]\n vz = Cartesian[:,5]\n vr = (x*vx + y*vy + z*vz)/r\n vt = (z*vr - r*vz)/projR\n vp = r*np.sin(theta)*(vy*x-y*vx)/(projR*projR) \n Spherical = np.column_stack((r,theta,phi,vr,vt,vp))\n return Spherical", "def spherical_to_cartesian(self, r, phi, theta):\n x = r*cos(phi)*sin(theta)\n y = r*sin(phi)*sin(theta)\n z = r*cos(theta)\n \n return Vector(float(x), float(y), float(z))", "def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z", "def CartesianToPolar(Cartesian):\n \n # x,y,z -> R,phi,z\n R = np.sqrt(Cartesian[:,0]*Cartesian[:,0]+Cartesian[:,1]*Cartesian[:,1])\n phi = np.arctan2(Cartesian[:,1],Cartesian[:,0])\n z = Cartesian[:,2]\n phi[phi<0.] 
+= 2.*np.pi\n if (len(Cartesian[0,:])==3):\n Polar = np.column_stack((R,phi,z))\n else:\n # vx,vy,vz -> vR,vphi,vz\n cp = np.cos(phi)\n sp = np.sin(phi)\n vR = Cartesian[:,3]*cp+Cartesian[:,4]*sp\n vphi = Cartesian[:,4]*cp-Cartesian[:,3]*sp\n vz = Cartesian[:,5]\n Polar = np.column_stack((R,phi,z,vR,vphi,vz))\n\t\t\n return Polar", "def spherical2cartesian(spherical):\n spherical = np.array(spherical).squeeze()\n distance, azimuth, elevation = spherical\n x = distance * np.sin(azimuth) * np.cos(elevation)\n y = distance * np.sin(azimuth) * np.sin(elevation)\n z = distance * np.cos(azimuth)\n return np.array([x, y, z])", "def polarToCartesian(theta=0, radius=0):\n\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return x, y", "def cartesian2polar(x, y):\n r = (x**2+y**2)**.5\n phi = atan2(y, x)\n return phi, r", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta", "def cartesian_to_polar(self, x, y):\n # r = (x^2+y^2)^2, theta = tan^-1(y/x)\n # pole is the reference point of the coordinate system\n x, y = self.get_rel_to_pole(x, y)\n r = math.sqrt(pow(x, 2)+pow(y, 2))\n # set specific code for edge cases\n if x == 0 and y != 0:\n sign = lambda x: (1, -1)[x < 0]\n return r, sign(y)*math.pi/2\n if x == 0 and y == 0:\n return 0, 0\n else:\n theta = math.atan(y/x)\n return r, theta", "def cartesianToPolar(x=0, y=0):\n\n radius = np.hypot(x, y)\n theta = np.arctan2(y, x)\n return theta, radius", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def spherical2cartesian(phi, theta, depth):\n x = depth * np.sin(theta) * np.cos(phi)\n y = depth * np.cos(theta)\n z = depth * np.sin(theta) * np.sin(phi)\n\n return x, y, z", "def to_polar(center_coords, neighbors_coords):\n return cart2pol((neighbors_coords - center_coords)[:, 0],\n (neighbors_coords - center_coords)[:, 1])", "def cartesian2spherical(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y, z = cartesian\n distance = np.linalg.norm(cartesian)\n azimuth = np.arccos(z / distance)\n elevation = np.arctan2(y, x) # Use arctan2 instead of arctan to get proper sign!\n return np.array([distance, azimuth, elevation])", "def coord_rotate_rad(x, y, z):\n #-- 1 --\n xt = math.asin ( math.sin(x) * math.sin(y) +\n math.cos(x) * math.cos(y) * math.cos(z) )\n #-- 2 --\n yt = math.acos ( ( math.sin(x) - math.sin(y) * math.sin(xt) ) /\n ( math.cos(y) * math.cos(xt) ) )\n #-- 3 --\n if math.sin(z) > 0.0:\n yt = TWO_PI - yt\n\n #-- 4 --\n return (xt, yt)", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def __cartesian2spherical(x: float, y: float, z: float) -> Tuple[float, float]:\n if x == 0 and y == 0:\n return 0, np.degrees(np.pi * 0.5 * np.sign(z))\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n return np.degrees(lon), np.degrees(lat)", "def spherical_2_cartesian(r, phi, theta, units='degrees'):\n phi = np.copy(phi)\n theta = np.copy(theta)\n if units == 'degrees':\n phi, theta = np.deg2rad(phi), np.deg2rad(theta)\n elif units == 'radians':\n pass\n else:\n raise AssertionError(\"Unexpected value entered for 'units', only supports either degrees or radians\", units)\n x = r * np.cos(phi) * np.sin(theta)\n y = r * np.sin(phi) * np.sin(theta)\n z = r * np.cos(theta)\n return x, y, z", "def cart2spheric(x, y, z):\n # doesn't compute r because chosen egal to 1\n with np.errstate(all='ignore'):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n\n return theta, phi", "def polar(position):\n return list(polar(complex(position[0], position[1])))", "def polar_to_cartesian(radius, angle_deg):\n\n theta = np.deg2rad(angle_deg)\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return(x, y)", "def to_polar(x, y):\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return r, phi", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def GalacticToPolar(Galactic,SolarPosition): \n\n Cartesian = GalacticToCartesian(Galactic,SolarPosition)\n Polar = CartesianToPolar(Cartesian)\n\n return Polar", "def cartesian2spherical(vector: tuple[float, float, float]) -> tuple[float, float, float]:\n x, y, z = vector\n r = m.sqrt(x**2 + y**2 + z**2)\n # acos returns the angle in radians between 0 and pi\n theta = m.degrees(m.acos(z / r))\n # atan2 returns the angle in radians between -pi and pi\n phi = m.degrees(m.atan2(y, x))\n # lets ensure the angle in degrees is always between 0 and 360, as SHIELD-HIT12A requires\n if phi < 0.:\n phi += 360.\n return theta, phi, r", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def spherical_project(x, y, cos_lat, 
sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n right_angle = np.pi / 2\n\n d_lon = x - celestial_pole_x\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n phi = native_pole_x + d_lon + np.pi\n theta = y\n else:\n phi = native_pole_x - d_lon\n theta = -y\n else:\n cos_d_lon = np.cos(d_lon)\n\n phi = native_pole_x + np.arctan2(\n -cos_lat * np.sin(d_lon),\n (sin_lat * celestial_cos_lat)\n - (cos_lat * celestial_sin_lat * cos_d_lon))\n\n theta = asin(\n (sin_lat * celestial_sin_lat)\n + (cos_lat * celestial_cos_lat * cos_d_lon))\n\n phi = np.fmod(phi, two_pi)\n\n return theta, phi", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def _position_cartesian2spherical(pos):\n\n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(x**2+y**2+z**2) #radius position of each particle\n\n #define theta and take care of r=0 case\n theta=np.zeros(np.size(x))\n ind_zero=(r == 0.) #is there any point where radius is 0 ?\n theta= np.arccos(z/r) \n theta[ind_zero]=0.\n\n phi=np.arctan2(y,x)\n\n return np.dstack((r,theta,phi))[0]", "def polarCameraToCartesian(self):\n x = self.cameraPolar[0] * np.sin(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n y = self.cameraPolar[0] * np.cos(self.cameraPolar[2] * np.pi / 180)\n z = self.cameraPolar[0] * np.cos(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n self.cameraPosition = [x, y, z]", "def spherical2cartesian(v):\n \n x = np.cos(v[0]) * np.cos(v[1]) \n y = np.cos(v[0]) * np.sin(v[1]) \n z = np.sin(v[0]) \n \n return [x,y,z]", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def cartesian_to_polar(cart: np.ndarray, radial_step: float, azimuth_step : float, radial_bins: int,\n azimuth_bins: int, cart_resolution: float) -> np.ndarray:\n max_range = radial_step * radial_bins\n angles = np.linspace(0, 2 * np.pi, azimuth_bins, dtype=np.float32).reshape(azimuth_bins, 1)\n ranges = np.linspace(0, max_range, radial_bins, dtype=np.float32).reshape(1, radial_bins)\n angles = np.tile(angles, (1, radial_bins))\n ranges = np.tile(ranges, (azimuth_bins, 1))\n x = ranges * np.cos(angles)\n y = ranges * np.sin(angles)\n cart_pixel_width = cart.shape[0]\n if (cart_pixel_width % 2) == 0:\n cart_min_range = (cart_pixel_width / 2 - 0.5) * cart_resolution\n else:\n cart_min_range = cart_pixel_width // 2 * cart_resolution\n u = (cart_min_range + y) / cart_resolution\n v = (cart_min_range - x) / cart_resolution\n cart_to_polar_warp = np.stack((u, v), -1)\n polar = np.expand_dims(cv2.remap(cart, cart_to_polar_warp, None, cv2.INTER_LINEAR), -1)\n return np.squeeze(polar)", "def spherical2cartesian(sphere):\n cart = np.zeros(sphere.shape, dtype=np.float64)\n sine_phi = np.sin(sphere[:, 2])\n\n cart[:, 0] = sphere[:, 0] * np.cos(sphere[:, 1]) * sine_phi\n cart[:, 1] = sphere[:, 0] * np.sin(sphere[:, 1]) * sine_phi\n cart[:, 2] = sphere[:, 0] * np.cos(sphere[:, 2])\n return cart", "def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def cartesian_to_spherical(self, v):\n x = Vector.x(v)\n y = Vector.y(v)\n z = Vector.z(v)\n r = Vector.length(v)\n phi = atan2(y, x)\n theta = acos(z / r)\n \n return [r, phi, theta]", "def PolarToCartesian(Polar):\n\t \n # R,phi,z -> x,y,z\n cp = np.cos(Polar[:,1])\n sp = np.sin(Polar[:,1])\n x = Polar[:,0] * cp\n y = Polar[:,0] * sp\n z = Polar[:,2]\n\n if (len(Polar[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vR,vphi,vz -> vx,vy,vz\n vx = Polar[:,3]*cp-Polar[:,4]*sp\n vy = Polar[:,4]*cp+Polar[:,3]*sp\n vz = Polar[:,5]\n Cartesian = np.column_stack((x,y,z,vx,vy,vz))\n \n return Cartesian", "def _polar_to_cartesian(self, radius: float, radians: float) -> None:\n self.x = round(radius * math.cos(radians), EPSILON_EXP_MINUS_1)\n self.y = round(radius * math.sin(radians), EPSILON_EXP_MINUS_1)", "def cartesian2spherical(coords):\n sphere = np.zeros(coords.shape)\n xy_sq = coords[:, 0]**2 + coords[:, 1]**2\n sphere[:, 0] = np.sqrt(xy_sq + coords[:, 2]**2)\n sphere[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n sphere[:, 2] = np.arctan2(np.sqrt(xy_sq), coords[:, 2])\n return sphere", "def cartesian2polar(coords, inputshape, origin):\n\n r_index, theta_index = coords\n\n r = r_index * (rangeX[1] - rangeX[0])/2.0/inputshape[0]\n theta = theta_index * 2.0*np.pi/inputshape[1] + np.pi\n\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n \n i = np.round(x/Lx*inputshape[0]) + origin[0]\n j = np.round(y/Ly*inputshape[0]) + origin[1]\n \n return (i,j)", "def get_spherical_coordinates(xyz: numpy.array) -> Tuple[float, float, float]:\n r = numpy.linalg.norm(xyz)\n if 0 == r:\n return (0, 0, 0)\n azimuth = _get_azimuth(xyz[0], xyz[1])\n polar_angle = numpy.arccos(xyz[2] / r)\n\n return (r, azimuth, polar_angle)", "def _sphere2cart(xyz, axtheta=0, axphi=1, unit='rad'):\n # Get theta / phi :\n theta, phi = xyz[:, 0], xyz[:, 1]\n if unit is 'degree':\n np.deg2rad(theta, out=theta)\n np.deg2rad(phi, out=phi)\n # Get radius :\n r = np.sin(theta)\n # Get cartesian coordinates :\n np.multiply(np.cos(phi), r, out=xyz[:, 0])\n np.multiply(np.sin(phi), r, out=xyz[:, 1])\n np.cos(theta, xyz[:, 2])\n return xyz", "def project_to_sphere(points):\n # for uv, the sphere: r=1, azimuth(phi): 2*pi*u, elevation(theta): 2*pi*v\n # theta is elevation, phi is azimuth\n r, theta, phi = cs.cart2sp(x=points[:, 0], y=points[:, 1], z=points[:, 2])\n # logger.info(f\"number of zero points in r: {np.sum(r==0)}\")\n assert np.sum(r == 0) == 0, \"points contains zeros\"\n points_sphere = points / r.reshape(-1, 1)\n return points_sphere, r, theta, phi\n\n # r, theta, phi = cs.cart2sp(x=1, y=1, z=1)\n\n # # spherical to cartesian\n # x, y, z = cs.sp2cart(r=1, theta=np.pi/4, phi=np.pi/4)\n\n # # cartesian to cylindrical\n # r, phi, z = cs.cart2cyl(x=1, y=1, z=1)", "def spherical_to_xyz(self, angles: np.ndarray) -> np.ndarray:\n # https://en.wikipedia.org/wiki/Spherical_coordinate_system\n azimuth_iso = (np.pi / 2 - angles[:, 0] * np.pi / 180) % (2 * np.pi)\n altitude_iso = (np.pi / 2 - angles[:, 1] * np.pi / 180) % (2 * np.pi)\n xyz = np.column_stack(\n (\n np.sin(altitude_iso) * np.cos(azimuth_iso),\n np.sin(altitude_iso) * np.sin(azimuth_iso),\n np.cos(altitude_iso),\n )\n )\n if angles.shape[1] > 2:\n xyz *= angles[:, 2:3]\n xyz += self.xyz\n return xyz", "def 
get_polar_coordinates(cup_position, bot_position):\n\n distance_x = cup_position[0] - bot_position[0]\n distance_y = cup_position[1] - bot_position[1]\n\n r = math.hypot(distance_x, distance_y)\n theta = math.degrees(math.atan(distance_y/distance_x))\n\n return r, theta", "def radar_polar_to_cartesian(azimuths, fft_data, radar_resolution, cart_resolution, cart_pixel_width,\n interpolate_crossover=True, fix_wobble=True):\n if (cart_pixel_width % 2) == 0:\n cart_min_range = (cart_pixel_width / 2 - 0.5) * cart_resolution\n else:\n cart_min_range = cart_pixel_width // 2 * cart_resolution\n coords = np.linspace(-cart_min_range, cart_min_range, cart_pixel_width, dtype=np.float32)\n Y, X = np.meshgrid(coords, -1 * coords)\n sample_range = np.sqrt(Y * Y + X * X)\n sample_angle = np.arctan2(Y, X)\n sample_angle += (sample_angle < 0).astype(np.float32) * 2. * np.pi\n\n # Interpolate Radar Data Coordinates\n azimuth_step = (azimuths[-1] - azimuths[0]) / (azimuths.shape[0] - 1)\n sample_u = (sample_range - radar_resolution / 2) / radar_resolution\n sample_v = (sample_angle - azimuths[0]) / azimuth_step\n # This fixes the wobble in the old CIR204 data from Boreas (keenan)\n if fix_wobble and radar_resolution == 0.0596:\n azimuths = azimuths.reshape((1, 1, 400)) # 1 x 1 x 400\n sample_angle = np.expand_dims(sample_angle, axis=-1) # H x W x 1\n diff = np.abs(azimuths - sample_angle)\n c3 = np.argmin(diff, axis=2)\n azimuths = azimuths.squeeze()\n c3 = c3.reshape(cart_pixel_width, cart_pixel_width) # azimuth indices (closest)\n mindiff = sample_angle.squeeze() - azimuths[c3]\n sample_angle = sample_angle.squeeze()\n mindiff = mindiff.squeeze()\n\n subc3 = c3 * (c3 < 399)\n aplus = azimuths[subc3 + 1]\n a1 = azimuths[subc3]\n delta1 = mindiff * (mindiff > 0) * (c3 < 399) / (aplus - a1)\n subc3 = c3 * (c3 > 0)\n a2 = azimuths[subc3]\n aminus = azimuths[1 + (c3 > 0) * (subc3 - 2)]\n delta2 = mindiff * (mindiff < 0) * (c3 > 0) / (a2 - aminus)\n sample_v = c3 + delta1 + delta2\n sample_v = sample_v.astype(np.float32)\n\n # We clip the sample points to the minimum sensor reading range so that we\n # do not have undefined results in the centre of the image. 
In practice\n # this region is simply undefined.\n sample_u[sample_u < 0] = 0\n\n if interpolate_crossover:\n fft_data = np.concatenate((fft_data[-1:], fft_data, fft_data[:1]), 0)\n sample_v = sample_v + 1\n\n polar_to_cart_warp = np.stack((sample_u, sample_v), -1)\n return cv2.remap(fft_data, polar_to_cart_warp, None, cv2.INTER_LINEAR)", "def polar(self):\n return PolarCoord((self._polar[0], self._polar[1]), self._polar[2])", "def cart2spher(vectors, axis_order=[0, 1, 2]):\n\n # print axis_order\n vectors = np.asarray(vectors)\n if vectors.shape[0] != 3:\n import ipdb\n\n ipdb.set_trace()\n raise ValueError(\n \"Expected vector shape is [3, N], actual shape is \" + str(vectors.shape)\n ) # , 'foo', 'bar', 'baz')\n # radius distance\n radius = np.linalg.norm(vectors, axis=0)\n normalized = vectors / radius\n\n # polar angle\n theta = np.arccos(normalized[axis_order[2]])\n # azimuth\n phi = np.arctan2(normalized[axis_order[1]], normalized[axis_order[0]])\n return np.asarray([radius, theta, phi])", "def cart2spher(x: np.ndarray, y: np.ndarray,\n z: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n indexes = np.where((x == 0) & (y == 0))[0]\n if indexes.size:\n x[indexes] = np.nan\n y[indexes] = np.nan\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n if indexes.size:\n lon[indexes] = 0\n lat[indexes] = np.pi * 0.5 * np.sign(z[indexes])\n return np.degrees(lon), np.degrees(lat)", "def corners_cartesian(self):\n x_corners, y_corners, z_corners = \\\n starwinds_magnetogram.coordinate_transforms.rectangular_coordinates_from_spherical(\n np.ones(self.polar_corners.shape),\n self.polar_corners,\n self.azimuthal_corners)\n\n return x_corners, y_corners, z_corners", "def sph2car(r, theta, phi):\n x = r * np.sin(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.sin(phi)\n z = r * np.cos(theta)\n\n return x, y, z", "def uniform_cart_to_polar(x, y, data):\n # create a set of polar coordinates to interpolate onto\n xmin, xmax = x.min(), x.max()\n ymin, ymax = y.min(), y.max()\n\n _max = max(abs(e.asarray([xmin, xmax, ymin, ymax])))\n\n rho = e.linspace(0, _max, len(x))\n phi = e.linspace(0, 2 * e.pi, len(y))\n rv, pv = e.meshgrid(rho, phi)\n\n # map points to x, y and make a grid for the original samples\n xv, yv = polar_to_cart(rv, pv)\n\n # interpolate the function onto the new points\n f = interpolate.RegularGridInterpolator((y, x), data, bounds_error=False, fill_value=0)\n return rho, phi, f((yv, xv), method='linear')", "def polar(self):\n assert self.is_compact(), \"Not a polytope.\"\n\n verts = [list(v() - self.center()) for v in self.vertex_generator()]\n return Polyhedron(ieqs = [[1] + list(v) for v in verts], \n field = self.field())", "def polar_to_cartesian(dist, theta, phi):\n z = np.cos(phi)\n s = np.sin(phi)\n x = s * np.cos(theta)\n y = s * np.sin(theta)\n return np.stack((x, y, z), axis=-1) * np.expand_dims(dist, axis=-1)", "def Rpz(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,1] = -S\n M[1,0] = +S\n M[1,1] = +C\n\n return M", "def cartesian2spherical(v):\n theta = np.arcsin(v[2]) \n phi = np.arctan2(v[1], v[0])\n \n return [theta, phi]", "def coord_polar(mat):\n x = mat[:, 0].copy()\n y = mat[:, 1].copy()\n\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y, x)\n\n return r, theta", "def cartesianToSpherical(xComp, yComp, zComp, negateMagnitude=False, \r\n tolerance=1E-10):\r\n ans = None\r\n mag = math.sqrt(xComp*xComp + yComp*yComp + zComp*zComp)\r\n if mag 
< tolerance:\r\n ans = [0.0, 0.0, 0.0]\r\n\r\n proj2 = xComp*xComp + yComp*yComp\r\n if ans is None and proj2 < tolerance:\r\n ans = [mag, 0.0, 0.0]\r\n elif abs(zComp) < tolerance:\r\n if abs(xComp) < tolerance:\r\n ans = [mag, 90.0, 90.0]\r\n if abs(yComp) < tolerance:\r\n ans = [mag, 90.0, 0.0]\r\n else:\r\n ans = [mag, 90.0, math.acos(xComp/mag)*_CONV]\r\n else:\r\n azimuth = math.acos(zComp/mag)\r\n ans = [mag, azimuth*_CONV, \r\n math.acos(xComp/(mag*math.sin(azimuth)))*_CONV]\r\n \r\n if negateMagnitude:\r\n ans = [-1*ans[0], 180+ans[1], ans[2]]\r\n return ans", "def xyz_to_spherical(self, xyz: np.ndarray, directions: bool = False) -> np.ndarray:\n if not directions:\n xyz = xyz - self.xyz\n r = np.sqrt(np.sum(xyz ** 2, axis=1))\n azimuth_iso = np.arctan2(xyz[:, 1], xyz[:, 0])\n altitude_iso = np.arccos(xyz[:, 2] / r)\n angles = np.column_stack(\n (\n (90 - (azimuth_iso * 180 / np.pi)) % 360,\n 90 - (altitude_iso * 180 / np.pi),\n )\n )\n if not directions:\n angles = np.column_stack((angles, r))\n return angles", "def translate_polar(self, radius, angle): \n return Position.fromnp(translate_polar(self.tonp(), radius, angle))", "def sph2cart(az, el, r):\n \n rcos_theta = r * np.cos(el)\n x = rcos_theta * np.cos(az)\n y = rcos_theta * np.sin(az)\n z = r * np.sin(el)\n \n return (x, y, z)", "def vector_polar(v):\n return vector_mag(v), vector_angle(v)", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def spherical_deproject(phi, theta,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat,\n native_pole_x): # pragma: no cover\n\n d_phi = phi - native_pole_x\n right_angle = np.pi / 2\n\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n cx = celestial_pole_x + d_phi - np.pi\n cy = theta\n else:\n cx = celestial_pole_x - d_phi\n cy = -theta\n\n else:\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n cos_d_phi = np.cos(d_phi)\n cx = celestial_pole_x + np.arctan2(\n -cos_theta * np.sin(d_phi),\n ((sin_theta * celestial_cos_lat)\n - (cos_theta * celestial_sin_lat * cos_d_phi)))\n cy = asin(\n (sin_theta * celestial_sin_lat)\n + (cos_theta * celestial_cos_lat * cos_d_phi))\n\n return cx, cy", "def polar_embedding(self):\n self.isomap_r, self.isomap_theta = coord_polar(self.isomap)\n\n return self", "def spherical_project_array(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n x = np.atleast_1d(np.asarray(x))\n y = np.atleast_1d(np.asarray(y))\n cos_lat = np.atleast_1d(np.asarray(cos_lat))\n sin_lat = np.atleast_1d(np.asarray(sin_lat))\n celestial_pole_x = np.atleast_1d(np.asarray(celestial_pole_x))\n celestial_pole_y = np.atleast_1d(np.asarray(celestial_pole_y))\n celestial_cos_lat = np.atleast_1d(np.asarray(celestial_cos_lat))\n celestial_sin_lat = 
np.atleast_1d(np.asarray(celestial_sin_lat))\n native_pole_x = np.atleast_1d(np.asarray(native_pole_x))\n\n sizes = np.array([x.size, celestial_pole_x.size, native_pole_x.size])\n max_array = np.argmax(sizes)\n if max_array == 0:\n theta = np.empty_like(x, dtype=nb.float64)\n phi = np.empty_like(x, dtype=nb.float64)\n n = x.size\n else:\n theta = np.empty_like(celestial_pole_x, dtype=nb.float64)\n phi = np.empty_like(celestial_pole_x, dtype=nb.float64)\n n = celestial_pole_x.size\n\n singular_celestial = celestial_pole_x.size == 1\n singular_coordinate = x.size == 1\n singular_native = native_pole_x.size == 1\n\n for i in range(n):\n coord_i = 0 if singular_coordinate else i\n celes_i = 0 if singular_celestial else i\n nativ_i = 0 if singular_native else i\n\n theta[i], phi[i] = spherical_project(\n x=x[coord_i],\n y=y[coord_i],\n cos_lat=cos_lat[coord_i],\n sin_lat=sin_lat[coord_i],\n celestial_pole_x=celestial_pole_x[celes_i],\n celestial_pole_y=celestial_pole_y[celes_i],\n celestial_cos_lat=celestial_cos_lat[celes_i],\n celestial_sin_lat=celestial_sin_lat[celes_i],\n native_pole_x=native_pole_x[nativ_i])\n\n return theta, phi", "def convert_coords_cart_sphere(coords_cart):\n shape = coords_cart.shape\n coords = coords_cart.reshape(3,-1)\n\n lat, lon, alt = np.zeros_like(coords)\n for i in range(coords.shape[1]):\n p_rec = [coords[0, i], coords[1, i], coords[2, i]]\n p_lat = sp.spiceypy.reclat(p_rec)\n alt[i], lon[i], lat[i] = p_lat\n \n lat = lat*180/np.pi\n lon = lon*180/np.pi\n alt = alt - mars_r \n\n coords_sphere = np.array([lat, lon, alt]).reshape(shape)\n return coords_sphere", "def cartesian_to_spherical(grid, vec=None):\n\n grid = np.atleast_2d(grid)\n\n if vec is None:\n return np.hstack([\n mkvc(np.sqrt(grid[:, 0]**2 + grid[:, 1]**2 + grid[:, 2]**2), 2),\n mkvc(np.arctan2(grid[:, 1], grid[:, 0]), 2),\n mkvc(\n np.arctan2(np.sqrt(grid[:, 0]**2 + grid[:, 1]**2), grid[:, 2]),\n 2\n ),\n ])\n\n if len(vec.shape) == 1 or vec.shape[1] == 1:\n vec = vec.reshape(grid.shape, order='F')\n\n theta = np.arctan2(grid[:, 1], grid[:, 0])\n phi = np.arctan2(np.sqrt(grid[:, 0]**2 + grid[:, 1]**2), grid[:, 2])\n\n r = (\n vec[:, 0] * np.sin(phi) * np.cos(theta) +\n vec[:, 1] * np.sin(phi) * np.sin(theta) +\n vec[:, 2] * np.cos(phi)\n )\n\n theta = - vec[:, 0] * np.sin(theta) + vec[:, 1] * np.cos(theta)\n\n phi = (\n vec[:, 0] * np.cos(phi) * np.cos(theta) +\n vec[:, 1] * np.cos(phi) * np.sin(theta) -\n vec[:, 2] * np.sin(phi)\n )\n\n newvec = [r, theta, phi]\n\n return np.vstack(newvec).T", "def GalacticToCartesian(Galactic,SolarPosition): \n \n # l,b,s->x,y,z\n cl = np.cos(Galactic[:,0])\n sl = np.sin(Galactic[:,0])\n cb = np.cos(Galactic[:,1])\n sb = np.sin(Galactic[:,1])\n x = SolarPosition[0]-Galactic[:,2]*cb*cl\n y = Galactic[:,2]*cb*sl\n z = Galactic[:,2]*sb+SolarPosition[1]\n\n if(len(Galactic[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vlos,mu_lcos(b),mu_b -> vx,vy,vz\n vl = pm2vel*Galactic[:,2]*Galactic[:,4]\n vb = pm2vel*Galactic[:,2]*Galactic[:,5]\n tmp2 = cb*Galactic[:,3]-sb*vb\n vx = cl*tmp2-sl*vl+SolarPosition[2]\n vy = sl*tmp2+cl*vl+SolarPosition[3]\n vz = sb*Galactic[:,3]+cb*vb+SolarPosition[4]\n Cartesian = np.column_stack((x,y,z,-vx,vy,vz))\n \n return Cartesian", "def _get_polar_sky_coords(self, x0, y0):\n x_sky, y_sky = self._get_cart_sky_coords(x0, y0)\n return np.hypot(y_sky, x_sky), np.arctan2(x_sky, y_sky)", "def spherical_to_planar_coord(axis: int, intensity: float, pitch: float, yaw: float) -> float:\n if axis == X_INDEX:\n return intensity * cos(yaw) 
* cos(pitch)\n if axis == Y_INDEX:\n return intensity * sin(yaw) * cos(pitch)\n if axis == Z_INDEX:\n return intensity * sin(pitch)", "def latlt2polar(lat,lt,hemisphere):\n from numpy import pi\n if hemisphere=='N':\n r = 90.-lat\n elif hemisphere=='S':\n r = 90.-(-1*lat)\n else:\n raise ValueError('%s is not a valid hemisphere, N or S, please!' % (hemisphere))\n #convert lt to theta (azimuthal angle) in radians\n theta = lt/24. * 2*pi\n\n #the pi/2 rotates the coordinate system from\n #theta=0 at negative y-axis (local time) to\n #theta=0 at positive x axis (traditional polar coordinates)\n return r,theta", "def cartesian_to_lon_lat(x, y, z, R = 1):\n lon = np.degrees(np.arctan2(y,x))\n lat = np.degrees(np.pi/2-np.arctan2((x**2+y**2)**0.5,z))\n\n return lon,lat", "def polar_from_rectangular(width_pol, height_pol, width_reg, height_reg):\n xcenter = (width_reg - 1.0) * 0.5\n ycenter = (height_reg - 1.0) * 0.5\n r_max = np.floor(max(xcenter, ycenter))\n xlist = (np.flipud(np.arange(width_reg)) - xcenter) * width_pol / r_max\n ylist = (np.flipud(np.arange(height_reg)) - ycenter) * width_pol / r_max\n x_mat, y_mat = np.meshgrid(xlist, ylist)\n r_mat = np.float32(\n np.clip(np.sqrt(x_mat ** 2 + y_mat ** 2), 0, width_pol - 1))\n theta_mat = np.float32(np.clip(\n (np.pi + np.arctan2(y_mat, x_mat)) * (height_pol - 1) / (2 * np.pi), 0,\n height_pol - 1))\n return r_mat, theta_mat", "def lon_lat_to_cartesian(lon, lat, R = 1):\n lon_r = np.radians(lon)\n lat_r = np.radians(lat)\n\n x = R * np.cos(lat_r) * np.cos(lon_r)\n y = R * np.cos(lat_r) * np.sin(lon_r)\n z = R * np.sin(lat_r)\n return x,y,z", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, -np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def _velocity_cartesian2spherical(pos,vel):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n #save cartesian velocities\n vx=vel[:,0]\n vy=vel[:,1]\n vz=vel[:,2]\n\n #convert to spherical coordinates\n pos_sph=_position_cartesian2spherical(pos) #spherical coordinates\n r=pos_sph[:,0]\n theta=pos_sph[:,1]\n phi=pos_sph[:,2]\n\n\n #compute spherical velocities\n vr = vx*np.sin(theta)*np.cos(phi) + vy*np.sin(theta)*np.sin(phi) + vz*np.cos(theta)\n vtheta = vx*np.cos(theta)*np.cos(phi) + vy*np.cos(theta)*np.sin(phi) - vz*np.sin(theta)\n vphi = -vx*np.sin(phi) + vy*np.cos(phi)\n\n if np.sum(r==0)!=0: #if some points are at the origin\n warnings.warn(\"Spherical velocity is not defined at origin. 
Returning 0.\")\n vr[r==0]=0\n vtheta[r==0]=0\n vphi[r==0]=0\n\n\n return np.dstack((vr,vtheta,vphi))[0]", "def Rpy(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,2] = +S\n M[2,0] = -S\n M[2,2] = +C\n\n return M", "def quaternion2rot3D(quaternion):\n theta, axis = quaternion2AngleAxis(quaternion)\n return angleAxis2rot3D(axis, theta)", "def polar(self):\n if self.dim() != 2:\n raise ValueError(\"p.polar() expects a point of 2 dimensions, %d given\" % self.dim())\n return self.norm(), self.arg()", "def convert_deg_to_rads(X, Y, Z):\r\n X = [math.radians(x) for x in X]\r\n Y = [math.radians(x) for x in Y]\r\n Z = [math.radians(x) for x in Z]\r\n return X, Y, Z", "def aspheresurface(self):\n\t\tR = self.coefficients[0]\n\t\ttheta = np.linspace(0, 2*np.pi, 100)\n\t\trho = np.linspace(0, R, 100)\n\t\t[u,r] = np.meshgrid(theta,rho)\n\t\tX = r*cos(u)\n\t\tY = r*sin(u)\n\t\tZ = aspherepolar(self.coefficients,r)\n\t\tfig = plt.figure(figsize=(12, 8), dpi=80)\n\t\tax = fig.gca(projection='3d')\n\t\tsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.RdYlGn,\n\t linewidth=0, antialiased=False, alpha = 0.6)\n\t\tplt.show()\n\t\treturn 0", "def polarizer(px,py,angle=0):\n M = np.array([[px,0],[0,py]])\n if angle != 0:\n return Jones.rotate(M,angle)\n else:\n return M", "def sph2cart(az: float, el: float, r: float) -> typing.Tuple[float, float, float]:\n rcos_theta = r * cos(el)\n x = rcos_theta * cos(az)\n y = rcos_theta * sin(az)\n z = r * sin(el)\n return x, y, z", "def _spherical_to_cartesian_fast(ra, dec, threads):\n import numexpr as ne\n\n #nthreads = ne.detect_number_of_cores()\n nthreads = threads\n ne.set_num_threads(nthreads)\n\n pi = math.pi\n rar = ne.evaluate('ra*pi/180.0')\n decr = ne.evaluate('dec*pi/180.0')\n\n hold1=ne.evaluate('cos(decr)') \n\n x = ne.evaluate('cos(rar) * hold1')\n y = ne.evaluate('sin(rar) * hold1')\n z = ne.evaluate('sin(decr)')\n \n return x, y, z", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def spherical_to_cartesian(grid, vec=None):\n grid = np.atleast_2d(grid)\n\n if vec is None:\n return np.hstack([\n mkvc(grid[:, 0] * np.sin(grid[:, 2]) * np.cos(grid[:, 1]), 2),\n mkvc(grid[:, 0] * np.sin(grid[:, 2]) * np.sin(grid[:, 1]), 2),\n mkvc(grid[:, 0] * np.cos(grid[:, 2]), 2)\n ])\n\n if len(vec.shape) == 1 or vec.shape[1] == 1:\n vec = vec.reshape(grid.shape, order='F')\n\n x = (\n vec[:, 0] * np.sin(grid[:, 2]) * np.cos(grid[:, 1]) +\n vec[:, 2] * np.cos(grid[:, 2]) * np.cos(grid[:, 1]) -\n vec[:, 1] * np.sin(grid[:, 1])\n )\n y = (\n vec[:, 0] * np.sin(grid[:, 2]) * np.sin(grid[:, 1]) +\n vec[:, 2] * np.cos(grid[:, 
2]) * np.sin(grid[:, 1]) -\n vec[:, 1] * np.cos(grid[:, 1])\n )\n z = (\n vec[:, 0] * np.cos(grid[:, 2]) -\n vec[:, 2] * np.sin(grid[:, 2])\n )\n\n newvec = [x, y, z]\n\n return np.vstack(newvec).T", "def create_azimuthal_polarization(dim, rotation):\n theta_array = np.zeros((dim, dim))\n\n for i in range(np.size(theta_array, 0)):\n for j in range(np.size(theta_array, 1)):\n x = -dim / 2 + i\n y = -dim / 2 + j\n # perform roation\n th = math.pi*rotation/180.0\n x = np.cos(th)*x - np.sin(th)*y\n y = np.sin(th)*x + np.cos(th)*y\n\n rot = math.atan2(x, y) + math.pi/2\n # factor = (rot % (2*math.pi))\n theta_array[i][j] = (rot % (2 * math.pi))\n return theta_array", "def getCartesian(self, phi, theta, radius):\n point_x = round(sin(theta) * cos(phi) * radius,4)\n point_y = round(sin(theta) * sin(phi) * radius,4)\n point_z = round(cos(theta) * radius,4)\n return [point_x, point_y, point_z]" ]
[ "0.76471215", "0.7625322", "0.7124141", "0.7048745", "0.7016254", "0.6994713", "0.69758683", "0.69751894", "0.6937516", "0.693436", "0.6899067", "0.6815604", "0.6741575", "0.6709526", "0.6708642", "0.66996217", "0.6681919", "0.6673441", "0.66703224", "0.665601", "0.6650638", "0.6648439", "0.6647601", "0.6612487", "0.66102684", "0.6607828", "0.6527136", "0.64772356", "0.647461", "0.64604145", "0.64564955", "0.6451568", "0.6449035", "0.6447963", "0.6445676", "0.64426774", "0.64366615", "0.64025015", "0.6397674", "0.63634", "0.6357328", "0.6339186", "0.63153493", "0.6311536", "0.6295835", "0.628893", "0.6286935", "0.6236479", "0.6198441", "0.6185981", "0.6182054", "0.6143109", "0.6126768", "0.61115974", "0.61081326", "0.6098696", "0.6076547", "0.60727084", "0.60678464", "0.6038593", "0.60261106", "0.6019213", "0.6010869", "0.6002104", "0.5987912", "0.59734267", "0.59682316", "0.59434575", "0.5943446", "0.59351057", "0.5934558", "0.59254307", "0.5924994", "0.5920051", "0.59043306", "0.589721", "0.5855404", "0.58436924", "0.5839966", "0.5838438", "0.58318746", "0.5827259", "0.5827034", "0.5825989", "0.5824689", "0.5818564", "0.5817106", "0.58157504", "0.5801487", "0.57928085", "0.5784488", "0.57815146", "0.5772305", "0.57692325", "0.57632416", "0.5759994", "0.5759365", "0.5759258", "0.5748719", "0.574385" ]
0.692961
10
Converts spherical polar coordinates to rectangular cartesian coordinates. Note that the input angles should be in latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole.
def spherical_to_cartesian(r, lat, lon):
    import math

    if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):
        x = r * math.cos(lat) * math.cos(lon)
        y = r * math.cos(lat) * math.sin(lon)
        z = r * math.sin(lat)
    else:
        x = r * np.cos(lat) * np.cos(lon)
        y = r * np.cos(lat) * np.sin(lon)
        z = r * np.sin(lat)

    return x, y, z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])", "def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])", "def polar_to_cartesian(radius, angle_deg):\n\n theta = np.deg2rad(angle_deg)\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return(x, y)", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def polarToCartesian(theta=0, radius=0):\n\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return x, y", "def sphericalToCartesian(magnitude, azimuthal, polar):\r\n azimuthal = azimuthal*math.pi/180.0\r\n polar = polar*math.pi/180.0\r\n xval = magnitude * math.sin(azimuthal) * math.cos(polar)\r\n yval = magnitude * math.sin(azimuthal) * math.sin(polar)\r\n zval = magnitude * math.cos(azimuthal)\r\n return [xval, yval, zval]", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def spherical_to_xyz(self, angles: np.ndarray) -> np.ndarray:\n # https://en.wikipedia.org/wiki/Spherical_coordinate_system\n azimuth_iso = (np.pi / 2 - angles[:, 0] * np.pi / 180) % (2 * np.pi)\n altitude_iso = (np.pi / 2 - angles[:, 1] * np.pi / 180) % (2 * np.pi)\n xyz = np.column_stack(\n (\n np.sin(altitude_iso) * np.cos(azimuth_iso),\n np.sin(altitude_iso) * np.sin(azimuth_iso),\n np.cos(altitude_iso),\n )\n )\n if angles.shape[1] > 2:\n xyz *= angles[:, 2:3]\n xyz += self.xyz\n return xyz", "def polar_to_cartesian(dist, theta, phi):\n z = np.cos(phi)\n s = np.sin(phi)\n x = s * np.cos(theta)\n y = s * np.sin(theta)\n return np.stack((x, y, z), axis=-1) * np.expand_dims(dist, axis=-1)", "def cartesian2polar(x, y):\n r = (x**2+y**2)**.5\n phi = atan2(y, x)\n return phi, r", "def spherical_2_cartesian(r, phi, theta, units='degrees'):\n phi = np.copy(phi)\n theta = np.copy(theta)\n if units == 'degrees':\n phi, theta = np.deg2rad(phi), np.deg2rad(theta)\n elif units == 'radians':\n pass\n else:\n raise AssertionError(\"Unexpected value entered for 'units', only supports either degrees or radians\", units)\n x = r * np.cos(phi) * np.sin(theta)\n y = r * np.sin(phi) * np.sin(theta)\n z = r * np.cos(theta)\n return x, y, z", "def spherical_to_cartesian(self, r, phi, theta):\n x = r*cos(phi)*sin(theta)\n y = r*sin(phi)*sin(theta)\n z = r*cos(theta)\n \n return Vector(float(x), float(y), float(z))", "def _polar_to_cartesian(self, radius: float, radians: float) -> None:\n self.x = round(radius * math.cos(radians), EPSILON_EXP_MINUS_1)\n self.y = round(radius * math.sin(radians), EPSILON_EXP_MINUS_1)", "def from_rpy(self, angles: np.ndarray) -> np.ndarray:\n _assert_iterables(angles, 'Roll-Pitch-Yaw angles')\n angles = np.array(angles)\n if angles.ndim != 1 or angles.shape[0] != 3:\n raise ValueError(f\"Expected `angles` must have shape (3,), got {angles.shape}.\")\n for angle in angles:\n if angle < -2.0* np.pi or angle > 2.0 * np.pi:\n raise ValueError(f\"Expected `angles` must be in the range [-2pi, 2pi], got {angles}.\")\n roll, pitch, yaw = angles\n cy = 
np.cos(0.5*yaw)\n sy = np.sin(0.5*yaw)\n cp = np.cos(0.5*pitch)\n sp = np.sin(0.5*pitch)\n cr = np.cos(0.5*roll)\n sr = np.sin(0.5*roll)\n q = np.zeros(4)\n q[0] = cy*cp*cr + sy*sp*sr\n q[1] = cy*cp*sr - sy*sp*cr\n q[2] = cy*sp*cr + sy*cp*sr\n q[3] = sy*cp*cr - cy*sp*sr\n return q", "def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta", "def cartesian_to_polar(self, x, y):\n # r = (x^2+y^2)^2, theta = tan^-1(y/x)\n # pole is the reference point of the coordinate system\n x, y = self.get_rel_to_pole(x, y)\n r = math.sqrt(pow(x, 2)+pow(y, 2))\n # set specific code for edge cases\n if x == 0 and y != 0:\n sign = lambda x: (1, -1)[x < 0]\n return r, sign(y)*math.pi/2\n if x == 0 and y == 0:\n return 0, 0\n else:\n theta = math.atan(y/x)\n return r, theta", "def rotation_matrices_from_angles(angles):\n\n angles = np.atleast_1d(angles)\n npts = len(angles)\n\n sina = np.sin(angles)\n cosa = np.cos(angles)\n\n R = np.zeros((npts, 2, 2))\n R[:, 0, 0] = cosa\n R[:, 1, 1] = cosa\n\n R[:, 0, 1] = -sina\n R[:, 1, 0] = sina\n\n return R", "def cartesianToPolar(x=0, y=0):\n\n radius = np.hypot(x, y)\n theta = np.arctan2(y, x)\n return theta, radius", "def SphericalToCartesian(Spherical):\n\n # r,theta,phi -> x,y,z\n r = Spherical[:,0]\n st = np.sin(Spherical[:,1])\n sp = np.sin(Spherical[:,2])\n ct = np.cos(Spherical[:,1])\n cp = np.cos(Spherical[:,2])\n x = r*st*cp\n y = r*st*sp\n z = r*ct\n\n if (len(Spherical[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n return Cartesian\n else:\n # vr,vtheta,vphi -> vx,vy,vz\n vr = Spherical[:,3]\n vt = Spherical[:,4]\n vp = Spherical[:,5]\n vx = vr*st*cp - vt*ct*cp - vp*sp\n vy = vr*st*sp + vt*ct*sp + vp*cp\n vz = vr*ct - vt*st\n Cartesian= np.column_stack((x,y,z,vx,vy,vz))\n return Cartesian", "def to_polar(center_coords, neighbors_coords):\n return cart2pol((neighbors_coords - center_coords)[:, 0],\n (neighbors_coords - center_coords)[:, 1])", "def CartesianToPolar(Cartesian):\n \n # x,y,z -> R,phi,z\n R = np.sqrt(Cartesian[:,0]*Cartesian[:,0]+Cartesian[:,1]*Cartesian[:,1])\n phi = np.arctan2(Cartesian[:,1],Cartesian[:,0])\n z = Cartesian[:,2]\n phi[phi<0.] 
+= 2.*np.pi\n if (len(Cartesian[0,:])==3):\n Polar = np.column_stack((R,phi,z))\n else:\n # vx,vy,vz -> vR,vphi,vz\n cp = np.cos(phi)\n sp = np.sin(phi)\n vR = Cartesian[:,3]*cp+Cartesian[:,4]*sp\n vphi = Cartesian[:,4]*cp-Cartesian[:,3]*sp\n vz = Cartesian[:,5]\n Polar = np.column_stack((R,phi,z,vR,vphi,vz))\n\t\t\n return Polar", "def cartesian_to_polar(cart: np.ndarray, radial_step: float, azimuth_step : float, radial_bins: int,\n azimuth_bins: int, cart_resolution: float) -> np.ndarray:\n max_range = radial_step * radial_bins\n angles = np.linspace(0, 2 * np.pi, azimuth_bins, dtype=np.float32).reshape(azimuth_bins, 1)\n ranges = np.linspace(0, max_range, radial_bins, dtype=np.float32).reshape(1, radial_bins)\n angles = np.tile(angles, (1, radial_bins))\n ranges = np.tile(ranges, (azimuth_bins, 1))\n x = ranges * np.cos(angles)\n y = ranges * np.sin(angles)\n cart_pixel_width = cart.shape[0]\n if (cart_pixel_width % 2) == 0:\n cart_min_range = (cart_pixel_width / 2 - 0.5) * cart_resolution\n else:\n cart_min_range = cart_pixel_width // 2 * cart_resolution\n u = (cart_min_range + y) / cart_resolution\n v = (cart_min_range - x) / cart_resolution\n cart_to_polar_warp = np.stack((u, v), -1)\n polar = np.expand_dims(cv2.remap(cart, cart_to_polar_warp, None, cv2.INTER_LINEAR), -1)\n return np.squeeze(polar)", "def get_R(angles):\n cs, ss = np.cos(angles), np.sin(angles)\n zeros, ones = np.zeros(len(cs)), np.ones(len(cs))\n Rs = np.array(\n [[cs, ss, zeros], [-ss, cs, zeros], [zeros, zeros, ones]], dtype=np.float32\n ) # (3, 3, N)\n\n return Rs.transpose((2, 0, 1))", "def Rpy(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,2] = +S\n M[2,0] = -S\n M[2,2] = +C\n\n return M", "def to_polar(x, y):\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return r, phi", "def to_cartesian(dimensions, angles):\n return Operator(transform=np.transpose(np.array(_basis_vectors(dimensions, angles))))", "def to_cartesian(r, phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def polar_from_rectangular(width_pol, height_pol, width_reg, height_reg):\n xcenter = (width_reg - 1.0) * 0.5\n ycenter = (height_reg - 1.0) * 0.5\n r_max = np.floor(max(xcenter, ycenter))\n xlist = (np.flipud(np.arange(width_reg)) - xcenter) * width_pol / r_max\n ylist = (np.flipud(np.arange(height_reg)) - ycenter) * width_pol / r_max\n x_mat, y_mat = np.meshgrid(xlist, ylist)\n r_mat = np.float32(\n np.clip(np.sqrt(x_mat ** 2 + y_mat ** 2), 0, width_pol - 1))\n theta_mat = np.float32(np.clip(\n (np.pi + np.arctan2(y_mat, x_mat)) * (height_pol - 1) / (2 * np.pi), 0,\n height_pol - 1))\n return r_mat, theta_mat", "def radar_polar_to_cartesian(azimuths, fft_data, radar_resolution, cart_resolution, cart_pixel_width,\n interpolate_crossover=True, fix_wobble=True):\n if (cart_pixel_width % 2) == 0:\n cart_min_range = (cart_pixel_width / 2 - 0.5) * cart_resolution\n else:\n cart_min_range = cart_pixel_width // 2 * cart_resolution\n coords = np.linspace(-cart_min_range, cart_min_range, cart_pixel_width, dtype=np.float32)\n Y, X = np.meshgrid(coords, -1 * coords)\n sample_range = np.sqrt(Y * Y + X * X)\n sample_angle = np.arctan2(Y, X)\n sample_angle += (sample_angle < 0).astype(np.float32) * 2. 
* np.pi\n\n # Interpolate Radar Data Coordinates\n azimuth_step = (azimuths[-1] - azimuths[0]) / (azimuths.shape[0] - 1)\n sample_u = (sample_range - radar_resolution / 2) / radar_resolution\n sample_v = (sample_angle - azimuths[0]) / azimuth_step\n # This fixes the wobble in the old CIR204 data from Boreas (keenan)\n if fix_wobble and radar_resolution == 0.0596:\n azimuths = azimuths.reshape((1, 1, 400)) # 1 x 1 x 400\n sample_angle = np.expand_dims(sample_angle, axis=-1) # H x W x 1\n diff = np.abs(azimuths - sample_angle)\n c3 = np.argmin(diff, axis=2)\n azimuths = azimuths.squeeze()\n c3 = c3.reshape(cart_pixel_width, cart_pixel_width) # azimuth indices (closest)\n mindiff = sample_angle.squeeze() - azimuths[c3]\n sample_angle = sample_angle.squeeze()\n mindiff = mindiff.squeeze()\n\n subc3 = c3 * (c3 < 399)\n aplus = azimuths[subc3 + 1]\n a1 = azimuths[subc3]\n delta1 = mindiff * (mindiff > 0) * (c3 < 399) / (aplus - a1)\n subc3 = c3 * (c3 > 0)\n a2 = azimuths[subc3]\n aminus = azimuths[1 + (c3 > 0) * (subc3 - 2)]\n delta2 = mindiff * (mindiff < 0) * (c3 > 0) / (a2 - aminus)\n sample_v = c3 + delta1 + delta2\n sample_v = sample_v.astype(np.float32)\n\n # We clip the sample points to the minimum sensor reading range so that we\n # do not have undefined results in the centre of the image. In practice\n # this region is simply undefined.\n sample_u[sample_u < 0] = 0\n\n if interpolate_crossover:\n fft_data = np.concatenate((fft_data[-1:], fft_data, fft_data[:1]), 0)\n sample_v = sample_v + 1\n\n polar_to_cart_warp = np.stack((sample_u, sample_v), -1)\n return cv2.remap(fft_data, polar_to_cart_warp, None, cv2.INTER_LINEAR)", "def rectangular_from_polar(width_reg, height_reg, width_pol, height_pol):\n xcenter = (width_reg - 1.0) * 0.5\n ycenter = (height_reg - 1.0) * 0.5\n r_max = np.floor(max(xcenter, ycenter))\n r_list = np.linspace(0.0, r_max, width_pol)\n theta_list = np.arange(0.0, height_pol, 1.0) * 2 * np.pi / (height_pol - 1)\n r_mat, theta_mat = np.meshgrid(r_list, theta_list)\n x_mat = np.float32(\n np.clip(xcenter + r_mat * np.cos(theta_mat), 0, width_reg - 1))\n y_mat = np.float32(\n np.clip(ycenter + r_mat * np.sin(theta_mat), 0, height_reg - 1))\n return x_mat, y_mat", "def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def to_polar(self, physics=False):\n if self.__coordsys in (Cartesian, Cartesian_3):\n self.__coordsys = Polar if self.__coordsys == Cartesian \\\n else PhySpherical if physics else MathSpherical\n self.update_coord(vct.pol(self.list_repr()))", "def from_angles(self, angles: np.ndarray) -> np.ndarray:\n return self.from_rpy(angles)", "def angular_to_cartesian(theta, phi):\n return array([sin(theta) * cos(phi),\n sin(theta) * sin(phi),\n cos(theta)])", "def PolarToCartesian(Polar):\n\t \n # R,phi,z -> x,y,z\n cp = np.cos(Polar[:,1])\n sp = np.sin(Polar[:,1])\n x = Polar[:,0] * cp\n y = Polar[:,0] * sp\n z = Polar[:,2]\n\n if (len(Polar[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vR,vphi,vz -> vx,vy,vz\n vx = Polar[:,3]*cp-Polar[:,4]*sp\n vy = Polar[:,4]*cp+Polar[:,3]*sp\n vz = Polar[:,5]\n Cartesian = np.column_stack((x,y,z,vx,vy,vz))\n \n return Cartesian", "def set_rama_angles(moving_h, angles):\n # print \"angles\", angles\n # STOP()\n result_h = moving_h.deep_copy()\n result_h.reset_atom_i_seqs()\n phi_psi_atoms = utils.get_phi_psi_atoms(moving_h)\n assert len(phi_psi_atoms) == len(angles)\n for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):\n phi_psi_pair = ps_atoms[0]\n phi_psi_angles = utils.get_pair_angles(phi_psi_pair)\n # phi\n if target_angle_pair[0] is not None:\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][1],\n phi_psi_pair[0][2],\n angle=-phi_psi_angles[0]+target_angle_pair[0])\n # psi\n if target_angle_pair[1] is not None:\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[1][1],\n phi_psi_pair[1][2],\n angle=-phi_psi_angles[1]+target_angle_pair[1])\n return result_h", "def spherical2cartesian(spherical):\n spherical = np.array(spherical).squeeze()\n distance, azimuth, elevation = spherical\n x = distance * np.sin(azimuth) * np.cos(elevation)\n y = distance * np.sin(azimuth) * np.sin(elevation)\n z = distance * np.cos(azimuth)\n return np.array([x, y, z])", "def polarCameraToCartesian(self):\n x = self.cameraPolar[0] * np.sin(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n y = self.cameraPolar[0] * np.cos(self.cameraPolar[2] * np.pi / 180)\n z = self.cameraPolar[0] * np.cos(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n self.cameraPosition = [x, y, z]", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def cartesian_to_ellipse(center, angle, lengths):\n xInd, yInd = np.mgrid[:512, :512]\n major = max(lengths)/np.mean(lengths)\n minor = min(lengths)/np.mean(lengths)\n xInd, yInd = xInd - center[0], yInd - center[1]\n xInd, yInd = rotate(xInd, yInd, angle=-angle)\n xInd, yInd = xInd*minor, yInd*major\n xInd, yInd = rotate(xInd, yInd, angle=angle)\n return xInd, yInd", "def getRotatedPixels(x,y,p0,angles):\n # Current pixel\n phi0 = np.array([x[p0[0], p0[1]], y[p0[0], p0[1]]])\n # Convert to polar coordinates\n rphi0 = cart_to_pol(phi0)\n angles_rad = rphi0[1] - np.array([a*np.pi/180 for a in angles]) 
\n \n # Rotate the polar coordinates by each frame angle\n angles_ind = [[rphi0[0],phi] for phi in angles_rad]\n angles_pol = np.array(list(zip(*angles_ind)))\n \n # Convert from polar to cartesian and pixel coordinates\n angles_px = np.array(grid_pol_to_cart(angles_pol[0], angles_pol[1]))+int(x.shape[0]/2)\n angles_px = angles_px.T\n angles_px = np.fliplr(angles_px)\n return angles_px", "def spherical2cartesian(phi, theta, depth):\n x = depth * np.sin(theta) * np.cos(phi)\n y = depth * np.cos(theta)\n z = depth * np.sin(theta) * np.sin(phi)\n\n return x, y, z", "def _pole_to_cart(self,angles,distances):\n cart=[]\n for i in xrange(0,len(angles)-1):\n angle = angles[i]\n distance = distances[i] \n xs, ys = distance*cos(angle), distance*sin(angle)\n cart.append(tuple((xs,ys)))\n return cart", "def from_rpy(self, Angles: np.ndarray) -> np.ndarray:\n _assert_iterables(Angles, 'Roll-Pitch-Yaw angles')\n Angles = np.copy(Angles)\n if Angles.ndim != 2 or Angles.shape[-1] != 3:\n raise ValueError(f\"Expected `angles` must have shape (N, 3), got {Angles.shape}.\")\n # RPY to Quaternion\n cy = np.cos(0.5*Angles[:, 2])\n sy = np.sin(0.5*Angles[:, 2])\n cp = np.cos(0.5*Angles[:, 1])\n sp = np.sin(0.5*Angles[:, 1])\n cr = np.cos(0.5*Angles[:, 0])\n sr = np.sin(0.5*Angles[:, 0])\n Q = np.zeros((Angles.shape[0], 4))\n Q[:, 0] = cy*cp*cr + sy*sp*sr\n Q[:, 1] = cy*cp*sr - sy*sp*cr\n Q[:, 2] = sy*cp*sr + cy*sp*cr\n Q[:, 3] = sy*cp*cr - cy*sp*sr\n return Q/np.linalg.norm(Q, axis=1)[:, None]", "def corners_cartesian(self):\n x_corners, y_corners, z_corners = \\\n starwinds_magnetogram.coordinate_transforms.rectangular_coordinates_from_spherical(\n np.ones(self.polar_corners.shape),\n self.polar_corners,\n self.azimuthal_corners)\n\n return x_corners, y_corners, z_corners", "def make_cartesian(r: float, phi: float):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def createFromPolar(cls, norm, angle, **kwargs):\n x, y = cls.cartesian([norm, angle])\n return cls(x, y, **kwargs)", "def get_polar_coordinates(cup_position, bot_position):\n\n distance_x = cup_position[0] - bot_position[0]\n distance_y = cup_position[1] - bot_position[1]\n\n r = math.hypot(distance_x, distance_y)\n theta = math.degrees(math.atan(distance_y/distance_x))\n\n return r, theta", "def GalacticToPolar(Galactic,SolarPosition): \n\n Cartesian = GalacticToCartesian(Galactic,SolarPosition)\n Polar = CartesianToPolar(Cartesian)\n\n return Polar", "def cartesian2polar(coords, inputshape, origin):\n\n r_index, theta_index = coords\n\n r = r_index * (rangeX[1] - rangeX[0])/2.0/inputshape[0]\n theta = theta_index * 2.0*np.pi/inputshape[1] + np.pi\n\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n \n i = np.round(x/Lx*inputshape[0]) + origin[0]\n j = np.round(y/Ly*inputshape[0]) + origin[1]\n \n return (i,j)", "def CartesianToSpherical(Cartesian):\n\n # x,y,z -> r,theta,phi\n x = Cartesian[:,0]\n y = Cartesian[:,1]\n z = Cartesian[:,2]\n r = np.sqrt(x*x + y*y + z*z)\n projR = np.sqrt(x*x + y*y)\n theta = np.arccos(z/r)\n phi = np.arctan2(y,x)\n theta[theta<0.] 
+=2.*np.pi\n \n if (len(Cartesian[0,:])==3):\n Spherical = np.column_stack((r,theta,phi))\n return Spherical\n else:\n # vx,vy,vz -> vr,vtheta,vphi\n vx = Cartesian[:,3]\n vy = Cartesian[:,4]\n vz = Cartesian[:,5]\n vr = (x*vx + y*vy + z*vz)/r\n vt = (z*vr - r*vz)/projR\n vp = r*np.sin(theta)*(vy*x-y*vx)/(projR*projR) \n Spherical = np.column_stack((r,theta,phi,vr,vt,vp))\n return Spherical", "def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = angles[0:3]\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def polarizer(px,py,angle=0):\n M = np.array([[px,0],[0,py]])\n if angle != 0:\n return Jones.rotate(M,angle)\n else:\n return M", "def turnangles(M):\n Mt = M.transpose()\n x0, y0 = Mt[0], Mt[1] # arrays of all x's & y's\n x1, y1 = Mt[0][ 0], Mt[1][ 0] # start point\n x2, y2 = Mt[0][-1], Mt[1][-1] # end point\n\n vec1_x, vec1_y = x1 - x0, y1 - y0 \n vec2_x, vec2_y = x2 - x0, y2 - y0 \n vec1dot2 = vec1_x * vec2_x + vec1_y * vec2_y\n vec1_len = np.sqrt(vec1_x * vec1_x + vec1_y * vec1_y)\n vec2_len = np.sqrt(vec2_x * vec2_x + vec2_y * vec2_y)\n\n cos = vec1dot2 / np.maximum(vec1_len * vec2_len, EPS)\n cos = np.minimum(np.maximum(cos, -1.), 1.) \n turn_angles = np.pi - np.arccos(cos) \n # TODO convert [-360,360] -> [-180,180]\n # turn_angles = np.mod(turn_angles + 3*np.pi, 2.*np.pi) - np.pi \n # -2pi->0, -pi->-pi 0->0, pi->-pi 2pi->0 \n turn_angles[0], turn_angles[-1] = 0., 0. # endpoints\n return np.rad2deg(turn_angles)", "def spherical2cartesian(sphere):\n cart = np.zeros(sphere.shape, dtype=np.float64)\n sine_phi = np.sin(sphere[:, 2])\n\n cart[:, 0] = sphere[:, 0] * np.cos(sphere[:, 1]) * sine_phi\n cart[:, 1] = sphere[:, 0] * np.sin(sphere[:, 1]) * sine_phi\n cart[:, 2] = sphere[:, 0] * np.cos(sphere[:, 2])\n return cart", "def cartesian2spherical(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y, z = cartesian\n distance = np.linalg.norm(cartesian)\n azimuth = np.arccos(z / distance)\n elevation = np.arctan2(y, x) # Use arctan2 instead of arctan to get proper sign!\n return np.array([distance, azimuth, elevation])", "def _euler_angles_to_rotation_matrix(theta):\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def polar_box(rad_min, rad_max, ang_min, ang_max, **kwargs):\n delta_angle = (ang_max - ang_min) % 360.0\n ang_max_sequential = ang_min + delta_angle\n maxarc_coords = list(approxArc((0.0, 0.0), rad_max, ang_min,\n \t\t\t\t\t\t\t ang_max_sequential).coords)\n minarc_coords = list(approxArc((0.0, 0.0), rad_min, ang_min,\n \t\t\t\t\t\t\t ang_max_sequential).coords)\n minarc_coords.reverse()\n poly = geo.Polygon(maxarc_coords + minarc_coords)\n return desc.PolygonPatch(poly, **kwargs)", "def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def cart2polar3d(cartesian):\n 
radius = np.linalg.norm(cartesian)\n theta = np.cos", "def make_sample_rot_matrix(self, angles):\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def pol2cart(theta, rho):\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n return x, y", "def cartesian_to_spherical(x, y, z):\n import math\n\n xsq = x ** 2\n ysq = y ** 2\n zsq = z ** 2\n\n r = (xsq + ysq + zsq) ** 0.5\n s = (xsq + ysq) ** 0.5\n\n if np.isscalar(x) and np.isscalar(y) and np.isscalar(z):\n lon = math.atan2(y, x)\n lat = math.atan2(z, s)\n else:\n lon = np.arctan2(y, x)\n lat = np.arctan2(z, s)\n\n return r, lat, lon", "def pol2cart(theta: float, rho: float) -> typing.Tuple[float, float]:\n return rho * cos(theta), rho * sin(theta)", "def get_R_torch(angles):\n cs, ss = torch.cos(angles), torch.sin(angles)\n zeros = torch.zeros(len(cs), device=angles.device)\n ones = torch.ones(len(cs), device=angles.device)\n Rs = torch.empty((angles.shape[0], 3, 3), device=angles.device).float() # (N, 3, 3)\n Rs[:, 0] = torch.stack((cs, ss, zeros), dim=1)\n Rs[:, 1] = torch.stack((-ss, cs, zeros), dim=1)\n Rs[:, 2] = torch.stack((zeros, zeros, ones), dim=1)\n\n return Rs", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def multi_rot_Y(angle_rads: numpy.ndarray) -> numpy.ndarray:\n ry = numpy.empty((angle_rads.shape[0], 4, 4))\n ry[...] 
= numpy.identity(4)\n ry[:, 0, 0] = ry[:, 2, 2] = numpy.cos(angle_rads)\n ry[:, 0, 2] = numpy.sin(angle_rads)\n ry[:, 2, 0] = -ry[:, 0, 2]\n\n return ry", "def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def polar(cls, angle, length=1.0):\n x, y = cos_sin_deg(angle)\n vec = tuple.__new__(cls, (x * length, y * length))\n vec.__dict__['length'] = length * 1.0\n return vec", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def polar(position):\n return list(polar(complex(position[0], position[1])))", "def uniform_cart_to_polar(x, y, data):\n # create a set of polar coordinates to interpolate onto\n xmin, xmax = x.min(), x.max()\n ymin, ymax = y.min(), y.max()\n\n _max = max(abs(e.asarray([xmin, xmax, ymin, ymax])))\n\n rho = e.linspace(0, _max, len(x))\n phi = e.linspace(0, 2 * e.pi, len(y))\n rv, pv = e.meshgrid(rho, phi)\n\n # map points to x, y and make a grid for the original samples\n xv, yv = polar_to_cart(rv, pv)\n\n # interpolate the function onto the new points\n f = interpolate.RegularGridInterpolator((y, x), data, bounds_error=False, fill_value=0)\n return rho, phi, f((yv, xv), method='linear')", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * 
dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius", "def snap_angles(angles):\n\tpi_over_four = np.pi / 4\n\treturn np.round(angles / pi_over_four) * pi_over_four", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def solve(self, angles):\n return reduce(\n lambda a, m: np.dot(m, a),\n reversed(self._matrices(angles)),\n np.array([0., 0., 0., 1.])\n )[:3]", "def translate_polar(self, radius, angle): \n return Position.fromnp(translate_polar(self.tonp(), radius, angle))", "def convert_deg_to_rads(X, Y, Z):\r\n X = [math.radians(x) for x in X]\r\n Y = [math.radians(x) for x in Y]\r\n Z = [math.radians(x) for x in Z]\r\n return X, Y, Z", "def set_rama_angles(moving_h, angles, direction_forward=True, check_omega=False):\n # print \"angles\", angles\n # STOP()\n result_h = moving_h.deep_copy()\n result_h.reset_atom_i_seqs()\n fixed_omega = False\n phi_psi_atoms = utils.get_phi_psi_atoms(moving_h, omega=True)\n assert len(phi_psi_atoms) == len(angles), \"%d != %d\" % (len(phi_psi_atoms), len(angles))\n if not direction_forward:\n phi_psi_atoms.reverse()\n angles.reverse()\n for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):\n phi_psi_pair = ps_atoms[0]\n # print \"phi_psi_pair\", phi_psi_pair\n omega = ps_atoms[2]\n phi_psi_angles = utils.get_pair_angles(phi_psi_pair)\n # print \"ps_atoms, target_angle_pair\", phi_psi_angles, target_angle_pair\n # phi\n if target_angle_pair[0] is not None and phi_psi_angles[0] is not None:\n rotation_angle = -phi_psi_angles[0]+target_angle_pair[0]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][1],\n phi_psi_pair[0][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # psi\n if target_angle_pair[1] is not None and phi_psi_angles[1] is not None:\n rotation_angle = -phi_psi_angles[1]+target_angle_pair[1]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[1][1],\n phi_psi_pair[1][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # omega\n if omega is not None and 
abs(abs(omega)-180) > 10 and check_omega:\n rotation_angle= -omega+180\n # print \"Omega rotation:\", omega, rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][0],\n phi_psi_pair[0][1],\n angle=rotation_angle,\n direction_forward=direction_forward)\n fixed_omega = True\n # print utils.list_rama_outliers_h(result_h)\n # result_h.write_pdb_file(file_name=\"variant_%s.pdb\" % direction_forward)\n # STOP()\n return result_h, fixed_omega", "def axangle2rotmat(axangles):\r\n\r\n if type(axangles) is not np.ndarray:\r\n raise ValueError('Rodrigues only works on numpy arrays')\r\n \r\n # store original shape\r\n shape = axangles.shape\r\n assert shape[-1] % 3 == 0, \"inputs are not axis angles\"\r\n axangles = axangles.reshape((-1, 3))\r\n\r\n rotmats = []\r\n for i in range(axangles.shape[0]):\r\n rotmat, _ = cv2.Rodrigues(axangles[i])\r\n rotmats.append(rotmat)\r\n\r\n # restore original shape\r\n new_shape = shape[:-1] + (shape[-1]//3*9,)\r\n return np.array(rotmats).reshape(new_shape)", "def circular_mean(angles):\n\n # Convert the angles to cartesian points on the unit circle\n cartesian = np.column_stack((np.cos(angles), np.sin(angles)))\n #breakpoint()\n\n # Find the mean of the cartesian coordinates\n mean_cart = np.mean(cartesian, axis=0)\n\n # Find the angle of the mean point\n mean_angle = np.arctan2(mean_cart[1], mean_cart[0])\n\n # And return it\n return mean_angle", "def GalacticToCartesian(Galactic,SolarPosition): \n \n # l,b,s->x,y,z\n cl = np.cos(Galactic[:,0])\n sl = np.sin(Galactic[:,0])\n cb = np.cos(Galactic[:,1])\n sb = np.sin(Galactic[:,1])\n x = SolarPosition[0]-Galactic[:,2]*cb*cl\n y = Galactic[:,2]*cb*sl\n z = Galactic[:,2]*sb+SolarPosition[1]\n\n if(len(Galactic[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vlos,mu_lcos(b),mu_b -> vx,vy,vz\n vl = pm2vel*Galactic[:,2]*Galactic[:,4]\n vb = pm2vel*Galactic[:,2]*Galactic[:,5]\n tmp2 = cb*Galactic[:,3]-sb*vb\n vx = cl*tmp2-sl*vl+SolarPosition[2]\n vy = sl*tmp2+cl*vl+SolarPosition[3]\n vz = sb*Galactic[:,3]+cb*vb+SolarPosition[4]\n Cartesian = np.column_stack((x,y,z,-vx,vy,vz))\n \n return Cartesian", "def xyz_to_spherical(self, xyz: np.ndarray, directions: bool = False) -> np.ndarray:\n if not directions:\n xyz = xyz - self.xyz\n r = np.sqrt(np.sum(xyz ** 2, axis=1))\n azimuth_iso = np.arctan2(xyz[:, 1], xyz[:, 0])\n altitude_iso = np.arccos(xyz[:, 2] / r)\n angles = np.column_stack(\n (\n (90 - (azimuth_iso * 180 / np.pi)) % 360,\n 90 - (altitude_iso * 180 / np.pi),\n )\n )\n if not directions:\n angles = np.column_stack((angles, r))\n return angles", "def _rotate_coords(self, x, y, theta, ox, oy):\n s, c = self._pkgs['numpy'].sin(theta), self._pkgs['numpy'].cos(theta)\n x, y = self._pkgs['numpy'].asarray(x) - ox, self._pkgs['numpy'].asarray(y) - oy\n return x * c - y * s + ox, x * s + y * c + oy", "def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle", "def convert_to_cartesian(grid: List[Tuple[float, float]], radius: float = 1.0) -> List[Tuple[float, float, float]]:\n\n # conversion radians -> degrees\n r2d = 180.0 / np.pi\n\n # calculate x/y/z coordinates, assuming r=1\n return [\n (\n radius * np.cos(lat / r2d) * np.cos(lon / r2d),\n radius * np.cos(lat / r2d) * np.sin(lon / r2d),\n radius * np.sin(lat / r2d),\n )\n for lon, lat in grid\n ]", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 
2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.c_[phi, theta, psi]" ]
[ "0.69084823", "0.6874946", "0.68709445", "0.6757234", "0.6717186", "0.66640395", "0.6525707", "0.65130717", "0.6473055", "0.63773054", "0.636866", "0.63315773", "0.6329993", "0.6318075", "0.63170975", "0.62099665", "0.61673385", "0.6121615", "0.6089441", "0.6082668", "0.6054056", "0.60411054", "0.5995764", "0.5991739", "0.59687823", "0.5947852", "0.5908668", "0.5900712", "0.5899968", "0.58964306", "0.58700764", "0.58634454", "0.5855301", "0.58482754", "0.5843043", "0.58354557", "0.58204585", "0.5811855", "0.5810132", "0.580818", "0.5780068", "0.5770583", "0.5763146", "0.57451713", "0.5741074", "0.5729888", "0.5720142", "0.5714858", "0.5705889", "0.56882054", "0.5654576", "0.56535494", "0.5624848", "0.5615567", "0.5597171", "0.55884314", "0.556873", "0.55686337", "0.5568038", "0.55526036", "0.55370873", "0.5534635", "0.55329543", "0.55329543", "0.55329543", "0.5526635", "0.55225545", "0.5491322", "0.5486953", "0.5480613", "0.54783356", "0.5466241", "0.546169", "0.54517204", "0.54509443", "0.54509443", "0.54492366", "0.54461366", "0.54461366", "0.5443594", "0.5440405", "0.5434975", "0.54288477", "0.54276705", "0.5412179", "0.5411153", "0.54026306", "0.54026306", "0.5401249", "0.54002327", "0.53999865", "0.53941244", "0.5393515", "0.53902537", "0.5357311", "0.53570753", "0.534666", "0.5336511", "0.5330301", "0.5322741" ]
0.6114368
18
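A minimal usage sketch for the spherical_to_cartesian document in the record above (it assumes that function and numpy are already in scope; the sample values and expected outputs are illustrative assumptions, not part of the dataset record):

import numpy as np

# Radius 1, latitude 45 degrees, longitude 90 degrees, both angles in radians,
# following the query's latitude/longitude convention (zero elevation at the equator).
x, y, z = spherical_to_cartesian(1.0, np.pi / 4, np.pi / 2)
# Expected, approximately: x = 0.0, y = 0.7071, z = 0.7071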
Compute career pathways (i.e., user-performed and recommended) for each user.
def compute_pathways(users, job_graph, debug, min_likelihood_thr=0.2):
    start_time = time.time()
    __print_msg('Computing career pathways...', debug)
    user_pathways = {}
    tot_users = len(users)
    i = 0
    for user, user_jobs in users.items():
        user_pathway = compute_user_pathway(user_jobs, job_graph)
        recommended_pathway = recommend_pathway(user_jobs, job_graph, user_pathway[-1], min_likelihood_thr)
        user_pathways[user] = (user_pathway, recommended_pathway)
        i += 1
        if i % 1000 == 0:
            __print_msg('Num users processed: {}/{}'.format(i, tot_users), debug)
    end_time = time.time()
    __print_msg('Execution time: {} seconds'.format(end_time - start_time), debug)
    return user_pathways
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_user_pathway(user_jobs, job_graph, debug=False):\r\n pathway = []\r\n for i, job in enumerate(user_jobs):\r\n if i == 0:\r\n continue\r\n cluster, _ = job_graph.assign_job_to_jobgraph_state(job)\r\n pathway.append(cluster)\r\n return pathway", "def metric_path_length(pathways):\r\n num_users = len(pathways)\r\n num_good_recommendations = 0\r\n sum_u_path_len = 0\r\n sum_r_path_len = 0\r\n career_goal_reached = 0\r\n for user, pathway_tuple in pathways.items():\r\n u_path = pathway_tuple[0]\r\n r_path = pathway_tuple[1]\r\n sum_u_path_len += len(u_path)\r\n sum_r_path_len += len(r_path)\r\n if r_path[-1]==u_path[-1]:\r\n career_goal_reached += 1\r\n if len(r_path) < len(u_path):\r\n num_good_recommendations += 1\r\n return 100.0 * career_goal_reached/num_users, 100.0 * num_good_recommendations / num_users, sum_u_path_len/num_users, sum_r_path_len/num_users", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n q = Queue()\n q.put([userID])\n\n while not q.empty():\n user_path = q.get()\n user = user_path[-1]\n\n if user not in visited.keys():\n visited[user] = user_path\n for friend in self.friendships[user]:\n new_path = user_path[::]\n new_path.append(friend)\n q.put(new_path)\n\n # get average degree of separation per user\n degrees = 0\n for key, item in visited.items():\n degrees += len(item)\n \n print('average degree of separation', degrees/len(self.users))\n\n return visited", "def pathways(self) -> str:\n return self._pathways", "def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n # BFTs starting at user_id, return first path to every reachable person\n q = [[user_id]]\n while q:\n path = q.pop(0)\n person = path[-1]\n # add the person and the path to the person\n for friend in self.friendships[person]:\n if friend not in visited and friend != user_id:\n q.append(path + [friend])\n visited[friend] = path + [friend]\n\n return visited", "def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! 
IMPLEMENT ME\n # graphs=Graph()\n # for i in self.users:\n # graphs.add_vertex(i)\n \n # for i in self.users:\n # for x in self.friendships[i]:\n # graphs.add_edge(i,x)\n\n # for i in graphs.vertices:\n # if graphs.bfs(i,user_id):\n # visited[i]=graphs.bfs(i,user_id)\n queue=Queue()\n queue.enqueue([user_id])\n while queue.size()>0:\n path=queue.dequeue()\n current_user = path[-1]\n if current_user not in visited:\n visited[current_user]=path\n for ID in self.friendships[current_user]:\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n return visited", "def compute_social_welfare(self):\r\n #self.social_welfare = 0\r\n #for i in range(self.num_routes):\r\n # self.social_welfare += self.routes[i].flow * self.routes[i].get_route_utility()\r", "def get_all_social_paths(self, user_id):\n if len(self.friendships) > 0:\n visited = {}\n q = Queue()\n q.enqueue([user_id])\n\n while q.size() > 0:\n curr_path = q.dequeue()\n curr_vertex = curr_path[-1]\n\n if curr_vertex not in visited:\n visited[curr_vertex] = curr_path\n\n for friend in self.friendships[curr_vertex]:\n path_copy = curr_path[:]\n path_copy.append(friend)\n q.enqueue(path_copy)\n\n return visited\n\n else:\n print(\"There are currently no friendship paths in the network\")", "def evaluate_metrics(pathways, debug):\r\n __print_msg('Evaluating metrics...', debug)\r\n metrics = {}\r\n metrics['CareerGoalReached'], metrics['ShorterRecommendedPath'], metrics['UserPathAvgLength'], metrics['RecPathAvgLength'] = metric_path_length(pathways)\r\n __print_msg('Career goal reached: {}'.format(metrics['CareerGoalReached']), debug)\r\n __print_msg('Recommended path shorter: {}'.format(metrics['ShorterRecommendedPath']), debug)\r\n __print_msg('User pathway average length: {}'.format(metrics['UserPathAvgLength']), debug)\r\n __print_msg('Recommended pathway average length: {}'.format(metrics['RecPathAvgLength']), debug)", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! 
IMPLEMENT ME\n pass", "async def handle_trophy_road(self, user: discord.User):\n\n trophies = await self.get_trophies(user)\n tppased = await self.get_player_stat(user, 'tppassed')\n\n for tier in self.TROPHY_ROAD:\n if tier in tppased:\n continue\n threshold = self.TROPHY_ROAD[tier]['Trophies']\n\n if trophies > threshold:\n async with self.config.user(user).tppassed() as tppassed:\n tppassed.append(tier)\n async with self.config.user(user).tpstored() as tpstored:\n tpstored.append(tier)\n\n reward_name, reward_emoji, reward_str = self.tp_reward_strings(\n self.TROPHY_ROAD[tier], tier)\n\n desc = \"Claim the reward by using the `-rewards` command!\"\n title = f\"Trophy Road Reward [{threshold} trophies]\"\n embed = discord.Embed(\n color=EMBED_COLOR, title=title, description=desc)\n embed.set_author(name=user.name, icon_url=user.avatar_url)\n embed.add_field(name=reward_name,\n value=f\"{reward_emoji} {reward_str}\")\n\n return embed\n\n else:\n return False", "def test_make_pathways(self):\n basic_test_runner(self, 'pathways')", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n print(f\"user ID {userID}\")\n\n for i in range(1, len(self.users)):\n visited[i] = self.bfs(userID, i)\n\n return visited", "def relationships_for_user(self, user):\n # List of users who follow \"user\".\n followers_users_list = [ relationship.from_user for relationship in self.filter(to_user=user) ]\n \n # List of relationships for users \"user\" follows, who also follow \"user\".\n friend_list = self.filter(from_user=user, to_user__in=followers_users_list)\n \n # List of users \"user\" is friends with.\n friends_users_list = [ relationship.to_user for relationship in friend_list ]\n \n # List of relatiosnhips for users who follow \"user\", but \"user\" does not follow back.\n follower_list = self.filter(to_user=user).exclude(from_user__in=friends_users_list)\n \n # List of relationships for users \"user\" follows, but do not follow \"user\" back.\n following_list = self.filter(from_user=user).exclude(to_user__in=friends_users_list)\n\n relationships = {\n 'friends': friend_list,\n 'followers': follower_list,\n 'following': following_list,\n }\n return relationships", "def automated(user):\n\n # Start should be a Plan() and will include any classes the user has already taken to this point. It should have\n # accurate counts of its course typesTaken before starting the search. Goal is the end state desired by the user.\n # Usually this will be the gradReqs for the users Curriculum object, but this could also be used to create\n # limited searches. number_of_classes_per_quarter is the max number of courses a user is willing to take in a\n # given quarter.\n\n # Cost should represent how many quarters are needed to graduate. They should however be multiplied by some factor\n # so that they are larger than our heuristic values. Our heuristics should produce values for rarity that are 8 - #\n # of times offered in 8 quarters. So the values will range between 0-8. Unlocks could be anything between 0 and\n # the # of total classes opened up by taking it. Unlocks seems to range between 0-281. We opted to cap this number\n # to 50. We decided to cap the unlocks score to 50 and in order to have rarity weigh as much as unlocks we opted to\n # multiply its results by 6. The result is that rarity ranges between 0-42 and unlocks ranges from 0-50. We never\n # want to over-estimate the cost of a path to graduation. 
50 is chosen because it is the maximum value of h(n) seen\n # in our data.\n # Should equal max(rarity) + max(unlocks) + bonus\n # g(n) = (quarters x stdCost) or cost so far\n # h(n) = stdCost - (rarity + unlocks + bonus)\n # f(n) = g(n) + h(n)\n # Record actual cost of a path as g(n) / stdCost = number of quarters to graduate.\n # A course that is offered every quarter and unlocks nothing and does not match a preferred elective type will cost\n # stdCost which is exactly what adding a class costs. Selecting something more rare, and/or unlocks more classes\n # will appear to cost less than a normal quarter. So the path will be an under-estimate of cost and therefore it\n # will be admissible. As long as stdCost is >= h(n) we will never have negative costs therefore h(n) will be\n # considered consistent.\n # stdCost must = max(rarity) + max(unlocks) + max(bonus)\n\n # Should we disallow online courses?\n removeOnline = user.disallowOnline\n\n # Get students undergrad degree type\n undergrad = user.undergraduate_degree\n\n # Setup Curriculum\n curriculum = user.curriculum\n\n # Setup concentration\n if curriculum is CS:\n userPref = int(user.getCSFocus)\n else:\n userPref = 1\n\n\n # Create null node\n start = Plan(\n selectionOrder = list(),\n coursesTaken = user.getCoursesTaken,\n termNum = user.getTerm,\n currTermIdx = 0,\n daysFilled = [],\n maxCourses = user.max_courses,\n typesTaken = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n selectionsWithDay = list()\n )\n\n # If the users undergrad type matches their graduate type waive introductory courses\n start = waiveCourses(start, undergrad, curriculum)\n\n # Initialize variables\n frontier = PriorityQueue()\n frontier.put(start, 0)\n costSoFar = {}\n stdCost = setStdCost(curriculum)\n courseLimit = 20 # max number of courses in a solution\n\n # Store results of the query once for reuse later in search\n terms = ['0975', '0980', '0985', '0990', '0995', '1000', '1005']\n queryResults = dict((term, TermCourses.getAvailableCourses(term)) for term in terms)\n\n # Modify the heuristic score of classes to emphasize certain course types and the students focus in particular\n queryResults = modifyHeuristics(userPref, queryResults, terms, curriculum)\n\n plansPopped = 0\n while not frontier.empty():\n # Select current plan\n curr_plan = frontier.get()\n plansPopped += 1\n\n # If the search has gone on too long return an empty list so the user can restart the search\n if timedOut(plansPopped):\n return list(('Please see advisor', 'mon'))\n\n # Goal Checking\n if isGoal(curr_plan, curriculum, courseLimit, userPref):\n break\n\n # Count up non-capstone courses in plan\n cur = curr_plan.typesTaken\n tot = cur[0] + cur[1] + cur[2] + cur[13]\n\n # Filter the query removing courses that the student cannot take\n subsetResults = queryResults[TermCourses.convert_stream(curr_plan.termNum)]\n filteredResults = filter(subsetResults, curr_plan, curr_plan.daysFilled, curriculum, tot, removeOnline)\n\n # Loop through the top 8 filtered results and try each suggested plan\n for suggestedCourseInfo in filteredResults[:8]:\n suggestedPlan = Plan(\n selectionOrder = copy.deepcopy(curr_plan.selectionOrder),\n coursesTaken = copy.deepcopy(curr_plan.coursesTaken),\n termNum = copy.deepcopy(curr_plan.termNum),\n currTermIdx = copy.deepcopy(curr_plan.currTermIdx),\n daysFilled = copy.deepcopy(curr_plan.daysFilled),\n maxCourses = user.max_courses,\n typesTaken = copy.deepcopy(curr_plan.typesTaken),\n selectionsWithDay = copy.deepcopy(curr_plan.selectionsWithDay))\n\n # 
Add suggested course to current plan\n addUpdateCourse(suggestedPlan, suggestedCourseInfo, curriculum)\n\n # Calculate the true cost of the current plan (non heuristic)\n new_cost = costSoFar.get(str(curr_plan.coursesTaken), costSoFar.get(str(curr_plan.coursesTaken), 0))+stdCost\n\n # Do not explore plans with excessive numbers of courses\n taken = suggestedPlan.typesTaken\n totCourses = taken[0] + taken[1] + taken[2] + taken [3] + taken[4] + taken[13]\n if curriculum == CS:\n if totCourses >= courseLimit or suggestedPlan.typesTaken[2] > 8:\n continue\n else:\n if totCourses >= courseLimit or suggestedPlan.typesTaken[2] > 3:\n continue\n\n # Only explore a plan if it has not been seen or it is a better plan than a previously seen version\n if str(suggestedPlan.coursesTaken) not in costSoFar or new_cost < costSoFar[str(suggestedPlan.coursesTaken)]:\n costSoFar[str(suggestedPlan.coursesTaken)] = new_cost\n priority = -new_cost + heuristics(suggestedCourseInfo, suggestedPlan, user)\n frontier.put(suggestedPlan, priority)\n\n return curr_plan.selectionsWithDay", "def summarize_paths(trips):\n\t# get a set of distinct Path objects. We have to use equality rather than \n\t# hashing here. \n\titins = []\n\t# add all OTP paths to the Itinerary each belongs to, creating new ones\n\t# along the way \n\tfor trip in trips:\n\t\tif trip.path not in itins:\n\t\t\titins.append( Itinerary(trip.path) )\n\t\ti = itins.index(trip.path)\n\t\titins[i].add_OTP_trip( trip )\n\t# remove any itineraries which have no travel times in the DB at all \n\tbad_itins = []\n\tfor i, itin in enumerate(itins):\n\t\ttts = [ d for d in itin.departures if d.travel_time ]\n\t\tif len(tts) == 0: bad_itins.append(i)\n\tfor i in reversed(bad_itins):\n\t\titins.pop(i)\n\treturn itins", "def IteratePaths(self):\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n a1, aT = [-1,], []\n\n for q in range(self.Nq):\n if q == 0:\n self.apath[-1] = 0.2\n elif q == 1:\n self.apath[-1] = 0.3\n else:\n self.apath[-1] = max(0,aT[-1]-(aT[-1]-aT[-2])*a1[-1]/(a1[-1]-a1[-2]))\n \n self.npath[-1] = 0\n self.cpath[-1] = self.apath[-1]*(1+self.r) + self.b\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n self.apath[y], self.npath[y], self.cpath[y] = self.DirectSolve(y)\n\n aT.append(self.apath[-1])\n a1.append(self.apath[-self.T])\n if (fabs(self.apath[-self.T])<self.tol):\n break\n for y in range(-1,-(self.T+1),-1):\n self.upath[y] = self.util(self.cpath[y],self.npath[y])", "def getAllSocialPaths(self, userID):\n visited = {}\n # use a queue\n q = []\n q.append([userID])\n # add userID as its own key and value to visited\n visited[userID] = [userID]\n\n while len(q) > 0:\n path = q.pop(0)\n curr_friend = path[-1]\n\n # for all the userID keys inside self.friendships\n for friend in self.friendships[curr_friend]:\n # add neighbor as a key, if not visited, in visited with an empty list as value\n if friend not in visited:\n visited[friend] = list()\n # break out of loop if already in visited\n else: \n continue\n \n # create a new list that holds the path from userID to friend\n friend_path = list(path)\n # add the friend onto the end of the list\n friend_path.append(friend)\n # also add path to the queue\n q.append(friend_path) \n # add path as the value to the friend\n visited[friend].extend(friend_path)\n \n return visited", "def total_infection(user, version):\n frontier = [user] + [s for s in user.students] + [c for c in user.coaches]\n discovered = []\n while 
len(frontier) > 0:\n # Take first user from frontier\n curr_user = frontier.pop(0)\n # Add the user to discovered users\n discovered.append(curr_user)\n # Add that user's connections to frontier (if not discovered)\n frontier += [s for s in curr_user.students if s not in discovered]\n frontier += [c for c in curr_user.coaches if c not in discovered]\n # Infect the user\n curr_user.version = version", "def compute_path_metric(self, sw, path, util, time_now):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n #DESIGN CHOICE: Should we 1) always include extra-domain state, 2)\n #only include extra-domain state when not stale (timestamp), 3) always exclude\n #extra-domain state when calculating the path metric? Here we do (1)\n used = self.graph[u][v]['used'] + util\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))", "def calculate_path(self):\n\n mid_states = []\n\n # Add in between states\n for i in range(Constants.NUMBER_LAPS):\n mid_states = mid_states + Constants.LAP_STATES\n\n # Concatenate beginning, middle and end states to obtain full path of states\n self.path_states = Constants.BEGINNING_STATES + mid_states + Constants.END_STATES\n\n # Determine the amount of times that the smallbot will drive forward during the path\n self.times_driven_forward = self.path_states.count('CREEP_FORWARD')\n\n print(\"Calculated path: \", self.path_states)", "def path_correction(data, user_coords):\n # Return list if it only has the destination\n if len(data) == 1:\n return data\n\n # Calculate distance from user to second waypoint\n second_coords = (data[1][\"lat\"], data[1][\"lon\"])\n user_second_dist = geopy.distance.distance(user_coords, second_coords).miles\n\n # Calculate distance from user to first waypoint\n first_coords = (data[0][\"lat\"], data[0][\"lon\"])\n user_first_dist = geopy.distance.distance(user_coords, first_coords).km\n\n # Calculate distance from first waypoint to second waypoint\n first_second_dist = geopy.distance.distance(first_coords, second_coords).miles\n\n # Determine if path correction is applicable\n if user_second_dist < first_second_dist or user_first_dist < 0.01:\n # Delete first element of list so that user doesn't backtrack\n return data[1:]\n else:\n # No path correction needed\n return data", "def two_user_route_statistics(i,j, source_data, destination_data, source_destination_data, delta=1.2):\n\toccupancy_ratio = 0.0\n\tminimum_distance_so_far = 0.0\n\tcommon_travel_distance = 0.0\n\n\ttry:\n\t\tif source_destination_data[j][i] + source_data[i][j] <= 1.2*source_destination_data[i][i] and source_destination_data[j][i] + destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[j][i] + source_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\tsecond = ((source_destination_data[j][i] + 
destination_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\toccupancy_ratio = (first+second)/2\n\t\t\tcommon_travel_distance = source_destination_data[j][i]\n\t\t\tminimum_distance_so_far = source_data[i][j] + source_destination_data[j][i] + destination_data[i][j]\n\n\t\tif source_destination_data[i][j] + destination_data[j][i] <= 1.2*source_destination_data[i][i] and source_destination_data[i][j] + source_data[j][i] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[i][j] + destination_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\t\t\n\t\t\tsecond = ((source_destination_data[i][j] + source_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\n\t\t\ttotal_distance = source_data[j][i] + source_destination_data[i][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[i][j]+source_destination_data[j][j]+destination_data[j][i] <= 1.2*source_destination_data[i][i]:\n\t\t\tfirst = (1)\n\t\t\tsecond = (source_destination_data[j][j]/(source_data[i][j]+source_destination_data[j][j]+destination_data[j][i]))\n\n\t\t\ttotal_distance = source_data[i][j] + source_destination_data[j][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[j][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[j][i]+source_destination_data[i][i]+destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = (source_destination_data[i][i]/(source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]))\n\t\t\tsecond = (1)\n\n\t\t\ttotal_distance = source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][i]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\texcept Exception as e:\n\t\toccupancy_ratio = 1.0\n\t\tminimum_distance_so_far = 0.0\n\t\tcommon_travel_distance = 0.0\n\n\n\treturn occupancy_ratio, common_travel_distance, minimum_distance_so_far", "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def user_path(a, b):\n tx = cypher_transaction()\n\n # Limit the number of relationships in the path?\n # p = 
shortestPath((a)-[*..15]-(b))\n query = \"\"\"\n MATCH\n (a:user {username:{username_a}}),\n (b:user {username:{username_b}}),\n p = shortestPath((a)-[]->(b))\n RETURN LENGTH(p), p\n \"\"\"\n params = {\n 'username_a': a['username'],\n 'username_b': b['username']\n }\n tx.append(query, parameters=params)\n results = _first(tx.commit())\n paths = []\n for record in results:\n length, path = record.values\n m = \"There are {0} hops from {1} to {2}:\\n\"\n print(m.format(length, a['name'], b['name']))\n for rel in path.relationships:\n print(\" ({0})-[:{1}]->({2})\".format(\n rel.start_node['name'],\n rel.type,\n rel.end_node['name']\n ))\n paths.append(path)\n return paths", "def compute_user_local_sensitivity(sc, dataset, user_id, num_iters_ls):\n\n res = defaultdict(lambda: 0.0)\n\n original_recs, original_qii = compute_recommendations_and_qii(sc, dataset,\n user_id)\n original_recs = recommendations_to_dd(original_recs)\n\n res[\"recommendee_user_id\"] = user_id\n res[\"recommendee_recs_l1_norm\"] = l1_norm(original_recs)\n res[\"recommendee_qii_l1_norm\"] = l1_norm(original_qii)\n res[\"recommendee_recs_l0_norm\"] = len(original_recs)\n res[\"recommendee_qii_l0_norm\"] = len(original_qii)\n res[\"perturbations\"] = []\n\n all_users = get_user_list(dataset)\n for x in xrange(num_iters_ls):\n if perturb_specific_user:\n other_user_id = perturb_specific_user\n else:\n other_user_id = random.choice(list(set(all_users) - {user_id}))\n print \"Perturbing user\", other_user_id, \"(\", x+1, \"out of\",\\\n num_iters_ls, \")\"\n perturbed_dataset = perturb_user_ratings(sc, dataset, other_user_id)\n start = time.time()\n recs, qii = compute_recommendations_and_qii(sc, perturbed_dataset, user_id)\n stop = time.time()\n recs = recommendations_to_dd(recs)\n rec_ls = calculate_l1_distance(original_recs, recs)\n qii_ls = calculate_l1_distance(original_qii, qii)\n\n report = {}\n report[\"perturbed_user_id\"] = other_user_id\n report[\"perturbed_recs_l1_norm\"] = l1_norm(recs)\n report[\"perturbed_qii_l1_norm\"] = l1_norm(qii)\n report[\"perturbed_recs_l0_norm\"] = len(recs)\n report[\"perturbed_qii_l0_norm\"] = len(qii)\n report[\"recs_ls\"] = rec_ls\n report[\"qii_ls\"] = qii_ls\n report[\"recs_ls_norm\"] = rec_ls/float((len(recs)*4))\n report[\"qii_ls_norm\"] = qii_ls/float((len(qii)*4))\n print \"Local sensitivity of recs: \", rec_ls/float((len(recs)*4))\n print \"Local sensitivity of QII: \", qii_ls/float((len(qii)*4))\n report[\"computation_time\"] = stop - start\n\n\n res[\"perturbations\"].append(report)\n\n for per in res[\"perturbations\"]:\n res[\"avg_recs_ls\"] += float(per[\"recs_ls\"])/len(res[\"perturbations\"])\n res[\"max_recs_ls\"] = max(res[\"max_recs_ls\"], per[\"recs_ls\"])\n res[\"avg_recs_ls_norm\"] +=\\\n float(per[\"recs_ls_norm\"])/len(res[\"perturbations\"])\n res[\"max_recs_ls_norm\"] = max(res[\"max_recs_ls_norm\"],\n per[\"recs_ls_norm\"])\n res[\"avg_qii_ls\"] += float(per[\"qii_ls\"])/len(res[\"perturbations\"])\n res[\"max_qii_ls\"] = max(res[\"max_qii_ls\"], per[\"qii_ls\"])\n res[\"avg_qii_ls_norm\"] +=\\\n float(per[\"qii_ls_norm\"])/len(res[\"perturbations\"])\n res[\"max_qii_ls_norm\"] = max(res[\"max_recs_qii_norm\"],\n per[\"qii_ls_norm\"])\n return dict(res)", "def get_all_social_paths(self, user_id): #each user v v each path V V\n # output example- {1: [1], 8: [1, 8], 10: [1, 10], 5: [1, 5], 2: [1, 10, 2], 6: [1, 10, 6], 7: [1, 10, 2, 7]}\n visited = {} # Note that this is a dictionary, not a set\n # Need to do a bfs using the user id\n # first step is to 
traverse the graph and record all the vertices as keys in visited using bft\n # then take those keys and use bfs on each, using user_id as the starting node and and the key as\n # the destination node\n\n # Modification of BFT\n # create an empty dict\n # q = Queue()\n q = []\n\n # init enqueue the starting node\n q.append(user_id)\n\n while len(q) > 0:\n # Dequeue the first item\n v = q.pop(0)\n # If it's not been visited:\n if v not in visited:\n # Mark as visited (i.e. add to the visited set)\n visited[v] = []\n\n # Do something with the node\n print(f\"Visited {v}\")\n\n # Add all neighbors to the queue\n for next_vert in self.friendships[v]:\n q.append(next_vert)\n\n # once visited is filled, then we start the bfs\n #print('vv',visited)\n possible_paths = {}\n #run a bfs for each key in visited\n for v in visited:\n possible_paths[v] = []\n \n if v == user_id:\n visited[v] = [user_id]\n\n path = []\n while len(path) < len(visited):\n\n # Add all neighbors to the queue\n for next_vert in self.friendships[v]:\n print(possible_paths[v])\n # copy the path\n # temp_path = list(path)\n # temp_path.append(next_vert)\n # add path to possible_paths\n path.append(next_vert)\n\n possible_paths[v].append(path) # HAVE TO USE QUEUE OR STACK, THEY ENSURE THE NEIGHBORS\n # FOLLOW THE CORRECT ORDER WHEN LOOPING \n\n if v == path[-1]:\n \n # IF SO, RETURN PATH\n visited[v] = path\n break\n\n # for x in visited:\n # bfs(user_id, x)\n # visited[x].add(path)\n \n print('pct of total users in network', len(visited[1])/len(visited))\n print('degrees of separation', len(visited[1]) - 1)\n return visited", "def getPaths(self):\n\n trafficEndPoints = []\n # A job denotes a traffic flow, which corresponds to an iperf task.\n for job in self.config.trace.jobs:\n trafficEndPoints.append((job['src'], job['dst']))\n\n # Obtain details about user-specified non-default links.\n configuredLinks = []\n for linkInfo in self.config.topoData['linkInfos']:\n configuredLinks.append((linkInfo['src'], linkInfo['dst']))\n\n paths = None\n spec = self.config.topoData['flowSpec']\n if spec == 'shortest_path':\n # export paths info and create routing conf using shortest paths\n adjFile = self.config.adjacencyFile\n writeAdjList(self.net, adjFile)\n info(\"**** [G2]: adjacency list written to file\", adjFile, \"\\n\")\n\n outfile = os.path.join(self.config.outPath, SHORTEST_PATH_FILE)\n paths = generateShortestPaths(adjFile, outfile, trafficEndPoints, configuredLinks)\n info(\"**** [G2]: shortest paths written to file\", outfile, \"\\n\")\n # Note: Since there can be multiple shortest paths between two endpoints, solution could vary.\n elif \".json\" in spec:\n info(\"**** [G2]: reading path info from\", spec, \"\\n\")\n paths = readFromPathFile(spec)\n else:\n paths = None\n return paths", "def _compute_paths(self):\n\n distance_mat, preds = sp.dijkstra(self._adj_mat, directed=True,\n return_predecessors=True)\n # assert not np.any(np.isinf(distance_mat))\n if np.any(np.isinf(distance_mat)):\n logger.debug(\"Found inf distance!!\")\n\n return distance_mat, preds", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus", "def get_paths(self, target, use_edges=False, downwards=None):\n raise NotImplementedError()", "def 
create_path(network, user_A, user_B, path=[]):\n path = path + [user_A] # all paths include starting node\n if user_A == user_B: # id the last node is user_B a valid path exists\n return path # base case\n for node in network[user_A][0]:\n if node not in path: # otherwise path is an infinite loop\n path = create_path(network, node, user_B, path)\n if path: # after the recursion hits the base case\n return path\n return None", "def compute_path_metric(self, sw, path, util, time_now, local_contrib):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n # Use the last-learned-via-sync value for a link\n if (not local_contrib) and 'sync_learned' in self.graph[u][v]:\n used1 = self.graph[u][v]['sync_learned'] + util\n used2 = self.graph[u][v]['used'] + util\n # ['used'] is a strict lower bound for ['sync_learned']\n if used1 > used2: \n used = used1\n logging.debug(\"CS [%s] using sync_learned value 1 [%f]\", str(self.name), used1)\n else:\n used = used2\n logging.debug(\"CS [%s] using sync_learned value 2 [%f]\", str(self.name), used2)\n else:\n logging.debug(\"CS [%s] using tracking value\", str(self.name))\n used = self.graph[u][v]['used'] + util\n\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))", "def its_because_school(connection):\n print(\"Shortest path between two nodes\")\n answer = connection.execute(connection.get_path, 0, 4)\n for a in answer.values():\n print(a)\n print(\"Centrality closeness\")\n answer = connection.execute(connection.get_closeness, 1, True)\n for a in answer.values():\n print(a)\n print(\"Betweenness centrality\")\n answer = connection.execute(connection.get_betweenness, 2)\n for a in answer.values():\n print(a)\n print(\"Eigenvector\")\n answer = connection.execute(connection.get_eigenvector, 3)\n for a in answer.values():\n print(a)\n print(\"Degree centrality\")\n answer = connection.execute(connection.get_degree_centrality)\n for a in answer.values():\n print(a)", "def greedy_path():\n itinerary = []\n cities = all_cities(data_set)\n starting_city = randomize_city_start(cities.keys()) # start from a random city\n # print \"starting_city: %s\" % starting_city\n cities_visited = {}\n \n # iterate through all cities\n count = 1\n while True:\n possible_routes = []\n #distance = []\n # print \"starting_city: %s\" % starting_city\n for path in data_set:\n # we only start with city that we have assigned in starting_city\n if starting_city in path['city_start']:\n # we don't go to cities we have visited\n if path['city_end'] in cities_visited:\n continue\n else:\n # print \"path: \", path\n possible_routes.append(path) # add the city if not in the list\n \n if not possible_routes:\n break\n # append this to itinerary\n route = get_shortest_route(possible_routes)\n count += 1\n itinerary.append(route)\n # add this city to visited_cities list\n cities_visited[route[0]] = count\n starting_city = 
route[1]\n \n return itinerary", "def all_ways(context):\n current = context['current']\n\n roads = RoadSegment.objects.filter(prescription=current)\n trails = TrailSegment.objects.filter(prescription=current)\n ways = Way.objects.filter(prescription=current)\n inspections = SignInspection.objects.filter(way__prescription=current)\n traffic_diagrams = TrafficControlDiagram.objects.filter(\n roadsegment__prescription=current).exclude(name=\"custom\").distinct()\n\n for qs in [roads, trails, ways, inspections]:\n qs.modified = qs.aggregate(Max('modified'))[\"modified__max\"]\n\n return {\n \"roads\": roads,\n \"trails\": trails,\n \"ways\": ways,\n \"standard_traffic_diagrams\": traffic_diagrams,\n \"inspections\": inspections,\n \"modified\": max([modified for modified in\n roads.modified, trails.modified,\n ways.modified, inspections.modified,\n current.created\n if modified is not None])\n }", "def find_path_to_friend(network, user_A, user_B, path=None):\n if path is None:\n path = []\n\n if user_A in network and user_B in network:\n path.append(user_A)\n current_connections = get_connections(network, user_A)\n if user_B in current_connections:\n return [user_A, user_B]\n for u in current_connections:\n if u not in path:\n next_path = find_path_to_friend(network, u, user_B, path)\n if next_path:\n return [user_A] + next_path", "def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def __routes(self, with_return):\n nonzeo_pois = list(filter(None, self.pois.keys()))\n\n for path in itertools.permutations(nonzeo_pois):\n steps = self.poi_distance(0, path[0])\n for i, j in zip(path, path[1:]):\n steps += self.poi_distance(i, j)\n if with_return:\n steps += self.poi_distance(path[-1], 0)\n yield steps", "def __compute_unit_routes(self):\n unit_rules_graph = NearCNF.__UnitRulesGraph(self)\n unit_routes_as_trees = {var: self.__dijkstra_max_prob_tree(unit_rules_graph, var)\n for var in unit_rules_graph.vertices}\n\n def tree_to_lists(root, current_route=[], routes=None):\n if routes is None:\n routes = []\n current_route.append(root.key)\n if not root.children:\n routes.append(current_route[:])\n else:\n for child in root.children:\n tree_to_lists(child, current_route, routes)\n current_route.pop()\n return routes\n\n unit_routes_as_lists = {var: tree_to_lists(tree.root)\n for var, tree in unit_routes_as_trees.items()}\n return unit_routes_as_lists", "def generate_trivial_tours(self):\n self.routes = []\n for c in range(1, self.vrpdata.NumCust+1):\n self.routes.append(VRP_Route([c]))\n return self.get_objective()", "def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):\n \n waypoint_co2 = {}\n waypoint_durations = {}\n\n # get all prefectures referential\n db_connector = Connector()\n with db_connector:\n results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)\n all_waypoints = 
pd.DataFrame(results.fetchall())\n\n # Vérification si les trajets péfecture à préfecture ont été déjà calculés\n db_connector = Connector()\n with db_connector:\n saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n # Dans le précalcul des trajets optimaux, utilisation de la date courante\n travel_date = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n bad_waypoints = []\n\n if saved_waypoints.rowcount > 0 and not is_force_compute:\n print(\"le référentiel des voyage existe déjà\")\n else:\n try:\n bdd_management.truncate_journey()\n\n for (from_city, to_city) in combinations(all_waypoints[0].values, 2):\n try:\n if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:\n continue\n\n route = requests.get(API_NAVITIA.format(\n int(from_city), int(to_city), travel_date, API_KEY))\n response = json.loads(route.text)\n\n mid_duration = 0\n mid_co2 = 0\n for journey in response[\"journeys\"]:\n mid_duration += journey[\"duration\"]\n mid_co2 += journey[\"co2_emission\"][\"value\"]\n\n waypoint_co2[frozenset([from_city, to_city])\n ] = mid_co2/len(response[\"journeys\"])\n waypoint_durations[frozenset(\n [from_city, to_city])] = mid_duration/len(response[\"journeys\"])\n\n except Exception as e:\n print(\"Error with finding the route between %s and %s : %s\" %\n (from_city, to_city, response[\"error\"][\"message\"]))\n if 'no destination point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(to_city))\n\n if 'no origin point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(from_city))\n\n for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response[\"error\"][\"message\"]):\n if not int(bad_insee_code) in bad_waypoints:\n bad_waypoints.append(int(bad_insee_code))\n\n # Enregistrement des trajets point à point (préfecture à préfecture)\n db_connector = Connector()\n with db_connector:\n for (waypoint1, waypoint2) in waypoint_co2.keys():\n waypoint = [waypoint1,\n waypoint2,\n str(waypoint_co2[frozenset([waypoint1, waypoint2])]),\n str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]\n \n db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)\n # commit trajets unitaires dans la bdd\n db_connector.commit()\n\n # enregistrement des préfectures non trouvée (pas de gare)\n print(bad_waypoints)\n db_connector = Connector()\n with db_connector:\n for bad_city in bad_waypoints:\n db_connector.execute_nonquery(\n sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))\n #db_connector.commit()\n except Exception as e:\n print('Erreur durant la génération des trajets de préfecture en préfecture. 
Rollback effectué')\n\n waypoint_co2 = {}\n waypoint_durations = {}\n processed_waypoints = set()\n\n db_connector = Connector()\n with db_connector:\n waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n for row in waypoints:\n waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]\n waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]\n processed_waypoints.update([row[0], row[1]])\n\n travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )\n\n # take most represented trip order\n journey_groups = Counter(chain(*travel_results))\n top_journeys = journey_groups.most_common(1)[0][0]\n\n print('Le voyage le plus représentatif est :')\n print(top_journeys)\n\n # calcul des horaires de voyage réels pour le trajet le plus optimisé\n\n print('Départ du calcul du voyage le %s' %\n (datetime_str_to_datetime_str(trip_start_date)))\n travel_date = trip_start_date\n\n db_connector = Connector()\n with db_connector:\n try:\n #vidage de la table contenant les informations du voyage\n bdd_management.truncate_roadtrip()\n\n for i in range(len(top_journeys)-1):\n try:\n from_city_insee = top_journeys[i]\n to_city_insee = top_journeys[i+1]\n route = requests.get(API_NAVITIA.format(\n int(from_city_insee), int(to_city_insee), travel_date, API_KEY))\n travels = json.loads(route.text)\n\n # Contrôle des voyage reçus pour identifier le plus adapté à recherche\n best_travel = travels[\"journeys\"][0]\n for travel in travels[\"journeys\"]:\n if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):\n best_travel = travel\n if best_travel['arrival_date_time'] > travel['arrival_date_time']:\n best_travel = travel\n\n # sauvegarde du trajet 'i' en base\n save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)\n\n # le prochain trajet devra avoir une date de départ > à la date de ce trajet\n travel_date = best_travel['arrival_date_time']\n\n except Exception as e:\n print(\"!! Erreur durant le calcul du trajet entre '%s' et '%s'\" %\n (from_city_insee, to_city_insee))\n\n #Ecriture du résumé du voyage\n resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)\n resume = resume.fetchone()\n\n resume_description = \"\"\"Début du voyage le {} . Arrivée le {}. \n Le voyage à durée {} pour un total de {:d} kgeC\"\"\".format(\n datetime_str_to_datetime_str(trip_start_date),\n datetime_str_to_datetime_str(travel_date),\n str(timedelta(seconds=resume[0])) ,\n trunc( resume[1]/1000))\n\n store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])\n\n db_connector.commit()\n\n except Exception as e:\n db_connector.rollback()\n print('Erreur durant la création du voyage. rollback effectué!!!')\n\n print('print map with road-trip data')\n visualization.generate_visualization()\n\n print('Travel complete. 
Have nive trip!!!')", "def calculate_distances(drives):\n for d in drives:\n d.set_distance()", "def route(self, is_check_lanes=True):\n print 'route'\n # TODO: if too mant vtypes, better go through id_modes\n exectime_start = time.clock()\n\n net = self.get_scenario().net\n edges = net.edges\n vtypes = self.parent.vtypes\n\n ids_edges = []\n ids_trip = []\n costs = []\n for id_vtype in self.get_vtypes():\n id_mode = vtypes.ids_mode[id_vtype]\n\n # no routing for pedestrians\n if id_mode != net.modes.get_id_mode('pedestrian'):\n weights = edges.get_times(id_mode=id_mode,\n speed_max=vtypes.speeds_max[id_vtype],\n is_check_lanes=is_check_lanes)\n\n ids_trip_vtype = self.get_trips_for_vtype(id_vtype)\n # print ' id_vtype,id_mode',id_vtype,id_mode#,ids_trip_vtype\n # print ' weights',weights\n ids_edge_depart = self.ids_edge_depart[ids_trip_vtype]\n ids_edge_arrival = self.ids_edge_arrival[ids_trip_vtype]\n\n for id_trip, id_edge_depart, id_edge_arrival in zip(ids_trip_vtype, ids_edge_depart, ids_edge_arrival):\n cost, route = routing.get_mincostroute_edge2edge(id_edge_depart,\n id_edge_arrival,\n edges=edges,\n weights=weights)\n if len(route) > 0:\n ids_edges.append(route)\n ids_trip.append(id_trip)\n costs.append(cost)\n\n ids_route = self.routes.get_value().add_rows(ids_trip=ids_trip,\n ids_edges=ids_edges,\n costs=costs,\n )\n self.add_routes(ids_trip, ids_route)\n print ' exectime', time.clock()-exectime_start\n return ids_trip, ids_route", "def flightpaths(affiliations):\n papersets = dict()\n for a in affiliations:\n for p in affiliations[a]['papers']:\n if p in papersets:\n papersets[p].append(a)\n else:\n papersets[p] = [a]\n coords = dict()\n for p in papersets:\n coords[p] = set(combinations(papersets[p], 2))\n flightpaths = dict()\n i = 0\n for p in coords:\n for c in coords[p]:\n s = \"\".join([\"_\", str(i)]) if i else \"\"\n flightpaths[\"\".join([\"coauthorship\", s])] = [\n {'lat': float(c[0].split(',')[0]),\n 'lng': float(c[0].split(',')[1])},\n {'lat': float(c[1].split(',')[0]),\n 'lng': float(c[1].split(',')[1])}\n ]\n i = i + 1\n return(flightpaths)", "def compute_follow(self):\n compute_follow_sets(self)", "def calculate_what_to_shift(self, paths, sw):\n\n pathmetrics = {}\n for path in paths:\n metric, length = self.compute_path_metric(sw, path, 0, 0, local_contrib=False)\n assert metric >= 0 \n pathmetrics[metric] = path\n\n metrics = pathmetrics.keys() \n logging.debug(\"SS CWTS PATH METRICS:, %s\", str(pathmetrics))\n balanced_metric = sum(metrics)/len(metrics)\n if max(metrics) == 0:\n logging.debug(\"SS CWTS MAX METRIC is 0\")\n shift_by = 0\n shift_from_path = None\n else:\n logging.debug(\"SS max(metrics) is %s\", str(max(metrics)))\n logging.debug(\"SS balanced metrics is %s\", str(balanced_metric))\n shift_by = (max(metrics) - balanced_metric)/max(metrics)\n shift_from_path = pathmetrics[max(metrics)]\n\n logging.debug(\"SS CWTS SHIFT FROM: %s\", str(shift_from_path))\n logging.debug(\"SS CWTS SHIFT BY: %s\", str(shift_by))\n return(shift_from_path, shift_by)", "def get_total_distance_by_user_on_bike(self, user_id: int):\n return self._get_total_distance_by_user(user_id, [ActivityType.Ride])", "def minkowski_distance(user1: User, user2: User) -> float:\r\n # predefined p_value\r\n p_value = 3\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n return _nth_root(sum(pow(abs(anime.neighbor_users[user1] - anime.neighbor_users[user2]),\r\n p_value) for anime in common_animes), p_value)", "def 
get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths,\n source_in_clusters = False, christofides = False):\n \n if source_in_clusters:\n add_vertex_to_clusters(home_clusters,source)\n\n dropoff_vertices = get_dropoff_vertices_efficient(graph, home_clusters, all_pairs_distances)\n\n # Add the source to the dropoff vertices\n dropoff_vertices.append(source)\n # Get rid of any repeating entries in the dropoff vertices\n dropoff_vertices = list(set(dropoff_vertices))\n # Construct the fully connected sub-graph with the dropoff vertices\n # on which TSP is computed\n dropoff_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,dropoff_vertices,all_pairs_distances)\n \n if christofides:\n tsp_route = tsp_routines.metric_christofides_tsp(dropoff_subgraph,source)\n else:\n tsp_route = tsp_routines.metric_mst_tsp(dropoff_subgraph,source)\n\n final_path = tsp_routines.tsp_solution_to_path(graph,tsp_route,all_pairs_shortest_paths)\n return final_path", "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def calculate_waypoints(global_start, global_goal, global_home, data, drone_altitude, safety_distance):\n # Calculate graph and offsets\n graph, north_offset, east_offset = create_graph(data, drone_altitude, safety_distance)\n\n map_offset = np.array([north_offset, east_offset, .0])\n\n # Convert start position from global to local.\n local_position = global_to_local(global_start, global_home) - map_offset\n\n # Find closest point to the graph for start\n graph_start = closest_point(graph, local_position)\n\n # Convert goal postion from global to local\n local_goal = global_to_local(global_goal, global_home) - map_offset\n\n # Find closest point to the graph for goal\n graph_goal = closest_point(graph, local_goal)\n\n # Find path\n path, _ = a_star(graph, graph_start, graph_goal)\n path.append(local_goal)\n\n # Prune path\n path = collinearity_prune(path, epsilon=1e-3)\n\n # Calculate waypoints\n return [[int(p[0] + north_offset), int(p[1] + east_offset), drone_altitude, 0] for p in path]", "def handle_request(self, sw, util, duration, time_now):\n\n #logging.debug(str(self.graph.edges(data=True)))\n\n #1 Get available paths from servers to switch\n paths = self.get_srv_paths(sw, self.graph)\n\n #2 choose the path which mins the max link utilization for all links\n # along the path\n bestpath, bestpm = self.find_best_path(paths, sw, util, duration, time_now)\n\n if len(bestpath) > 0:\n self.allocate_resources(bestpath, util, time_now, duration)\n else:\n logging.warn(\"[%s] No best path found at switch [%s]\", str(time_now), str(sw))\n\n return bestpath", "def Option3_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n return path_cost_with_concave_function, Opt_path\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the 
concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_concave_function = 0\n return path_cost_with_concave_function, Opt_path", "def create_user_path_assoc():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n reqdata = request.json\n if not check_token(reqdata[\"token\"]):\n return jsonify(status=TOKEN_ERROR)\n #TODO check that request body contain needed data\n #if [\"user_id\", \"path_id\", \"ready\", \"coordinate\", \"first_name\", \"second_name\", \"phone\"].sort() != (data.keys()).sort():\n # return jsonify(status=\"err\")\n user_id = reqdata[\"user_id\"]\n path_id = reqdata[\"path_id\"]\n ready = True\n coordinate = reqdata[\"coordinate\"]\n first_name = reqdata[\"first_name\"]\n second_name = reqdata[\"second_name\"]\n phone = reqdata[\"phone\"]\n #TODO data validation\n user = db.session.query(User).filter(User.id==user_id).scalar() is not None\n path = db.session.query(Path).filter(Path.id==path_id).scalar() is not None\n if user and path:\n db.session.add(UserPathAssociation(user_id=user_id, path_id=path_id, ready=ready,coordinate=coordinate, first_name=first_name, second_name=second_name,phone=phone))\n try:\n db.session.commit()\n return jsonify(status=OK_STATUS)\n except:\n db.session.rollback()\n return jsonify(status=DATABASE_INTEGRITY_ERROR)\n else:\n return jsonify(status=\"err\")", "def calcApproxDuration(self, loaded_user):\n if not loaded_user.getUserAttrs().hasAttr(\"normal_charge\"):\n return []\n \n charge_obj=charge_main.getLoader().getChargeByID(int(loaded_user.getUserAttrs()[\"normal_charge\"]))\n credit=loaded_user.getBasicUser().getCredit()\n approx=[]\n \n for rule_obj in charge_obj.getRules().itervalues():\n cpm=0\n if rule_obj.cpm:\n cpm += rule_obj.cpm\n \n if rule_obj.cpk:\n cpm += rule_obj.assumed_kps/rule_obj.cpk\n \n if rule_obj.ras_id!=rule_obj.ALL:\n ras_ip=ras_main.getLoader().getRasByID(rule_obj.ras_id).getRasIP()\n else:\n ras_ip=rule_obj.ALL\n\n if cpm:\n duration=credit/cpm*60\n else:\n duration=\"Infinite\"\n approx.append([duration, ras_ip, rule_obj.day_of_weeks.getDayNames(), rule_obj.start_time, rule_obj.end_time])\n \n return approx", "def calculate_path_cost_with_weighted_sum(self, path, attr1, attr2): \n costs = [] \n for i in range(len(path) - 1):\n a = (1- self.G[path[i]][path[i+1]][attr2]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n b = (1- self.G[path[i]][path[i+1]][attr1]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n costs.append(a * self.G[path[i]][path[i+1]][attr1] + b * self.G[path[i]][path[i+1]][attr2]) \n return max(costs)", "def get_user_relationships(user):\n transactions = {}\n\n for transaction in Transaction.ready.filter(Q(created_by=user) | Q(sent_to=user)):\n other_user_handle = transaction.created_by.handle\n\n if user == transaction.created_by:\n other_user_handle = transaction.sent_to.handle\n\n if other_user_handle not in transactions:\n transactions[other_user_handle] = []\n\n transactions[other_user_handle].append(transaction)\n\n return transactions", "def process(self, user_id: str, all_days: List[str]):\n if self.CC is not None:\n # Office Time Calculation from GPS\n self.CC.logging.log(\"Processing Working Days\")\n self.listing_all_work_days(user_id, all_days)\n\n 
arrival_data_feature = ArrivalTimes(self.CC)\n arrival_data_feature.process(user_id, all_days)\n\n expected_arrival_data_feature = ExpectedArrivalTimes(self.CC)\n expected_arrival_data_feature.process(user_id, all_days)\n\n staying_time_data_feature = StayingTimes(self.CC)\n staying_time_data_feature.process(user_id, all_days)\n\n expected_staying_time_data_feature = ExpectedStayingTimes(self.CC)\n expected_staying_time_data_feature.process(user_id, all_days)\n\n # Office Time Calculation from Beacon\n working_days_from_beacon_feature = WorkingDaysFromBeacon(self.CC)\n working_days_from_beacon_feature.process(user_id, all_days)\n\n arrival_data_from_beacon_feature = ArrivalTimesFromBeacon(self.CC)\n arrival_data_from_beacon_feature.process(user_id, all_days)\n\n expected_arrival_data_from_beacon_feature = ExpectedArrivalTimesFromBeacon(self.CC)\n expected_arrival_data_from_beacon_feature.process(user_id, all_days)\n\n staying_time_data_from_beacon_feature = StayingTimesFromBeacon(self.CC)\n staying_time_data_from_beacon_feature.process(user_id, all_days)\n\n expected_staying_time_data_from_beacon_feature = ExpectedStayingTimesFromBeacon(self.CC)\n expected_staying_time_data_from_beacon_feature.process(user_id, all_days)", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def GetPathCost(self, bucketOfActions):", "def score(path, gate_dist):\n\tsum = 0\n\t# Each path is really two, one that terminates, and one that continues\n\t# For now, don't reward termination\n\tfor i in range(len(path))[1:-1]:\n#\t\tcos_angle = 1\n#\t\tif i > 1:\n#\t\t\tu = (path[i-1].obj.Lon - path[i-2].obj.Lon, \\\n#\t\t\t\tpath[i-1].obj.Lat - path[i-2].obj.Lat)\n#\t\t\tv = (path[i].obj.Lon - path[i-1].obj.Lon, \\\n#\t\t\t\tpath[i].obj.Lat - path[i-1].obj.Lat)\n#\t\t\tcos_angle = (u[0]*v[0] + u[1]*v[1]) / \\\n#\t\t\t\t(math.sqrt(u[0]**2 + u[1]**2) * \\\n#\t\t\t\tmath.sqrt(v[0]**2 + v[1]**2))\n\t\tsum += ( gate_dist - path[i].dist ) / (1<<(i-1)) #* (1 - cos_angle) )\n\treturn sum", "def writePathways( self ):\n\n self.logger.info( 'writePathways: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerPathway.writePathways()\n\n self.logger.info( 'writePathways: DONE' )", "def CalculateChebyPaths(self):\n Kmin, Kmax = self.Kmin, self.Kmax\n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.cpath[y] = self.chebeval(array([self.apath[y]]),self.ac[y],Kmin,Kmax)\n # if self.cpath[y] < 0:\n # self.cpath[y] = 0\n if y >= self.W:\n income = self.b\n else:\n self.npath[y] = self.chebeval(array([self.apath[y]]),self.an[y],Kmin,Kmax)\n income = (1-self.tau)*self.w*self.npath[y]\n self.apath[y+1] = (1+self.r)*self.apath[y] + income - self.cpath[y]\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1] = (1+self.r)*self.apath[self.T-1] + self.b\n # self.cpath[self.T-1] = 
self.chebeval(array([self.apath[self.T-1]]),self.ac[self.T-1],Kmin,Kmax)\n self.upath[self.T-1] = self.util(self.cpath[self.T-1], self.npath[self.T-1])\n # print self.cpath, self.apath, self.npath", "def computeNearestNeighbor(r, username, users):\r\n distances = []\r\n for user in users:\r\n if user != username:\r\n if ( r == 1 ) or ( r == 2 ):\r\n distance = minkowski(r, users[user], users[username])\r\n distances.append((round(distance, 2), user))\r\n if (r == 0 ):\r\n distance = pearson(users[user], users[username])\r\n distances.append((round(distance, 2), user))\r\n # sort based on distance -- closest first\r\n if ( r == 1 ) or ( r == 2 ):\r\n distances.sort()\r\n if( r == 0 ):\r\n distances.sort(reverse=True)\r\n return distances", "def walk(self, priv_path:list):\n # End conditions for recursive loop\n current_node = priv_path[-1]\n if current_node.location in self.destination and len(priv_path)>1:\n self.addItinerary(priv_path)\n self.n_routes+=1\n return\n if self.n_routes >= self.max_n_routes:\n return\n\n if len(priv_path)>1:\n # Get metadata of last edge type\n last_edge = self.EdgeType(priv_path[-2], priv_path[-1])\n else: # If it's start of itinerary, next edge would be travel edge\n # So, make last edge as stay\n last_edge = 'stay'\n if last_edge == 'stay': # next edge will be travel i.e., ship not None\n next_nodes = [node for node in self.G.neighbors(current_node) \n if self.G.edges[current_node, node]['ship'] is not None]\n else: # Next edge will be stay, i.e., ship = None\n next_nodes = [node for node in self.G.neighbors(current_node)\n if self.G.edges[current_node, node]['ship'] is None]\n \n for node in next_nodes:\n self.walk(priv_path+[node])", "def kPaths(veh, currentEdge):\n # A set of all of the edges, used to reset the vehicle's internal estimated edge store\n edgesSet = set()\n k = 1\n # This is a fail safe in case there are less paths than K_MAX available for the vehicle to take\n timeOut = 0\n\n # Finding the best possible route for the vehicle\n traci.vehicle.rerouteTraveltime(veh, currentTravelTimes=False)\n\n # The vehicle's current route\n bestRoute = traci.vehicle.getRoute(veh)\n # Element of the current edge within the currentRoute\n currentEdgeIndex = bestRoute.index(currentEdge)\n # Altered route with the first element of the route being the current edge\n currentRoute = bestRoute[currentEdgeIndex:]\n edgesSet.update(currentRoute)\n\n # Recording down the current best route and time\n bestTime = sim.getGlobalRoutePathTime(currentRoute)\n routes = {}\n routes['{}_best'.format(k)] = (bestTime, currentRoute,)\n\n # This records the estimated travel time for each road segment in which the vehicle may take (with or without\n # penalisation applied) -- this is done on and reset on a vehicle-per-vehicle basis\n adjustedEdgeVehicle = {}\n for edge in currentRoute:\n adjustedEdgeVehicle[edge] = edgeSpeedGlobal[edge]\n\n # Creating up to k-1 additional routes\n while k < K_MAX:\n penalisePathTimeVehicle(veh, currentRoute, adjustedEdgeVehicle)\n\n traci.vehicle.rerouteTraveltime(veh, currentTravelTimes=False)\n newRoute = traci.vehicle.getRoute(veh)\n currentRoute = newRoute[currentEdgeIndex:]\n newRouteTime = sim.getGlobalRoutePathTime(currentRoute)\n\n # These are the routes which have already been selected\n currentEligibleRoutes = [x[1] for x in routes.values()]\n # Ensuring the route doesn't exist within the existing eligible routes, the route contains the edge in which the\n # vehicle is currently occupying, and the route is not currently the best route\n 
if currentRoute not in currentEligibleRoutes and currentEdge in currentRoute:\n timeOut = 0\n # This keeps track if the calculated 'best' route time is above that of the calculated new route time\n bestRouteMoreThanNewRouteTime = False\n\n \"\"\"\n Sometimes the roads suffer so much congestion that there are issues with reliable estimation of travel\n times given by TraCI. TraCI may sometimes give overblown estimations of the travel times of an edge, for\n example if the edge is being blocked and no movement is being made, the estimated time may be \n disproportionally large compared to it's real-world equivalent predicted time taken; this problem persists\n even with a rerouting device being connected to the vehicles which allows for more accurate 'smoothed' \n aggregated travel times- this is a fault inherent to TraCI. \n \n In an attempt to alleviate this, the estimated travel times are bounded to 15x their free-flow speed. \n However, this sometimes causes the best time to no longer be the best time depending on the number of \n edge travel time boundings in a route (which could alter the predicted time for a route given we are \n now estimating some of the edge times). \n \n Given this, we instead work out the ratio between the best travel time and the currentRoute travel time, \n we multiply this ratio against the best travel time to give a better, more accurate estimation of the \n currentRoute's travel time.\n \"\"\"\n if newRouteTime < bestTime:\n # These are the predicted route times which are given directly from TraCI\n bestTimeGivenByTraci = 0\n newRouteTimeGivenByTraci = 0\n\n # These are the smoothed travel times which are generated through the vehicle's individual rerouting\n # device\n smoothedBestTime = 0\n smoothedNewTime = 0\n\n # Times for the best route\n for edge in routes['1_best'][1]:\n bestTimeGivenByTraci += traci.edge.getTraveltime(edge)\n smoothedBestTime += float(traci.vehicle.getParameter(veh, \"device.rerouting.edge:{}\".format(edge)))\n\n # Times for the new route\n for edge in currentRoute:\n newRouteTimeGivenByTraci += traci.edge.getTraveltime(edge)\n smoothedNewTime += float(traci.vehicle.getParameter(veh, \"device.rerouting.edge:{}\".format(edge)))\n\n traciRatio = newRouteTimeGivenByTraci / bestTimeGivenByTraci\n smoothedRatio = smoothedNewTime / smoothedBestTime\n\n \"\"\"\n In extremely rare cases, TraCI can erroneously return an incorrect edge travel time which means\n that the 'best' travel time may not actually be the best when taking these estimated travel time\n measurements; this can result in ratios < 1.\n \"\"\"\n if traciRatio < 1 and smoothedRatio < 1:\n bestRouteMoreThanNewRouteTime = True\n\n # Add the new time to the list so that it can be determined whether or not the existing times can\n # exist given this new best time (with boundaries in mind)\n routes['{}_best'.format(k + 1)] = (newRouteTime, currentRoute,)\n\n # All of the route and time pair combinations\n tupleList = []\n for key in routes:\n timeRouteTuple = routes[key]\n tupleList.append(timeRouteTuple)\n\n # Sort the tuples with the lowest time being at index 0, with the longest being at index k-1\n sortedTuples = sorted(tupleList, key=lambda x: x[0])\n\n # Placing the contents of the sortedTuple into routes\n counter = 0\n for key in routes:\n routes[key] = sortedTuples[counter]\n counter += 1\n\n # Best time will now be in the first position\n bestTime = routes['1_best'][0]\n\n # Delete any routes which don't conform to <= new best time *\n for key in deepcopy(routes):\n if 
routes[key][0] >= bestTime * KPATH_MAX_ALLOWED_TIME:\n del routes[key]\n\n # Resetting k depending on how many elements are left after removal\n k = len(routes)\n else:\n # This takes the most accurate ratio (which is deemed to be the ratio which is closest to 1)\n traciRatio = min([traciRatio, smoothedRatio], key=lambda v: abs(v - 1))\n # Work out the new, more accurate currentRoute travel time based on this ratio\n newRouteTime = bestTime * traciRatio\n\n # New route's estimated time doesn't exceed bestTime*KPATH_MAX_ALLOWED_TIME of the optimal route time\n if newRouteTime <= bestTime*KPATH_MAX_ALLOWED_TIME and not bestRouteMoreThanNewRouteTime:\n k += 1\n for edge in currentRoute:\n if edge not in adjustedEdgeVehicle:\n adjustedEdgeVehicle[edge] = edgeSpeedGlobal[edge]\n edgesSet.update(currentRoute)\n routes['{}_best'.format(k)] = (newRouteTime, currentRoute,)\n else:\n break\n else:\n timeOut += 1\n # Time out limit exceeded\n if timeOut == KPATH_TIMEOUT:\n break\n\n # Selecting a random route\n ranNum = random.randint(1, len(routes))\n\n routeChoice = routes['{}_best'.format(ranNum)]\n\n # Setting the additional (estimated) extra time in which the vehicle has taken due to reroutings\n routeChoiceTimeTaken = routeChoice[0]\n bestChoiceTimeTaken = routes['1_best'][0]\n extraTime = routeChoiceTimeTaken - bestChoiceTimeTaken\n cumulativeExtraTime[veh] += abs(extraTime)\n\n traci.vehicle.setRoute(veh, routeChoice[1])\n\n resetVehicleAdaptedTravelTime(veh, edgesSet)\n\n # These are the routes which were available to be selected\n routeList = [x[1] for x in routes.values()]\n\n return routeList", "def _all_node_paths(self):\n workflows = self._build_keyed_workflow_map()\n referrers = self._build_referrer_map()\n\n paths = {}\n\n for (workflow_name, workflow) in six.iteritems(workflows):\n for node in self.get_all_nodes(workflow):\n paths[(workflow_name, node['name'])] = \\\n self._get_path_to_node(\n workflow_name, node['name'], referrers)\n\n return paths", "def calculate_all_distances(self):\n self.close_distance = self.calculate_distance(self.close_distance_factor)\n self.medium_distance = self.calculate_distance(self.medium_distance_factor)\n self.far_distance = self.calculate_distance(self.far_distance_factor)", "def get_rec(self):\n\n #to address cold start problem: checks if user activity is above 5 or so lessons\n # if yes returns recs based on user2user_similarity\n # else returns recs based on item2item_similarity\n pass", "def compute_costs(self, node, observation=None, area=None):\n if (node.RRT.hierarchy_number == 0) and (\n node in node.RRT.starts.values()): # in case an observation is made immediately (root node has no parent)\n return node.path_costs, node.terminal_costs, node.node_costs\n\n C = self.get_C(observation, area)\n node.vs = self.get_vs(node, C)\n\n # Compute node, and terminal costs\n h = []\n hN = []\n for i in range(self.N_goal_states):\n h.append(self.cost_h(node, self.goal_states[i]))\n hN.append(self.cost_hN(node, self.goal_states[i]))\n\n path_costs = node.parent.path_costs.copy() + node.parent.node_costs.copy()\n\n N_vs = len(node.vs)\n node_costs = []\n terminal_costs = []\n for i in range(N_vs):\n node_costs.append(np.dot(h, node.vs[i]))\n terminal_costs.append(np.dot(hN, node.vs[i]))\n node_costs = np.array(node_costs).reshape((1, N_vs))\n terminal_costs = np.array(terminal_costs).reshape((1, N_vs))\n\n if path_costs.shape[1] == node_costs.shape[1] / self.N_goal_states:\n path_costs_temp = np.zeros(node_costs.shape)\n for i in 
range(int(node_costs.shape[1] / self.N_goal_states)):\n for j in range(self.N_goal_states):\n path_costs_temp[0, self.N_goal_states * i + j] = path_costs[0, i].copy() + node_costs[\n 0, self.N_goal_states * i + j].copy()\n path_costs = path_costs_temp.copy()\n\n return path_costs, terminal_costs, node_costs", "def Option2_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n return path_cost_with_weighted_sum, Opt_path\n\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_weighted_sum) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_weighted_sum = 0\n return path_cost_with_weighted_sum, Opt_path", "def computeNearestNeighbor(username, users, distance_algorithm='euclidean'):\n distances = []\n for user in users:\n if user != username:\n if distance_algorithm == 'manhatten':\n distance = manhattan_distance(users[user], users[username])\n elif distance_algorithm == 'euclidean':\n distance = euclidean_distance(users[user], users[username])\n elif distance_algorithm == 'minkowski':\n distance = minkowski_distance(users[user], users[username], 5)\n distances.append((distance, user))\n\n # sort based on distance -- closest first!\n distances.sort()\n return distances", "def flight_paths(city1, city2, data=data):\n cities_to_travel = Graph() # instantiate a new graph\n location_dict = {} # empty dictionary to hold city, location, and distances\n for city in data: # creates dictionary of key cities, values: lat and long\n try:\n location_dict[city['city']] # check if city is already in dictionary\n except KeyError:\n location_dict[city['city']] = city['lat_lon'] # add's city as key and it's lat/long as value\n for city in data: # adds distances between each connected city\n for destination in city['destination_cities']:\n try: # adding edge and weights (distances) between cities\n cities_to_travel.add_edge(city['city'], destination, calculate_distance(city['lat_lon'], location_dict[destination]))\n except KeyError: # edge case; if connection already exists or points to city that doesn't have a lat/long\n pass\n try:\n to_return = cities_to_travel.bellman_ford(city1, city2) # Bellman Ford shortest path through city\n if to_return[0] == float(\"inf\"):\n raise KeyError(\"City does not exist\")\n else:\n return to_return\n except KeyError:\n raise KeyError('City has no Lat or Long given, or does not exist')", "def evaluateAllRroutes(self):\n isTrain = 1 # 1 for train, 0 for test\n\n performance = 0\n normalizedPerformance = 0\n priceTolerance = 5 # price to be tolerated\n\n normPerforms = []\n for i in range(8):\n print \"Route: {}\".format(i)\n [perfor, normaPerfor] = self.evaluateOneRouteForMultipleTimes(self.routes[i], priceTolerance)\n normPerforms.append(normaPerfor)\n performance += perfor\n normalizedPerformance += 
normaPerfor\n\n performance = round(performance/8, 2)\n normalizedPerformance = round(normalizedPerformance/8, 2)\n\n if self.isTrain:\n print \"\\nTRAIN:\"\n else:\n print \"\\nTEST:\"\n print \"Average Performance: {}%\".format(performance)\n print \"Average Normalized Performance: {}%\".format(normalizedPerformance)\n print \"Normalized Performance Variance: {}\".format(np.var(normPerforms))", "def path(most_important_up, most_important_down, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return source_to_source(most_important_up, most_important_down), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]", "def consolidation_heuristics(to_print = False):\n # Instantiate the data problem.\n data = create_data_model()\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n def pending_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['post'][to_node]\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n pending_callback_index = routing.RegisterTransitCallback(pending_callback)\n # Define cost of each arc.\n for i in range(data['num_vehicles']-1):\n routing.SetArcCostEvaluatorOfVehicle(transit_callback_index, i) #Transit cost\n routing.SetFixedCostOfVehicle(data['fixed_cost'], i) #Fixed cost\n routing.SetArcCostEvaluatorOfVehicle(pending_callback_index, data['num_vehicles']-1) #Postponement and/or NonService cost\n # Add Capacity constraint.\n def demand_callback(from_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index) \n return data['demands'][from_node]\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n # Add time constraint.\n def time_callback(from_index,to_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to NodeIndex in time\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return time_matrix[from_node][to_node] \n time_callback_index = routing.RegisterTransitCallback(time_callback) \n routing.AddDimensionWithVehicleCapacity(\n time_callback_index,\n 0, # null capacity slack\n data['time_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Time')\n # Setting solution heuristic-procedure.\n search_parameters = 
pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 5 #10 # 60 #20 #3000\n search_parameters.log_search = True\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n # Print solution on console.\n if assignment:\n sent, sol_results, routes_results = print_solution(data, manager, routing, assignment) \n return sent, sol_results, routes_results", "def get_distance(restuarant, user):\n lat1 = math.radians(restuarant[1])\n lon1 = math.radians(restuarant[0])\n lat2 = math.radians(user[1])\n lon2 = math.radians(user[0])\n dlon = lon1 - lon2\n dlat = lat1 - lat2\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * \\\n math.cos(lat2) * math.sin(dlon / 2) ** 2\n dist = 2 * math.asin(math.sqrt(a))*6371\n return dist", "def calculate_paths(self):\n self.paths = {}\n for node in self.nodes:\n path = self.find_path_to_root(node)\n self.paths[node] = path\n self.path_dists[node] = [0.0] + [n.branch for n in path[1:]]", "def optimise(self):\n route = str(sorted(self.heuristic_path))\n\n if route in self.routes:\n saved = TSP.routes[route]\n self.heuristic_path = saved[\"path\"]\n self.heuristic_cost = saved[\"cost\"]\n else:\n self._optimise()\n\n return self.heuristic_path, self.heuristic_cost", "def calculate_path_cost_with_concave_function(self, path, attr1, attr2): \n c1 = max([self.G[path[i]][path[i+1]][attr1] for i in range(len(path)-1)])\n c2 = max([self.G[path[i]][path[i+1]][attr2] for i in range(len(path)-1)]) \n return max([c1,c2])", "def find_path_to_friend(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return None\n # if both users exist there may be a path\n return create_path(network, user_A, user_B)", "def recommend_pathway(user_jobs, job_graph, goal_state, min_likelihood_thr):\r\n user_jobs_for_mdp = [user_jobs[0]]\r\n mdp = MDP(job_graph, user_jobs_for_mdp, goal_state, min_likelihood_thr=min_likelihood_thr)\r\n return mdp.solve_mdp()", "def compute_statistic(path_user, path_refs, tp_consensus='mean', path_dataset=None,\n path_visu=None):\n assert path_user and path_refs, 'missing user or reference annotation'\n lnds_user, _ = create_consensus_landmarks([path_user])\n lnds_refs, _ = create_consensus_landmarks(path_refs, method=tp_consensus)\n\n list_stats = []\n name_set, name_user_scale = path_user.split(os.sep)[-2:]\n user, scale = parse_path_user_scale(name_user_scale)\n for csv_name in lnds_user:\n if csv_name not in lnds_refs:\n continue\n im_size = find_image_full_size(path_dataset, name_set,\n os.path.splitext(csv_name)[0])\n d_stat = compute_landmarks_statistic(lnds_refs[csv_name],\n lnds_user[csv_name],\n use_affine=False, im_size=im_size)\n d_stat.update({'image_set': name_set,\n 'user': user,\n 'scale': scale,\n 'landmarks': csv_name})\n list_stats.append(d_stat)\n if path_visu is not None and os.path.isdir(path_visu):\n img_name = os.path.splitext(csv_name)[0]\n visual_coannotation(lnds_user[csv_name], lnds_refs[csv_name], path_dataset,\n path_user, img_name, path_visu)\n return list_stats", "def euclidean_distance(user1: User, user2: User) -> float:\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n return sqrt(sum(pow(anime.neighbor_users[user1] - anime.neighbor_users[user2], 2)\r\n for anime in common_animes))", "def calc_cost(self):\n cost = 0\n for i,[source, sinks] in enumerate(self.nets):\n 
self.costs[i] = self.calc_half_perimeter(source, sinks)\n cost += self.costs[i]\n self.cost = cost\n return True", "def get_context_route_condition(self, pathology_choose,\n way_choose, pathologies, ways):\n essentials_oils = 1\n if way_choose.name == \"orale\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)[0:2]\n\n vegetable_oil = NeutralProduct.objects.get(name=\"miel\")\n protocole = MethodOfUse.objects.get(name=\"orale\")\n\n elif way_choose.name == \"bain\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)[0:1]\n\n vegetable_oil = NeutralProduct.objects.get(name=\"gel douche\")\n protocole = MethodOfUse.objects.get(name=\"bain\")\n\n elif way_choose.name == \"diffusion\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = NeutralProduct.objects.get(name=\"alcool\")\n protocole = MethodOfUse.objects.get(name=\"diffusion\")\n\n elif way_choose.name == \"Inhalation\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = NeutralProduct.objects.get(name=\"bol d'eau\")\n protocole = MethodOfUse.objects.get(name=\"inhalation\")\n\n elif way_choose.name == \"cutanée\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = pathology_choose.vegetable_oil\n\n if pathology_choose.zone == \"general\":\n protocole = MethodOfUse.objects.get(\n name=\"cutanée générale\")\n else:\n protocole = MethodOfUse.objects.get(name=\"cutanée\")\n\n number_he = essentials_oils.count()\n amount = Recipe.objects.filter(\n way__name=way_choose.name).get(number_he=number_he)\n sides_effects = SideEffect.objects.filter(\n essential_oil__in=essentials_oils).distinct()\n contraindication = Contraindication.objects.filter(\n essential_oil__in=essentials_oils).distinct()\n\n context = {\n \"pathologies\": pathologies,\n \"pathology_choose\": pathology_choose,\n \"essentials_oils\": essentials_oils,\n \"vegetable_oil\": vegetable_oil,\n \"way_choose\": way_choose,\n \"ways\": ways,\n \"amount\": amount,\n \"protocole\": protocole,\n \"sides_effects\": sides_effects,\n \"contraindications\": contraindication,\n }\n\n return context", "def generate_tour_times(self, user):\n if ItineraryModel.retrieve_unfinished_itinerary(user).exists():\n itinerary = ItineraryModel.objects.exclude(finished=True).get(client=user)\n start_times = itinerary.start_times\n\n for start_time in start_times.all():\n elapsed_time = 0\n for home_visit in HomeVisitModel.objects.filter(itinerary=itinerary).order_by(\"visit_index\").all():\n if home_visit.visit_index is not 0:\n elapsed_time += 20 * 60\n elapsed_time += home_visit.travel_time\n time_slot = start_time.time + timedelta(seconds=elapsed_time)\n _ = ViableTourTimeModel.objects.get_or_create(\n home_visit=home_visit,\n visit_time=time_slot)\n return True\n return False", "def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente", "def main():\n users = {}\n\n with open('../the_data.json', 'r') as f:\n the_data = json.loads(f.read())\n\n user_data = the_data['data']\n the_user_ids = json.loads(the_data['user_ids_list'])\n \n def p(vector):\n vector = json.loads(vector)\n return {field: vector[i] for i,field in 
enumerate(the_data['vector_fields'])}\n\n result = {}\n for step in range(1, 20):\n step = str(step)\n\n users = {}\n the_user_ids_for_this_step = []\n for uid in the_user_ids:\n try:\n users[uid] = p(user_data[uid][step])\n the_user_ids_for_this_step.append(uid) \n except:\n pass\n\n for user_id in the_user_ids_for_this_step: \n nearest = computeNearestNeighbor(user_id,\n users,\n distance_algorithm='minkowski')\n # print user_id\n if user_id not in result:\n result[user_id] = {}\n\n result[user_id][step] = nearest[:3]\n\n\n\n # print result\n\n for u in result.keys():\n woha = []\n print '%s, step_count: %s' % (u, user_data[u]['step_count'])\n ls = result[u].keys()\n ls.sort()\n for s in ls: \n print s\n for near in result[u][s]:\n if near[1] in woha:\n ulala = '>'*woha.count(near[1])\n else:\n ulala = ''\n woha.append(near[1])\n print '\\t'*int(s), '%s %s, %s, step_count: %s' % (ulala, near[1], near[0], user_data[near[1]]['step_count'])\n\n print", "def pairing(self):\n if len(self._paths) == 0:\n second_values = self.data\n get_flight = lambda x: x\n first = True\n else:\n second_values = self._paths\n get_flight = lambda x: x.get_last_flight()\n first = False\n\n for value in second_values:\n f1 = get_flight(value)\n for f2 in self.data:\n if f1.connects_to(f2):\n if first:\n self._paths.append(FlightPath(f1, f2))\n else:\n path_copy = copy.copy(value)\n added = path_copy.try_add(f2)\n if added:\n self._paths.append(path_copy)", "def calc_critical_paths(self):\n visited = set([])\n nodes = set(self.get_leaves())\n\n while nodes:\n n = nodes.pop()\n n.update_critical_path(n.latency())\n visited.add(n)\n cp = n.critical_path\n\n for p in n.parents:\n p.update_critical_path(cp + p.latency())\n # if p not in visited:\n nodes.add(p)\n # visited.add(p)\n\n for d in n.serial_parents:\n d.update_critical_path(cp + 1)\n # if d not in visited:\n nodes.add(d)\n # visited.add(d)", "def _compute_connection(current_waypoint, next_waypoint, threshold=35):\n n = next_waypoint.transform.rotation.yaw\n n = n % 360.0\n\n c = current_waypoint.transform.rotation.yaw\n c = c % 360.0\n\n diff_angle = (n - c) % 180.0\n if diff_angle < threshold or diff_angle > (180 - threshold):\n return RoadOption.STRAIGHT\n elif diff_angle > 90.0:\n return RoadOption.LEFT\n else:\n return RoadOption.RIGHT", "def paths(self):\n base = self.base_link\n graph = self.graph()\n paths = {}\n for b in self.links.values():\n try:\n paths[b.name] = shortest(graph, base, b.name)\n except BaseException as E:\n print('exception:', E)\n\n joint_paths = {}\n for body, path in paths.items():\n joint_paths[body] = [graph.get_edge_data(a, b)['joint']\n for a, b in zip(path[:-1], path[1:])]\n return joint_paths", "def calc_distances(client_list):\n distances = {}\n for x in client_list:\n distances[x] = {}\n for y in client_list:\n distances[x][y] = dis(x, y)\n return distances", "def getMutationPathways(node, gPathway, distance = [2, 1], include = None):\n rpInteractions = reverseInteractions(gPathway.interactions)\n if include == None:\n include = set(gPathway.nodes.keys())\n upPathway = Pathway({node : gPathway.nodes[node]}, {})\n downPathway = Pathway({node : gPathway.nodes[node]}, {})\n seenUp = set([node])\n seenDown = set([node])\n unresolvedUp = [node]\n unresolvedDown = [node]\n for d in range(distance[0]): \n ## Up-\n frontierUp = []\n while len(unresolvedUp) > 0:\n currNode = unresolvedUp.pop()\n ## Add complex as upstream for seed node\n if currNode == node:\n if currNode in gPathway.interactions:\n for target in 
gPathway.interactions[currNode].keys():\n if gPathway.interactions[currNode][target] == \"component>\":\n seenUp.update([target])\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[currNode] = {}\n upPathway.interactions[currNode][target] = \"component>\"\n unresolvedUp.append(target)\n ## Add upstream\n if currNode in gPathway.rinteractions:\n for target in gPathway.rinteractions[currNode].keys():\n if target not in seenUp:\n seenUp.update([target])\n if gPathway.nodes[target] == \"protein\":\n if target in include:\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[target] = {}\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n frontierUp.append(target)\n elif gPathway.nodes[target] == \"complex\":\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[target] = {}\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedUp.append(target)\n else:\n if target not in upPathway.interactions:\n upPathway.interactions[target] = {}\n if currNode not in upPathway.interactions[target]:\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedUp = deepcopy(frontierUp)\n for d in range(distance[1]):\n ## Down-\n frontierDown = []\n while len(unresolvedDown) > 0:\n currNode = unresolvedDown.pop()\n ## Add downstream\n if currNode in gPathway.interactions:\n for target in gPathway.interactions[currNode].keys():\n if target not in seenDown:\n seenDown.update([target])\n if gPathway.nodes[target] == \"protein\":\n if target in include:\n downPathway.nodes[target] = gPathway.nodes[target]\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n frontierDown.append(target)\n elif gPathway.nodes[target] == \"complex\":\n downPathway.nodes[target] = gPathway.nodes[target]\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n unresolvedDown.append(target)\n else:\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n if target not in downPathway.interactions[currNode]:\n downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n ## Add upstream for non-seed node\n # if currNode != node:\n # if currNode in gPathway.rinteractions:\n # for target in gPathway.rinteractions[currNode].keys():\n # if target not in seenDown:\n # seenDown.update([target])\n # if gPathway.nodes[target] == \"protein\":\n # if target in include:\n # downPathway.nodes[target] = gPathway.nodes[target]\n # downPathway.interactions[target] = {}\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n # elif gPathway.nodes[target] == \"complex\":\n # downPathway.nodes[target] = gPathway.nodes[target]\n # downPathway.interactions[target] = {}\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n # unresolvedDown.append(target)\n # else:\n # if target not in downPathway.interactions:\n # downPathway.interactions[target] = {}\n # if currNode not in downPathway.interactions[target]:\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedDown = deepcopy(frontierDown)\n return(upPathway, downPathway)", "def _get_recommend(self, user):\n return 
self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)" ]
[ "0.5953887", "0.59538865", "0.56319934", "0.56293374", "0.55796844", "0.55332", "0.55092543", "0.5428622", "0.5426576", "0.54115033", "0.5401471", "0.53886443", "0.5361762", "0.53074896", "0.521614", "0.5194592", "0.51802075", "0.51579696", "0.51287794", "0.5086181", "0.50838554", "0.5083594", "0.50697035", "0.5037754", "0.500392", "0.49754417", "0.49681717", "0.49641618", "0.4960874", "0.49448317", "0.49175182", "0.491394", "0.49094963", "0.49045765", "0.4903287", "0.48863605", "0.48767874", "0.48692188", "0.4828763", "0.4817143", "0.48114127", "0.47815302", "0.47763097", "0.47753403", "0.47614607", "0.47594947", "0.47560018", "0.47493365", "0.47491658", "0.47292268", "0.4723507", "0.47217023", "0.47070175", "0.47070175", "0.46894124", "0.4683329", "0.46789703", "0.46604156", "0.46466953", "0.46439028", "0.4642728", "0.46335524", "0.46237093", "0.46221605", "0.46221066", "0.46208957", "0.46193835", "0.4612621", "0.4609883", "0.46071813", "0.4595925", "0.4595056", "0.45902327", "0.45887306", "0.45881593", "0.45774072", "0.4575136", "0.45671386", "0.45660186", "0.45577744", "0.45504346", "0.45492", "0.45483556", "0.45374835", "0.45374268", "0.4536813", "0.45327932", "0.45295882", "0.45292205", "0.4528797", "0.45266247", "0.45180884", "0.4506184", "0.45055553", "0.45040888", "0.4503536", "0.45025247", "0.44836888", "0.4480889", "0.44804496" ]
0.73937947
0
Compute the user pathway, given the sequence of job titles. The first job will be excluded, because it is only used to build the user's initial profile.
def compute_user_pathway(user_jobs, job_graph, debug=False):
    pathway = []
    for i, job in enumerate(user_jobs):
        if i == 0:
            continue
        cluster, _ = job_graph.assign_job_to_jobgraph_state(job)
        pathway.append(cluster)
    return pathway
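A minimal usage sketch of the document function above, assuming only what the code itself implies: `job_graph` must expose `assign_job_to_jobgraph_state(job)` returning a `(cluster, score)` pair. The `ToyJobGraph` stub and the job titles below are hypothetical placeholders for illustration, not part of the dataset row.

class ToyJobGraph:
    # Hypothetical stand-in for the real job graph: buckets a job title
    # by its last word and returns a dummy confidence score.
    def assign_job_to_jobgraph_state(self, job):
        return job.split()[-1].lower(), 1.0

user_jobs = ["Junior Developer", "Software Engineer", "Engineering Manager"]
print(compute_user_pathway(user_jobs, ToyJobGraph()))
# ['engineer', 'manager'] -- the first job is skipped, as described in the query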
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_pathways(users, job_graph, debug, min_likelihood_thr=0.2):\r\n start_time = time.time()\r\n __print_msg('Computing career pathways...', debug)\r\n user_pathways = {}\r\n tot_users = len(users)\r\n i = 0\r\n for user, user_jobs in users.items():\r\n user_pathway = compute_user_pathway(user_jobs, job_graph)\r\n recommended_pathway = recommend_pathway(user_jobs, job_graph, user_pathway[-1], min_likelihood_thr)\r\n user_pathways[user] = (user_pathway, recommended_pathway)\r\n i += 1\r\n if i % 1000 == 0:\r\n __print_msg('Num users processed: {}/{}'.format(i, tot_users), debug)\r\n end_time = time.time()\r\n __print_msg('Execution time: {} seconds'.format(end_time - start_time), debug)\r\n return user_pathways", "def recommend_pathway(user_jobs, job_graph, goal_state, min_likelihood_thr):\r\n user_jobs_for_mdp = [user_jobs[0]]\r\n mdp = MDP(job_graph, user_jobs_for_mdp, goal_state, min_likelihood_thr=min_likelihood_thr)\r\n return mdp.solve_mdp()", "def create_path(network, user_A, user_B, path=[]):\n path = path + [user_A] # all paths include starting node\n if user_A == user_B: # id the last node is user_B a valid path exists\n return path # base case\n for node in network[user_A][0]:\n if node not in path: # otherwise path is an infinite loop\n path = create_path(network, node, user_B, path)\n if path: # after the recursion hits the base case\n return path\n return None", "def user_path(a, b):\n tx = cypher_transaction()\n\n # Limit the number of relationships in the path?\n # p = shortestPath((a)-[*..15]-(b))\n query = \"\"\"\n MATCH\n (a:user {username:{username_a}}),\n (b:user {username:{username_b}}),\n p = shortestPath((a)-[]->(b))\n RETURN LENGTH(p), p\n \"\"\"\n params = {\n 'username_a': a['username'],\n 'username_b': b['username']\n }\n tx.append(query, parameters=params)\n results = _first(tx.commit())\n paths = []\n for record in results:\n length, path = record.values\n m = \"There are {0} hops from {1} to {2}:\\n\"\n print(m.format(length, a['name'], b['name']))\n for rel in path.relationships:\n print(\" ({0})-[:{1}]->({2})\".format(\n rel.start_node['name'],\n rel.type,\n rel.end_node['name']\n ))\n paths.append(path)\n return paths", "def get_pathway(identifier, organism):\n pass", "def process_user(user_name):\n \n try:\n user_pwd = getpwnam(user_name)\n except KeyError:\n print('Error: User {0} is not recognized.'.format(user_name))\n sys.exit(25)\n \n qstat = subprocess.getoutput(\"qstat -f\").split('-'.center(81, '-')) #81 -'s\n \n node_list = []\n pending_jobs = ''\n pending_search = '#'.center(79, '#') #denotes pending jobs in qstat 79 #'s\n #Weeding out nonessential nodes\n for node in qstat:\n if user_name in (node.split()):\n if pending_search in node: #Taking pending jobs out\n if \".crc.nd.edu\" in node:\n # This means its the last node. We must only accept up tp the pending jobs ONLY. Below we are doing that and taking out an\n # Additional newline by stripping it but adding one back in to keep formatting correct. (there were two instead of one).\n tempNode = (node[:node.find(pending_search)].rstrip())+'\\n'\n if user_name in tempNode:\n node_list.append(tempNode)\n pending_jobs += (node[node.find(pending_search):]) #reaping pending jobs\n else:\n node_list.append(node)\n \n final_list = []\n \n numU_jobs = 0 # Will hold the number of jobs attributed to the specified user\n numU_cores = 0 # The number of cores the user is currently using. 
Starts at 0 and counts up as jobs encountered.\n \n for host in node_list:\n # Grabbing the node's name in qstat and making a Node() instance of it\n temp_node = Node((host.split()[0]))\n host_used_cores = host.split()[2].split('/')[1]\n host_total_cores = host.split()[2].split('/')[2]\n # If within the first line of the node there is a 'd' at the end, disable it\n if len(host.split('\\n')[0]) == 6 and host.split()[5] == 'd':\n temp_node.set_disabled_switch(True)\n disabled_cores += int(host_total_cores)\n else: \n temp_node.set_disabled_switch(False)\n \n temp_node.set_cores(host_total_cores, host_used_cores)\n # In qstat -F, qf:min_cpu . . . . is the last item before the jobs are listed, \n # 28 is how many char's that string is (don't want it)\n node_stat= host[host.find('qf:min_cpu_interval=00:05:00') + 28\\\n :host.find('\\n---------------------------------------------------------------------------------\\n')]\n \"\"\"Possibly do a host.split('\\n') and join the rest of 2 - end\"\"\"\n\n # There is always an extra '\\n' in here, so subtract 1 to get rid of it\n num_jobs = len(node_stat.split('\\n')) -1\n # If there are any jobs, parse them and gather info\n if num_jobs > 0:\n # Python is non-inclusive for the right operand, and we want to \n # skip another extra '\\n' so start at 1, and want to go num_jobs\n for i in range(1, num_jobs + 1):\n info = node_stat.split('\\n')[i].split()\n temp_job = Job(info[2], info[3], info[7])\n temp_job.set_id(info[0])\n temp_job.set_priority(info[1])\n temp_node.add_job(temp_job)\n if info[3] == user_name:\n numU_jobs += 1 #info[3] is the user-name of job, if == spec. user, increment user_jobs\n numU_cores += int(info[7]) # info[7] is the number of cores occupied by the user's job\n \n final_list.append(temp_node)\n \n pending_list = []\n if len(pending_jobs): #As long as the user has pending jobs T if len != 0\n p_lines = pending_jobs.split('\\n')\n pending_list.append((p_lines[0] + '\\n' + p_lines[1] + '\\n' + p_lines[2] + '\\n'))\n for i in range(3, len(p_lines)):\n if p_lines[i].find(user_name) != (-1):\n pending_list.append(p_lines[i])\n \n if len(sys.argv) == 4:\n if sys.argv[3] == '--details':\n print_detailed_user(final_list, pending_list, user_name, numU_jobs, numU_cores)\n else:\n print('Error: Arg syntax error with: ' + sys.argv[3])\n show_usage(23)\n else:\n print_short_user(final_list, pending_list, user_name, numU_jobs, numU_cores)", "def calculateFirstPath(self):\n rd.shuffle(self.goals)\n self.path = self.goals", "def find_path_to_friend(network, user_A, user_B, path=None):\n if path is None:\n path = []\n\n if user_A in network and user_B in network:\n path.append(user_A)\n current_connections = get_connections(network, user_A)\n if user_B in current_connections:\n return [user_A, user_B]\n for u in current_connections:\n if u not in path:\n next_path = find_path_to_friend(network, u, user_B, path)\n if next_path:\n return [user_A] + next_path", "def gohome(username):\n generate_plan(username)\n generate_mileage_line(username)\n # if username == 'alex':\n # last_date = '2018-04-18'\n # generate_map(username, last_date)\n\n return redirect(url_for('.foo', username=username))", "def first_path(self):\n\t\treturn self.args[1]", "def find_path_to_friend(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return None\n # if both users exist there may be a path\n return create_path(network, user_A, user_B)", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n # 
!!!! IMPLEMENT ME\n pass", "def goal_(s):\n a, b = path.split(s)\n return path.join(a, b[len('goal_'):])", "def best_path(self, unlabeled_sequence):\n unlabeled_sequence = self._transform(unlabeled_sequence)\n return self._best_path(unlabeled_sequence)", "def constructShortestPath(self):", "def automated(user):\n\n # Start should be a Plan() and will include any classes the user has already taken to this point. It should have\n # accurate counts of its course typesTaken before starting the search. Goal is the end state desired by the user.\n # Usually this will be the gradReqs for the users Curriculum object, but this could also be used to create\n # limited searches. number_of_classes_per_quarter is the max number of courses a user is willing to take in a\n # given quarter.\n\n # Cost should represent how many quarters are needed to graduate. They should however be multiplied by some factor\n # so that they are larger than our heuristic values. Our heuristics should produce values for rarity that are 8 - #\n # of times offered in 8 quarters. So the values will range between 0-8. Unlocks could be anything between 0 and\n # the # of total classes opened up by taking it. Unlocks seems to range between 0-281. We opted to cap this number\n # to 50. We decided to cap the unlocks score to 50 and in order to have rarity weigh as much as unlocks we opted to\n # multiply its results by 6. The result is that rarity ranges between 0-42 and unlocks ranges from 0-50. We never\n # want to over-estimate the cost of a path to graduation. 50 is chosen because it is the maximum value of h(n) seen\n # in our data.\n # Should equal max(rarity) + max(unlocks) + bonus\n # g(n) = (quarters x stdCost) or cost so far\n # h(n) = stdCost - (rarity + unlocks + bonus)\n # f(n) = g(n) + h(n)\n # Record actual cost of a path as g(n) / stdCost = number of quarters to graduate.\n # A course that is offered every quarter and unlocks nothing and does not match a preferred elective type will cost\n # stdCost which is exactly what adding a class costs. Selecting something more rare, and/or unlocks more classes\n # will appear to cost less than a normal quarter. So the path will be an under-estimate of cost and therefore it\n # will be admissible. 
As long as stdCost is >= h(n) we will never have negative costs therefore h(n) will be\n # considered consistent.\n # stdCost must = max(rarity) + max(unlocks) + max(bonus)\n\n # Should we disallow online courses?\n removeOnline = user.disallowOnline\n\n # Get students undergrad degree type\n undergrad = user.undergraduate_degree\n\n # Setup Curriculum\n curriculum = user.curriculum\n\n # Setup concentration\n if curriculum is CS:\n userPref = int(user.getCSFocus)\n else:\n userPref = 1\n\n\n # Create null node\n start = Plan(\n selectionOrder = list(),\n coursesTaken = user.getCoursesTaken,\n termNum = user.getTerm,\n currTermIdx = 0,\n daysFilled = [],\n maxCourses = user.max_courses,\n typesTaken = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n selectionsWithDay = list()\n )\n\n # If the users undergrad type matches their graduate type waive introductory courses\n start = waiveCourses(start, undergrad, curriculum)\n\n # Initialize variables\n frontier = PriorityQueue()\n frontier.put(start, 0)\n costSoFar = {}\n stdCost = setStdCost(curriculum)\n courseLimit = 20 # max number of courses in a solution\n\n # Store results of the query once for reuse later in search\n terms = ['0975', '0980', '0985', '0990', '0995', '1000', '1005']\n queryResults = dict((term, TermCourses.getAvailableCourses(term)) for term in terms)\n\n # Modify the heuristic score of classes to emphasize certain course types and the students focus in particular\n queryResults = modifyHeuristics(userPref, queryResults, terms, curriculum)\n\n plansPopped = 0\n while not frontier.empty():\n # Select current plan\n curr_plan = frontier.get()\n plansPopped += 1\n\n # If the search has gone on too long return an empty list so the user can restart the search\n if timedOut(plansPopped):\n return list(('Please see advisor', 'mon'))\n\n # Goal Checking\n if isGoal(curr_plan, curriculum, courseLimit, userPref):\n break\n\n # Count up non-capstone courses in plan\n cur = curr_plan.typesTaken\n tot = cur[0] + cur[1] + cur[2] + cur[13]\n\n # Filter the query removing courses that the student cannot take\n subsetResults = queryResults[TermCourses.convert_stream(curr_plan.termNum)]\n filteredResults = filter(subsetResults, curr_plan, curr_plan.daysFilled, curriculum, tot, removeOnline)\n\n # Loop through the top 8 filtered results and try each suggested plan\n for suggestedCourseInfo in filteredResults[:8]:\n suggestedPlan = Plan(\n selectionOrder = copy.deepcopy(curr_plan.selectionOrder),\n coursesTaken = copy.deepcopy(curr_plan.coursesTaken),\n termNum = copy.deepcopy(curr_plan.termNum),\n currTermIdx = copy.deepcopy(curr_plan.currTermIdx),\n daysFilled = copy.deepcopy(curr_plan.daysFilled),\n maxCourses = user.max_courses,\n typesTaken = copy.deepcopy(curr_plan.typesTaken),\n selectionsWithDay = copy.deepcopy(curr_plan.selectionsWithDay))\n\n # Add suggested course to current plan\n addUpdateCourse(suggestedPlan, suggestedCourseInfo, curriculum)\n\n # Calculate the true cost of the current plan (non heuristic)\n new_cost = costSoFar.get(str(curr_plan.coursesTaken), costSoFar.get(str(curr_plan.coursesTaken), 0))+stdCost\n\n # Do not explore plans with excessive numbers of courses\n taken = suggestedPlan.typesTaken\n totCourses = taken[0] + taken[1] + taken[2] + taken [3] + taken[4] + taken[13]\n if curriculum == CS:\n if totCourses >= courseLimit or suggestedPlan.typesTaken[2] > 8:\n continue\n else:\n if totCourses >= courseLimit or suggestedPlan.typesTaken[2] > 3:\n continue\n\n # Only explore a plan if it has not been seen or it is 
a better plan than a previously seen version\n if str(suggestedPlan.coursesTaken) not in costSoFar or new_cost < costSoFar[str(suggestedPlan.coursesTaken)]:\n costSoFar[str(suggestedPlan.coursesTaken)] = new_cost\n priority = -new_cost + heuristics(suggestedCourseInfo, suggestedPlan, user)\n frontier.put(suggestedPlan, priority)\n\n return curr_plan.selectionsWithDay", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n print(f\"user ID {userID}\")\n\n for i in range(1, len(self.users)):\n visited[i] = self.bfs(userID, i)\n\n return visited", "def replanning_path(self):\n start_state = self.extract_start_state()\n goal_state = self.extract_goal_state()", "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def path_correction(data, user_coords):\n # Return list if it only has the destination\n if len(data) == 1:\n return data\n\n # Calculate distance from user to second waypoint\n second_coords = (data[1][\"lat\"], data[1][\"lon\"])\n user_second_dist = geopy.distance.distance(user_coords, second_coords).miles\n\n # Calculate distance from user to first waypoint\n first_coords = (data[0][\"lat\"], data[0][\"lon\"])\n user_first_dist = geopy.distance.distance(user_coords, first_coords).km\n\n # Calculate distance from first waypoint to second waypoint\n first_second_dist = geopy.distance.distance(first_coords, second_coords).miles\n\n # Determine if path correction is applicable\n if user_second_dist < first_second_dist or user_first_dist < 0.01:\n # Delete first element of list so that user doesn't backtrack\n return data[1:]\n else:\n # No path correction needed\n return data", "def _get_path_to_node(self, workflow, node, referrers):\n paths = []\n\n def _backtrack(_workflow, _node, path):\n if not _workflow or _workflow == self.primary['name']:\n paths.append([(None, _node)] + list(reversed(path)))\n return\n\n for (parent_workflow, parent_node) in referrers[_workflow]:\n if parent_workflow == self.primary['name']:\n # To avoid double-counting, we only take paths that\n # encode the primary workflow as None, rather than by\n # its name\n continue\n\n _backtrack(parent_workflow, parent_node,\n path + [(_workflow, _node)])\n\n _backtrack(workflow, node, [])\n\n if len(paths) > 1:\n raise Exception(\n 'Multiple paths found to node {}: cannot prune'.format(\n ('{}.{}'.format(workflow, node)) if workflow else node))\n\n return paths[0]", "def find_candidate_paths(G, Alice, Bob):\r\n min_path_length = nx.shortest_path_length(G, source=Alice, target=Bob, weight='weight')\r\n candidate_paths_gen = nx.all_simple_paths(G, source=Alice, 
target=Bob)\r\n\r\n candidate_paths = [tuple(candidate_paths)\r\n for candidate_paths in candidate_paths_gen]\r\n return candidate_paths, min_path_length", "def shorter_path(start, goal):\n if start == goal:\n return [start]\n explored = set() \n queue = [ [start] ] \n while queue:\n path = queue.pop(0)\n s = path[-1]\n for state, action in bj_subway[s].items():\n if state not in explored:\n explored.add(state)\n path2 = path + [action, state]\n if state == goal:\n\t\t\t\t\t# print path2\n\t\t\t\t\t# for x in queue:\n\t\t\t\t\t# print x\n\t\t\t\t\treturn path2\n else:\n queue.append(path2)\n return []", "def get_home_dir(self, username):\n return self.user_table[username]['home']", "def GuessHistoryPaths(self, username):\n client = data_store.REL_DB.ReadClientSnapshot(self.client_id)\n system = client.knowledge_base.os\n user_info = flow_utils.GetUserInfo(client.knowledge_base, username)\n\n if not user_info:\n self.Error(\"Could not find homedir for user {0}\".format(username))\n return\n\n paths = []\n if system == \"Windows\":\n path = (\"{app_data}\\\\{sw}\\\\User Data\\\\Default\\\\\")\n for sw_path in [\"Google\\\\Chrome\", \"Chromium\"]:\n paths.append(path.format(app_data=user_info.localappdata, sw=sw_path))\n elif system == \"Linux\":\n path = \"{homedir}/.config/{sw}/Default/\"\n for sw_path in [\"google-chrome\", \"chromium\"]:\n paths.append(path.format(homedir=user_info.homedir, sw=sw_path))\n elif system == \"Darwin\":\n path = \"{homedir}/Library/Application Support/{sw}/Default/\"\n for sw_path in [\"Google/Chrome\", \"Chromium\"]:\n paths.append(path.format(homedir=user_info.homedir, sw=sw_path))\n else:\n raise OSError(\"Invalid OS for Chrome History\")\n return paths", "def calculate_path(self):\n\n mid_states = []\n\n # Add in between states\n for i in range(Constants.NUMBER_LAPS):\n mid_states = mid_states + Constants.LAP_STATES\n\n # Concatenate beginning, middle and end states to obtain full path of states\n self.path_states = Constants.BEGINNING_STATES + mid_states + Constants.END_STATES\n\n # Determine the amount of times that the smallbot will drive forward during the path\n self.times_driven_forward = self.path_states.count('CREEP_FORWARD')\n\n print(\"Calculated path: \", self.path_states)", "def dir_user(assignment, user):\n return os.path.join(repository, assignment, user)", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def webdirprx(self, **kwargs):\n if 'workflow' not in kwargs or not kwargs['workflow']:\n raise InvalidParameter(\"Task name not found in the input parameters\")\n workflow = kwargs['workflow']\n self.logger.info(\"Getting proxied url for %s\", workflow)\n\n try:\n row = self.Task.ID_tuple(*next(self.api.query(None, None, self.Task.ID_sql, taskname=workflow)))\n except StopIteration:\n raise ExecutionError(\"Impossible to find task %s in the database.\" % kwargs[\"workflow\"])\n\n if row.user_webdir:\n #extract /cms1425/taskname from the user webdir\n suffix = re.search(r\"(/[^/]+/[^/]+/?)$\", row.user_webdir).group(0)\n else:\n raise ExecutionError(\"Webdir not set in the database. 
Cannot build proxied webdir\")\n\n #=============================================================================\n # scheddObj is a dictionary composed like this (see the value of htcondorSchedds):\n # \"htcondorSchedds\": {\n # \"[email protected]\": {\n # \"proxiedurl\": \"https://cmsweb.cern.ch/scheddmon/5\"\n # },\n # ...\n # }\n # so that they have a \"proxied URL\" to be used in case the schedd is\n # behind a firewall.\n #=============================================================================\n scheddsObj = self.centralcfg.centralconfig['backend-urls'].get('htcondorSchedds', {})\n self.logger.info(\"ScheddObj for task %s is: %s\\nSchedd used for submission %s\", workflow, scheddsObj, row.schedd)\n #be careful that htcondorSchedds could be a list (backward compatibility). We might want to remove this in the future\n if row.schedd in list(scheddsObj) and isinstance(scheddsObj, dict):\n self.logger.debug(\"Found schedd %s\", row.schedd)\n proxiedurlbase = scheddsObj[row.schedd].get('proxiedurl')\n self.logger.debug(\"Proxied url base is %s\", proxiedurlbase)\n if proxiedurlbase:\n yield proxiedurlbase + suffix\n else:\n self.logger.info(\"Could not determine proxied url for task %s\", workflow)", "def get_user_job_detail(user_id):\n\n return JobDetail.query.filter(JobCompletedApplication.user_id == user_id).join(JobCompletedApplication).order_by(JobCompletedApplication.application_date_submitted.desc()).all()", "def solve(self):\n return breadth_first_search(self) + [self.goal_url]", "def GuessHistoryPaths(self, username):\n client = data_store.REL_DB.ReadClientSnapshot(self.client_id)\n system = client.knowledge_base.os\n user_info = flow_utils.GetUserInfo(client.knowledge_base, username)\n\n if not user_info:\n self.Error(\"Could not find homedir for user {0}\".format(username))\n return\n\n paths = []\n if system == \"Windows\":\n path = \"{app_data}\\\\Mozilla\\\\Firefox\\\\Profiles/\"\n paths.append(path.format(app_data=user_info.appdata))\n elif system == \"Linux\":\n path = \"{homedir}/.mozilla/firefox/\"\n paths.append(path.format(homedir=user_info.homedir))\n elif system == \"Darwin\":\n path = (\"{homedir}/Library/Application Support/\" \"Firefox/Profiles/\")\n paths.append(path.format(homedir=user_info.homedir))\n else:\n raise OSError(\"Invalid OS for Chrome History\")\n return paths", "def get_success_url(self):\n is_same_user = self.get_object().userid == self.request.user\n return reverse(\"certhelper:shiftleader\") if not is_same_user else \"/\"", "def getPath(self):\r\n\t\treturn self.pathToGoal", "def reflow(self, user: User) -> Optional[Job]:\n subjobs = []\n for file in self.files.filter(\n current=True,\n upstreams__isnull=False,\n # Currently limited to convert jobs but in future there\n # may be other jobs that create a derived file\n # e.g. 
running a script that create files.\n job__method=JobMethod.convert.name,\n ).exclude(\n # Currently exclude index.html files because dealt with\n # in an explicit step in snapshot\n Q(path=\"index.html\")\n # Exclude .bib and image files which are created\n # as children of a parent file's generation\n # See https://github.com/stencila/hub/issues/1024#issuecomment-799128207\n | Q(path__endswith=\".bib\")\n | Q(path__endswith=\".png\")\n | Q(path__endswith=\".jpg\"),\n ):\n # Convert jobs only have one upstream\n upstream = file.upstreams.first()\n subjob = upstream.convert(user, file.path)\n subjobs.append(subjob)\n\n if len(subjobs) > 0:\n parallel = Job.objects.create(\n project=self,\n creator=user,\n method=JobMethod.parallel.name,\n description=\"Update derived files\",\n )\n parallel.children.set(subjobs)\n return parallel\n else:\n return None", "def get_home_dir(self, username):\n user = connection.User.find_one({'email': str(username) })\n return str(user['_id'])", "def pathDAG(graph, value, path, onePath):\n for node in graph:\n if node.value == value:\n for vertex in node.arrow:\n if vertex == None:\n path.append(onePath)\n break\n \n else:\n onePath.append(vertex.value)\n pathDAG(graph, vertex.value, path, onePath)\n onePath = [onePath[0]]\n \n return path", "def second_path(self):\n\t\treturn self.args[2]", "def reconstruct_path(goal: Vector2D, prev_node: dict) -> list:\n path = []\n prev = prev_node[goal] # remove 'goal' from path\n \n while prev != None:\n path.append(prev)\n prev = prev_node[prev]\n \n path = path[:-1] # remove 'start' from path\n path.reverse()\n return path", "def path(visited,node):\n solution_path = [node]\n while solution_path[-1][\"parent\"]:\n solution_path.append(visited[tuple(solution_path[-1][\"parent\"])])\n return solution_path", "def job_title(self, job):\n def _format_num(num):\n if isinstance(num, bool):\n return str(num)\n elif isinstance(num, Real):\n return str(round(num, 2))\n return str(num)\n\n try:\n s = []\n for keys in sorted(self._schema_variables()):\n v = job.statepoint()[keys[0]]\n try:\n for key in keys[1:]:\n v = v[key]\n except KeyError: # Particular key is present in overall\n continue # schema, but not this state point.\n else:\n s.append('{}={}'.format('.'.join(keys), _format_num(v)))\n return ' '.join(s)\n except Exception as error:\n logger.debug(\n \"Error while generating job title: '{}'. 
\"\n \"Returning job-id as fallback.\".format(error))\n return str(job)", "def getBestPath(self):\n if self._bestPathVertex.getNextWaypoint() is None:\n numWaypointsCompleted = len(self._waypoints)\n quality = 2\n if self._vertexQueue.isEmpty():\n quality += 1\n else:\n numWaypointsCompleted = self._bestPathVertex.getNextWaypoint().getIndex()\n quality = 1\n if self._vertexQueue.isEmpty():\n quality -= 1\n \n return outputPath.generatePath(self._bestPathVertex, self._params.waypointAcceptanceRadii, quality, numWaypointsCompleted)", "def metric_path_length(pathways):\r\n num_users = len(pathways)\r\n num_good_recommendations = 0\r\n sum_u_path_len = 0\r\n sum_r_path_len = 0\r\n career_goal_reached = 0\r\n for user, pathway_tuple in pathways.items():\r\n u_path = pathway_tuple[0]\r\n r_path = pathway_tuple[1]\r\n sum_u_path_len += len(u_path)\r\n sum_r_path_len += len(r_path)\r\n if r_path[-1]==u_path[-1]:\r\n career_goal_reached += 1\r\n if len(r_path) < len(u_path):\r\n num_good_recommendations += 1\r\n return 100.0 * career_goal_reached/num_users, 100.0 * num_good_recommendations / num_users, sum_u_path_len/num_users, sum_r_path_len/num_users", "def userPath2Path(arg, frame):\n arg = ingest(arg)\n if isabs(arg):\n return Path(arg)\n else:\n return Path(arg, frame)", "def _compute_next_task_for_user(user, project, language_pair):\n # Check if project is valid for the given user.\n if not project in user.project_set.all():\n LOGGER.debug('User {0} does not work on project {1}.'.format(\n user, project\n ))\n return None\n \n # Check if language_pair is valid for the given user.\n if not user.groups.filter(name=language_pair):\n LOGGER.debug('User {0} does not know language pair {1}.'.format(\n user, language_pair))\n return None\n\n # Check if there exists a current HIT for the given user.\n current_hitmap = UserHITMapping.objects.filter(user=user,\n project=project, hit__language_pair=language_pair)\n\n # If there is no current HIT to continue with, find a random HIT for the\n # given user. We keep generating a random block_id in [1, 1000] until we\n # find a matching HIT which the current user has not yet completed.\n if not current_hitmap:\n LOGGER.debug('No current HIT for user {0}, fetching HIT.'.format(\n user))\n \n # Compatible HIT instances need to match the given language pair!\n # Furthermore, they need to be active and not reserved for MTurk.\n hits = HIT.objects.filter(active=True, mturk_only=False,\n completed=False, project=project, language_pair=language_pair)\n \n LOGGER.debug(\"HITs = {0}\".format(hits))\n \n # Compute list of compatible block ids and randomise its order.\n #\n # cfedermann: for WMT14 Matt did not provide block ids anymore.\n # This meant that our shuffled list of block ids only contained\n # [-1, ..., -1] entries; using these to filter and check for\n # respective HIT status is a quadratic increase of redundant work\n # which will take prohibitively long when there is no next HIT.\n #\n # Converting to unique HIT ids will speed up things drastically.\n hit_ids = list(set(hits.values_list('hit_id', flat=True)))\n shuffle(hit_ids)\n LOGGER.debug(\"HIT IDs = {0}\".format(hit_ids))\n \n # Find the next HIT for the current user.\n random_hit = None\n for hit_id in hit_ids:\n for hit in hits.filter(hit_id=hit_id):\n hit_users = list(hit.users.all())\n \n # Check if this HIT is mapped to users. 
This code prevents\n # that more than MAX_USERS_PER_HIT users complete a HIT.\n for hitmap in UserHITMapping.objects.filter(hit=hit):\n if not hitmap.user in hit_users:\n hit_users.append(hitmap.user)\n \n if not user in hit_users:\n if len(hit_users) < MAX_USERS_PER_HIT:\n random_hit = hit\n break\n \n if random_hit:\n break\n \n # If we still haven't found a next HIT, there simply is none...\n if not random_hit:\n # TODO: We should now investigate if there is any HIT assigned\n # to a user but has not been finished in a certain time span.\n # Such a HIT can be freed and assigned to the current user. \n return None\n \n # Update User/HIT mappings s.t. the system knows about the next HIT.\n current_hitmap = UserHITMapping.objects.create(user=user,\n project=project, hit=random_hit)\n \n # Otherwise, select first match from QuerySet.\n else:\n current_hitmap = current_hitmap[0]\n \n # Sanity check preventing stale User/HIT mappings to screw up things.\n #\n # Before we checked if `len(hit_users) >= 3`.\n hit_users = list(current_hitmap.hit.users.all())\n if user in hit_users or len(hit_users) >= 1 \\\n or not current_hitmap.hit.active:\n LOGGER.debug('Detected stale User/HIT mapping {0}->{1}'.format(\n user, current_hitmap.hit))\n current_hitmap.delete()\n return _compute_next_task_for_user(user, project, language_pair)\n \n LOGGER.debug('User {0} currently working on HIT {1}'.format(user,\n current_hitmap.hit))\n \n return current_hitmap.hit", "def get_path(prevs, goal, start):\n path = OD({goal: 0})\n cur = goal\n while cur != start:\n (cost, node) = prevs.get(cur)\n if node == None or node in path:\n print(\"ERROR: No path found from %s -> %s\" % (start, goal))\n return (0, None)\n path[node] = path[cur] + cost\n cur = node\n return (path[start], path.keys()[::-1])", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! 
IMPLEMENT ME\n q = Queue()\n q.put([userID])\n\n while not q.empty():\n user_path = q.get()\n user = user_path[-1]\n\n if user not in visited.keys():\n visited[user] = user_path\n for friend in self.friendships[user]:\n new_path = user_path[::]\n new_path.append(friend)\n q.put(new_path)\n\n # get average degree of separation per user\n degrees = 0\n for key, item in visited.items():\n degrees += len(item)\n \n print('average degree of separation', degrees/len(self.users))\n\n return visited", "def find_trip_based_paths_process_worker(iteration, worker_num, input_network_dir, input_demand_dir,\n output_dir, todo_pathset_queue, done_queue, hyperpath, bump_wait_df, stop_times_df):\n worker_str = \"_worker%02d\" % worker_num\n\n from .FastTrips import FastTrips\n setupLogging(infoLogFilename = None,\n debugLogFilename = os.path.join(output_dir, FastTrips.DEBUG_LOG % worker_str), \n logToConsole = False,\n append = True if iteration > 1 else False)\n FastTripsLogger.info(\"Iteration %d Worker %2d starting\" % (iteration, worker_num))\n\n # the child process doesn't have these set to read them\n Assignment.read_configuration(override_input_network_dir=output_dir,\n override_input_demand_dir=input_demand_dir,\n config_file=Assignment.CONFIGURATION_OUTPUT_FILE)\n\n # this passes those read parameters and the stop times to the C++ extension\n Assignment.initialize_fasttrips_extension(worker_num, output_dir, stop_times_df)\n\n # the extension has it now, so we're done\n stop_times_df = None\n\n if iteration > 1:\n Assignment.set_fasttrips_bump_wait(bump_wait_df)\n\n while True:\n # go through my queue -- check if we're done\n todo = todo_pathset_queue.get()\n if todo == 'DONE':\n done_queue.put( (worker_num, 'DONE') )\n FastTripsLogger.debug(\"Received DONE from the todo_pathset_queue\")\n return\n\n # do the work\n pathset = todo\n\n FastTripsLogger.info(\"Processing person %20s path %d\" % (pathset.person_id, pathset.trip_list_id_num))\n # communicate it to the parent\n done_queue.put( (worker_num, \"STARTING\", pathset.person_id, pathset.trip_list_id_num ))\n\n trace_person = False\n if pathset.person_id in Assignment.TRACE_PERSON_IDS:\n FastTripsLogger.debug(\"Tracing assignment of person %s\" % pathset.person_id)\n trace_person = True\n\n try:\n (pathdict, perf_dict) = Assignment.find_trip_based_pathset(iteration, pathset, hyperpath, trace=trace_person)\n done_queue.put( (worker_num, \"COMPLETED\", pathset.trip_list_id_num, pathdict, perf_dict) )\n except:\n FastTripsLogger.exception(\"Exception\")\n # call it a day\n done_queue.put( (worker_num, \"EXCEPTION\", str(sys.exc_info()) ) )\n return", "def _setJob_getPath(job, shot=False):\n\tjobpath = j.getPath(job, translate=True)\n\n\tif shot:\n\t\tpath = os_wrapper.absolutePath(\"%s/$IC_SHOTSDIR/%s\" % (jobpath, shot))\n\telse:\n\t\tpath = os_wrapper.absolutePath(\"%s/$IC_SHOTSDIR\" % jobpath)\n\n\treturn path", "def job_priority_key(self, job):\n camp, user = job.camp, job.user\n end = camp.time_left / user.shares # lower value -> higher priority\n # The `end` should be further multiplied by\n # `_stats.active_shares` / `_stats.cpu_used`.\n # However, that gives the same value for all the jobs\n # and we only need the ordering, not the absolute value.\n return (end, camp.created, user.ID, camp.ID,\n job.submit, job.ID)", "def generate_path(goal_node, visited):\n goal_state = goal_node['state']\n path = [goal_state]\n while goal_node['parent']:\n path.append(goal_node['state'])\n goal_node = visited[goal_node['parent']]\n return path", "def 
best_path_simple(self, unlabeled_sequence):\n unlabeled_sequence = self._transform(unlabeled_sequence)\n return self._best_path_simple(unlabeled_sequence)", "def follow_way_succed(board, x, y, path):\n step = 0\n endX, endY = x, y\n for d in path:\n step += 1\n if d == 'U':\n endY -= 1\n elif d == 'D':\n endY += 1\n elif d == 'R':\n endX += 1\n else:\n endX -= 1\n if board[endY][endX] == 1:\n return False\n elif board[endY][endX] == 8:\n return path[:step]", "def shortJourney(Alist,s,d):\n \"\"\"Find shortest distances to s in weighted graph, G\"\"\"\n \n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin \n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #key difference below\n dcomp = (w+dmin) #take sum as you go along\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n print(path) \n if nmin == d: #if current node is destination\n return [path[d],Edict[d]]\n return [] #no path", "def compactuser(path):\n userPath = expanduser('~')\n otherPath = expanduser(path)\n \n prefix = commonprefix([userPath, otherPath])\n if prefix == userPath:\n return '~' + otherPath[len(prefix):]\n else:\n return otherPath", "async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n if is_hassio(self.hass):\n return await self.async_step_on_supervisor()\n\n return await self.async_step_manual()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n queue = util.PriorityQueue() # PrioritQueue for searshing the graph/ it expand the node with the lowest cost\n visited = [] # Keep track of visited nodes\n path = [] # Keep track of the path\n start =problem.getStartState() # The start node\n\n queue.push((start, path,0), 0) \n \n while not queue.isEmpty():\n (vrtx, path, costparent) = queue.pop() \n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n\n visited.append(vrtx) \n for successor in problem.getSuccessors(vrtx):\n cost = successor[2]+ costparent\n queue.push((successor[0], path+[successor],cost),cost)\n \n\n util.raiseNotDefined()", "def resolve_worker_evaluation_url(request, user):\n return request.build_absolute_uri(reverse('hirer:evaluate', args=[user.id]))", "def _build_reverse_url(self, name, args=None, kwargs=None): \n pk = kwargs.pop('pk')\n object = self.obj_get(pk=pk)\n kwargs['user__username'] = object.user.username\n return reverse(name, args=args, kwargs=kwargs)", "def reponames(gh, user):\n return [u.split('/')[-1] for u in urls(gh, user)]", "def solution_path(self) -> list[State]:", "async def async_step_user(self, user_input=None):\n\n self._servers = await discover(self.hass)\n\n # We discovered one or more roon - so skip to authentication\n if self._servers:\n return await self.async_step_link()\n\n return 
await self.async_step_fallback()", "def backtracking(goal):\n path = []\n current = goal\n while current.came_from:\n path.insert(0, current.move)\n current = current.came_from\n return ''.join(path)", "def topo_shortestpathij(self, i, j):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for i in range(len(pathlist)):\n distance.append(len(pathlist[i]) - 1)\n \n if(len(distance) == 0):\n return None\n else:\n return min(distance)", "def path(self): # Path taken to reach Goal\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def resolve_project_prefix(self, path):\n\n for nodePath, node in self.cache.get_tree(self.userProjects).items():\n if type(node) is gitlab.v4.objects.Project and path.startswith(nodePath):\n remainingPath = pathlib.Path(path).relative_to(pathlib.Path(nodePath))\n return node, remainingPath\n\n return None, None", "def userlist_path(address):\n return path.join(conf.userlistdir, match_userlist(address))", "def one_way_path(most_important, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(most_important), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]", "def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)", "def path(most_important_up, most_important_down, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return source_to_source(most_important_up, most_important_down), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]", "def findJob():\n candidate = Candidate()\n print('Enchanté, je suis BOBI, un agent DAVEO. Je souhaiterais savoir quelles sont les tâches que vous voulez '\n 'faire dans votre prochain métier. 
Pouvez vous me les lister ?')\n finish = False\n\n jobsPointsCount = dict()\n # This boolean is here to allow user to quit conversation by entering two empty lines in console.\n keyboardEntry = False\n while not finish:\n action = input()\n # If line is empty, verify if user want to exit conversation\n if action == \"\":\n if keyboardEntry:\n finish = True\n else:\n keyboardEntry = True\n # If line is too long, inform user (System is more efficient with simple phrases)\n elif len(action) > 140:\n keyboardEntry = False\n print('Je suis encore jeune et aie beaucoup à apprendre, pourriez vous écrire moins de 140 caractères '\n 's\\'il vous plait ?')\n else:\n keyboardEntry = False\n # Create triples from user phrase to match with rdf graph\n triples = createTripleSpacyLefff(action, \"\")\n for triple in triples:\n triple = formatTriple(triple)\n # Request rdf graph to check if triple match with known triple and get job name associate\n results, usedTriples = requestJobName(triple[1], triple[2], candidate.usedTriples)\n candidate.usedTriples.append(usedTriples)\n # For each job name found, increment score\n for result in results:\n if result in jobsPointsCount:\n jobsPointsCount[result] += 1\n else:\n jobsPointsCount[result] = 1\n topPoints = []\n while len(topPoints) < 3 and len(jobsPointsCount) > 0:\n bestScore = -1\n bestJob = \"\"\n for job in jobsPointsCount:\n if jobsPointsCount[job] > bestScore:\n bestJob = job\n bestScore = jobsPointsCount[job]\n del jobsPointsCount[bestJob]\n topPoints.append((bestJob, bestScore))\n\n print(topPoints)\n\n formCount = 0\n noMoreProposals = False\n while not noMoreProposals and len(topPoints) >= 2 and topPoints[1][1] >= (topPoints[0][1] * 0.5) and \\\n topPoints[0][1] < 3 and formCount < 3:\n proposals = []\n for index, job in enumerate(topPoints):\n action = requestActionFromJob(job[0], candidate.usedTriples)\n if action:\n proposals.append(str(index + 1) + \" : \" + action[1] + \" \" + action[2])\n if proposals:\n print(\"\\nQue préférez-vous entre les propositions suivantes ?\")\n for prop in proposals:\n print(prop)\n else:\n noMoreProposals = True\n selectedIndex = int(input()) - 1\n if len(topPoints) > selectedIndex >= 0:\n topPoints[selectedIndex] = (topPoints[selectedIndex][0], topPoints[selectedIndex][1] + 1)\n formCount += 1\n print(topPoints)\n return topPoints", "def test_make_pathways(self):\n basic_test_runner(self, 'pathways')", "def run_alt_path_extraction(job, context, inputGraphFileIDs, graph_names, index_name):\n \n assert(len(inputGraphFileIDs) == len(graph_names))\n \n if len(inputGraphFileIDs) > 1:\n # We have been given multiple chromosome graphs. 
\n \n RealtimeLogger.info(\"Breaking up alt path GAM computation for {}\".format(str(graph_names)))\n \n sub_jobs = []\n for i, (file_id, file_name) in enumerate(zip(inputGraphFileIDs, graph_names)):\n # For each input graph, make a child job to index it.\n sub_jobs.append(job.addChildJobFn(run_alt_path_extraction, context, [file_id], [file_name],\n index_name + '.{}'.format(i) if index_name else None,\n cores=context.config.chunk_cores,\n memory=context.config.chunk_mem,\n disk=context.config.chunk_disk))\n \n # Make a job to concatenate the indexes all together \n concat_job = sub_jobs[0].addFollowOnJobFn(run_concat_files, context, [job.rv() for job in sub_jobs],\n index_name + '_alts.gam' if index_name is not None else None,\n memory=context.config.chunk_mem,\n disk=context.config.chunk_disk)\n \n for i in range(1, len(sub_jobs)):\n # And make it wait for all of them\n sub_jobs[i].addFollowOn(concat_job)\n \n return concat_job.rv()\n \n else:\n # Base case: single graph\n \n start_time = timeit.default_timer()\n \n # Define work directory for docker calls\n work_dir = job.fileStore.getLocalTempDir()\n\n # Download the one graph\n graph_id = inputGraphFileIDs[0]\n graph_filename = graph_names[0]\n job.fileStore.readGlobalFile(graph_id, os.path.join(work_dir, graph_filename))\n\n # Where do we put the gam?\n gam_filename = os.path.join(work_dir, \"{}_alts.gam\".format(index_name if index_name is not None else \"part\"))\n\n cmd = ['vg', 'paths', '-v', graph_filename, '-Q', '_alt_', '-X']\n with open(gam_filename, 'wb') as gam_file:\n try:\n # Compute snarls to the correct file\n context.runner.call(job, cmd, work_dir=work_dir, outfile=gam_file)\n except:\n # Dump everything we need to replicate the indexing\n logging.error(\"Alt path gam extraction failed. Dumping files.\")\n context.write_output_file(job, os.path.join(work_dir, graph_filename))\n raise\n \n if index_name is not None:\n # Checkpoint index to output store\n gam_file_id = context.write_output_file(job, gam_filename)\n else:\n # Just save the index as an intermediate\n gam_file_id = context.write_intermediate_file(job, gam_filename)\n \n \n end_time = timeit.default_timer()\n run_time = end_time - start_time\n RealtimeLogger.info(\"Finished GAM extraction. 
Process took {} seconds.\".format(run_time))\n\n return gam_file_id", "def reconstruct_path(came_from, start, goal):\n current = goal\n path = [current]\n\n # Append configuartion to board as a step until the begin situation is reached\n while current != start:\n current = came_from[current][0]\n path.append(current)\n path.append(start)\n path.reverse()\n return [path[1:]]", "def get_building_by_user(self, user):\r\n\t\t\r\n\t\treturn self.transactions[user][1]", "def getAllSocialPaths(self, userID):\n visited = {}\n # use a queue\n q = []\n q.append([userID])\n # add userID as its own key and value to visited\n visited[userID] = [userID]\n\n while len(q) > 0:\n path = q.pop(0)\n curr_friend = path[-1]\n\n # for all the userID keys inside self.friendships\n for friend in self.friendships[curr_friend]:\n # add neighbor as a key, if not visited, in visited with an empty list as value\n if friend not in visited:\n visited[friend] = list()\n # break out of loop if already in visited\n else: \n continue\n \n # create a new list that holds the path from userID to friend\n friend_path = list(path)\n # add the friend onto the end of the list\n friend_path.append(friend)\n # also add path to the queue\n q.append(friend_path) \n # add path as the value to the friend\n visited[friend].extend(friend_path)\n \n return visited", "def _checking_path(self, node_name, first_name, path=0):\n if not self.successors[node_name]:\n return True\n for nd_in in self.successors[node_name]:\n if nd_in.name in self.max_paths[first_name].keys():\n # chose the maximum paths\n self.max_paths[first_name][nd_in.name] = max(\n self.max_paths[first_name][nd_in.name], path + 1\n )\n else:\n self.max_paths[first_name][nd_in.name] = path + 1\n self._checking_path(\n node_name=nd_in.name, first_name=first_name, path=path + 1\n )", "def generate_user_link(user):\n return '[@{0}](https://github.com/{0})'.format(user)", "def uniformCostSearch(problem):\n pq = util.PriorityQueue()\n startState = problem.getStartState()\n pq.push((\"\", None, startState), 0)\n # moves[state] returns (dir, parent) the direction and the parent from which this state was reached\n moves = {}\n # Current cost to reach this state\n stateCost = {startState: 0}\n goalState = None\n while not pq.isEmpty():\n (move, parent, currentState) = pq.pop()\n if currentState in moves: # already visited\n continue\n moves[currentState] = (move, parent)\n if problem.isGoalState(currentState): # Found goal state\n goalState = currentState\n break\n children = problem.getSuccessors(currentState)\n for successor, action, cost in children:\n if successor not in stateCost or stateCost[successor] > stateCost[currentState] + cost:\n pq.push((action, currentState, successor), stateCost[currentState] + cost)\n stateCost[successor] = stateCost[currentState] + cost\n\n currentState = goalState\n path = []\n while currentState != startState:\n (move, parent) = moves[currentState]\n path.append(move)\n currentState = parent\n path.reverse()\n return path", "def _get_job_name(path, beam):\n file_parts = os.path.splitext(os.path.basename(MADX_TEMPLATE))\n out_parts = [file_parts[0].replace(\"template\", \"job\"),\n \"b{:d}\".format(beam),\n file_parts[1].strip(\".\")]\n return os.path.join(path, \".\".join(out_parts))", "def get_user_job_type():\n email = helpers.get_user_email()\n privileged_user_emails = (db_config.get_value('privileged_users') or\n '').splitlines()\n for privileged_user_email in privileged_user_emails:\n if ';' in privileged_user_email:\n tokens = 
privileged_user_email.split(';')\n privileged_user_real_email = tokens[0]\n privileged_user_job_type = tokens[1]\n if utils.emails_equal(email, privileged_user_real_email):\n return privileged_user_job_type\n return None", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n qu = util.PriorityQueue()\n visited = set([])\n current = (problem.getStartState(), \"\", 0)\n qu.update(current, 0)\n costs = {}\n parents = {}\n parents[problem.getStartState()] = (problem.getStartState(), \"\")\n\n while not qu.isEmpty():\n cost, current= qu.pop()\n visited.add(current[0])\n\n if problem.isGoalState(current[0]):\n result = current[0]\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n qu.update(each, cost+each[2])\n if each[0] not in costs:\n costs[each[0]] = cost+each[2]\n parents[each[0]] = (current[0], each[1])\n elif costs[each[0]] > cost+each[2]:\n costs[each[0]] = cost + each[2]\n parents[each[0]] = (current[0], each[1])\n\n path = []\n while parents[result][0] != result:\n path.append(parents[result][1])\n result = parents[result][0]\n\n path.reverse()\n result = []\n for each in path:\n if each == \"South\":\n result.append(s)\n elif each == \"West\":\n result.append(w)\n elif each == \"North\":\n result.append(n)\n elif each == \"East\":\n result.append(e)\n\n return result\n util.raiseNotDefined()", "def create_user_path_assoc():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n reqdata = request.json\n if not check_token(reqdata[\"token\"]):\n return jsonify(status=TOKEN_ERROR)\n #TODO check that request body contain needed data\n #if [\"user_id\", \"path_id\", \"ready\", \"coordinate\", \"first_name\", \"second_name\", \"phone\"].sort() != (data.keys()).sort():\n # return jsonify(status=\"err\")\n user_id = reqdata[\"user_id\"]\n path_id = reqdata[\"path_id\"]\n ready = True\n coordinate = reqdata[\"coordinate\"]\n first_name = reqdata[\"first_name\"]\n second_name = reqdata[\"second_name\"]\n phone = reqdata[\"phone\"]\n #TODO data validation\n user = db.session.query(User).filter(User.id==user_id).scalar() is not None\n path = db.session.query(Path).filter(Path.id==path_id).scalar() is not None\n if user and path:\n db.session.add(UserPathAssociation(user_id=user_id, path_id=path_id, ready=ready,coordinate=coordinate, first_name=first_name, second_name=second_name,phone=phone))\n try:\n db.session.commit()\n return jsonify(status=OK_STATUS)\n except:\n db.session.rollback()\n return jsonify(status=DATABASE_INTEGRITY_ERROR)\n else:\n return jsonify(status=\"err\")", "def _get_upgrade_step_by_title(self, title):\n self.setup.setLastVersionForProfile(self.profile_id, self.from_)\n upgrades = self.setup.listUpgrades(self.profile_id)\n steps = [s for s in upgrades[0] if s['title'] == title]\n return steps[0] if steps else None", "def _get_upgrade_step_by_title(self, title):\n self.setup.setLastVersionForProfile(self.profile_id, self.from_)\n upgrades = self.setup.listUpgrades(self.profile_id)\n steps = [s for s in upgrades[0] if s['title'] == title]\n return steps[0] if steps else None", "def action(self):\n\n # assume the smart opponent can always choose the best step\n # Depth First Search\n steps = 2\n stack = [(self.game_in_head, (), 0)]\n maxmin = None\n good_paths = []\n\n while len(stack) > 0:\n parent_node, path, score = stack.pop(-1)\n if len(path) >= steps*2:\n \n # leaf 
node in the search tree\n if maxmin is None:\n maxmin = score\n good_paths.append(path)\n elif maxmin == score:\n good_paths.append(path)\n elif maxmin < score:\n maxmin = score\n good_paths.clear()\n good_paths.append(path)\n else:\n # root node, find its leaves\n children_nodes = self.one_step_infe(parent_node, path, score)\n stack += children_nodes\n\n path_dec = random.choice(good_paths) \n if self.colour == 'upper':\n return path_dec[0] \n elif self.colour == 'lower':\n return path_dec[1]", "def findShortestPath(self):\r\n pass", "def get_depth_first_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):\r\n # Uncomment the next three lines to profile the sched function\r\n #import timeit\r\n #T = timeit.Timer(lambda: get_candidate_tasks(app_id, user_id,\r\n # user_ip, n_answers))\r\n #print \"First algorithm: %s\" % T.timeit(number=1)\r\n candidate_tasks = get_candidate_tasks(app_id, user_id, user_ip, n_answers, offset=offset)\r\n total_remaining = len(candidate_tasks)\r\n #print \"Available tasks %s \" % total_remaining\r\n if total_remaining == 0:\r\n return None\r\n if (offset == 0):\r\n return candidate_tasks[0]\r\n else:\r\n if (offset < len(candidate_tasks)):\r\n return candidate_tasks[offset]\r\n else:\r\n return None", "def _find_fastest_path(self):\n from simulator import Robot\n clone_robot = Robot(exploration_status=self._robot.exploration_status,\n facing=self._robot.facing,\n discovered_map=self._robot.discovered_map,\n real_map=[[0] * 15 for _ in range(20)])\n\n fastest_path_start_way_point = get_shortest_path_moves(clone_robot,\n start=(1, 1),\n goal=self._way_point)\n\n if fastest_path_start_way_point:\n for move in fastest_path_start_way_point:\n clone_robot.move_robot(move)\n\n before_way_point = previous_cell(clone_robot.center, clone_robot.facing)\n\n fastest_path_way_point_goal = get_shortest_path_moves(clone_robot,\n start=self._way_point,\n goal=(18, 13),\n before_start_point=before_way_point)\n\n return fastest_path_start_way_point + fastest_path_way_point_goal", "def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! 
IMPLEMENT ME\n # graphs=Graph()\n # for i in self.users:\n # graphs.add_vertex(i)\n \n # for i in self.users:\n # for x in self.friendships[i]:\n # graphs.add_edge(i,x)\n\n # for i in graphs.vertices:\n # if graphs.bfs(i,user_id):\n # visited[i]=graphs.bfs(i,user_id)\n queue=Queue()\n queue.enqueue([user_id])\n while queue.size()>0:\n path=queue.dequeue()\n current_user = path[-1]\n if current_user not in visited:\n visited[current_user]=path\n for ID in self.friendships[current_user]:\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n return visited", "def breadthFirstSearchPaths(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n #explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def get_current_user_tasks_route():\n user = current_user\n\n if user.get_id() is not None:\n return get_user_jobs_route(user.id)\n else:\n response_object = {'status': 'error'}\n return jsonify(response_object)", "def path(self):\n return pjoin(self._dj._jobsdir, self._status, self.full_name())", "def extract_path_from_walking_history(self, goal, path):\n if path[goal] is None:\n self.bfs_path.reverse()\n print (\"Path Size: \" + str(len(self.bfs_path)))\n return self.bfs_path\n self.bfs_path.append(path[goal][1])\n return self.extract_path_from_walking_history(path[goal][0], path)", "def get_home_directory(self, user: str) -> str:\n process = self.run(\n \"/\",\n \"root\",\n [\"sh\", \"-c\", f\"realpath ~{user}\"],\n encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n )\n return process.stdout.strip()", "def get_history(user):\n if user in resteems and user in honours:\n return \"**\"+str(resteems[user])+\"** Resteems, **\"+str(honours[user])+\"** Honours\"\n elif user in resteems:\n return \"**\"+str(resteems[user])+\"** Resteems, **0** Honours\"\n elif user in honours:\n return \"**0** Resteems, **\"+str(honours[user])+\"** Honours\"\n else:\n return \"**0** Resteems, **0** Honours\"", "def shortest_path_search(start, successors, is_goal):\n if is_goal(start):\n return [start]\n explored = set()\n frontier = [ [start] ] \n while frontier:\n path = frontier.pop(0)\n s = path[-1]\n for (state, action) in successors(s).items():\n if state not in explored:\n explored.add(state)\n path2 = path + [action, state]\n if is_goal(state):\n return path2\n else:\n frontier.append(path2)\n return Fail", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n\n pq = PriorityQueue()\n visited = []\n start = problem.getStartState()\n mapper = {}\n \n mapper[problem.getStartState()] = None\n pq.push(problem.getStartState(), 1)\n\n while (not pq.isEmpty()):\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = 
mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n #util.raiseNotDefined()\n if not (point in visited):\n visited.append(point)\n succs = problem.getSuccessors(point)\n succs.reverse()\n for child in succs:\n if not (child[0] in mapper):\n pq.push(child[0], child[2]) #child has (xy, direction, weight)\n mapper[child[0]] = point, child[1]\n # util.raiseNotDefined()", "def path_cost(path):\n # path = [state, (action, total_cost), state, ... ]\n if len(path) < 2:\n return 0\n else:\n return path[-2][-1]", "def all_pairs_shortest_paths(self, n_jobs=1, engine='cython', verbose=0, *args, **kwargs):\n\t\tif self.verbose:\n\t\t\tprint('Calculating APSP - All Pairs Shortest Paths')\n\n\t\tfor path in self.local_paths:\n\t\t\tif path is None:\n\t\t\t\traise Exception(\"Shortest distances and local paths must be calculated first. Run `all_pairs_shortest_distances`.\")\n\n\t\tif engine == 'python':\n\t\t\tpoolresults = Parallel(n_jobs=n_jobs,verbose=verbose)(delayed(_py_single_source_complete_paths)(node, self.N, self.local_paths[node]) for node in self.N)\n\t\telif engine == 'cython':\n\t\t\t#\n\t\t\tpoolresults = range(len(self.N))\n\t\t\tfor node in self.N:\n\t\t\t\tpoolresults[node] = _cy_single_source_complete_paths(node, self.N, self.local_paths[node])\n\n\t\t# PoolResults returns a list, map into a dict of nodes\n\t\tself.shortest_paths = dict( zip( self.N , poolresults ) )\n\n\t\treturn self.shortest_paths" ]
[ "0.66621715", "0.64165413", "0.5436636", "0.5328678", "0.5032977", "0.49962577", "0.49273804", "0.488265", "0.46935585", "0.46905273", "0.46765473", "0.4644199", "0.46357313", "0.45190293", "0.45152992", "0.4504067", "0.44798177", "0.44772255", "0.4473935", "0.44730064", "0.4469855", "0.44681513", "0.44655383", "0.44641146", "0.4447365", "0.44448233", "0.44412494", "0.44308916", "0.44179028", "0.4414", "0.44121516", "0.44060656", "0.4402885", "0.43979636", "0.43963856", "0.43951946", "0.43878987", "0.4378589", "0.43727455", "0.43711752", "0.4364602", "0.43614453", "0.4359469", "0.43464455", "0.4344042", "0.43438944", "0.43395293", "0.43390968", "0.43336037", "0.43283153", "0.4324601", "0.4321761", "0.43167308", "0.43027005", "0.42842185", "0.4284032", "0.4283664", "0.42733607", "0.42648473", "0.4235987", "0.42335474", "0.42303073", "0.42278275", "0.42149752", "0.4214224", "0.42135668", "0.42099598", "0.4208787", "0.4205271", "0.4204941", "0.4202335", "0.4201987", "0.42017698", "0.41987005", "0.41933292", "0.41910467", "0.41900823", "0.418943", "0.4182926", "0.41748697", "0.41708037", "0.41662624", "0.41586423", "0.4156433", "0.4156433", "0.41462728", "0.41462082", "0.4138641", "0.41364598", "0.4132431", "0.41290063", "0.41286728", "0.4123909", "0.41224146", "0.41208005", "0.41192487", "0.41117695", "0.41095555", "0.4108159", "0.4103668" ]
0.71870023
0
Recommend a pathway, given the sequence of job titles.
def recommend_pathway(user_jobs, job_graph, goal_state, min_likelihood_thr):
    user_jobs_for_mdp = [user_jobs[0]]
    mdp = MDP(job_graph, user_jobs_for_mdp, goal_state, min_likelihood_thr=min_likelihood_thr)
    return mdp.solve_mdp()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recommendations(artist, title):\n rec = Recommendation(artist, title)\n if rec.find_track_info():\n rec.load_recommendations()\n return render_template('recommendations.html', title='Your Recommendations', rec=rec)\n flash('Whoops, we did not find the track \"{}\" by {}!'.format(\\\n title, artist), category='error')\n return render_template('whoops.html', title='Song Not Found')", "def solve_example(parser: ArgumentParser) -> None:\n parser.add_argument(\"--word\", type=str, help=\"Word representing the one relator\", required=True)", "def findJob():\n candidate = Candidate()\n print('Enchanté, je suis BOBI, un agent DAVEO. Je souhaiterais savoir quelles sont les tâches que vous voulez '\n 'faire dans votre prochain métier. Pouvez vous me les lister ?')\n finish = False\n\n jobsPointsCount = dict()\n # This boolean is here to allow user to quit conversation by entering two empty lines in console.\n keyboardEntry = False\n while not finish:\n action = input()\n # If line is empty, verify if user want to exit conversation\n if action == \"\":\n if keyboardEntry:\n finish = True\n else:\n keyboardEntry = True\n # If line is too long, inform user (System is more efficient with simple phrases)\n elif len(action) > 140:\n keyboardEntry = False\n print('Je suis encore jeune et aie beaucoup à apprendre, pourriez vous écrire moins de 140 caractères '\n 's\\'il vous plait ?')\n else:\n keyboardEntry = False\n # Create triples from user phrase to match with rdf graph\n triples = createTripleSpacyLefff(action, \"\")\n for triple in triples:\n triple = formatTriple(triple)\n # Request rdf graph to check if triple match with known triple and get job name associate\n results, usedTriples = requestJobName(triple[1], triple[2], candidate.usedTriples)\n candidate.usedTriples.append(usedTriples)\n # For each job name found, increment score\n for result in results:\n if result in jobsPointsCount:\n jobsPointsCount[result] += 1\n else:\n jobsPointsCount[result] = 1\n topPoints = []\n while len(topPoints) < 3 and len(jobsPointsCount) > 0:\n bestScore = -1\n bestJob = \"\"\n for job in jobsPointsCount:\n if jobsPointsCount[job] > bestScore:\n bestJob = job\n bestScore = jobsPointsCount[job]\n del jobsPointsCount[bestJob]\n topPoints.append((bestJob, bestScore))\n\n print(topPoints)\n\n formCount = 0\n noMoreProposals = False\n while not noMoreProposals and len(topPoints) >= 2 and topPoints[1][1] >= (topPoints[0][1] * 0.5) and \\\n topPoints[0][1] < 3 and formCount < 3:\n proposals = []\n for index, job in enumerate(topPoints):\n action = requestActionFromJob(job[0], candidate.usedTriples)\n if action:\n proposals.append(str(index + 1) + \" : \" + action[1] + \" \" + action[2])\n if proposals:\n print(\"\\nQue préférez-vous entre les propositions suivantes ?\")\n for prop in proposals:\n print(prop)\n else:\n noMoreProposals = True\n selectedIndex = int(input()) - 1\n if len(topPoints) > selectedIndex >= 0:\n topPoints[selectedIndex] = (topPoints[selectedIndex][0], topPoints[selectedIndex][1] + 1)\n formCount += 1\n print(topPoints)\n return topPoints", "def test_recommendations_single(self):\n parser = ParlaiParser(False, False)\n parser.add_argument(\n '-bs',\n '--batchsize',\n default=1,\n type=int,\n help='batch size for minibatch training schemes',\n recommended=1337,\n )\n parser.parse_args([])\n help_str = parser.format_help()\n assert 'recommended:' in help_str\n assert '1337' in help_str", "def test_job_title(self):\n inv_search = 'title:engineer not title:programmer'\n spi_search = 'find job 
engineer not position programmer'\n self._compare_searches(inv_search, spi_search)", "async def question(self, ctx, *, args=None):\n if not args:\n msg = f\"Type `;q <question text>` to make a question, or do `;help q`. For now, here's the questions list:\"\n await hf.safe_send(ctx, msg)\n await ctx.invoke(self.question_list)\n return\n args = args.split(' ')\n\n if len(args) == 2: # in case someone accidentally writes ;q 1 a instead of ;q a 1\n try:\n index = int(args[0])\n if args[1] != 'a':\n raise ValueError\n except ValueError:\n pass\n else:\n await ctx.invoke(self.answer, args=args[0])\n return\n\n try: # there is definitely some text in the arguments\n target_message = await ctx.channel.fetch_message(int(args[0])) # this will work if the first arg is an ID\n await ctx.message.add_reaction('⤴')\n if len(args) == 1:\n title = target_message.content # if there was no text after the ID\n else:\n title = ' '.join(args[1:]) # if there was some text after the ID\n except (discord.errors.NotFound, ValueError): # no ID cited in the args\n target_message = ctx.message # use the current message as the question link\n title = ' '.join(args) # turn all of args into the title\n\n await self.add_question(ctx, target_message, title)", "async def Suggestion(self, ctx, *, sug:str=None):\r\n\t\tif not sug:\t\r\n\t\t\treturn await ctx.send('No Suggestions given')\r\n\r\n\t\tif \tself.settings.BotConfig('SuggestionChannel') != 0:\r\n\t\t\tch = self.bot.get_channel(self.settings.BotConfig('SuggestionChannel'))\r\n\t\t\tif ctx.author.top_role.colour:\r\n\t\t\t\tcol = ctx.author.top_role.colour\r\n\t\t\telse:\r\n\t\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\t\tembed=discord.Embed(title=\"Suggestion\", description=f\"{sug}\", color=col)\r\n\t\t\tembed.set_footer(text=f\"Server: {ctx.guild} || User: {ctx.author}\")\r\n\t\t\tawait ctx.send('I have sent Suggestion')\r\n\t\t\tawait ch.send(embed=embed)\r\n\t\telse:\r\n\t\t\tawait ctx.send('No Suggestion channel found')", "def handle(self, *args, **options):\n\n if options[\"organisms\"]:\n organism_names = options[\"organisms\"].split(\",\")\n organisms = Organism.objects.filter(name__in=organism_names)\n else:\n organisms = Organism.objects.all()\n\n for organism in organisms:\n dispatch_qn_job_if_eligible(organism)", "def find_paths(documents: List[str], question: str, candidate: str,\n style='wikihop') -> Optional[List]:\n sentlimit = 1\n nearest_only = False\n d = process_data(documents, question, candidate)\n\n doc_ners = d['docners']\n doc_postags = d['docpostags']\n doc_sents = d['docsents']\n\n qpos = d[\"qpos\"]\n qner = d[\"qner\"]\n qlemma = d['qlemma']\n rel = qlemma[0]\n entity = ' '.join(qlemma[1:]).lower()\n candidates = []\n orig_candidates = [d['candidate']]\n for ctoks in orig_candidates:\n sctoks = [stemmer.stem(ca) for ca in ctoks]\n if sctoks in candidates:\n candidates.append(ctoks)\n else:\n candidates.append(sctoks)\n candidates = [' '.join(cand) for cand in candidates]\n candpos = [d['cpos']]\n candner = [d['cner']]\n\n doc_sents_lemma = lemmatize_docsents(doc_sents, stem)\n\n if style.strip().lower() == \"wikihop\":\n pf = PathFinder(\"qid\", doc_sents_lemma,\n entity, rel,\n candidates,\n answer=None,\n sentlimit=sentlimit,\n nearest_only=nearest_only)\n else:\n pf = ObqaPathFinder(\"qid\", doc_sents_lemma,\n qlemma, qpos, qner,\n candidates, candpos, candner,\n answer=None, sentlimit=sentlimit,\n nearest_only=nearest_only)\n\n paths = pf.get_paths(doc_ners, doc_postags)\n if len(paths) == 0:\n print(\"No Paths Found !!\")\n 
return None\n # pathdict = {\"id\": \"qid\", \"pathlist\": paths[list(paths.keys())[0]]}\n return paths[list(paths.keys())[0]]", "def check_directed_scan_job(self, label: str, job: 'JobAdapter'):\n if job.job_status[1]['status'] == 'done':\n xyz = parser.parse_geometry(path=job.local_path_to_output_file)\n is_isomorphic = self.species_dict[label].check_xyz_isomorphism(xyz=xyz, verbose=False)\n for rotor_dict in self.species_dict[label].rotors_dict.values():\n if rotor_dict['pivots'] == job.pivots:\n key = tuple(f'{dihedral:.2f}' for dihedral in job.dihedrals)\n rotor_dict['directed_scan'][key] = {'energy': parser.parse_e_elect(\n path=job.local_path_to_output_file),\n 'xyz': xyz,\n 'is_isomorphic': is_isomorphic,\n 'trsh': job.ess_trsh_methods,\n }\n else:\n self.troubleshoot_ess(label=label,\n job=job,\n level_of_theory=self.scan_level)", "def google_suggest(self, callback, who, arg, store=True):\n\t\t\n sugs = self.get_xml('http://google.com/complete/search', {'output':'toolbar', 'q': arg})\n\n if sugs is not None:\n try:\n sugs = [x[0].get('data') for x in sugs]\n except Exception, e:\n print \"XML error with Google Suggest: %s\" % e\n\t\t\t\n suggestions = self.remove_lyrics(sugs)\n random_sug = choice(suggestions)\n\t\t\t\n # Same string as we started with - roll again\n if random_sug == arg:\n try:\n suggestions.pop(suggestions.index(random_sug))\n except:\n pass\n random_sug = choice(suggestions)\n\t\t\t\t\n if random_sug is not None:\n if store:\n self.store_suggestion(who, arg)\n random_sug.strip('')\n random_sug.strip('\\r')\n w = random_sug.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if '?' not in w[-1:]:\n random_sug = random_sug + '?'\n return random_sug", "def advancement(self, name_parts: ResourceIdentifier, display: Json = None, parent: str = None, criteria: Dict[str, Dict[str, Json]] = None, requirements: Sequence[Sequence[str]] = None, rewards: Dict[str, Json] = None):\n res = utils.resource_location(self.domain, name_parts)\n if requirements is None or requirements == 'or':\n requirements = [[k for k in criteria.keys()]]\n elif requirements == 'and':\n requirements = [[k] for k in criteria.keys()]\n self.write((*self.resource_dir, 'data', res.domain, 'advancements', res.path), {\n 'parent': parent,\n 'criteria': criteria,\n 'display': display,\n 'requirements': requirements,\n 'rewards': rewards\n })", "def waStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE FOR TASK 2 ***\"\n\n priorityFunc = lambda x: x[2] + 2*heuristic(x[0], problem)\n\n # initialize a priority queue\n open = util.PriorityQueue()\n closed = []\n consistencyCheck = []\n\n # Retrieve the init state\n init = (problem.getStartState(), ['Stop'], 0)\n open.push(init, priorityFunc(init))\n while not open.isEmpty():\n currNode = open.pop()\n currState = currNode[0]\n currPath = currNode[1]\n currPathCost = currNode[2]\n\n h0 = heuristic(currState, problem)\n\n if problem.isGoalState(currState):\n print(\"consistent? 
:\", consistencyCheck.count(True) == len(consistencyCheck))\n return currPath[1:]\n else:\n closed.append(currState)\n successors = problem.getSuccessors(currState)\n\n if len(successors) > 0:\n for each in successors:\n newState = each[0]\n newPathCost = currPathCost + each[2]\n h1 = heuristic(newState, problem)\n consistencyCheck.append(h0-h1 <= 1)\n if newState not in closed:\n temp = (each[0], currPath + [each[1]], newPathCost)\n open.update(temp, priorityFunc(temp))\n\n return False", "def pickupSearch(self):\n self.__searchJob = self.loadSavedHyperSearchJob(\n permWorkDir=self._options[\"permWorkDir\"],\n outputLabel=self._options[\"outputLabel\"])\n\n\n self.monitorSearchJob()", "def approve_lyrics():\n pass", "def _label_path_args_required(path, endpoint_params, indexed_parameters):\n # Find and clean all path args.\n path_split = path.split(\"/\")\n path_args = []\n for path_element in path_split:\n if path_element.startswith(\"{\"):\n path_args.append(path_element[1: -1])\n\n positional = endpoint_params['positional']\n for i in range(max(len(list(path_args)), len(list(positional)))):\n # If this path arg for this base route hasn't been seen, add it to possible args.\n if i >= len(positional):\n argument = path_args[i]\n argrequired = indexed_parameters[argument]['req']\n arg_params = indexed_parameters[argument]\n positional.append({\n 'argument': argument,\n 'required': argrequired and not endpoint_params['seen'],\n 'required_for': [], # see explanation below\n 'description': arg_params.get('description', \"\"),\n 'type': arg_params.get('type', None),\n 'format': arg_params.get('format', None),\n 'pattern': arg_params.get('pattern', None)\n })\n # If there are more args that weren't seen on this base route, mark then not\n # required because they're not used in every endpoint.\n elif i >= len(path_args):\n positional[i]['required'] = False\n\n # If the arg is required on this endpoint, add this endpoint to the list of\n # endpoints for which this arg is needed.\n if i < len(path_args) and indexed_parameters[path_args[i]]['req']:\n positional[i]['required_for'].append(path)", "def execute_task(self, *args):\n item, key = args\n from flankers.textsemantics import TextSemantics\n if not (item.title == '' and item.abstract == ''):\n # if item is not a media or a link from Twitter\n # it is or a feed or a tweet\n text = item.abstract if len(item.abstract) != 0 else item.title\n text = text[:1799] if len(text) >= 1800 else text\n if Indexer.query().filter(Indexer.webres == key).count() == 0:\n semantics = TextSemantics(text)\n labels = semantics.find_related_concepts()\n for l in labels:\n index = Indexer(keyword=l.strip(), webres=key)\n index.put()\n print \"indexing stored: \" + item.url + \">\" + l\n else:\n raise Exception(\"storeIndexer(): Resource already indexed\")", "def question_new_search():", "def astar_search(problem, h=None, display=False):\n\n\th = memoize(h or problem.h, 'h')\n\treturn best_first_graph_search(problem, lambda n: n.path_cost + h(n), display)", "def query_get_song_recommendation(songtitle, artist, root):\n for child in root:\n if (song_information.get_songtitle(child) == songtitle\n and song_information.get_artist(child) == artist):\n song = child\n else:\n answer = (\"Sorry, '\" + songtitle + \"' by \" + artist\n + \"could not be found in this corpus\")\n similar_songs = find_similar_songs(song, root)\n if len(similar_songs) > 0:\n answer = (\"If you like '\" + songtitle + \"' by \" + artist\n + \", you might like \" + \", \".join(similar_songs))\n 
else:\n answer = (\"Sorry, there is no similar song to '\" + songtitle + \"' by \"\n + artist + \" in this corpus\")\n return answer", "def taketurn(self):\n # get my options from the game\n opts = self.game.options()\n rec_opt = self._primestrat.recommend(opts, self.board)\n if rec_opt is not None:\n self.implementstrategy(rec_opt)\n else:\n super().taketurn()", "def search(self):\n if self.pruning == 'mpp':\n while not self.frontier.empty():\n path = self.frontier.pop()\n if path.end() not in self.explored:\n self.display(2, \"Expanding:\", path,\n \"(cost:\", path.cost, \")\")\n self.explored.add(path.end())\n self.num_expanded += 1\n if self.problem.is_goal(path.end()):\n self.display(1, self.num_expanded, \"paths have been expanded and\",\n len(self.frontier.frontierpq), \"paths remain in the frontier\")\n return path\n else:\n neighs = self.problem.neighbors(path.end())\n for arc in neighs:\n self.add_to_frontier(Path(path, arc))\n self.display(3, \"Frontier:\", self.frontier)\n elif self.pruning == 'cycle':\n while not self.frontier.empty():\n path = self.frontier.pop()\n if path.end() not in path.initial_nodes(): # new part for cycle pruning\n self.display(2, \"Expanding:\", path,\n \"(cost:\", path.cost, \")\")\n self.num_expanded += 1\n if self.problem.is_goal(path.end()):\n self.display(1, self.num_expanded, \"paths have been expanded and\",\n len(self.frontier.frontierpq), \"paths remain in the frontier\")\n return path\n else:\n neighs = self.problem.neighbors(path.end())\n for arc in neighs:\n self.add_to_frontier(Path(path, arc))\n self.display(3, \"Frontier:\", self.frontier)\n\n else: # no pruning\n while not self.frontier.empty() and self.num_expanded < self.max_expanded:\n path = self.frontier.pop()\n self.display(2, \"Expanding:\", path, \"(cost:\", path.cost, \")\")\n self.num_expanded += 1\n if self.problem.is_goal(path.end()):\n self.display(1, self.num_expanded, \"paths have been expanded and\",\n len(self.frontier.frontierpq), \"paths remain in the frontier\")\n return path\n else:\n neighs = self.problem.neighbors(path.end())\n for arc in neighs:\n self.add_to_frontier(Path(path, arc))\n self.display(3, \"Frontier:\", self.frontier)\n\n self.display(1, \"Total of\", self.frontier.frontier_index,\n \"paths expanded.\")", "def search_for_plans(start, exits, pig_neighbours, moves, state, actions):\n goals = exits + pig_neighbours\n paths, _ = GamePlanner.astar_multi_search(start=start,\n goals=goals,\n state=state,\n actions=actions)\n plans = GamePlanner.paths_to_plans(paths=paths,\n exits=exits,\n pig_neighbours=pig_neighbours,\n moves=moves)\n return plans", "def get_suggestion(artist_name):\n return 'do some magic!'", "def boldlyGo(self, edges):\n\t\t\n\t\t# gets list of edges\n\t\t# runs through and calculates straighline lengths for all of them\n\t\t\n\t\t# chooses the one with the least cost - probably just straightline distance\n\t\t\t#in the future, we could run Astar on all of them and choose the one with best path\n\t\t\t# or have a history which picks the biggest one eventually\n\t\t# sends that as a goal to astar, lets robot move there and report it is done the move", "def search(self):\n while not self.empty_frontier():\n path = self.frontier.pop()\n self.display(2, \"Expanding:\",path,\"(cost:\",path.cost,\")\")\n self.num_expanded += 1\n if self.problem.is_goal(path.end()): # solution found\n self.display(1, self.num_expanded, \"paths have been expanded and\",\n len(self.frontier), \"paths remain in the frontier\")\n self.solution = path # store the 
solution found\n return path\n else:\n neighs = self.problem.neighbors(path.end())\n self.display(3,\"Neighbors are\", neighs)\n for arc in reversed(list(neighs)):\n self.add_to_frontier(Path(path,arc))\n self.display(3,\"Frontier:\",self.frontier)\n self.display(1,\"No (more) solutions. Total of\",\n self.num_expanded,\"paths expanded.\")", "async def activity(self, ctx, activity:str):\n if activity.lower() not in recommendations.keys():\n await ctx.send(f\"Sorry, I don't have a playlist for that! Maybe you could try `?activity {random.choice(list(recommendations.keys()))}`.\")\n else:\n\n playlists = ''\n \n for i in range(len(recommendations[activity.lower()])): \n playlists += f'\\n[Playlist {i+1}]({recommendations[activity.lower()][i]})'\n\n embed = discord.Embed(title=f\"A Sound Mood's Recommendations for {activity.lower()}\",\n description=playlists,\n color=random.randint(0, 0xFFFFFF))\n\n embed.set_footer(text=f\"Requested by @{ctx.message.author}\", icon_url=ctx.message.author.avatar_url)\n\n await ctx.send('', embed=embed)", "def executePathSim(env,robot,waypts):\n\n traj = RaveCreateTrajectory(env,'')\n traj.Init(robot.GetActiveConfigurationSpecification())\n for i in range(len(waypts)):\n traj.Insert(i, np.append(waypts[i], np.zeros(3)))\n robot.ExecutePath(traj)", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def test_make_pathways(self):\n basic_test_runner(self, 'pathways')", "def recommend_a_command(self):\n if not self._disable_aladdin_service():\n self._set_aladdin_recommendations()\n\n recommend_command = ''\n if self.help_examples and self.aladdin_recommendations:\n # all the recommended commands from help examples and aladdin\n all_commands = self.help_examples + self.aladdin_recommendations\n all_commands.sort(key=len)\n\n filtered_commands = []\n filtered_choices = []\n target = ''.join(self.parameters)\n example_command_name = self.help_examples[0].split(' -')[0]\n\n for command in all_commands:\n # keep only the commands which begin with a same command name with examples\n if command.startswith(example_command_name):\n parameters = self._get_parameter_list(command)\n normalized_parameters = self._normalize_parameters(parameters)\n filtered_choices.append(''.join(normalized_parameters))\n filtered_commands.append(command)\n\n # sort the commands by argument matches\n candidates = difflib.get_close_matches(target, filtered_choices, cutoff=0)\n\n if candidates:\n index = filtered_choices.index(candidates[0])\n recommend_command = filtered_commands[index]\n\n # fallback to use the first recommended command from Aladdin\n elif self.aladdin_recommendations:\n recommend_command = self.aladdin_recommendations[0]\n\n # set the recommened command into Telemetry\n self._set_recommended_command_to_telemetry(recommend_command)\n\n return recommend_command", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n frontier = util.PriorityQueue() # in heap stored as ( cost,priority,location)\n frontier.push(start, 0)\n explored = []\n\n location = 0 # to remember which successor part im accessing\n action = 1\n heap_location = 2\n cost = 2\n\n history = []\n total_cost = 0 # need something to process total path cost\n\n while not frontier.isEmpty():\n\n current_position = frontier.pop()\n if problem.isGoalState(current_position):\n break\n if 
current_position not in explored:\n explored.append(current_position)\n else:\n continue\n\n for path in problem.getSuccessors(current_position):\n # if path[location] not in explored: # hasn't been expanded from\n if path[location] not in [item[heap_location] for item in frontier.heap]: # if not in frontier\n # print(\"valid successor (no frontier)\", each_successor[location])\n\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n heuristic_cost = total_cost + heuristic(path[location], problem)\n frontier.push(path[location], path[cost] + total_cost + heuristic_cost)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n else:\n # print(\"in frontier\")\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n frontier.update(path[location], total_cost + path[cost])\n # should prob add something that goes through history and wipes old entry for that point\n for entry in history:\n if entry['To'] == path[location] and entry['Cost'] > total_cost + path[cost]:\n history.remove(entry)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n break\n while not problem.isGoalState(history[-1]['To']): # loop removes last couple of movements which don't lead to goal\n history.remove(history[-1])\n\n x = len(history)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if history[x - 1]['From'] != history[x - 2]['To']: # starts from goal and works backwards\n history.remove(history[x - 2])\n x = len(history)\n else:\n x -= 1\n\n return [path['By'] for path in history]", "def perform_re_qualify(responder, options):\n match = options['<match-id>']\n tla = options['<tla>']\n scores.re_qualify(match, tla)\n responder('Re-qualified {0} in match {1}'.format(tla, match))", "def route_accepted(self, prefix, next_hop, as_path):", "def suggest(suggestions):\n weight_sum = sum(suggestions.values())\n prob_ranges = []\n lower_bound = 0.0\n\n # generate probability ranges\n for task, weight in suggestions.iteritems():\n upper_bound = lower_bound + weight / weight_sum\n prob_ranges.append((task, (lower_bound, upper_bound)))\n\n # update lower bound\n lower_bound = upper_bound\n\n rand_number = random.random()\n\n for task, (low, high) in prob_ranges:\n if low <= rand_number < high:\n return task\n\n raise AssertionError('Should not be here. 
O_O');", "def suggestion(self):\n raise NotImplementedError()", "def optimal_route(graph,homes,source):\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = range(1,number_of_homes+1)\n optimal_cost = np.Inf\n optimal_dropoffs = dict()\n optimal_route = []\n optimal_num_clusters = 0\n\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n if cost < optimal_cost:\n optimal_cost = cost\n optimal_route = route \n optimal_dropoffs = dropoffs\n optimal_num_clusters = num_clusters\n\n return optimal_cost, optimal_dropoffs, optimal_route, optimal_num_clusters", "def askSentence(self, sentence, goals, context, theta, cache, renamer, askOne, results, alreadyAsking):\n varRenamedSentence = VariableRenamer().rename(sentence)\n if not self.fixedAnswerCache.contains(varRenamedSentence) and not cache.contains(varRenamedSentence):\n # Prevent infinite loops on certain recursive queries.\n if alreadyAsking.contains(sentence):\n return False\n alreadyAsking.add(sentence)\n candidates.addAll(self.knowledgeBase.fetch(sentence))\n candidates.addAll(context.fetch(sentence))\n for rule in candidates:\n if thetaPrime != None:\n while i < r.arity():\n sentenceGoals.add(r.get(i))\n i += 1\n isConstant &= self.ask(sentenceGoals, context, theta.compose(thetaPrime), cache, renamer, False, sentenceResults, alreadyAsking)\n if isConstant:\n self.fixedAnswerCache.put(sentence, varRenamedSentence, sentenceResults)\n else:\n cache.put(sentence, varRenamedSentence, sentenceResults)\n alreadyAsking.remove(sentence)\n cachedResults = self.fixedAnswerCache.get(sentence, varRenamedSentence)\n isConstant = (cachedResults != None)\n if cachedResults == None:\n cachedResults = cache.get(sentence, varRenamedSentence)\n for thetaPrime in cachedResults:\n isConstant &= self.ask(goals, context, theta.compose(thetaPrime), cache, renamer, askOne, results, alreadyAsking)\n if askOne and (len(results) > 0):\n break\n return isConstant", "def shortestPath(haystack, needle):\n if needle == haystack:\n return 0\n hyponyms = haystack.hyponyms()\n if len(hyponyms) > 0:\n return 1 + min([shortestPath(hypo, needle) for hypo in hyponyms])\n else:\n return 1000", "def path_search(start, goal):\n if start == goal:\n return [start]\n explored = {}\n explored[start] = 2\n queue = [ [start, ('', 0)] ]\n bestPath = [start, ('', 1110)]\n bestPathList = []\n total = 0\n costSearchingNow = 0\n while queue:\n total += 1\n # if total>40000:\n # return -1,' fail'\n if queue[0][-1][-1] != costSearchingNow:\n \tqueue.sort(key=lambda path:path[-1][-1])\n \n path = queue.pop(0)\n costSearchingNow = path[-1][-1]\n s = path[-2]\n # print len(queue)\n # cout(path)\n # print queue\n\n if s == goal:\n bestPath = path\n # print 'Find one best path ↑'\n bestPathList.append(bestPath)\n if len(queue)==0:\n # print '~~~~',total,getString \n return total,getString(bestPathList,start,goal)\n else:\n if path[-1][-1] > bestPath[-1][-1]:\n return total,getString(bestPathList,start,goal)\n\n linenum, changetimes = path[-1]\n \n for state, actions in 
sh_subway[s].items():\n for action in actions:\n linechange = changetimes + 1\n if linenum != action:\n linechange += changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if (path2[-1][-1]-len(path2)/2-1)/changePunishment <= 4:\n if len(path2)>6:\n if (path2[-2] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-6]=='马陆') or (path2[-6] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-2]=='马陆') or (path2[-2] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-6]=='水城路') or (path2[-6] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-2]=='水城路'):\n linechange -= changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if path2.count(state)<=1:\n if state not in explored:\n explored[state] = linechange\n queue.append(path2)\n \n elif linechange <= explored[state]+changePunishment: # 考虑马上到终点\n \n explored[state] = linechange\n queue.append(path2)\n\n\n return total,getString(bestPathList,start,goal)", "def required_slots(tracker):\n # type: () -> List[Text]\n\n return [\"suggestion\"]", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n print(\"\\t===========================================\")\n print(\"\\t Processing ... Please Wait for 11 seconds!\")\n print(\"\\t===========================================\")\n startState = problem.getStartState();\n fringe = util.PriorityQueue()\n costs = 0 \n visitedNodes = []\n actions = [] \n if ( problem.isGoalState(startState) == True):\n return actions\n else:\n newFringeItem = (startState , actions , costs)\n fringe.push(newFringeItem,costs)\n while(fringe.isEmpty() == False ):\n #f(x) = h(x) + g(x)\n currentState , actions , costs = fringe.pop()\n if ( problem.isGoalState(currentState) == True):\n #print(\"Final Actions : \" + str(actions)) \n \"\"\"\n If you want the Analyzer Class analizes the chosen path and heuristic , \n Uncomment these two lines of code otherwise leave it be commented cause it increases the run time by 2 seconds.\n \"\"\"\n \"\"\"Start : Analyzer Properties \"\"\"\n #analyzer = Analyzer(problem,actions)\n #analyzer.start()\n \"\"\"End : Analyzer Properties \"\"\"\n return actions\n else:\n if(not currentState in visitedNodes ):\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n state , action , stateCost = node\n heuristicAmount = heuristic(state , problem)\n newFringeItem = state , actions + [action] , costs + stateCost\n priority = costs + heuristicAmount\n fringe.push( newFringeItem , priority )\n \n util.raiseNotDefined()", "def _suggest(self, trial_id: int) -> Optional[TrialSuggestion]:\n raise NotImplementedError", "def run_q_agent(policy='ε–greedy', save=False):\n\tagent = Q_Agent()\n\tall_iterations, all_rewards, step_count = agent.train(env, iter_n=1000, policy=policy)\n\tplot_reward(all_iterations, all_rewards)\n\tplot_steps(all_iterations, step_count)\n\t# print(\"best route is {}\".format(agent.test(env)))\n\t# if save:\n\t# \tsave_results(all_iterations, all_rewards, step_count)\n\t# optimum_route = agent.test(env, stations)\n\t# print(optimum_route)", "def run(self):\n\t\tself.endpoint = self.from_reference.coord\t# the endpoint of every job is the thing ordering this job\n\n\t\tif to_do == \"carry\":\t# carry sth. 
from A to B\n\t\t\tpass\n\t\telif to_do == \"grade\":\t# grade an area for a building\n\t\t\tunit = self.searchUnit(self.endpoint, \"grader\")\n\t\t\t\n\t\telif to_do == \"build\":\t# build a building\n\t\t\tunit = self.searchUnit(self.endpoint, \"builder\")\n\t\t#TODO: if no settler fits to the building to be seized, one settler has to learn the job\n\t\telif to_do == \"learn a job\":\t# learn a job like lumberjack, butcher ... also for the soldier training\n\t\t\tpass\n\t\telif to_do == \"seize a building\":\t# civil and also military buildings\n\t\t\tpass", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n\n priorityFunc = lambda x: x[2] + 1*heuristic(x[0], problem)\n\n # initialize a priority queue\n open = util.PriorityQueue()\n closed = []\n\n # Retrieve the init state\n init = (problem.getStartState(), ['Stop'], 0)\n open.push(init, priorityFunc(init))\n while not open.isEmpty():\n currNode = open.pop()\n currState = currNode[0]\n currPath = currNode[1]\n currPathCost = currNode[2]\n if problem.isGoalState(currState):\n return currPath[1:]\n else:\n closed.append(currState)\n successors = problem.getSuccessors(currState)\n\n if len(successors) > 0:\n for each in successors:\n newPos = each[0]\n newPathCost = currPathCost + each[2]\n\n if newPos not in closed:\n temp = (each[0], currPath + [each[1]], newPathCost)\n open.update(temp, priorityFunc(temp))\n\n return False", "def ask_0(self, goals, context, theta, cache, renamer, askOne, results, alreadyAsking):\n if len(goals) == 0:\n results.add(theta)\n return True\n else:\n if isinstance(qPrime, (GdlDistinct, )):\n isConstant = askDistinct(distinct, goals, context, theta, cache, renamer, askOne, results, alreadyAsking)\n elif isinstance(qPrime, (GdlNot, )):\n isConstant = askNot(not_, goals, context, theta, cache, renamer, askOne, results, alreadyAsking)\n elif isinstance(qPrime, (GdlOr, )):\n isConstant = askOr(or_, goals, context, theta, cache, renamer, askOne, results, alreadyAsking)\n else:\n isConstant = askSentence(sentence, goals, context, theta, cache, renamer, askOne, results, alreadyAsking)\n goals.addFirst(literal)\n return isConstant", "def airbnb_commands(synthetic=True, model_search=False, folder='exp_rerr_airbnb',\n evaluation_method=EvaluationMethod.RELATIVE_ERROR):\n\n experiment_commands = []\n\n eval_setups, synthetic_conf = determine_setups(evaluation_method, synthetic)\n\n tables = [('listings', ['listings'], completion_command),\n ('hosts', ['hosts'], completion_command)]\n\n attributes = {\n 'listings': [\n ('room_type_1',\n '--removal_method categorical_prob_bias --removal_attr listings.room_type --removal_attr_values 1'),\n ('property_type_3',\n '--removal_method categorical_prob_bias --removal_attr listings.property_type --removal_attr_values 3'),\n ('price', '--removal_method bias --removal_attr listings.price')\n ],\n 'hosts': [\n ('host_since', '--removal_method bias --removal_attr hosts.host_since'),\n ('host_response_rate', '--removal_method bias --removal_attr hosts.host_response_rate')\n ],\n }\n\n if model_search:\n model_selection_strategies, models, setups = model_search_setup(tables)\n else:\n models = [('ar_ep30', 'ar_ep30'), ('ar_ep20', 'ar_ep20'), ('ssar_ep30', 'ssar_ep30'),\n ('ssar_ep20_1st', 'ssar_ep20_1st')]\n model_selection_strategies = [('', 'none')]\n setups = {'listings': [('fp_h', '--fixed_completion_path neighborhoods listings'),\n ('fp_n', '--fixed_completion_path hosts listings')],\n 'hosts': [('', '')]\n }\n\n for 
removal_table, completion_tables, path_function in tables:\n experiment_commands += path_function(attributes[removal_table], setups[removal_table], synthetic_conf,\n removal_table,\n completion_tables, model_selection_strategies, models, folder, eval_setups)\n\n experiment_commands += airbnb_join_aqp_commands(synthetic_conf, evaluation_method, attributes, eval_setups,\n model_selection_strategies, models, setups, folder)\n\n return experiment_commands", "def original_solver(state, agent, verbose=3):\n tasks = state.goals[agent]\n if verbose>0: print('** pyhop, verbose={}: **\\n state = {}\\n tasks = {}'.format(verbose, state.__name__, tasks))\n result = seek_plan(state,tasks,[],0,verbose)\n if verbose>0: print('** result =',result,'\\n')\n return result", "def spiderbotAdd(spider, crawlerid, searchterm, fullink):\n sclogic.spiderbotAdd(spider, crawlerid, searchterm, fullink) \n # call as:\n # sc-cli.py spiderbotadd hardverapro 3 -f\"testlink\"\n # sc-cli.py spiderbotadd hardverapro 3 -s\"RX470\"", "def goal(*args, goal: Union[AnyStr, List[AnyStr], bool]=\"\", index: bool=True,\n useTransformAsGoal: bool=True, weight: float=0.0, q=True, query=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def handle(self, *args, **options):\n # used to test the speed of determining the closest\n # trajectory\n if options[\"benchmark\"]:\n self.benchmark(**options)\n\n # finds the closest trajectory\n else:\n file_name, _ = self.find_closest_trajectory(**options)\n return \"solved_trajectories/\" + file_name", "def suggest(ctx, request: str):\n replacer = Replacer(ctx.obj.get('GKG_API_KEY'))\n suggestion = replacer.suggest(request)\n if suggestion == request:\n logger.info(\n 'Result from Google Knowledge Graph equals input: \"{0}\"', request,\n )\n elif suggestion:\n logger.info('Result from Google Knowledge Graph: \"{0}\"', suggestion)\n else:\n logger.info(\n 'No results in the Google Knowledge Graph for: \"{0}\"', request,\n )", "def get_recommendations(df,song_title, similarity_score, num_recommends = 5):\r\n indices = pd.Series(df.index, index = df['track_name']).drop_duplicates()\r\n idx = indices[song_title]\r\n sim_scores = list(enumerate(similarity_score[idx]))\r\n sim_scores = sorted(sim_scores, key = lambda x: x[1],reverse = True)\r\n top_scores = sim_scores[1:num_recommends+1]\r\n song_indices = [i[0] for i in top_scores]\r\n return df[\"track_name\"].iloc[song_indices]", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from game import Actions\n\n waiting_list = util.PriorityQueue()\n COSTS = {}\n start_state = problem.getStartState()\n COSTS[start_state] = 0\n waiting_list.push(start_state,0)\n parents = {}\n \n while not waiting_list.isEmpty():\n q_state = waiting_list.pop()\n if problem.isGoalState(q_state):\n target_state = q_state\n break\n for child in problem.getSuccessors(q_state):\n n_cost = COSTS[q_state] + child[2]\n \n if child[0] not in COSTS or n_cost < COSTS[q_state]:\n COSTS[child[0]] = n_cost\n prior = n_cost + heuristic(child[0], problem)\n waiting_list.push(child[0], prior)\n parents[child[0]] = q_state\n\n sequence = []\n prev_state = target_state\n while target_state in parents.keys():\n target_state = parents[target_state]\n direction = Actions.vectorToDirection([prev_state[0] - target_state[0], prev_state[1] - target_state[1]])\n prev_state = target_state\n sequence.append(direction)\n \n return sequence[::-1]", "def train(self, reward: str):\r\n pass", "def recommended_songs(user_input, features_df, knn_spotify, filepath):\n # making 
user input lower to not worry about capitalizations\n user_input = user_input.lower()\n key = the_key(filepath)\n # find what name_artist combo contains the user_input:\n selected_song = key.loc[key.str.contains(user_input)]\n # search the key df and return the song id\n song_id = selected_song.index.tolist()\n # feed the song id into the model\n song_row = features_df.loc[song_id, :]\n # model finds the NN and gives you back song id\n neigh_dist, neigh_index = knn_spotify.kneighbors(song_row)\n # random nn\n index = neigh_index.flat[0:10].tolist()\n # song_index = random.choice(index)\n # converting list to df for easier access\n recom_songs = key.iloc[index].to_frame()\n # list of songs with no ID and formatted as title\n recom_songs_list = recom_songs['name_artist'].to_list()\n for i in range(len(recom_songs_list)):\n recom_songs_list[i] = recom_songs_list[i].title()\n return recom_songs_list", "def user_suggested(username):\n raise NotImplementedError()", "def a_star_search(problem, heuristic=null_heuristic):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n \"\"\"\r\n it does worse in corners problems, to work better needs heavy huristic, not worth in\r\n in corners problem expandend nodes grow expo\r\n all others are better\r\n counter = 0 # in some situation it helps, in some it doesnt\r\n #print(stat[0].pieces)\r\n for x in stat[0].pieces[0]:\r\n if x:\r\n counter += 1\r\n \"\"\"\r\n counter = 0\r\n fringe.push(stat[0], stat[2] + counter + heuristic(stat[0], problem)) # problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n if state == problem.get_start_state():\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def change_way(coins, opponentLocation, player_location):\n global best_weight, best_path\n dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n coins_to_search = get_n_shortest(5, coins, player_location, dists_matrix)\n ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n for c in coins_to_search:\n if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n coins_to_search.remove(c)\n break\n best_weight = float(\"inf\")\n best_path = []\n api.debug(coins_to_search)\n exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n meta_route = [player_location] + best_path\n api.debug(meta_route)\n route = u.location_list_to_route(meta_route, route_matrix)\n \n return coins_to_search, meta_route, route, dist_matrix[player_location][meta_route[1]]", "def recommend_by_keywords(self, key_words_list=None):\n pass", "def general_search(problem, fringe):\n\n fringe.push(PQItem((problem.get_start_state(), [], 0, None))) # curr_node, action, cost, parent\n closed = set()\n\n while not fringe.isEmpty():\n i = 1\n curr = fringe.pop()\n if problem.is_goal_state(curr.get_node()):\n build_first_plan_dict(curr)\n return curr.get_action()\n elif 
is_visited_by_plan(curr.get_node()):\n return get_past_path(curr.get_action())\n elif curr.get_node() not in closed:\n successors = problem.get_successors(curr.get_node())\n for i in range(len(successors)):\n next_state, action, cost = successors[i]\n fringe.push(PQItem((next_state, curr.get_action() + [action], cost + curr.get_cost(), None)))\n closed.add(curr.get_node())\n i += 1\n return \"failed\"", "def rrt_search(self):\n self.tree.AddVertex(self.start_config)\n self.tree.AddEdge(self.start_config, self.start_config)\n\n while True:\n x_new, x_nearest = self.new_and_near()\n if x_new is None:\n # print(\"it's None\")\n continue\n # connect shortest valid edge\n # print(\"new point\", x_new)\n self.connect_to_point(x_nearest, x_new)\n\n # probabilistically check if solution found\n if self.goal_config in self.tree.vertices:\n print(\"find it\")\n path = self.planning_env.reconstruct_path(self.tree.edges, self.start_config, self.goal_config)\n if path is not None:\n return path\n\n if self.name=='rrtstar' and self.tree.samples_taken > 10:\n return []\n # # check if can connect to goal after generating max_samples\n if self.tree.samples_taken >= self.tree.max_samples:\n return []", "def rPathway(inf, reverse = False, retProteins = False, delim = \"\\t\"):\n proteins = set()\n readPathway = Pathway(dict(), dict())\n f = open(inf, \"r\")\n for line in f:\n if line.isspace():\n continue\n line = line.rstrip(\"\\r\\n\")\n pline = re.split(delim, line)\n if len(pline) == 2:\n readPathway.nodes[pline[1]] = pline[0]\n if pline[0] == \"protein\":\n proteins.update([pline[1]])\n elif len(pline) == 3:\n if reverse:\n if pline[1] not in readPathway.interactions:\n readPathway.interactions[pline[1]] = dict()\n if pline[0] not in readPathway.interactions[pline[1]]:\n readPathway.interactions[pline[1]][pline[0]] = pline[2]\n else:\n readPathway.interactions[pline[1]][pline[0]] += \";\"+pline[2]\n else:\n if pline[0] not in readPathway.interactions:\n readPathway.interactions[pline[0]] = dict()\n if pline[1] not in readPathway.interactions[pline[0]]:\n readPathway.interactions[pline[0]][pline[1]] = pline[2]\n else:\n readPathway.interactions[pline[0]][pline[1]] += \";\"+pline[2]\n else:\n print >> sys.stderr, \"ERROR: line length not 2 or 3: \\\"%s\\\"\" % (line)\n sys.exit(1)\n f.close()\n if retProteins:\n return(readPathway.nodes, readPathway.interactions, proteins)\n else:\n return(readPathway.nodes, readPathway.interactions)", "def syn_hei(num_workers, gp, acq_optimiser, anc_data):\n recommendations = [asy_ei(gp, acq_optimiser, anc_data)]\n for _ in range(1, num_workers):\n anc_data.evals_in_progress = recommendations\n recommendations.append(_halluc_ei(gp, acq_optimiser, anc_data))\n return recommendations", "def learn(self, s, a, reward, sprime, done):\n pass", "def learn(self, s, a, reward, sprime, done):\n pass", "def get_pathway(identifier, organism):\n pass", "def Option3_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n return path_cost_with_concave_function, Opt_path\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n self.G 
= self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_concave_function = 0\n return path_cost_with_concave_function, Opt_path", "def __call__(self, seq_path, result_path=None, log_path=None):\r\n raise NotImplementedError(\"Aligner is an abstract class\")", "def _send_task_objective_reminders(self, agent: Agent):\n agent_id = agent.agent_id\n if agent_id == constants.ROLE_NAMES[constants.WIZARD]:\n # Checks if wizard has used search enough so far\n if (self.turn_idx >= self.search_warning_turn) and (\n self.num_search_queries < self.search_warning_threshold\n ):\n _coordinator_send_message(\n agent, message=constants.USE_SEARCH_WARNING_MESSAGE\n )\n # Checks if wizard has selected search results enough times so far\n elif (self.turn_idx >= self.select_warning_turn) and (\n self.num_times_search_resutls_selected < self.select_warning_threshold\n ):\n _coordinator_send_message(\n agent, message=constants.USE_SEARCH_RESULTS_WARNING_MESSAGE\n )", "def enum_high_mass_shortest_paths(G, pool, use_scores=False, use_genes=False, seen_paths=None):\n if seen_paths == None:\n seen_paths = []\n unq_sorted_paths = set([])\n # in case orientation obliv. sorted path strings passed in\n for p in seen_paths:\n unq_sorted_paths.add(p)\n paths = []\n\n nodes = []\n nodes = list(G.nodes()) # creates a copy\n\n logger.info(\"Getting edge weights\")\n\n # use add_edge to assign edge weights to be 1/mass of starting node\n # TODO: only calculate these if they haven't been/need to be updated\n for e in G.edges():\n if use_genes and G.nodes[e[1]]['gene'] == True:\n G.add_edge(e[0], e[1], cost = 0.0)\n elif use_scores==True:\n G.add_edge(e[0], e[1], cost = (1.-(G.nodes[e[1]]['score']))/get_spades_base_mass(G, e[1]))\n else:\n G.add_edge(e[0], e[1], cost = (1./get_spades_base_mass(G, e[1])))\n\n logger.info(\"Getting shortest paths\")\n paths_list = []\n if pool._processes > 1 and pool._processes <= 2*len(nodes): # otherwise, run single threaded\n paths_list=Manager().list()\n pool.map(get_shortest, [[node, G, paths_list] for node in nodes])\n else:\n for node in nodes:\n get_shortest([node,G,paths_list])\n\n for path in paths_list:\n # below: create copy of path with each node as rc version\n # use as unique representation of a path and rc of its whole\n unoriented_sorted_path_str = get_unoriented_sorted_str(path)\n\n # here we avoid considering cyclic rotations of identical paths\n # by sorting their string representations\n # and comparing against the set already stored\n if unoriented_sorted_path_str not in unq_sorted_paths:\n unq_sorted_paths.add(unoriented_sorted_path_str)\n paths.append(tuple(path))\n\n return paths", "def respond(self,obs):\n if obs.timestep == 0:\n #If it's the first timestep, we have no clue. 
Since we don't even know if we are going to ask questions in the\n #future, we go ahead and init the inference engine for future use.\n self.p_obs = copy.deepcopy(obs)\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n #And set the knowledge source to inference so the next step we know where to look for in the upcoming step.\n self.knowledge.source[0] = ORIGIN.Inference\n\n #And pick a target station at random since we have to move forward.\n target_station = np.random.choice(self.tracking_stations) #pick a station at random.\n\n else:\n curr_k_id = self.knowledge.get_current_job_station_id()\n\n #Checking what knowledge we have.\n if (self.knowledge.source[curr_k_id]==ORIGIN.Answer):\n #Then we simply work on the station because we have an answer telling us that that's the station to work on.\n target_station = self.knowledge.station_order[curr_k_id]\n\n elif (self.knowledge.source[curr_k_id] == None):\n #which means we just finished a station in the last time-step. This calls for re-initalizing the inference_engine\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n target_station = np.random.choice(self.tracking_stations)\n\n elif (self.knowledge.source[curr_k_id]==ORIGIN.Inference):\n #Which means we have been working on a inference for a station.\n target_station = self.inference_engine.inference_step(self.p_obs,obs)\n self.knowledge.update_knowledge_from_inference(target_station)\n warnings.WarningMessage(\"Provision resetting inference_engine when a station is finished\")\n\n else:\n #it should never come to this.\n raise Exception(\"Some mistake around\")\n\n \"\"\"\n Okay, now that we know which station we should be headed to, we need to ensure the nitty-gritty details.\n Do we have a tool?\n If yes,\n if it matches our target station:\n destination: station\n else:\n destination: base\n else:\n destination: base\n \n Are we near our destination?\n Yes:\n Is it the base?\n Pick up the tool.\n else:\n execute work action.\n No:\n keep moving. 
\n \"\"\" \n\n if self.tool is not None:\n if self.tool == target_station:\n destination = obs.allPos[obs.stationIndices[target_station]]\n else:\n destination = global_defs.TOOL_BASE\n else:\n destination = global_defs.TOOL_BASE\n\n if utils.is_neighbor(self.pos,destination):\n if destination == global_defs.TOOL_BASE:\n #We are at the base to pick up a tool.\n desired_action = global_defs.Actions.NOOP\n self.tool = target_station\n else:\n #we are the station to work.\n desired_action = global_defs.Actions.WORK\n else:\n #Navigate to destination.\n desired_action = None\n\n obstacles = copy.deepcopy(obs.allPos).remove(self.pos)\n proposal = utils.generate_proposal(self.pos,destination,obstacles,desired_action)\n return proposal", "def reference_path_example(mode):\n config = habitat.get_config(\n config_path=\"benchmark/nav/vln_r2r.yaml\",\n overrides=[\n \"habitat.task.measurements.success.success_distance=0.1\",\n \"habitat.dataset.split=val_seen\",\n ],\n )\n with habitat.config.read_write(config):\n config.habitat.task.measurements.update(\n {\"top_down_map\": TopDownMapMeasurementConfig()}\n )\n config.habitat.task.lab_sensors.update(\n {\"heading_sensor\": HeadingSensorConfig()}\n )\n with SimpleRLEnv(config=config) as env:\n follower = ShortestPathFollower(\n env.habitat_env.sim, goal_radius=0.5, return_one_hot=False\n )\n follower.mode = mode\n print(\"Environment creation successful\")\n\n for episode in range(3):\n env.reset()\n episode_id = env.habitat_env.current_episode.episode_id\n print(\n f\"Agent stepping around inside environment. Episode id: {episode_id}\"\n )\n\n dirname = os.path.join(\n IMAGE_DIR, \"vln_reference_path_example\", mode, \"%02d\" % episode\n )\n if os.path.exists(dirname):\n shutil.rmtree(dirname)\n os.makedirs(dirname)\n\n images: List[np.ndarray] = []\n steps = 0\n reference_path = env.habitat_env.current_episode.reference_path + [\n env.habitat_env.current_episode.goals[0].position\n ]\n for point in reference_path:\n done = False\n while not done:\n best_action = follower.get_next_action(point)\n if (\n best_action is None\n or best_action == HabitatSimActions.stop\n ):\n done = True\n continue\n observations, reward, done, info = env.step(best_action)\n save_map(observations, info, images)\n steps += 1\n\n print(f\"Navigated to goal in {steps} steps.\")\n images_to_video(images, dirname, str(episode_id))", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n import math\n\n frontier = PriorityQueue()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n h = heuristic(path,problem.goal)\n if dad == None:\n self.g=0\n else:\n self.g = dad.g + heuristic(dad.path,path)\n self.cost = round(self.g + h,1)\n\n start = node(problem.getStartState(),None,'')\n frontier.push(start,start.cost)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n\n if achou == False:\n successor = node(vertex[0],path,vertex[1])\n frontier.push(successor,successor.cost)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad.path:\n actions.append(successor.action)\n successor = ant\n actions.reverse()\n return actions", "def search_again(self):\n\n response = 
input(\n \"\\nWould you like to search for something else? (Yes or No): \")\n\n while response.lower().strip() != 'yes' or response.lower().strip() != 'no':\n\n if response.lower().strip() == 'yes':\n search_method_choice = self.search_method_menu()\n self.search_tasks(search_method_choice)\n elif response.lower().strip() == \"no\":\n self.main_menu()\n else:\n response = input(\"\\nInvalid choice, please try again: \")", "def teach(self,**kwargs):\n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n try:\n number = kwargs['number']\n except:\n number = \"\"\n try:\n parent = kwargs['parent']\n except:\n parent = \"teachMenu\"\n entries = {}\n self.baxter.yes() # head nod confirm\n entries[\"Stop recording\"] = self.saveTeachPath\n self.mm.addGenericMenu(\"teach\",parent,\"Saving current path...\", entries)\n self.mm.loadMenu(\"teach\")\n self.bl.update_watch_parameters('before')\n self.baxter.br.post.record(side,str(number))", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n queue = util.PriorityQueue() # PriorityQueue for searshing the graph, priorityqueue helps to pop the element with the lowest priority (cost)\n visited = [] # cKepp track of visited nodes\n path = [] # Keep track of the path\n start =problem.getStartState() # The start node\n \n queue.push((start, path,0), 0) # we push (vertex, path , cost from parent to the vertex), \n #priority(which is the cost of getting to the vertex)\n \n while not queue.isEmpty():\n (vrtx, path, costparent) = queue.pop() \n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path] # return the actions\n visited.append(vrtx) \n for successor in problem.getSuccessors(vrtx):\n gn = successor[2]+ costparent # the real cost from root to the expanded node(successor).\n fn = gn+heuristic(successor[0], problem) # the heursitic from the expanded node to the goal node.\n queue.push((successor[0], path+[successor],gn),fn)# push the noe with f(n) as the priority element.\n \n util.raiseNotDefined()", "def construct_paths(data, relation_level_words, qald=False,goldorpred='gold'):\n abstract_question = data[goldorpred]['abstract_question'].replace('<e>', 'entity1').replace('<l>', 'literal1')\n question = ei.vocabularize(nlutils.tokenize(abstract_question))\n\n \"\"\"======\"\"\"\n question_dep = []\n if 'abstract_question_deppath' in data['gold']:\n for abstract_question_deppath_simple in data['gold']['abstract_question_deppath']:\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E0>', 'entity1')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E1>', 'entity2')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E2>', 'entity3')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L1>', 'literal1')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L2>', 'literal2')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L3>', 'literal3')\n question_dep.append([int(id_) for id_ in list(\n ei.vocabularize(nlutils.tokenize(abstract_question_deppath_simple.strip())))])\n if len(question_dep) == 0:\n question_dep.append([int(id_) for id_ in list(ei.vocabularize(nlutils.tokenize(abstract_question.strip())))])\n question_dep_mask_matrix = 1.0*np.ones((1, len(question_dep)))\n\n \"\"\"======\"\"\"\n\n '''goldpathindex 可能要用于mrr计算,有了goldpathindex其实就不需要no_positive_path'''\n 
candidates=[]\n for key in ['hop4','hop3_2','hop3_1','hop3_0','hop3','hop2','hop1']:\n if key in data[goldorpred]:\n candidates+=data[goldorpred][key]\n\n ####get gold path####\n goldpathindex = -1\n for index,candidate in enumerate(candidates):\n if np.array_equal(candidate, data['gold']['path']):\n goldpathindex=index\n break\n\n ##########get candidate path#####\n candidate_paths = []\n candidate_paths_words = []\n for cand_path in candidates:\n candidate_path=[]\n candidate_path_words=[]\n add=True\n for p in cand_path:\n # p = p.lower() lcquad\n if p in embeddings_interface.SPECIAL_CHARACTERS:\n candidate_path.extend( vocabularize_relation(p))\n else:\n if p not in relation_level_words:\n # add=False\n # break\n candidate_path.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n candidate_path_words.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n else:\n if \"0\" not in relation_level_words[p]:\n # add=False\n # break\n # print('pppp', p, p.replace(\"http://dbpedia.org/property/\", \"\"), relation_level_words[p])\n candidate_path.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n # print('before',candidate_path_words)\n candidate_path_words.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n # print('end',ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]),candidate_path_words)\n else:\n candidate_path.extend( ei.vocabularize(relation_level_words[p]['0']).tolist())\n candidate_path_words.extend( ei.vocabularize(relation_level_words[p]['0']).tolist())\n if add:\n candidate_paths.append(np.asarray(candidate_path))\n candidate_paths_words.append(np.asarray(candidate_path_words))\n\n return question,\\\n np.asarray(question_dep), np.asarray(question_dep_mask_matrix),\\\n np.asarray(candidate_paths), np.asarray(candidate_paths_words),\\\n goldpathindex, candidates", "def aStarSearch(problem, heuristic=nullHeuristic):\n # Initialization\n startState = problem.getStartState()\n\n if problem.isGoalState(startState):\n return [] # No action needed\n\n closedSet = set()\n queue = util.PriorityQueue()\n queue.push((startState, None, 0), heuristic(startState, problem))\n cameFrom = dict() # Stores most efficient previous action\n gScore = dict() # Stores current cost from start\n gScore[startState] = 0\n\n # Search\n while queue.heap: # Do while open set is not empty\n (currentState, action, cost) = queue.pop()\n\n if problem.isGoalState(currentState):\n # Goal reached. 
Construct path\n path = util.Queue()\n\n # Backtrack to start state\n while currentState is not startState and currentState in cameFrom:\n currentState, action = cameFrom[currentState]\n path.push(action)\n\n return path.list\n\n # Expand current state\n closedSet.add(currentState)\n for successor in problem.getSuccessors(currentState):\n successorState, successorAction, successorCost = successor\n \n if successorState in closedSet:\n continue # Skip expanded states\n\n # Initialize entries not already in dictionaries to a big number\n if currentState not in gScore:\n gScore[currentState] = 999999999999\n if successorState not in gScore:\n gScore[successorState] = 999999999999\n\n # Compare this path to best path\n gTentative = gScore[currentState] + successorCost\n if gTentative >= gScore[successorState]:\n continue # Not a better path\n\n # A better path is found, store this path\n cameFrom[successorState] = (currentState, successorAction)\n gScore[successorState] = gTentative # Store new cost\n\n # Update priority queue with new heuristic estimate\n queue.update(successor, (gScore[successorState]\n + heuristic(successorState, problem)))", "def greedy_path():\n itinerary = []\n cities = all_cities(data_set)\n starting_city = randomize_city_start(cities.keys()) # start from a random city\n # print \"starting_city: %s\" % starting_city\n cities_visited = {}\n \n # iterate through all cities\n count = 1\n while True:\n possible_routes = []\n #distance = []\n # print \"starting_city: %s\" % starting_city\n for path in data_set:\n # we only start with city that we have assigned in starting_city\n if starting_city in path['city_start']:\n # we don't go to cities we have visited\n if path['city_end'] in cities_visited:\n continue\n else:\n # print \"path: \", path\n possible_routes.append(path) # add the city if not in the list\n \n if not possible_routes:\n break\n # append this to itinerary\n route = get_shortest_route(possible_routes)\n count += 1\n itinerary.append(route)\n # add this city to visited_cities list\n cities_visited[route[0]] = count\n starting_city = route[1]\n \n return itinerary", "def graph_search(self, initial_path, goal):\n nodes_considered = 0\n frontier = PathPriorityQueue([Path(initial_path, 0)])\n explored = set()\n while True:\n if frontier.is_empty():\n return \"FAIL\"\n path = frontier.pop()\n node = path.end\n explored.add(node)\n if node == goal:\n return path\n for action in self.ACTIONS(path):\n print(str(action))\n res = action.result(path)\n if res not in explored:\n new_path = path.combine(action)\n frontier.add(new_path)\n nodes_considered+=1", "def Option2_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n return path_cost_with_weighted_sum, Opt_path\n\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_weighted_sum) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n 
self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_weighted_sum = 0\n return path_cost_with_weighted_sum, Opt_path", "def add_args(parser):\n rescore_add_args(parser)\n parser.add_argument(\n \"--rl-weight\",\n type=float,\n default=0.1,\n help=\"trade-off coefficient of rl loss\",\n )\n parser.add_argument(\n \"--rl-num-trajectory\",\n type=int,\n default=3,\n help=\"num trajectory in rl training\",\n )", "def a_star(self, mapdata, start, goal):\n\n print \"Inside A star\"\n rospy.loginfo(\"Generate path from (%d,%d) to (%d,%d)\" % (start[0], start[1], goal[0], goal[1]))\n if not PathPlanner.is_cell_walkable(mapdata, goal[0], goal[1]):\n rospy.logerr(\"not walkable goal\")\n return[]\n #calculated from goal\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n frontier_msg = GridCells()\n frontier_cells = []\n for e in frontier.elements:\n frontier_cells.append(PathPlanner.grid_to_world(mapdata, e[1][0], e[1][1]))\n frontier_msg.header = mapdata.header\n frontier_msg.header.stamp = rospy.get_rostime()\n frontier_msg.cell_width = mapdata.info.resolution\n frontier_msg.cell_height = mapdata.info.resolution\n frontier_msg.cells = frontier_cells\n expanded_msg = GridCells()\n expanded_cells = []\n for e in cost_so_far: \n expanded_cells.append(PathPlanner.grid_to_world(mapdata, e[0], e[1]))\n \n expanded_msg.header = mapdata.header\n expanded_msg.header.stamp = rospy.get_rostime()\n expanded_msg.cell_width = mapdata.info.resolution\n expanded_msg.cell_height = mapdata.info.resolution\n expanded_msg.cells = expanded_cells\n self.expanded_pub.publish(expanded_msg)\n rospy.sleep(0.01)\n\n current = frontier.get()\n\n #creates path\n if current == goal:\n entry = goal\n listOfCoord = []\n while entry != None:\n listOfCoord.append(entry)\n entry = came_from[entry]\n listOfCoord.reverse()\n self.expanded_pub.publish(PathPlanner.createGridcells(mapdata, listOfCoord))\n return listOfCoord\n \n for next in PathPlanner.neighbors_of_8(mapdata, current[0], current[1]):\n new_cost = cost_so_far[current] + 1 #assume cost to move each unit is 1\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost + PathPlanner.euclidean_distance(next[0], next[1], goal[0], goal[1])\n frontier.put(next, priority)\n came_from[next] = current\n\n \n return[]", "def graph_2hop(profile, recom, filename):\n g = Graph()\n\n # ! 
ids are raw -> strings\n clickedItems = set(map(lambda x: str(x[0]), profile)) # set of clicked items\n recomItems = set() # set of recommended items\n\n # get recommended items\n for click in range(0, len(profile)): # for all the clicks\n for rec in recom[click]: # for the topN recommendations\n recomItems.add(str(rec[0]))\n\n # write clicked item-nodes in an outter ring\n angleStep = 2*np.pi / float(len(clickedItems)) # polar coordinates angle step\n angle = 0 # polar coordinates angle [0, 2pi]\n R = 1000 # outter\n for item in clickedItems: # for all the clicks\n target = str(item)\n g.add_node(target)\n g.nodes[target]['color'] = [255,0,0,1] # RGBA format\n g.nodes[target]['pos'] = [R * np.cos(angle), R * np.sin(angle)]\n g.nodes[target]['text'] = target\n\n angle += angleStep\n \n # write the rest item-nodes in an inner ring\n angleStep = 2*np.pi / float(len(recomItems - clickedItems)) # polar coordinates angle step\n angle = 0 # polar coordinates angle [0, 2pi]\n R = 600 # outter\n for item in recomItems - clickedItems: # for the rest of the items\n target = str(item)\n g.add_node(target)\n g.nodes[target]['color'] = [0,0,255,1] # RGBA format\n g.nodes[target]['pos'] = [R * np.cos(angle), R * np.sin(angle)]\n g.nodes[target]['text'] = target\n\n angle += angleStep\n \n # construct edges\n edges = {} # dictionary: (source_iid, target_iid) -> Vertex object\n weight_prop = g.new_edge_property('float')\n \n for click in range(0, len(profile)): # for all the clicks\n for rec in recom[click]: # for the topN recommendations\n target= str(rec[0])\n source = str(profile[click][0])\n weight = rec[1]\n\n g.add_edge(source, target)\n g.edges(source, target)['weight'] = weight\n \n return g", "def astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")", "def get_new_suggestions(self, study_id, trials=[], number=1):\n\n search_space = hyperopt.hp.uniform('x', -10, 10)\n\n search_space_instance = search_space\n rstate = np.random.RandomState()\n trials = hyperopt.Trials()\n domain = hyperopt.Domain(None, search_space_instance,\n pass_expr_memo_ctrl=None)\n algorithm = hyperopt.tpe.suggest\n rval = hyperopt.FMinIter(algorithm, domain, trials, max_evals=-1, rstate=rstate, verbose=0)\n rval.catch_eval_exceptions = False\n\n algorithm = rval.algo\n new_ids = rval.trials.new_trial_ids(1)\n rval.trials.refresh()\n random_state = rval.rstate.randint(2**31-1)\n new_trials = algorithm(new_ids, 
rval.domain, trials, random_state)\n rval.trials.refresh()\n\n # Example: {'x': [8.721658602103911]}\n vals = new_trials[0]['misc']['vals']\n\n #import ipdb;ipdb.set_trace()\n\n\n \"\"\"\n parameter = dict()\n for key in vals:\n try:\n parameter[key] = vals[key][0].item()\n except Exception:\n parameter[key] = None\n \"\"\"\n\n\n \"\"\"\n trials =rval.trials\n\n trial = trials.new_trial_docs([new_id], rval_specs, rval_results, rval_miscs)[0]\n trial['result'] = {'loss': reward, 'status': 'ok'}\n trial['state'] = hp.JOB_STATE_DONE\n trials.insert_trial_docs([trial])\n trials.refresh()\n \"\"\"\n\n\n \"\"\"\n def _choose_tuner(self, algorithm_name):\n if algorithm_name == 'tpe':\n return hp.tpe.suggest\n if algorithm_name == 'random_search':\n return hp.rand.suggest\n if algorithm_name == 'anneal':\n return hp.anneal.suggest\n raise RuntimeError('Not support tuner algorithm in hyperopt.')\n \"\"\"\n\n\n return_trial_list = []\n\n study = Study.objects.get(id=study_id)\n study_configuration_json = json.loads(study.study_configuration)\n params = study_configuration_json[\"params\"]\n\n for i in range(number):\n trial = Trial.create(study.id, \"TpeTrial\")\n parameter_values_json = {}\n\n for param in params:\n\n if param[\"type\"] == \"INTEGER\" or param[\"type\"] == \"DISCRETE\" or param[\"type\"] == \"CATEGORICAL\":\n pass\n\n elif param[\"type\"] == \"DOUBLE\":\n # TODO: Get the specified value from hyperopt\n suggest_value = vals[\"x\"][0]\n parameter_values_json[param[\"parameterName\"]] = suggest_value\n\n parameter_values_json[param[\"parameterName\"]] = suggest_value\n\n\n trial.parameter_values = json.dumps(parameter_values_json)\n trial.save()\n return_trial_list.append(trial)\n\n return return_trial_list", "def test_planning():\n\n joints1 = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n joints2 = [4.80, 2.92, 1.00, 4.20, 1.45, 1.32]\n\n\n path_planner = PathPlanner(\"manipulator\")\n\n print path_planner.group.get_end_effector_link()\n\n while True:\n raw_input(\"Press Enter to move to position 1\")\n plan = path_planner.plan_to_config(joints1)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)\n\n raw_input(\"Press Enter to move to position 2\")\n plan = path_planner.plan_to_config(joints2)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)", "def recommendations(self, identifier_type, identifier, novelist_api=None,\n feed_class=AcquisitionFeed):\n\n library = flask.request.library\n work = self.load_work(library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n search_engine = self.search_engine\n if isinstance(search_engine, ProblemDetail):\n return search_engine\n\n lane_name = \"Recommendations for %s by %s\" % (work.title, work.author)\n try:\n lane = RecommendationLane(\n library=library, work=work, display_name=lane_name,\n novelist_api=novelist_api\n )\n except CannotLoadConfiguration as e:\n # NoveList isn't configured.\n return NO_SUCH_LANE.detailed(_(\"Recommendations not available\"))\n\n facets = self.manager.load_facets_from_request(worklist=lane)\n if isinstance(facets, ProblemDetail):\n return facets\n\n # We use a normal Pagination object because recommendations\n # are looked up in a third-party API and paginated through the\n # database lookup.\n pagination = load_pagination_from_request(Pagination)\n if isinstance(pagination, ProblemDetail):\n return pagination\n\n annotator = self.manager.annotator(lane)\n url = annotator.feed_url(\n lane,\n facets=facets,\n pagination=pagination,\n )\n\n return feed_class.page(\n _db=self._db, 
title=lane.DISPLAY_NAME, url=url, worklist=lane,\n facets=facets, pagination=pagination,\n annotator=annotator, search_engine=search_engine\n )", "def mirror (self, res_paths):\n\n\n self.start_time = time.time()\n\n self.todo = []\n self.done = set()\n\n for res_path in res_paths:\n\n resolved_paths = map( \n lambda p: \n map( \n lambda p: (self.resolve_shortcuts(p[0]), p[1]) if type(p) is tuple else\n self.resolve_shortcuts(p), p), res_path[1])\n\n for resource in res_path[0]:\n\n if isinstance(resource, basestring):\n rs = [ self.resolve_shortcuts (resource) ]\n else:\n rs = []\n for t in self._fetch_ldf(p=self.resolve_shortcuts(resource[0]), \n o=self.resolve_shortcuts(resource[1])):\n rs.append(t[0])\n \n for r in rs:\n for resolved_path in resolved_paths:\n # import pdb; pdb.set_trace()\n\n logging.debug ('adding task: %s %s' % (r, repr(resolved_path)))\n self.todo.append((rdflib.URIRef(r), resolved_path))\n \n while len(self.todo)>0:\n\n resource, path = self.todo.pop()\n\n todo_new = set()\n\n # fetch resources from LDF only once\n\n if resource in self.done:\n triples = list(self.graph.triples((resource, None, None)))\n # logging.debug (u'LDF: DONE, %d triples' % len(triples))\n do_add = False\n\n else:\n\n triples = self._fetch_ldf (s=resource)\n self.done.add(resource)\n do_add = True\n\n # transformations\n\n if len(path)>0:\n res_filter = path[0]\n\n if type(res_filter) is tuple:\n pred, f = res_filter\n\n for t in triples:\n\n s = t[0]\n p = t[1]\n o = t[2]\n\n if unicode(p) != pred:\n continue\n\n np, no = f(o)\n\n np = self.resolve_shortcuts(np)\n\n if do_add:\n triples.append ((s, np, no))\n\n res_filter = unicode(np)\n\n if do_add:\n for t in triples:\n self.graph.add(t)\n\n if len(path)>0:\n\n new_path = path[1:]\n\n for t in triples:\n\n if len(t)<3:\n logging.error('triple of 2?! 
%s' % repr(t))\n continue\n\n s = t[0]\n p = t[1]\n o = t[2]\n\n if not isinstance(o, rdflib.URIRef):\n continue\n\n # logging.debug ('LDF checking %s %s' % (p, o))\n\n if res_filter == '*' or res_filter == unicode(p):\n\n # import pdb; pdb.set_trace()\n\n task = (o, new_path)\n\n # logging.debug ('LDF adding new task: %s' % repr(task))\n self.todo.append(task)", "def aStarSearch(problem, heuristic=nullHeuristic):\n visited = []\n solution = []\n intialCost = 0\n priorityQueue = util.PriorityQueue()\n priorityQueue.push((problem.getStartState(),solution,intialCost),intialCost)\n \n while not priorityQueue.isEmpty():\n coord, solution, totalStep = priorityQueue.pop()\n if problem.isGoalState(coord):\n return solution\n if not coord in visited:\n visited+=[coord]\n for position, direction, step in problem.getSuccessors(coord):\n newSolution = solution+[direction]\n g = totalStep + step\n newTotalCost = g + heuristic(position, problem)\n priorityQueue.push((position, newSolution, g), newTotalCost)", "def add_title_criteria(self, title):\n self.criteria.append({'title': title})", "def recommend_by_event(self, event = None):\n pass", "def parSearch(self, mode=False):\r\n # research\r\n profprint()\r\n w = slicer.modules.NeedleFinderWidget\r\n l = w.logic\r\n path = [ 0 for i in range(100)]\r\n \r\n if 0:\r\n path[24] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 24 NRRD/Manual/2013-02-25-Scene-without-CtrPt.mrml'\r\n path[29] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 29 NRRD/Manual/2013-02-26-Scene-without-CtrPts.mrml'\r\n path[30] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 30 NRRD/Manual/2013-02-26-Scene-without-CtrPt.mrml'\r\n path[31] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 31 NRRD/Manual/2013-02-27-Scene-without-CtrPts.mrml'\r\n path[34] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 34 NRRD/Manual/2013-02-27-Scene-without-CtrPts.mrml'\r\n path[35] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 35 NRRD/Manual/2013-02-27-Scene-without-CtrPts.mrml'\r\n path[37] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 37 NRRD/Manual/2013-02-27-Scene-without-CtrPts.mrml'\r\n path[38] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 38 NRRD/Manual/2013-02-27-Scene-without-CtrPts.mrml'\r\n path[40] = '/Users/guillaume/Dropbox/AMIGO Gyn Data NRRD/Case 40 NRRD/Manual/2013-02-27-Scene-without-CtrPts.mrml'\r\n\r\n #Andre's file system (case copies from AMIGO share)\r\n # stripped OTHER cases\r\n if 0: path[33] = '/home/mastmeyer/Dropbox/GYN Cases/Case 033/NRRD/Auto-Eval-LB/2013-02-27-Scene.mrml'\r\n if 0:\r\n path[ 8] = '/home/mastmeyer/Dropbox/GYN Cases/Case 008/NRRD/Auto-Eval-LB/2013-05-07-Scene.mrml'\r\n path[12] = '/home/mastmeyer/Dropbox/GYN Cases/Case 012/NRRD/Auto-Eval-LB/2013-04-22-Scene.mrml'\r\n path[16] = '/home/mastmeyer/Dropbox/GYN Cases/Case 016/NRRD/Auto-Eval-LB/2013-04-21-Scene.mrml'\r\n path[21] = '/home/mastmeyer/Dropbox/GYN Cases/Case 021/NRRD/Auto-Eval-LB/2013-04-21-Scene.mrml'\r\n path[22] = '/home/mastmeyer/Dropbox/GYN Cases/Case 022/NRRD/Auto-Eval-LB/2013-04-21-Scene.mrml'\r\n path[25] = '/home/mastmeyer/Dropbox/GYN Cases/Case 025/NRRD/Auto-Eval-LB/2013-04-21-Scene.mrml'\r\n path[26] = '/home/mastmeyer/Dropbox/GYN Cases/Case 026/NRRD/Auto-Eval-LB/2013-04-17-Scene.mrml'\r\n path[27] = '/home/mastmeyer/Dropbox/GYN Cases/Case 027/NRRD/Auto-Eval-LB/2013-04-17-Scene.mrml'\r\n #stripped MICCAI13 cases (just manual seg. 
by LB/AM)\r\n if 1:\r\n path[24] = '/home/mastmeyer/Dropbox/GYN Cases/Case 024/NRRD/Auto-Eval-LB/2013-02-28-Scene.mrml'\r\n path[28] = '/home/mastmeyer/Dropbox/GYN Cases/Case 028/NRRD/Auto-Eval-LB/2013-02-28-Scene.mrml'\r\n path[29] = '/home/mastmeyer/Dropbox/GYN Cases/Case 029/NRRD/Auto-Eval-LB/2013-02-26-Scene.mrml'\r\n path[30] = '/home/mastmeyer/Dropbox/GYN Cases/Case 030/NRRD/Auto-Eval-LB/2013-02-26-Scene.mrml'\r\n path[31] = '/home/mastmeyer/Dropbox/GYN Cases/Case 031/NRRD/Auto-Eval-LB/2013-02-27-Scene.mrml'\r\n path[33] = '/home/mastmeyer/Dropbox/GYN Cases/Case 033/NRRD/Auto-Eval-LB/2013-02-27-Scene.mrml'\r\n path[34] = '/home/mastmeyer/Dropbox/GYN Cases/Case 034/NRRD/Auto-Eval-LB/2013-02-27-Scene.mrml'\r\n path[37] = '/home/mastmeyer/Dropbox/GYN Cases/Case 037/NRRD/Manual Alireza/2013-02-27-Scene.mrml'\r\n path[38] = '/home/mastmeyer/Dropbox/GYN Cases/Case 038/NRRD/Manual Alireza/2013-02-27-Scene.mrml'\r\n path[40] = '/home/mastmeyer/Dropbox/GYN Cases/Case 040/NRRD/Manual Alireza/2013-02-27-Scene.mrml'\r\n #show a directory selector for saving the results\r\n self.dirDialog = qt.QFileDialog(w.parent)\r\n self.dirDialog.setDirectory('/tmp')\r\n self.dirDialog.options = self.dirDialog.ShowDirsOnly\r\n self.dirDialog.acceptMode = self.dirDialog.AcceptSave\r\n #self.dirDialog.show()\r\n dir=self.dirDialog.getExistingDirectory()\r\n w.logDir=dir\r\n print \"saving results to \", dir\r\n try: shutil.copyfile('/home/amast/WualaDrive/mastmeyer/Homes/NeedleFinder/NeedleFinder/NeedleFinder.py',dir+'/NeedleFinder_ref.py')\r\n except: breakbox(\"/!\\ reference source NeedleFinder.py not found!\")\r\n if mode == 0:\r\n #save a copy of the source file as reference\r\n # simple run with current parameters/algo over several patients\r\n self.writeTableHeader(dir+'/AP-All_stats.csv')\r\n filLog=open(dir+'/allog.tsv', 'w')\r\n #filLog.write(\"case\\tman.-seg_\\tiStep\\tcrit\\treject\\tvalue\\tlimit\\n\")\r\n filLog.close()\r\n nUsers=1 #CONST\r\n for user in range(nUsers): \r\n w.userNr=user\r\n print \"simulated user (offset): \",user\r\n for id in range(100): #<o> range(100)\r\n if path[id]:\r\n w.caseNr=id\r\n print \"processing \", path[id]\r\n self.writeTableHeader(dir+'/User-'+str(user)+'_AP-' + str(id) + '.csv', 1)\r\n slicer.mrmlScene.Clear(0)\r\n slicer.util.loadScene(path[id])\r\n #TODO implement random tips in a sphere (d=2mm) from tube center \r\n l.startValidation(script=True, offset=user*50/nUsers)\r\n results, outliers = l.evaluate(script=True) # calculate HD distances\r\n for result in results:\r\n result[0:0]=[user,id]\r\n l.exportEvaluation(results, dir+'/User-'+str(user)+'_AP-' + str(id) + '.csv')\r\n #slicer.util.saveScene(dir+'/AP-' + str(id) + '.mrb') # may use lots of disk space\r\n # stats\r\n HD = np.array(results)\r\n # HD.shape = (int(len(results)/float(3)),3)\r\n maxTipHD = HD[:, 2].max()\r\n maxHD = HD[:, 3].max()\r\n avgHD = HD[:, 3].mean()\r\n stdHD = HD[:, 3].std()\r\n sl = np.sort(HD[:, 3])\r\n medHD = sl[sl.size / 2]\r\n resultsEval = [user,id,maxTipHD, maxHD, avgHD, stdHD, medHD]+[len(results)]+[len(outliers)] +[str(outliers)]+ l.valuesExperience + [id]\r\n l.exportEvaluation(resultsEval, dir+'/AP-All_stats.csv')\r\n #pause()\r\n msgbox(\"parSearch mode 0 done, results in \"+dir)\r\n elif mode == 1:\r\n id = 'Current'\r\n # simple brute force search in the dimensions (Guillaumes parameterSearch.py)\r\n self.writeTableHeader(dir+'/BF-' + str(id) + '.csv', 1)\r\n self.writeTableHeader(dir+'/BF-' + str(id) + '_stats.csv')\r\n for i in range(3, 12):\r\n # 
l.resetNeedleDetection(script=True) # ??? this resets the parameters to default\r\n w.numberOfPointsPerNeedle.setValue(i) # change parameter control points\r\n l.startValidation(script=True)\r\n results, outliers = l.evaluate(script=True) # calculate HD distances\r\n for result in results:\r\n result[0:0]=[user,id]\r\n l.exportEvaluation(results, dir+'/BF-' + str(id) + '.csv')\r\n slicer.util.saveScene(dir+'/BF-' + str(id) + '.mrb') # may use lots of disk space\r\n # stats\r\n HD = np.array(results)\r\n # HD.shape = (int(len(results)/float(3)),3)\r\n maxTipHD = HD[:, 2].max()\r\n maxHD = HD[:, 3].max()\r\n avgHD = HD[:, 3].mean()\r\n stdHD = HD[:, 3].std()\r\n sl = np.sort(HD[:, 3])\r\n medHD = sl[sl.size / 2]\r\n resultsEval = [user,id,maxTipHD,maxHD, avgHD, stdHD, medHD] +[len(results)]+[len(outliers)] +[str(outliers)]+ l.valuesExperience + [id]\r\n l.exportEvaluation(resultsEval, dir+'/BF-' + str(id) + '_stats.csv')\r\n #pause()\r\n msgbox(\"parSearch mode 1 done, results in \"+dir)\r\n elif mode == 2:\r\n # code piece from Guillaumes (bruteForce.py) multi patient mode search\r\n for id in range(100):\r\n if path[id]:\r\n w.caseNr=id\r\n print \"processing \", path[id]\r\n slicer.mrmlScene.Clear(0)\r\n slicer.util.loadScene(path[id])\r\n self.writeTableHeader(dir+'/RS-' + str(id) + '.csv', 1)\r\n self.writeTableHeader(dir+'/RS-' + str(id) + '_stats.csv')\r\n for i in range(1, 10000):\r\n # l.resetNeedleDetection(script=True) # ??? this resets the parameters to default\r\n w.radiusNeedleParameter.setValue(np.random.randint(1, 6))\r\n w.stepsize.setValue(np.random.randint(1, 40))\r\n w.sigmaValue.setValue(np.random.randint(1, 40)) # change parameter sigma\r\n w.gradientPonderation.setValue(np.random.randint(1, 20))\r\n w.exponent.setValue(np.random.randint(1, 20))\r\n w.numberOfPointsPerNeedle.setValue(np.random.randint(3, 11))\r\n l.startValidation(script=True)\r\n results, outliers = l.evaluate(script=True) # calculate HD distances\r\n for result in results:\r\n result[0:0]=[user,id]\r\n l.exportEvaluation(results, dir+'/RS-' + str(id) + '.csv')\r\n slicer.util.saveScene(dir+'/RS-' + str(id) + '.mrb') # may use lots of disk space\r\n # stats\r\n HD = np.array(results)\r\n maxTipHD = HD[:, 2].max()\r\n maxHD = HD[:, 3].max()\r\n avgHD = HD[:, 3].mean()\r\n stdHD = HD[:, 3].std()\r\n sl = np.sort(HD[:, 3])\r\n medHD = sl[sl.size / 2]\r\n resultsEval = [user,id,maxTipHD,maxHD, avgHD, stdHD, medHD] +[len(results)]+[len(outliers)] +[str(outliers)]+ l.valuesExperience + [id]\r\n l.exportEvaluation(resultsEval, dir+'/RS-' + str(id) + '_stats.csv')\r\n # end = time.time()\r\n # print 'processing time: ', end-start\r\n # start = time.time()\r\n #pause()\r\n msgbox(\"parSearch mode 2 done, results in \"+dir)\r\n #rof id\r\n #file mode 2\r\n slicer.mrmlScene.Clear(0) #clean up to save memory\r", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n qu = util.PriorityQueue()\n visited = set([])\n current = (problem.getStartState(), \"\", 0)\n qu.update(current, 0)\n costs = {}\n parents = {}\n parents[problem.getStartState()] = (problem.getStartState(), \"\")\n\n while not qu.isEmpty():\n cost, current = qu.pop()\n visited.add(current[0])\n\n if problem.isGoalState(current[0]):\n result = current[0]\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n qu.update(each, cost + each[2] + 
heuristic(each[0], problem))\n if each[0] not in costs:\n costs[each[0]] = cost + each[2]\n parents[each[0]] = (current[0], each[1])\n elif costs[each[0]] > cost + each[2] + heuristic(each[0], problem):\n costs[each[0]] = cost + each[2] + heuristic(each[0], problem)\n parents[each[0]] = (current[0], each[1])\n\n path = []\n while parents[result][0] != result:\n path.append(parents[result][1])\n result = parents[result][0]\n\n path.reverse()\n result = []\n for each in path:\n if each == \"South\":\n result.append(s)\n elif each == \"West\":\n result.append(w)\n elif each == \"North\":\n result.append(n)\n elif each == \"East\":\n result.append(e)\n\n return result\n util.raiseNotDefined()\n\n util.raiseNotDefined()", "def url_shortner(self):", "def pyhop(state,agent,verbose=0, all_solutions=False, plantree=True, rand=False):\n tasks = state.goals[agent]\n if verbose>0: print('** pyhop, verbose={}: **\\n state = {}\\n agent={}\\n tasks = {}'\n .format(verbose, state.__name__, agent, tasks))\n \n # At the beginning of planning, reset \"visited\" from the planning world.\n if plantree:\n planTrees = seek_plantrees(state, tasks, None, 0, verbose, rand=rand)\n if planTrees[0] == None:\n return [False]\n # print(\"**** Final PlanNodes: **** \")\n # print(\"\\tall_plans:{}\\n\\trand:{}\".format(all_solutions, rand))\n # print(\"found {} plans:\".format(len(planTrees)))\n # for tree in planTrees:\n # print(\"\\tcost:{}\".format(tree.cost))\n min_cost = min([tree.cost for tree in planTrees])\n to_return = [tree for tree in planTrees if tree.cost == min_cost]\n # print(\"returning {} plantree(s) with min cost {}\".format(len(to_return), min_cost), to_return)\n # print(\"Inspecting plantree: \\n\", to_return)\n\n return to_return\n else:\n results = seek_plan_all(state,tasks,[],0,verbose, all_plans=all_solutions, rand=rand)\n # print(\"**** Final Results: **** \\n{}\".format(results))\n # print(len(results))\n # print(results)\n return results", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Priority Queue to hold the node along with the path taken from the start node to reach that node\n pqueue = PriorityQueue()\n #Set to hold the node explored.\n explorednode = set()\n # Get the start node.\n startnode = problem.getStartState()\n # Push the starting node on the Queue along with an empty set to know the direction in order to reach the node.\n pqueue.push((startnode, []), 0)\n\n # Loop till the priority queue is empty\n while pqueue.isEmpty() is not True:\n # Pop the currentnode and the direction from the priority queue\n (currentnode, direction) = pqueue.pop()\n # Check if the currentnode is not in the explored node.\n if currentnode not in explorednode:\n # We will now add the node to set of explored node.\n explorednode.add(currentnode)\n # If the node is the goal. We made it!!\n if problem.isGoalState(currentnode):\n # The direction holds the way to reach till the goal from the start node.\n return direction\n # Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n # Add the successor to the queue along with the path to reach it.\n if successor not in explorednode:\n # Add the successor to the queue along with the path to reach it.\n pqueue.push((successor, direction + [action]), problem.getCostOfActions(direction + [action]) + heuristic(successor, problem))\n util.raiseNotDefined()" ]
[ "0.55554616", "0.49509117", "0.49274385", "0.49202466", "0.47717598", "0.4758381", "0.47329575", "0.4687782", "0.46796837", "0.46436214", "0.4615047", "0.46136823", "0.45894393", "0.45886946", "0.4578933", "0.45743513", "0.456499", "0.4564924", "0.45648426", "0.45518708", "0.45362183", "0.45218349", "0.45028758", "0.4502335", "0.44996375", "0.44986585", "0.44975463", "0.44925264", "0.44902766", "0.44902766", "0.44846177", "0.4484214", "0.4476998", "0.4475581", "0.44755724", "0.4473395", "0.44685763", "0.44646803", "0.4462094", "0.44556355", "0.44402522", "0.44234702", "0.44145575", "0.4410577", "0.4398791", "0.43974528", "0.43959305", "0.4394704", "0.43886876", "0.43765172", "0.4375676", "0.4373435", "0.43733394", "0.43719593", "0.43701714", "0.43697432", "0.43686345", "0.43684134", "0.4364032", "0.4361914", "0.4355865", "0.4343231", "0.4337484", "0.43283623", "0.43234822", "0.43232143", "0.43228564", "0.43228564", "0.43212405", "0.43158898", "0.43082345", "0.42988002", "0.42881444", "0.42830878", "0.42817962", "0.42762604", "0.42732668", "0.42724848", "0.4271022", "0.4270999", "0.42697057", "0.42697042", "0.4266862", "0.4263932", "0.42560604", "0.42510515", "0.42490482", "0.42479715", "0.42460164", "0.4242932", "0.4240571", "0.42390463", "0.4236986", "0.42335698", "0.42320222", "0.42318606", "0.42300087", "0.42278907", "0.4225863", "0.42254344" ]
0.6105465
0
Evaluation of the recommended career pathways.
def evaluate_metrics(pathways, debug):\n    __print_msg('Evaluating metrics...', debug)\n    metrics = {}\n    metrics['CareerGoalReached'], metrics['ShorterRecommendedPath'], metrics['UserPathAvgLength'], metrics['RecPathAvgLength'] = metric_path_length(pathways)\n    __print_msg('Career goal reached: {}'.format(metrics['CareerGoalReached']), debug)\n    __print_msg('Recommended path shorter: {}'.format(metrics['ShorterRecommendedPath']), debug)\n    __print_msg('User pathway average length: {}'.format(metrics['UserPathAvgLength']), debug)\n    __print_msg('Recommended pathway average length: {}'.format(metrics['RecPathAvgLength']), debug)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_pathways(self):\n basic_test_runner(self, 'pathways')", "def its_because_school(connection):\n print(\"Shortest path between two nodes\")\n answer = connection.execute(connection.get_path, 0, 4)\n for a in answer.values():\n print(a)\n print(\"Centrality closeness\")\n answer = connection.execute(connection.get_closeness, 1, True)\n for a in answer.values():\n print(a)\n print(\"Betweenness centrality\")\n answer = connection.execute(connection.get_betweenness, 2)\n for a in answer.values():\n print(a)\n print(\"Eigenvector\")\n answer = connection.execute(connection.get_eigenvector, 3)\n for a in answer.values():\n print(a)\n print(\"Degree centrality\")\n answer = connection.execute(connection.get_degree_centrality)\n for a in answer.values():\n print(a)", "def evaluate(G, path, home_idxs, verbose=False):\n dropoffs = assign_dropoffs(G, path, home_idxs)\n cost, msg = cost_of_solution(G, path, dropoffs, shortest=all_pairs_dists)\n if verbose:\n print(msg)\n\n return cost", "def IteratePaths(self):\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n a1, aT = [-1,], []\n\n for q in range(self.Nq):\n if q == 0:\n self.apath[-1] = 0.2\n elif q == 1:\n self.apath[-1] = 0.3\n else:\n self.apath[-1] = max(0,aT[-1]-(aT[-1]-aT[-2])*a1[-1]/(a1[-1]-a1[-2]))\n \n self.npath[-1] = 0\n self.cpath[-1] = self.apath[-1]*(1+self.r) + self.b\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n self.apath[y], self.npath[y], self.cpath[y] = self.DirectSolve(y)\n\n aT.append(self.apath[-1])\n a1.append(self.apath[-self.T])\n if (fabs(self.apath[-self.T])<self.tol):\n break\n for y in range(-1,-(self.T+1),-1):\n self.upath[y] = self.util(self.cpath[y],self.npath[y])", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def optimise(self):\n route = str(sorted(self.heuristic_path))\n\n if route in self.routes:\n saved = TSP.routes[route]\n self.heuristic_path = saved[\"path\"]\n self.heuristic_cost = saved[\"cost\"]\n else:\n self._optimise()\n\n return self.heuristic_path, self.heuristic_cost", "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def heuristic_function(self, node_current: PriorityNode) -> float:\n ########################################################################\n 
# todo: Implement your own heuristic cost calculation here. #\n # Hint: #\n # Use the State of the current node and the information from the #\n # planning problem, as well as from the scenario. #\n # Some helper functions for your convenience can be found in #\n # ./search_algorithms/base_class.py #\n ########################################################################\n output_logs = False\n if output_logs:\n print(\"##################\")\n print(\"current time step: \", node_current.list_paths[-1][-1].time_step)\n print(\"current problem mode\", self.planningProblemType)\n print(\"depth tree: \", node_current.depth_tree)\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n\n # Test if reached goal:\n if self.reached_goal(node_current.list_paths[-1]):\n return 0.0\n # Test if route planner failed to find a path: \n if self.routeplannerresult is None:\n return np.inf\n\n ############ Detect cars in front:\n # calc cost based on distance to gool following the refrence path:\n # loop through all obstacles at time step x and find if any is close of current pos:\n if not self.disableObstAvoidance:\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= lookaheadVar:\n # calc orientation diff between car and obstacle:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:\n return np.inf\n \n # get index of closest object to the ego vehicle:\n index_smallest_dist = self.get_index_nearest_obst_infront(node_current)\n \n # use the index to locate vehicle to calc cost: \n if index_smallest_dist != -1:\n # found the index of vehicle with smallest distance to ego car:\n obst = self.list_obstacles[index_smallest_dist]\n obstPos = obst.state_at_time(currenttimestep)\n if obstPos is not None and 'velocity' in obstPos.attributes:\n if obstPos.velocity == 0:\n cost = node_current.list_paths[-1][-1].velocity\n return cost\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity:\n return np.inf\n cost = abs(node_current.list_paths[-1][-1].velocity - obstPos.velocity)\n return cost\n #########################################################\n\n # Decide based on planning problem type how to calculate cost\n if self.planningProblemType == 'ModeA':\n # Call function for planning problem with desired time, position, speed and orientation\n cost = self.cost_for_modeA_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeA cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeB':\n # Call function for planning problem with desired time, position and velocity:\n cost = self.cost_for_modeB_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeC':\n # Call function for planning problem 
with desired time, position and orientation:\n cost = self.cost_for_modeC_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeD':\n # Call function for planning problem with desired time and position:\n cost = self.cost_for_modeD_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'Survival':\n # Call function for planning problem with desired time:\n cost = self.cost_for_Survival_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost", "def Option3_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n return path_cost_with_concave_function, Opt_path\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_concave_function = 0\n return path_cost_with_concave_function, Opt_path", "def _cost_route_fine(self):\n return self.fine", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n GhostLocs = successorGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = successorGameState.getCapsules()\n\n \"*** YOUR CODE HERE ***\"\n \"\"\" factors: proximity to food, proximity to ghosts \n \"\"\" \n if successorGameState.isWin():\n return 10000\n if successorGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n for food in foodLocations:\n FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)] \n Hueristic = 0.0\n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 5:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*10 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*10000\n #Ghost is closer to me than nearest food so avoid ghost\n else:\n 
Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*10 - (1/closestGhostToMe)*5\n return Hueristic", "def optimal_route(graph,homes,source):\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = range(1,number_of_homes+1)\n optimal_cost = np.Inf\n optimal_dropoffs = dict()\n optimal_route = []\n optimal_num_clusters = 0\n\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n if cost < optimal_cost:\n optimal_cost = cost\n optimal_route = route \n optimal_dropoffs = dropoffs\n optimal_num_clusters = num_clusters\n\n return optimal_cost, optimal_dropoffs, optimal_route, optimal_num_clusters", "def solve_environment(self):\n \n #The first problem formulation\n #K kinds of towers\n #See more details about problem formulation in the writeup \n \n #Get a full matrix of the concatenated coverage matrices for \n #each tower type. THis new matrix has dimensions:\n #(Ntowers) x (sum(potential sites)), where the sum o=is over all tower types\n coverage = np.hstack(i for i in self.coverage_matrices)\n print coverage\n print coverage.shape \n \n #Diagonal matrix of the values of each target\n #(for the scenarios where we don't care about maximizing covered value,\n #target_values is just all ones, so this is just the identity matrix)\n V = np.diag(self.target_values)\n \n #If doing scenario where we want to fortify weakest link, only makes\n #sense if all targets are equal value:\n if self.objective_type == 'min_entries':\n V = np.eye(len(self.target_values))\n\n #Get the matrix of coverage values / expected value saved:\n C = np.dot(V,coverage)\n print 'V', V\n print 'coverage', coverage\n print 'C', C\n \n \n #Since not gauranteed to reach global optimum on any particular initialization,\n #run a few times and take the best result.\n #Just define \"best result\" as the result which had the most overall \n #\"converged\" x, combined over all tower kinds. \n# for j in xrange(self.N_random_starts_max):\n \n \n a = 2. #1.\n tau = 1e-4\n N = sum(i for i in self.N_tower_sites)\n w = np.zeros(N)\n ones = np.ones(N)\n p = 1. 
#the exponents power when doing he exponent method:\n \n for i in xrange(self.N_reweighting_iterations_max):\n #The concatenated vector of occupancies: Concatenated over all\n #of the kinds of towers.\n x = cvx.Variable(N)\n \n #Different objective functions depending on which optimization problem.\n #These are defined in the scenarios in the main function.\n if self.objective_type == 'min_entries':\n operation = cvx.min_entries\n elif self.objective_type == 'sum_entries':\n operation = cvx.sum_entries\n else:\n raise Exception('must specify valid objective_type')\n \n #Objective function includes penalty term for non-binary x values\n if self.penalty_type == 'reweighted_L1':\n #objective = cvx.Maximize(t - x.T*w)\n objective = cvx.Maximize(operation(C*x - x.T*w))\n\n\n #Main constraints on 0<=x<=1\n constraints = [0<=x, x<=1]\n \n \n #And then for each kind of tower, append the constraint that there\n #be exactly N_i towers, or <= quota (depending on constraint type)\n if self.constraints__type == 'fixed_N_towers' or self.constraints__type == 'tower_quotas':\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n print before_sum\n print before_sum + self.N_tower_sites[tk]\n if self.constraints__type == 'fixed_N_towers':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )==self.N_towers[tk])\n elif self.constraints__type == 'tower_quotas':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )<=self.budget__tower_quotas[tk])\n print x[before_sum : before_sum + self.N_tower_sites[tk]]\n \n elif self.constraints__type == 'total_cost':\n costs = np.hstack([np.repeat(self.budget__tower_unit_costs[tk],self.N_tower_sites[tk]) for tk in xrange(self.N_tower_kinds)])\n constraints.append(cvx.sum_entries(costs * x) <= self.budget__total_cost) \n \n \n \n\n\n \n \n print 'penalty_type', self.penalty_type\n print 'objective_type', self.objective_type\n print 'constraints__type', self.constraints__type\n print 'budget__tower_quotas', self.budget__tower_quotas\n print 'operation', operation\n print 'objective', objective\n print 'constraints', constraints\n cvx.Problem(objective, constraints).solve(verbose=self.VERBOSE)\n x = np.array(x.value).flatten()\n print 'x', x\n w = a/(tau+np.abs(x))\n p += 1.\n plt.figure(figsize=(5,5))\n plt.plot(x,marker='o')\n plt.savefig('histrograms_{}.png'.format(i))\n print \n \n \n \n \n #From the solution x, get the coordinates of those tower sites where we\n #really do want to place a tower\n #use = np.isclose(x,1.)\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n y = x[before_sum : before_sum + self.N_tower_sites[tk]]\n inds = np.argsort(y)\n s = y[inds]\n use = np.where(s>.5)[0]\n print inds\n print s\n print use \n if self.constraints__type == 'fixed_N_towers':\n if len(use) != self.N_towers[tk]:\n print 'Solution did not converge properly. 
Choosing the K best towers.'\n print self.N_towers[tk], len(use)\n # use = use[-self.N_towers[tk]:]\n use = inds[-self.N_towers[tk]:]\n elif self.constraints__type == 'tower_quotas':\n pass #Just use the towers thresholded at > .5\n print use\n \n \n self.coordinates__solved_towers.append([self.coordinates__tower_sites[tk][mm] for mm in inds[use]])", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n pacmanPos = currentGameState.getPacmanPosition()\n\n food = currentGameState.getFood()\n capsules = currentGameState.getCapsules()\n return currentGameState.getScore() - 10 * capsuleDistancePlan(pacmanPos, capsules) - foodDistPlan(pacmanPos, food)", "def consolidation_heuristics(to_print = False):\n # Instantiate the data problem.\n data = create_data_model()\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n def pending_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['post'][to_node]\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n pending_callback_index = routing.RegisterTransitCallback(pending_callback)\n # Define cost of each arc.\n for i in range(data['num_vehicles']-1):\n routing.SetArcCostEvaluatorOfVehicle(transit_callback_index, i) #Transit cost\n routing.SetFixedCostOfVehicle(data['fixed_cost'], i) #Fixed cost\n routing.SetArcCostEvaluatorOfVehicle(pending_callback_index, data['num_vehicles']-1) #Postponement and/or NonService cost\n # Add Capacity constraint.\n def demand_callback(from_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index) \n return data['demands'][from_node]\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n # Add time constraint.\n def time_callback(from_index,to_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to NodeIndex in time\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return time_matrix[from_node][to_node] \n time_callback_index = routing.RegisterTransitCallback(time_callback) \n routing.AddDimensionWithVehicleCapacity(\n time_callback_index,\n 0, # null capacity slack\n data['time_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Time')\n # Setting solution heuristic-procedure.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 5 #10 # 60 #20 #3000\n 
search_parameters.log_search = True\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n # Print solution on console.\n if assignment:\n sent, sol_results, routes_results = print_solution(data, manager, routing, assignment) \n return sent, sol_results, routes_results", "def search(self):\n if self.pruning == 'mpp':\n while not self.frontier.empty():\n path = self.frontier.pop()\n if path.end() not in self.explored:\n self.display(2, \"Expanding:\", path,\n \"(cost:\", path.cost, \")\")\n self.explored.add(path.end())\n self.num_expanded += 1\n if self.problem.is_goal(path.end()):\n self.display(1, self.num_expanded, \"paths have been expanded and\",\n len(self.frontier.frontierpq), \"paths remain in the frontier\")\n return path\n else:\n neighs = self.problem.neighbors(path.end())\n for arc in neighs:\n self.add_to_frontier(Path(path, arc))\n self.display(3, \"Frontier:\", self.frontier)\n elif self.pruning == 'cycle':\n while not self.frontier.empty():\n path = self.frontier.pop()\n if path.end() not in path.initial_nodes(): # new part for cycle pruning\n self.display(2, \"Expanding:\", path,\n \"(cost:\", path.cost, \")\")\n self.num_expanded += 1\n if self.problem.is_goal(path.end()):\n self.display(1, self.num_expanded, \"paths have been expanded and\",\n len(self.frontier.frontierpq), \"paths remain in the frontier\")\n return path\n else:\n neighs = self.problem.neighbors(path.end())\n for arc in neighs:\n self.add_to_frontier(Path(path, arc))\n self.display(3, \"Frontier:\", self.frontier)\n\n else: # no pruning\n while not self.frontier.empty() and self.num_expanded < self.max_expanded:\n path = self.frontier.pop()\n self.display(2, \"Expanding:\", path, \"(cost:\", path.cost, \")\")\n self.num_expanded += 1\n if self.problem.is_goal(path.end()):\n self.display(1, self.num_expanded, \"paths have been expanded and\",\n len(self.frontier.frontierpq), \"paths remain in the frontier\")\n return path\n else:\n neighs = self.problem.neighbors(path.end())\n for arc in neighs:\n self.add_to_frontier(Path(path, arc))\n self.display(3, \"Frontier:\", self.frontier)\n\n self.display(1, \"Total of\", self.frontier.frontier_index,\n \"paths expanded.\")", "def test_5_correctness(self):\n # TermTestState (see below) is designed so that the first solution found\n # has a higher cost than the second solution.\n # Start in stateindex 1 and look for path to index 0.\n plan = list(astar(TermTestState(),\n lambda state: (state.state == 0), # goal test\n TermTestState.TermTestH)) # function: distance to goal\n\n correct = [Action(\"1\", \"3\", cost=1.0),\n Action(\"3\", \"4\", cost=0.5),\n Action(\"4\", \"5\", cost=0.5),\n Action(\"5\", \"6\", cost=0.5),\n Action(\"6\", \"G\", cost=0.5)]\n \n cost = sum(p.cost for p in plan)\n c_cost = sum(c.cost for c in correct)\n # Check cost\n self.assertEqual(cost, c_cost,\n f\"Correct cost {c_cost}, your plan cost: {cost}. Check so you return the best solution, and not only the first one found if you have too high cost. Check so you return the full path if too low.\"\n )\n # Check path in general.\n self.assertTrue(len(plan) == len(correct) and all(p == c for p,c in zip(plan,correct)),\n f\"Correct plan: {correct}; Your plan: {plan}; Make sure that the plan isn't e.g. 
reversed.\")", "def savings_algorithm(self):\n self.generate_trivial_tours() # generate trivial solution\n while True: # endless loop\n maxSavings = 0 # values for best savings decision\n bestr1 = None\n bestr2 = None\n for r1 in self.routes: # loop through all route combinations\n for r2 in self.routes:\n if r1 != r2:\n currentSavings = self.savings2routes(r1,r2)\n if currentSavings > maxSavings: # if the savings are greater than the so far best savings\n bestr1 = r1 # store the routes and the savings value\n bestr2 = r2\n maxSavings = currentSavings\n if (bestr1 == None): # if no savings or no feasible joins exist break out of the loop\n break\n newRoute = VRP_Route(bestr1.route+bestr2.route) # generate new route and delete old routes\n self.routes.remove(bestr1)\n self.routes.remove(bestr2)\n self.routes.append(newRoute)\n self.get_objective()\n return self.objective", "def solve_tsp(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n drop_off_dict = {}\n car_path = []\n home_map = {}\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n\n start_in_home = start in home_indexes\n if start in home_indexes:\n home_indexes.remove(start)\n home_indexes.insert(0, start)\n home_count = 0;\n\n for home in home_indexes:\n #print(home, end = \" \")\n home_map[home_count] = home\n home_count += 1\n # Instantiate the data problem.\n #print(len(home_map))\n data = create_data_model(home_indexes, 0)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n #print(manager.NodeToIndex(15))\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n #print(home_map[to_index], end = \" \")\n from_index = manager.IndexToNode(from_index)\n to_index = manager.IndexToNode(to_index)\n dist_to = all_paths.get(home_map[from_index])[0][home_map[to_index]]\n #if from_index >= 25 or to_index >= 25:\n # print(\"from\" if from_index >= 25 else \"to\", end = \" \")\n #dist_to = all_paths[from_index][0][to_index]\n return dist_to\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n \"\"\"\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n \"\"\"\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 3\n #search_parameters.log_search = True\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # if assignment:\n # print_solution(manager, routing, assignment)\n # Print solution on console.\n\n if start in home_indexes:\n drop_off_dict[start] = [start]\n\n\n index = routing.Start(0)\n car_path.append(start)\n\n while not routing.IsEnd(index):\n previous_index = manager.IndexToNode(index)\n index = 
assignment.Value(routing.NextVar(index))\n\n car_path.pop();\n to_index = manager.IndexToNode(index)\n path_to = all_paths.get(home_map[previous_index])[1][home_map[to_index]]\n drop_off_dict[home_map[to_index]] = [home_map[to_index]]\n #print(to_index, end = ' ')\n car_path.extend(path_to)\n #route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n # for i in car_path:\n # print(i)\n if start in drop_off_dict.keys() and not start_in_home:\n drop_off_dict.pop(start, None)\n\n return car_path, drop_off_dict", "def Option2_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n return path_cost_with_weighted_sum, Opt_path\n\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_weighted_sum) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_weighted_sum = 0\n return path_cost_with_weighted_sum, Opt_path", "def road_multiplier(highway, bicycle_pref, motorway_pref, highway_pref, residential_pref):\n if highway == 'motorway':\n result = -motorway_pref*0.5\n elif highway == 'primary':\n result = -highway_pref*0.5\n elif highway == 'residential' or highway == 'service':\n result = -residential_pref*0.5\n elif highway == 'cycleway':\n result = -bicycle_pref * 0.8\n else:\n result = 0\n return result", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newCapsules = successorGameState.getCapsules()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n # Ghost Heuristic\n food = 0\n dist_pm_g = self.distGhosts(newGhostStates, successorGameState, newPos)\n\n #if food in this state\n if (currentGameState.hasFood(newPos[0], newPos[1])):\n food = 5\n #if capsule in this state\n elif (newPos in currentGameState.getCapsules()):\n return 1000\n #distance to the closest food\n d = self.distClosestFood(newPos, newFood)\n #distance to the closest capsule\n c = self.distClosestCap(newPos, newCapsules)\n #if the ghosts are scared\n if (sum(newScaredTimes) > 1):\n return food + d\n elif (d < 2 and 5 < dist_pm_g):\n return food + d\n elif (d > 2 and 6 < dist_pm_g):\n return food + d * 3\n else:\n return dist_pm_g + (food + d + c)", "def kPaths(veh, currentEdge):\n # A set of all of the edges, used to reset the vehicle's internal estimated edge store\n edgesSet = set()\n k = 1\n # This is a fail safe in case there are less paths than K_MAX available for the vehicle to take\n timeOut = 0\n\n # Finding the best possible route for the vehicle\n traci.vehicle.rerouteTraveltime(veh, currentTravelTimes=False)\n\n # The vehicle's 
current route\n bestRoute = traci.vehicle.getRoute(veh)\n # Element of the current edge within the currentRoute\n currentEdgeIndex = bestRoute.index(currentEdge)\n # Altered route with the first element of the route being the current edge\n currentRoute = bestRoute[currentEdgeIndex:]\n edgesSet.update(currentRoute)\n\n # Recording down the current best route and time\n bestTime = sim.getGlobalRoutePathTime(currentRoute)\n routes = {}\n routes['{}_best'.format(k)] = (bestTime, currentRoute,)\n\n # This records the estimated travel time for each road segment in which the vehicle may take (with or without\n # penalisation applied) -- this is done on and reset on a vehicle-per-vehicle basis\n adjustedEdgeVehicle = {}\n for edge in currentRoute:\n adjustedEdgeVehicle[edge] = edgeSpeedGlobal[edge]\n\n # Creating up to k-1 additional routes\n while k < K_MAX:\n penalisePathTimeVehicle(veh, currentRoute, adjustedEdgeVehicle)\n\n traci.vehicle.rerouteTraveltime(veh, currentTravelTimes=False)\n newRoute = traci.vehicle.getRoute(veh)\n currentRoute = newRoute[currentEdgeIndex:]\n newRouteTime = sim.getGlobalRoutePathTime(currentRoute)\n\n # These are the routes which have already been selected\n currentEligibleRoutes = [x[1] for x in routes.values()]\n # Ensuring the route doesn't exist within the existing eligible routes, the route contains the edge in which the\n # vehicle is currently occupying, and the route is not currently the best route\n if currentRoute not in currentEligibleRoutes and currentEdge in currentRoute:\n timeOut = 0\n # This keeps track if the calculated 'best' route time is above that of the calculated new route time\n bestRouteMoreThanNewRouteTime = False\n\n \"\"\"\n Sometimes the roads suffer so much congestion that there are issues with reliable estimation of travel\n times given by TraCI. TraCI may sometimes give overblown estimations of the travel times of an edge, for\n example if the edge is being blocked and no movement is being made, the estimated time may be \n disproportionally large compared to it's real-world equivalent predicted time taken; this problem persists\n even with a rerouting device being connected to the vehicles which allows for more accurate 'smoothed' \n aggregated travel times- this is a fault inherent to TraCI. \n \n In an attempt to alleviate this, the estimated travel times are bounded to 15x their free-flow speed. \n However, this sometimes causes the best time to no longer be the best time depending on the number of \n edge travel time boundings in a route (which could alter the predicted time for a route given we are \n now estimating some of the edge times). 
\n \n Given this, we instead work out the ratio between the best travel time and the currentRoute travel time, \n we multiply this ratio against the best travel time to give a better, more accurate estimation of the \n currentRoute's travel time.\n \"\"\"\n if newRouteTime < bestTime:\n # These are the predicted route times which are given directly from TraCI\n bestTimeGivenByTraci = 0\n newRouteTimeGivenByTraci = 0\n\n # These are the smoothed travel times which are generated through the vehicle's individual rerouting\n # device\n smoothedBestTime = 0\n smoothedNewTime = 0\n\n # Times for the best route\n for edge in routes['1_best'][1]:\n bestTimeGivenByTraci += traci.edge.getTraveltime(edge)\n smoothedBestTime += float(traci.vehicle.getParameter(veh, \"device.rerouting.edge:{}\".format(edge)))\n\n # Times for the new route\n for edge in currentRoute:\n newRouteTimeGivenByTraci += traci.edge.getTraveltime(edge)\n smoothedNewTime += float(traci.vehicle.getParameter(veh, \"device.rerouting.edge:{}\".format(edge)))\n\n traciRatio = newRouteTimeGivenByTraci / bestTimeGivenByTraci\n smoothedRatio = smoothedNewTime / smoothedBestTime\n\n \"\"\"\n In extremely rare cases, TraCI can erroneously return an incorrect edge travel time which means\n that the 'best' travel time may not actually be the best when taking these estimated travel time\n measurements; this can result in ratios < 1.\n \"\"\"\n if traciRatio < 1 and smoothedRatio < 1:\n bestRouteMoreThanNewRouteTime = True\n\n # Add the new time to the list so that it can be determined whether or not the existing times can\n # exist given this new best time (with boundaries in mind)\n routes['{}_best'.format(k + 1)] = (newRouteTime, currentRoute,)\n\n # All of the route and time pair combinations\n tupleList = []\n for key in routes:\n timeRouteTuple = routes[key]\n tupleList.append(timeRouteTuple)\n\n # Sort the tuples with the lowest time being at index 0, with the longest being at index k-1\n sortedTuples = sorted(tupleList, key=lambda x: x[0])\n\n # Placing the contents of the sortedTuple into routes\n counter = 0\n for key in routes:\n routes[key] = sortedTuples[counter]\n counter += 1\n\n # Best time will now be in the first position\n bestTime = routes['1_best'][0]\n\n # Delete any routes which don't conform to <= new best time *\n for key in deepcopy(routes):\n if routes[key][0] >= bestTime * KPATH_MAX_ALLOWED_TIME:\n del routes[key]\n\n # Resetting k depending on how many elements are left after removal\n k = len(routes)\n else:\n # This takes the most accurate ratio (which is deemed to be the ratio which is closest to 1)\n traciRatio = min([traciRatio, smoothedRatio], key=lambda v: abs(v - 1))\n # Work out the new, more accurate currentRoute travel time based on this ratio\n newRouteTime = bestTime * traciRatio\n\n # New route's estimated time doesn't exceed bestTime*KPATH_MAX_ALLOWED_TIME of the optimal route time\n if newRouteTime <= bestTime*KPATH_MAX_ALLOWED_TIME and not bestRouteMoreThanNewRouteTime:\n k += 1\n for edge in currentRoute:\n if edge not in adjustedEdgeVehicle:\n adjustedEdgeVehicle[edge] = edgeSpeedGlobal[edge]\n edgesSet.update(currentRoute)\n routes['{}_best'.format(k)] = (newRouteTime, currentRoute,)\n else:\n break\n else:\n timeOut += 1\n # Time out limit exceeded\n if timeOut == KPATH_TIMEOUT:\n break\n\n # Selecting a random route\n ranNum = random.randint(1, len(routes))\n\n routeChoice = routes['{}_best'.format(ranNum)]\n\n # Setting the additional (estimated) extra time in which the vehicle has taken due 
to reroutings\n routeChoiceTimeTaken = routeChoice[0]\n bestChoiceTimeTaken = routes['1_best'][0]\n extraTime = routeChoiceTimeTaken - bestChoiceTimeTaken\n cumulativeExtraTime[veh] += abs(extraTime)\n\n traci.vehicle.setRoute(veh, routeChoice[1])\n\n resetVehicleAdaptedTravelTime(veh, edgesSet)\n\n # These are the routes which were available to be selected\n routeList = [x[1] for x in routes.values()]\n\n return routeList", "def evaluateAllRroutes(self):\n isTrain = 1 # 1 for train, 0 for test\n\n performance = 0\n normalizedPerformance = 0\n priceTolerance = 5 # price to be tolerated\n\n normPerforms = []\n for i in range(8):\n print \"Route: {}\".format(i)\n [perfor, normaPerfor] = self.evaluateOneRouteForMultipleTimes(self.routes[i], priceTolerance)\n normPerforms.append(normaPerfor)\n performance += perfor\n normalizedPerformance += normaPerfor\n\n performance = round(performance/8, 2)\n normalizedPerformance = round(normalizedPerformance/8, 2)\n\n if self.isTrain:\n print \"\\nTRAIN:\"\n else:\n print \"\\nTEST:\"\n print \"Average Performance: {}%\".format(performance)\n print \"Average Normalized Performance: {}%\".format(normalizedPerformance)\n print \"Normalized Performance Variance: {}\".format(np.var(normPerforms))", "def main():\r\n # Instantiate the data problem.\r\n data = create_data_model()\r\n\r\n # Create the routing index manager.\r\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n # Create Routing Model.\r\n routing = pywrapcp.RoutingModel(manager)\r\n\r\n\r\n # Create and register a transit callback.\r\n def distance_callback(from_index, to_index):\r\n \"\"\"Returns the distance between the two nodes.\"\"\"\r\n # Convert from routing variable Index to distance matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return data['distance_matrix'][from_node][to_node]\r\n\r\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n # Define cost of each arc.\r\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n\r\n # Add Capacity constraint.\r\n def demand_callback(from_index):\r\n \"\"\"Returns the demand of the node.\"\"\"\r\n # Convert from routing variable Index to demands NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n return data['demands'][from_node]\r\n\r\n demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n demand_callback)\r\n routing.AddDimensionWithVehicleCapacity(\r\n demand_callback_index,\r\n 0, # null capacity slack\r\n data['vehicle_capacities'], # vehicle maximum capacities\r\n True, # start cumul to zero\r\n 'Capacity')\r\n\r\n # Setting first solution heuristic.\r\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n search_parameters.first_solution_strategy = (\r\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n\r\n # Solve the problem.\r\n assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n # Print solution on console.\r\n if assignment:\r\n print_solution(data, manager, routing, assignment)", "def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus", "def test_make_pathways_with_coverage_min(self):\n basic_test_runner(self, 'pathways', coverage_min=0.5)", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n 
source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def CalculateChebyPaths(self):\n Kmin, Kmax = self.Kmin, self.Kmax\n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.cpath[y] = self.chebeval(array([self.apath[y]]),self.ac[y],Kmin,Kmax)\n # if self.cpath[y] < 0:\n # self.cpath[y] = 0\n if y >= self.W:\n income = self.b\n else:\n self.npath[y] = self.chebeval(array([self.apath[y]]),self.an[y],Kmin,Kmax)\n income = (1-self.tau)*self.w*self.npath[y]\n self.apath[y+1] = (1+self.r)*self.apath[y] + income - self.cpath[y]\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1] = (1+self.r)*self.apath[self.T-1] + self.b\n # self.cpath[self.T-1] = self.chebeval(array([self.apath[self.T-1]]),self.ac[self.T-1],Kmin,Kmax)\n self.upath[self.T-1] = self.util(self.cpath[self.T-1], self.npath[self.T-1])\n # print self.cpath, self.apath, self.npath", "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def PathEnzReg(dic,organism):\n\t#calculation\n\tppcorrp = [0] * (len(dic.plst)*len(dic.plst))\n\tppcorrn = [0] * (len(dic.plst)*len(dic.plst))\n\tfor e in dic.elst:\n\t\tcs = e.cs[0]\n\t\tr = e.rct\n\t\tif e.activation == None:\n\t\t\tcontinue\n\t\tfor (i,p1) in enumerate(dic.plst):\n\t\t\tfor (j,p2) in enumerate(dic.plst):\n\t\t\t\tif cs in p1.sclst and r not in p1.srlst and cs not in p2.sclst and r in p2.srlst and p1 not in p2.splst and p2 not in p1.splst and p1 != p2:\n\t\t\t\t\tif e.activation == True:\n\t\t\t\t\t\tppcorrp[i*len(dic.plst)+j] += 1\n\t\t\t\t\telif e.activation == False:\n\t\t\t\t\t\tppcorrn[i*len(dic.plst)+j] += 1\n\n\t\n\t#output\n\twith open('./results/'+organism+'/logfiles/logpath.log','w') as out:\n\t\tout.write('#List of the XX couples of most interactive pathways:\\n#those pathways that have the biggest number of enzymes (EITHER + OR -) going exclusively from one to the other.\\n#FORMAT:\\n#number of interactions from p1 to p2 (from p2 to p1) [from p1 to p2 with opposite sign, from p2 to p1 with opposite sign]\\n#p1 name \\t p2 name\\n\\nACTIVATION:\\n')\n\t\tppcorrtp = ppcorrp[:]\n\t\tppcorrtn = ppcorrn[:]\n\t\tfor i in range(20):\n\t\t\tj = ppcorrp.index(max(ppcorrp))\n\t\t\t(m,n) = ((j-j%len(dic.plst))/len(dic.plst),j%len(dic.plst))\n\t\t\t(p1,p2) = (dic.plst[m],dic.plst[n])\n\t\t\tout.write(p1.name+'\\t'+p2.name+'\\n'+str(ppcorrtp[j])+'\\t('+str(ppcorrtp[n*len(dic.plst)+m])+')\\t'+str([ppcorrtn[j],ppcorrtn[n*len(dic.plst)+m]])+'\\n')\n\t\t\tppcorrp[m*len(dic.plst)+n] = 0\n\t\t\tppcorrp[n*len(dic.plst)+m] = 0\n\n\n\t\tout.write('\\n-------------------------------------------------------------------------------------\\n\\nINHIBITION:\\n')\n\t\tfor i in 
range(20):\n\t\t\tj = ppcorrn.index(max(ppcorrn))\n\t\t\t(m,n) = ((j-j%len(dic.plst))/len(dic.plst),j%len(dic.plst))\n\t\t\t(p1,p2) = (dic.plst[m],dic.plst[n])\n\t\t\tout.write(p1.name+'\\t'+p2.name+'\\n'+str(ppcorrtn[j])+'\\t('+str(ppcorrtn[n*len(dic.plst)+m])+')\\t'+str([ppcorrtp[j],ppcorrtp[n*len(dic.plst)+m]])+'\\n')\n\t\t\tppcorrn[m*len(dic.plst)+n] = 0\n\t\t\tppcorrn[n*len(dic.plst)+m] = 0", "def get_context_route_condition(self, pathology_choose,\n way_choose, pathologies, ways):\n essentials_oils = 1\n if way_choose.name == \"orale\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)[0:2]\n\n vegetable_oil = NeutralProduct.objects.get(name=\"miel\")\n protocole = MethodOfUse.objects.get(name=\"orale\")\n\n elif way_choose.name == \"bain\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)[0:1]\n\n vegetable_oil = NeutralProduct.objects.get(name=\"gel douche\")\n protocole = MethodOfUse.objects.get(name=\"bain\")\n\n elif way_choose.name == \"diffusion\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = NeutralProduct.objects.get(name=\"alcool\")\n protocole = MethodOfUse.objects.get(name=\"diffusion\")\n\n elif way_choose.name == \"Inhalation\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = NeutralProduct.objects.get(name=\"bol d'eau\")\n protocole = MethodOfUse.objects.get(name=\"inhalation\")\n\n elif way_choose.name == \"cutanée\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = pathology_choose.vegetable_oil\n\n if pathology_choose.zone == \"general\":\n protocole = MethodOfUse.objects.get(\n name=\"cutanée générale\")\n else:\n protocole = MethodOfUse.objects.get(name=\"cutanée\")\n\n number_he = essentials_oils.count()\n amount = Recipe.objects.filter(\n way__name=way_choose.name).get(number_he=number_he)\n sides_effects = SideEffect.objects.filter(\n essential_oil__in=essentials_oils).distinct()\n contraindication = Contraindication.objects.filter(\n essential_oil__in=essentials_oils).distinct()\n\n context = {\n \"pathologies\": pathologies,\n \"pathology_choose\": pathology_choose,\n \"essentials_oils\": essentials_oils,\n \"vegetable_oil\": vegetable_oil,\n \"way_choose\": way_choose,\n \"ways\": ways,\n \"amount\": amount,\n \"protocole\": protocole,\n \"sides_effects\": sides_effects,\n \"contraindications\": contraindication,\n }\n\n return context", "def heuristic(self):\n game_score = (self.get_game_score(), 0.85)\n road_score = (self.get_longest_road_score(), 0.05)\n steps_score = (self.get_steps_available_score(), 0.05)\n reachable_nodes_score = (self.get_reachable_nodes_score(), 0.05)\n heuristics = [game_score, road_score, steps_score, reachable_nodes_score]\n result = 0\n for score, weight in heuristics:\n result += score * weight\n if DEBUG_PRINT:\n print(f\"Heuristic value for location {self.loc} is {result}\")\n print(f\"\\treachable score: {reachable_nodes_score[0] * reachable_nodes_score[1]}\")\n print(f\"\\tsteps score: {steps_score[0] * steps_score[1]}\")\n print(f\"\\tlongest road score: {road_score[0] * road_score[1]}\")\n print(f\"\\tgame score: {game_score[0] * game_score[1]}\")\n return result", "def 
solve(\n self,\n initial_routes=None,\n solver=\"cbc\",\n cspy=False,\n exact=True,\n pricing_strategy=\"PrunePaths\",\n ):\n if cspy:\n self.G.graph[\"subproblem\"] = \"cspy\"\n else:\n self.G.graph[\"subproblem\"] = \"lp\"\n print(self.G.graph[\"name\"], self.G.graph[\"subproblem\"])\n print(\"===========\")\n prob = VehicleRoutingProblem(\n self.G,\n duration=self.max_duration,\n load_capacity=self.max_load,\n drop_penalty=self.penalty,\n pickup_delivery=self.activate_pickup_delivery,\n distribution_collection=self.activate_distribution_collection,\n time_windows=self.activate_time_windows,\n )\n prob.solve(\n initial_routes=initial_routes,\n cspy=cspy,\n exact=exact,\n pricing_strategy=pricing_strategy,\n solver=solver,\n )\n self.best_value, self.best_routes = prob.best_value, prob._best_routes_as_graphs\n self.best_routes_nodes = prob.best_routes", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n newGhostPoses = [ghostState.getPosition() for ghostState in newGhostStates]\n\n numberOfFood = successorGameState.getNumFood()\n\n if numberOfFood == 0:\n return 1000\n\n closestFoodDistance = min([manhattanDistance(newPos, foodPos) for foodPos in self.gridToList(successorGameState.getFood())])\n\n stopPoint = 0\n if action == Directions.STOP:\n stopPoint = 20\n\n closestGhostDistance = min([manhattanDistance(newPos, ghostPosition) for ghostPosition in successorGameState.getGhostPositions()])\n\n if newPos in newGhostPoses:\n return -1000\n\n return - closestFoodDistance + 3*math.sqrt(closestGhostDistance) + 1.3*successorGameState.getScore()", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def change_way(coins, opponentLocation, player_location):\n global best_weight, best_path\n dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n coins_to_search = get_n_shortest(5, coins, player_location, dists_matrix)\n ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n for c in coins_to_search:\n if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n coins_to_search.remove(c)\n break\n best_weight = float(\"inf\")\n best_path = []\n api.debug(coins_to_search)\n exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n meta_route = [player_location] + best_path\n api.debug(meta_route)\n route = u.location_list_to_route(meta_route, route_matrix)\n \n return coins_to_search, meta_route, route, dist_matrix[player_location][meta_route[1]]", "def choose_paths_without_simulation(FT, output_dir, iteration, pathset_paths_df, pathset_links_df, veh_trips_df):\n simulation_iteration = 0\n ######################################################################################################\n FastTripsLogger.info(\" Step 1. 
Find out board/alight times for all pathset links from vehicle times\")\n\n # could do this just to chosen path links but let's do this to the whole pathset\n pathset_links_df = Assignment.find_passenger_vehicle_times(pathset_links_df, veh_trips_df)\n\n # instead of flag_missed_transfers(), set these to pathfinding results\n pathset_links_df[Assignment.SIM_COL_PAX_ALIGHT_DELAY_MIN] = 0\n pathset_links_df[Assignment.SIM_COL_PAX_A_TIME ] = pathset_links_df[Passenger.PF_COL_PAX_A_TIME]\n pathset_links_df[Assignment.SIM_COL_PAX_B_TIME ] = pathset_links_df[Passenger.PF_COL_PAX_B_TIME]\n pathset_links_df[Assignment.SIM_COL_PAX_LINK_TIME ] = pathset_links_df[Passenger.PF_COL_LINK_TIME]\n pathset_links_df[Assignment.SIM_COL_PAX_WAIT_TIME ] = pathset_links_df[Passenger.PF_COL_WAIT_TIME]\n pathset_links_df[Assignment.SIM_COL_PAX_MISSED_XFER ] = 0\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 2. Calculate costs and probabilities for all pathset paths\")\n (pathset_paths_df, pathset_links_df) = PathSet.calculate_cost(\n iteration, simulation_iteration, Assignment.STOCH_DISPERSION,\n pathset_paths_df, pathset_links_df, FT.passengers.trip_list_df,\n FT.transfers.transfers_df, FT.tazs.walk_df, FT.tazs.drive_df, veh_trips_df, FT.stops)\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 3. Choose a path for each passenger from their pathset\")\n\n # Choose path for each passenger -- pathset_paths_df and pathset_links_df will now have\n # SIM_COL_PAX_CHOSEN >=0 for chosen paths/path links\n (num_passengers_arrived, num_chosen, pathset_paths_df, pathset_links_df) = Passenger.choose_paths(\n True, # choose for everyone\n iteration, simulation_iteration,\n pathset_paths_df, pathset_links_df)\n\n # Write the pathsets\n Passenger.write_paths(output_dir, iteration, simulation_iteration, pathset_paths_df, False, Assignment.OUTPUT_PATHSET_PER_SIM_ITER)\n Passenger.write_paths(output_dir, iteration, simulation_iteration, pathset_links_df, True, Assignment.OUTPUT_PATHSET_PER_SIM_ITER)\n\n # write the final chosen paths for this iteration\n chosen_links_df = Passenger.get_chosen_links(pathset_links_df)\n chosen_links_df[\"iteration\"] = iteration\n Util.write_dataframe(chosen_links_df, \"chosen_links_df\", os.path.join(output_dir, \"chosenpaths_links.csv\"), append=(iteration>1))\n chosen_links_df.drop([\"iteration\"], axis=1, inplace=True)\n\n chosen_paths_df = Passenger.get_chosen_links(pathset_paths_df)\n chosen_paths_df[\"iteration\"] = iteration\n Util.write_dataframe(chosen_paths_df, \"chosen_paths_df\", os.path.join(output_dir, \"chosenpaths_paths.csv\"), append=(iteration>1))\n chosen_paths_df.drop([\"iteration\"], axis=1, inplace=True)\n\n return (num_passengers_arrived, pathset_paths_df, pathset_links_df)", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n from util import manhattanDistance\n\n def closestFood( position, foodList ):\n if len(foodList) == 0:\n return 0\n foodDistances = []\n for food in foodList:\n foodDistances.append( manhattanDistance( position, food) )\n nearestFoodDist = min(foodDistances)\n bestIndices = [index for index in range(len(foodDistances)) if foodDistances[index] == nearestFoodDist]\n foodClosest = [foodList[index] for index in bestIndices]\n\n\n return min(foodClosest)\n\n\n successorGameState = 
currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newFoodList = newFood.asList()\n if len(newFoodList) == 0:\n return 10000\n curFood = currentGameState.getFood()\n curFoodList = curFood.asList()\n newGhostStates = successorGameState.getGhostStates()\n for ghost in newGhostStates:\n newGhostPos = ghost.getPosition()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n scoreCalc = 0\n powerCapsul = successorGameState.getCapsules()\n if len(powerCapsul) == 0:\n powerCapsul.append(min(newFoodList))\n distToGhost = manhattanDistance(newPos, newGhostPos)\n\n\n distToPower = manhattanDistance(newPos, powerCapsul[0])\n\n\n if distToGhost < 3:\n return 0\n if distToPower == 0:\n return 1000\n if newPos == powerCapsul[0]:\n return 1000\n\n closestFood = closestFood(newPos, curFoodList) #coord tup\n distToFood = manhattanDistance(newPos, closestFood)\n if distToFood == 0:\n scoreCalc += 500\n scoreCalc -= (75 / distToGhost)\n scoreCalc += (50 / distToPower)\n scoreCalc += successorGameState.getScore()\n return scoreCalc\n\n scoreCalc += (50 / distToPower)\n scoreCalc += (100 / distToFood)\n scoreCalc -= (75 / distToGhost)\n scoreCalc += successorGameState.getScore()\n return scoreCalc", "def evaluationFunction(self, currentGameState, action):\n # getting successor GameSate needed info\n successorGameState = currentGameState.generatePacmanSuccessor(action) # successor\n newPos = successorGameState.getPacmanPosition() # Ms. Pacman position\n newFoodGrid = successorGameState.getFood().asList() # positions of remaining food\n newGhostStates = successorGameState.getGhostStates() # ghosts states\n newGhostStates = successorGameState.getGhostStates() # ghosts states\n\n # evaluation function starting value\n value = 0\n\n # banning the stop action due to its penalty for iddling\n if(action == Directions.STOP):\n value -= 5\n\n # FOOD distances\n foodDists = []\n for food in newFoodGrid:\n foodDists.append(manhattanDistance(food,newPos))\n foodDists.sort()\n\n # further the closest food is, lower the value is\n if(len(foodDists) > 0):\n foodMin = foodDists[0]\n else:\n foodMin = float(\"inf\")\n\n # lesser the food on board, smaller the value\n value -= (successorGameState.getNumFood() * 15)\n\n # successors score\n value += successorGameState.getScore() * 15\n\n # ACTIVE+SCARED GHOSTS distances\n # ghost.scaredTimer = number of ghost's steps in scared state (Ms. Pacman ate powerup, ghost is edible)\n ghostDists = []\n for ghost in newGhostStates:\n ghostD = manhattanDistance(ghost.getPosition(),newPos)\n if ghost.scaredTimer - ghostD > 0: # ms. 
pacman is able to catch ghost\n value += ghost.scaredTimer * 10 + ghostD * (-1)\n else:\n ghostDists.append(ghostD)\n\n if(len(ghostDists) > 0):\n ghostDists.sort()\n ghostMin = ghostDists[0] # closer the closest ghost is, lower the value is\n else:\n ghostMin = float(\"-inf\")\n finalValue = value + float(ghostMin)/float(foodMin)\n #print \"value: \",value,\"+ (ghostMin: \",ghostMin,\"/foodMin: \",foodMin,\") = >>>FINALLY: \",finalValue\n return finalValue", "def betterEvaluationFunction(currentGameState):\n pmPos = currentGameState.getPacmanPosition()\n foodGrid = currentGameState.getFood()\n capsulesGrid = currentGameState.getCapsules()\n ghostStates = currentGameState.getGhostStates()\n scaredTimes = [ghostState.scaredTimer for ghostState in ghostStates]\n\n\n #distances\n dist_pm_g = distGhosts(pmPos, ghostStates, currentGameState)\n distFood = distClosestFood(pmPos, foodGrid, currentGameState)\n distCapsule = distClosestCap(pmPos, capsulesGrid, currentGameState)\n\n numfood = numFood(foodGrid)\n score = currentGameState.getScore()\n if (currentGameState.isWin()):\n return score + 1000\n if (currentGameState.isLose()):\n return score - 1000\n if (sum(scaredTimes) > 0):\n # Case 1\n if (dist_pm_g > 0):\n return (score+50) * 1.1 - distCapsule - distFood + 1/dist_pm_g * 1.5\n # Case 2\n else:\n # print(\"case scared 2: \", score * 1.1 + 100)\n return (score+50) * 1/dist_pm_g * 1.5 + 1.1 + 500\n # Case 3\n elif (dist_pm_g > 4 and distFood < 4):\n return score*1.2 - distFood*.7 - distCapsule*.5 - numfood\n # Case 4\n else:\n return score - distFood - distCapsule*.5 + dist_pm_g*.7 - numfood", "def main():\n\n print('Drones capacity = {}'.format(DRONES_CAPACITY))\n\n # Instantiate the data of the problem\n data = create_data_model(MAX_POINT_DEMAND, USE_CACHE)\n\n # Create the routing index manager\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model\n routing = pywrapcp.RoutingModel(manager)\n\n # Defining weights of the edges\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Addding capacity constraints.\n def demand_callback(from_index):\n \"\"\"Returns the demand for tests of the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['demands'][from_node]\n\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n\n def counter_callback(from_index):\n \"\"\"Returns the number of stops done at the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['counter'][from_node]\n\n counter_callback_index = routing.RegisterUnaryTransitCallback(\n counter_callback)\n\n # Limiting the number of tests each drone can carry\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n\n # Limiting the overall number of nodes a drone can serve in one tour\n routing.AddDimensionWithVehicleCapacity(\n counter_callback_index,\n 0, # null capacity slack\n data['vehicle_max_number_of_stops'], # vehicle maximum 
capacities\n True, # start cumul to zero\n 'Counter')\n\n # Setting parameters of the solver\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = HEURISTIC_TIME_LIMIT\n search_parameters.log_search = True\n\n\n print('START SOLVING')\n assignment = routing.SolveWithParameters(search_parameters)\n\n if assignment:\n print_and_save_solution(data, manager, routing, assignment)", "def greedy_path():\n itinerary = []\n cities = all_cities(data_set)\n starting_city = randomize_city_start(cities.keys()) # start from a random city\n # print \"starting_city: %s\" % starting_city\n cities_visited = {}\n \n # iterate through all cities\n count = 1\n while True:\n possible_routes = []\n #distance = []\n # print \"starting_city: %s\" % starting_city\n for path in data_set:\n # we only start with city that we have assigned in starting_city\n if starting_city in path['city_start']:\n # we don't go to cities we have visited\n if path['city_end'] in cities_visited:\n continue\n else:\n # print \"path: \", path\n possible_routes.append(path) # add the city if not in the list\n \n if not possible_routes:\n break\n # append this to itinerary\n route = get_shortest_route(possible_routes)\n count += 1\n itinerary.append(route)\n # add this city to visited_cities list\n cities_visited[route[0]] = count\n starting_city = route[1]\n \n return itinerary", "def topo_efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.topo_shortestpathij(i, j) == None):\n continue\n Temp += 1/self.topo_shortestpathij(i, j)\n \n self.topo_efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):\n \n waypoint_co2 = {}\n waypoint_durations = {}\n\n # get all prefectures referential\n db_connector = Connector()\n with db_connector:\n results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)\n all_waypoints = pd.DataFrame(results.fetchall())\n\n # Vérification si les trajets péfecture à préfecture ont été déjà calculés\n db_connector = Connector()\n with db_connector:\n saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n # Dans le précalcul des trajets optimaux, utilisation de la date courante\n travel_date = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n bad_waypoints = []\n\n if saved_waypoints.rowcount > 0 and not is_force_compute:\n print(\"le référentiel des voyage existe déjà\")\n else:\n try:\n bdd_management.truncate_journey()\n\n for (from_city, to_city) in combinations(all_waypoints[0].values, 2):\n try:\n if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:\n continue\n\n route = requests.get(API_NAVITIA.format(\n int(from_city), int(to_city), travel_date, API_KEY))\n response = json.loads(route.text)\n\n mid_duration = 0\n mid_co2 = 0\n for journey in response[\"journeys\"]:\n mid_duration += journey[\"duration\"]\n mid_co2 += journey[\"co2_emission\"][\"value\"]\n\n waypoint_co2[frozenset([from_city, to_city])\n ] = mid_co2/len(response[\"journeys\"])\n waypoint_durations[frozenset(\n [from_city, to_city])] = mid_duration/len(response[\"journeys\"])\n\n except Exception as e:\n print(\"Error with finding the route between %s and %s : %s\" %\n (from_city, to_city, 
response[\"error\"][\"message\"]))\n if 'no destination point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(to_city))\n\n if 'no origin point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(from_city))\n\n for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response[\"error\"][\"message\"]):\n if not int(bad_insee_code) in bad_waypoints:\n bad_waypoints.append(int(bad_insee_code))\n\n # Enregistrement des trajets point à point (préfecture à préfecture)\n db_connector = Connector()\n with db_connector:\n for (waypoint1, waypoint2) in waypoint_co2.keys():\n waypoint = [waypoint1,\n waypoint2,\n str(waypoint_co2[frozenset([waypoint1, waypoint2])]),\n str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]\n \n db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)\n # commit trajets unitaires dans la bdd\n db_connector.commit()\n\n # enregistrement des préfectures non trouvée (pas de gare)\n print(bad_waypoints)\n db_connector = Connector()\n with db_connector:\n for bad_city in bad_waypoints:\n db_connector.execute_nonquery(\n sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))\n #db_connector.commit()\n except Exception as e:\n print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')\n\n waypoint_co2 = {}\n waypoint_durations = {}\n processed_waypoints = set()\n\n db_connector = Connector()\n with db_connector:\n waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n for row in waypoints:\n waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]\n waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]\n processed_waypoints.update([row[0], row[1]])\n\n travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )\n\n # take most represented trip order\n journey_groups = Counter(chain(*travel_results))\n top_journeys = journey_groups.most_common(1)[0][0]\n\n print('Le voyage le plus représentatif est :')\n print(top_journeys)\n\n # calcul des horaires de voyage réels pour le trajet le plus optimisé\n\n print('Départ du calcul du voyage le %s' %\n (datetime_str_to_datetime_str(trip_start_date)))\n travel_date = trip_start_date\n\n db_connector = Connector()\n with db_connector:\n try:\n #vidage de la table contenant les informations du voyage\n bdd_management.truncate_roadtrip()\n\n for i in range(len(top_journeys)-1):\n try:\n from_city_insee = top_journeys[i]\n to_city_insee = top_journeys[i+1]\n route = requests.get(API_NAVITIA.format(\n int(from_city_insee), int(to_city_insee), travel_date, API_KEY))\n travels = json.loads(route.text)\n\n # Contrôle des voyage reçus pour identifier le plus adapté à recherche\n best_travel = travels[\"journeys\"][0]\n for travel in travels[\"journeys\"]:\n if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):\n best_travel = travel\n if best_travel['arrival_date_time'] > travel['arrival_date_time']:\n best_travel = travel\n\n # sauvegarde du trajet 'i' en base\n save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)\n\n # le prochain trajet devra avoir une date de départ > à la date de ce trajet\n travel_date = best_travel['arrival_date_time']\n\n except Exception as e:\n print(\"!! 
Erreur durant le calcul du trajet entre '%s' et '%s'\" %\n (from_city_insee, to_city_insee))\n\n #Ecriture du résumé du voyage\n resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)\n resume = resume.fetchone()\n\n resume_description = \"\"\"Début du voyage le {} . Arrivée le {}. \n Le voyage à durée {} pour un total de {:d} kgeC\"\"\".format(\n datetime_str_to_datetime_str(trip_start_date),\n datetime_str_to_datetime_str(travel_date),\n str(timedelta(seconds=resume[0])) ,\n trunc( resume[1]/1000))\n\n store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])\n\n db_connector.commit()\n\n except Exception as e:\n db_connector.rollback()\n print('Erreur durant la création du voyage. rollback effectué!!!')\n\n print('print map with road-trip data')\n visualization.generate_visualization()\n\n print('Travel complete. Have nive trip!!!')", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n oldFood = currentGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n oldPellets = currentGameState.getCapsules()\n newPellets = successorGameState.getCapsules()\n\n #moving onto a food pellet is preferable to not moving onto a food pellet\n #moving onto a line of food pellets is even more preferable\n #being far way from a ghost is preferable\n #unless the ghost is scared, then being close to the ghost is preferable because we might eat\n \"*** YOUR CODE HERE ***\"\n directions = [-1, 0, 1]\n foodList = newFood.asList()\n #moving decreases the score by 1\n #eating increase the score by 10\n score = 10 * (oldFood[newPos[0]][newPos[1]] or newFood[newPos[0]][newPos[1]]) #start off with a score of 10 if we just ate a food pellet\n \n closestDist = float('inf')\n for food in foodList:\n dist = util.manhattanDistance(food, newPos)\n if dist < closestDist:\n closestDist = dist\n if closestDist == float('inf'):\n #this means that the move we make eats the last dot\n score = 1000\n else:\n if (10 - closestDist) > 0:\n score += (10 - closestDist)\n\n for ghostState in newGhostStates:\n ghostPos = ghostState.getPosition()\n if ghostPos == newPos:\n score += -1000\n else:\n for dx in directions:\n for dy in directions:\n if (ghostPos[0]+dx, ghostPos[1]+dy) == newPos:\n score += -500\n\n return score + (100 * (len(oldPellets) - len(newPellets)))", "def _optimise(self):\n better = True\n self.solutions = set()\n\n # Rebuild the neighbours\n self.neighbours = {}\n\n for i in self.heuristic_path:\n self.neighbours[i] = []\n\n for j, dist in enumerate(TSP.edges[i]):\n if dist > 0 and j in self.heuristic_path:\n self.neighbours[i].append(j)\n\n # Restart the loop each time we find an improving candidate\n while better:\n better = self.improve()\n # Paths always begin at 0 so this should manage to find duplicate\n # solutions\n self.solutions.add(str(self.heuristic_path))\n\n self.save(self.heuristic_path, self.heuristic_cost)", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, 
self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def plan_path(self, msg):\n # Request the map\n # In case of error, return an empty path\n mapdata = PathPlanner.request_map()\n\n if mapdata is None:\n return Path()\n # Calculate the C-space and publish it\n cspacedata = self.calc_cspace(mapdata, 3)\n # Execute A*\n start = PathPlanner.world_to_grid(mapdata, msg.start.pose.position)\n goal = PathPlanner.world_to_grid(mapdata, msg.goal.pose.position)\n \n path = self.a_star(cspacedata, start, goal) #, self.c_space_array, self.frontier, self.expanded)\n \n # Optimize waypoints\n waypoints = PathPlanner.optimize_path(path)\n # print waypoints\n waypoints.remove(waypoints[0])\n # print waypoints\n\n self.path_pub.publish(self.path_to_message(cspacedata, waypoints))\n # Return a Path message\n return self.path_to_message(cspacedata, waypoints)", "def solve(self, indices_to_visit: List[int] = None) -> Dict[str, Any]:\n if indices_to_visit is None:\n indices_to_visit = list(range(len(self.matrix)))\n \n # make sure home location is in the listed, and that the list is sorted\n if self.home_index not in indices_to_visit:\n indices_to_visit.append(self.home_index)\n indices_to_visit.sort()\n \n data = self._create_data_model(indices_to_visit)\n\n # create routing index manager\n manager = RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['home'])\n\n # create routing model\n routing = RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n # returns distance between two nodes\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n dist = data['distance_matrix'][from_node][to_node]\n\n return dist\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # define cost of each arc\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # set first solution heuristic\n search_params = pywrapcp.DefaultRoutingSearchParameters()\n search_params.first_solution_strategy = (routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # solve problem\n assignment = routing.SolveWithParameters(search_params)\n\n return self._extract_solution(manager, routing, assignment, indices_to_visit)", "def astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = 
tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in 
dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in 
already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n# distance_matrix = compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two 
nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n# if assignment:\n# print_solution(manager, routing, assignment)\n if assignment:\n address1,address2,address3,address4,address5,address6,address7,address8,address9,address10=\\\n set_address_path(manager, routing, assignment,destinations_1)\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def calculate_routes(outposts, vehicles, graph, starting_point=0, **kwargs):\n number_of_vehicles = len(vehicles)\n number_of_nodes = len(outposts) - 1\n\n # Source: https://stackoverflow.com/questions/28965734/general-bars-and-stars\n vehicles_partitions = []\n total_load = outposts.load.sum()\n capacities = list(vehicles.capacity)\n for combination in itertools.combinations(range(number_of_nodes+number_of_vehicles-1), number_of_vehicles-1):\n current_partition = [b-a-1 for a, b in zip((-1,) + combination, combination+(number_of_nodes+number_of_vehicles-1,))]\n current_partition = sorted(current_partition)\n if current_partition not in vehicles_partitions:\n vehicle_presence_vector = [0 if number==0 else 1 for number in current_partition]\n total_capacity = np.dot(vehicle_presence_vector, capacities)\n if total_capacity >= total_load:\n vehicles_partitions.append(current_partition)\n\n dwave_solver = DWaveEngine.default()\n if 'use_capacity_constraints' in kwargs:\n use_capacity_constraints = kwargs['use_capacity_constraints']\n del kwargs['use_capacity_constraints']\n else:\n use_capacity_constraints = True\n print(\"All partitions:\", vehicles_partitions)\n best_solution = None\n for current_partition in vehicles_partitions:\n print(\"Current partition: \", current_partition)\n problem = Problem(vehicles=vehicles,\n outposts=outposts,\n vehicles_partition=current_partition,\n graph=graph,\n starting_point=starting_point,\n use_capacity_constraints=use_capacity_constraints)\n current_solution = dwave_solver.solve(problem)\n if current_solution is None:\n print(\"No valid solutions found with D-Wave\")\n elif best_solution is None:\n best_solution = current_solution\n else:\n current_cost = sum(sub_solution[2] for sub_solution in current_solution)\n best_cost = best_solution.total_cost\n if current_cost < best_cost:\n best_solution = current_solution\n\n if best_solution is None:\n return None\n return best_solution.to_dataframe()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n # Again, we use the fundamental foundation built in Q2 for Q4, however here we modify our minimizer function\n # to serve the purpose of finding the expected value\n actionList = gameState.getLegalActions(0)\n pacmanAgentIndex = 0\n ghostAgentIndices = list(range(1,gameState.getNumAgents())) # List of each agent index for looping\n count = util.Counter()\n agentEnd = gameState.getNumAgents()-1 # Last agent in the list\n def maximizer(curState, 
agentIndex, depth):\n\n ghostActions = curState.getLegalActions(agentIndex)\n maxDepth = self.depth # Quantifying the end of the tree so we know when we reached a leaf node\n weight = -99999999 # Worst case starting value to be changed in the code\n if depth == maxDepth: # If we are at a leaf node\n return self.evaluationFunction(curState) # evaluate the state of this leaf node\n # Otherwise, we progress the tree until the above condition is reached\n if len(ghostActions) != 0:\n for x in ghostActions:\n if weight >= minimizer(curState.generateSuccessor(agentIndex, x), agentIndex + 1, depth):\n weight = weight\n else:\n weight = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex + 1, depth)\n return weight\n else:\n # if there are no legal actions left then evaluate at the last known state\n return self.evaluationFunction(curState)\n\n def minimizer(curState, agentIndex, depth):\n ghostActions = curState.getLegalActions(agentIndex)\n weight = 0 # Starting value of zero to be incremented below\n if len(ghostActions) != 0:\n if agentIndex == agentEnd: # If we've reached the last ghost, we maximise\n for x in ghostActions: # For each legal action in the current position\n temp = (float(1.0) / len(ghostActions))*maximizer(curState.generateSuccessor(agentIndex, x), pacmanAgentIndex, depth+1)\n weight = weight + temp\n else: # Otherwise, we continue to minimize\n for x in ghostActions: # For each legal action in the current position\n temp = (float(1.0) / len(ghostActions))*minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, depth)\n weight = weight + temp\n return weight\n else:\n # if there are no legal actions left then evaluate at the last known state\n return self.evaluationFunction(curState)\n\n # Executing the minimizer for all possible actions\n for x in actionList:\n tempState = gameState.generateSuccessor(pacmanAgentIndex,x)\n count[x] = minimizer(tempState,1,0)\n # print('HELLO THERE')\n # print(count)\n return count.argMax()", "def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n 
veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, route_way_time", "def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)", "def aStarSearch(problem, heuristic=myHeuristic):\n\n #frontier = util.PriorityQueue()\n #startState = problem.getStartState()\n #startNode = (startState, ['East'], 0)\n #frontier.push(startNode, 0)\n\n #currentState, actions, currentCost = frontier.pop()\n #return ['West','West', 'West','West','South','South','East', 'South','South','West','West']\n\n fronteira = util.PriorityQueue()\n\n nohExplorado = [] #(state, cost)\n\n startState = problem.getStartState()\n nohInicial = (startState, [], 0) #(state, action, cost)\n\n fronteira.push(nohInicial, 0)\n\n while not fronteira.isEmpty():\n\n #pega o Noh de menor \"custo\" na fila\n curEstado, todasAcoes, curCusto = fronteira.pop()\n\n #Coloca Noh atual na lista de explorados\n nohAtual = (curEstado, curCusto)\n nohExplorado.append((curEstado, curCusto))\n\n if problem.isGoalState(curEstado):\n #print(todasAcoes)\n return todasAcoes\n\n else:\n #Lista de Sucessores (successor, action, stepCost) e examina cada um\n sucessores = problem.getSuccessors(curEstado)\n for sucEstado, sucAcao, sucCusto in sucessores:\n novaAcao = todasAcoes + [sucAcao]\n novoCusto = problem.getCostOfActions(novaAcao)\n novoNoh = (sucEstado, novaAcao, novoCusto)\n\n #Checa se o sucessor jah foi visitado\n jah_foi_explorado = False\n for explorado in nohExplorado:\n exEstado, exCusto = explorado\n if (sucEstado == exEstado) and (novoCusto >= exCusto):\n jah_foi_explorado = True\n\n #Se nao foi explorado, coloca na fronteira\n if not jah_foi_explorado:\n fronteira.push(novoNoh, novoCusto + heuristic(sucEstado, problem))\n\n\n return todasAcoes", "def computeForces(self, neighbors=[]): #computing forces to drive the agents and avoid collisions \n if not self.atGoal:\n if self.entry_state % 2 == 0 and len(self.entrancex) > 0 and self.id != 4 : #checks if assigned curve is entry and switches to state 1 to follow entry bezier curve\n time2=0.5 # time used to calculate driving force \n self.local_goal = [self.entrancex[0], self.entrancey[0]] #assigning waypoint as goal\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2))) #calculating direction vector\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez) #driving force\n self.entrancex = np.delete(self.entrancex,0) #eliminating the used waypoints from the list \n self.entrancey = np.delete(self.entrancey,0) #eliminating the used waypoints from the list \n \n elif self.force_state == 1 and (abs(self.pos[0] - self.goal[0]) >400 or abs(self.pos[1] - self.goal[1]) >400): #checks if 
force-based navigation is assigned, switches to state 2\n self.F = (self.gvel-self.vel)/self.ksi #driving force\n for neighbor in neighbors:\n if neighbor.id != self.id: #and not neighbor.atGoal: \n distSq = (neighbor.pos-self.pos).dot(neighbor.pos-self.pos)\n #print(distSq, self.dhorSq)\n if distSq < self.dhorSq: # neighbor is inside the sensing radius\n tau = self.ttc(neighbor)\n #print(tau, self.timehor)\n if tau < self.timehor: # will the two agents collide in less than timehor?\n dir = self.pos + self.vel*tau - neighbor.pos - neighbor.vel*tau \n length = sqrt(dir.dot(dir))\n if length > 0:\n dir = dir/length # the direction of the force\n mag = (self.timehor - tau)/(tau + 1e-6) # the magnitude of the force\n self.F += mag*dir # add the force\n \n else: #state 3 - following the exit bezier curve\n time2=0.5 # time used to calculate driving force\n self.local_goal = [self.exitx[0], self.exity[0]]\n if abs(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos)))) >10: #to reach first point of exit curve from agents previous state position\n self.F = ((self.local_goal - self.pos)/(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos) )))*self.prefspeed)/self.ksi\n else:\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2)))\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez)\n #print(self.pos, self.local_goal)\n if len(self.exitx) > 1 :\n self.exitx = np.delete(self.exitx,0)\n self.exity = np.delete(self.exity,0)", "def path_cost(self, c, state1, action, state2):\n\t\treturn c + 1", "def betterEvaluationFunction(gameState):\n\n score = gameState.getScore()\n food_grid = gameState.getFood()\n longest_road = food_grid.height + food_grid.width # longest manheten distance.\n score -= longest_road * len(gameState.getCapsules()) # giving the number of pills left some values.\n num_food = gameState.getNumFood()\n\n #Calculating the closest capsule distance\n capsules_distances = [util.manhattanDistance(gameState.getPacmanPosition(), capsule) for capsule in\n gameState.getCapsules()]\n closest_capsule_dist = 1\n if len(capsules_distances) > 0:\n closest_capsule_dist = min(capsules_distances)\n capsules = gameState.getCapsules()\n capsule_value = closest_capsule_dist\n\n #Calculating ghosts distances and if we are chasing them or they're chasing us\n scared_value = 0\n ghost_distance = 0\n num_of_ghosts = len(gameState.getGhostStates())\n for ghost_state in gameState.getGhostStates():\n if ghost_state.scaredTimer > 0:\n scared_value = util.manhattanDistance(gameState.getPacmanPosition(), ghost_state.configuration.pos)\n else:\n curr_ghost_distance = util.manhattanDistance(gameState.getPacmanPosition(), ghost_state.configuration.pos)\n if curr_ghost_distance <= 1:\n return -100000000\n ghost_distance += curr_ghost_distance\n if num_of_ghosts == 0:\n ghost_distance /= 1\n\n #Calculating the distances to all food.\n food_distances = []\n food_grid = gameState.getFood()\n for x in range(food_grid.width):\n for y in range(food_grid.height):\n if food_grid[x][y] is True:\n food_distances.append(util.manhattanDistance(gameState.getPacmanPosition(), (x, y)))\n\n #Calcukating the closest food distance(top 3 if available)\n closest_food_list = []\n closest_food_value = 0\n total_food_dist = 0\n if (num_food > 0):\n if num_food <= 2:\n closest_food_value = min(food_distances)\n else:\n for _ in range(3):\n if len(food_distances) != 
0:\n closest_food_list.append(min(food_distances))\n food_distances.remove(closest_food_list[-1])\n closest_food_value = random.choice(closest_food_list)\n total_food_dist = sum(food_distances) / num_food\n\n #Giving more and less value to some of the parameters\n N_score = 1000000\n N_scared = 50\n if (num_food >= 0.3 * food_grid.width * food_grid.height):\n N_capsules = 5 # if food is more than 30%+- then chase capsules more\n else:\n N_capsules = 20\n N_closest_food = 12\n N_total_food = 5\n N_ghosts = 1\n return N_score * (score) ** 3 - N_capsules * capsule_value - N_scared * (scared_value) ** 2 - N_closest_food * (\n closest_food_value) ** 2 - N_total_food * (total_food_dist) + N_ghosts * (ghost_distance) ** 2", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n pacman_pos = successorGameState.getPacmanPosition()\n food = successorGameState.getFood()\n old_food = currentGameState.getFood()\n capsules = successorGameState.getCapsules()\n old_capsules = currentGameState.getCapsules()\n ghost_states = successorGameState.getGhostStates()\n old_ghost_states = currentGameState.getGhostStates()\n scared_times = [g.scaredTimer for g in ghost_states]\n old_scared_times = [g.scaredTimer for g in old_ghost_states]\n\n food_score = sum([f for row in food for f in row])\n old_food_score = sum([f for row in old_food for f in row])\n capsule_score = len(capsules)\n old_capsule_score = 
len(old_capsules)\n food_distance = [manhattanDistance(pacman_pos, (i, j))\n for i, row in enumerate(food)\n for j, f in enumerate(row) if f]\n food_distance = min(food_distance) if food_distance else 0\n food_distance = -0.01 if old_food_score > food_score else food_distance\n capsule_distance = [manhattanDistance(pacman_pos, c)\n for c in capsules]\n capsule_distance = min(capsule_distance) if capsule_distance else 0\n capsule_distance = -0.01 if old_capsule_score > capsule_score else capsule_distance\n action_score = (action == 'Stop') * 0.01\n\n enemy_dist = [manhattanDistance(pacman_pos, g.configuration.pos)\n for g in ghost_states]\n for i, d in enumerate(enemy_dist):\n d = d if d != 0 else 0.00001\n d = 1 / d if d < 5 else 0\n d = 2 * d if scared_times[i] == 0 else -d\n if old_scared_times[i] - scared_times[i] > 1:\n d = -10\n enemy_dist[i] = d\n enemy_dist = sum(enemy_dist)\n\n score = food_distance + enemy_dist * 5 + \\\n capsule_distance + action_score\n # print(action, food_distance, capsule_distance, enemy_dist,\n # action_score, '--', score)\n return -score", "def solve(customerCount, vehicleCount, vehicleCapacity, depotIndex, customers):\n \n N, locations, locations_r, distances, closest = precalculate(customers)\n \n #print locations\n #print locations_r\n angle_order = range(1, N)\n angle_order.sort(key=lambda i: (locations_r[i, 1], locations_r[i, 0])) \n \n vehicleTours = best_order(customerCount, customers, vehicleCount, vehicleCapacity, angle_order)\n if not vehicleTours:\n vehicleTours = solve0(customerCount, vehicleCount, vehicleCapacity, depotIndex, customers)\n check(customerCount, customers, vehicleCapacity, vehicleTours)\n vehicleTours = get_shortest_paths('file_path XXX', customers, depotIndex, vehicleTours)\n check(customerCount, customers, vehicleCapacity, vehicleTours)\n \n vehicleTours0 = copy.deepcopy(vehicleTours)\n dist0 = total_dist(customers, depotIndex, vehicleTours)\n if False:\n for _ in range(100):\n vehicleTours = copy.deepcopy(vehicleTours0) \n adjust_tours(customers, vehicleCapacity, vehicleCount, vehicleTours)\n vehicleTours = get_shortest_paths('file_path XXX', customers, depotIndex, vehicleTours)\n #check(customerCount, customers, vehicleCapacity, vehicleTours)\n if not is_valid(customerCount, customers, vehicleCapacity, vehicleTours):\n continue\n dist = total_dist(customers, depotIndex, vehicleTours)\n if dist < dist0:\n print '%s => %s' % (dist0, dist)\n vehicleTours0 = vehicleTours[:]\n dist0 = dist\n \n \n vehicleTours = copy.deepcopy(vehicleTours0) \n check(customerCount, customers, vehicleCapacity, vehicleTours)\n while len(vehicleTours) < vehicleCount:\n vehicleTours.append([])\n \n print '*', vehicleTours \n \n return vehicleTours", "def betterEvaluationFunction(currentGameState):\n pos = currentGameState.getPacmanPosition() # current position\n food = currentGameState.getFood() # food grid\n legalMoves = currentGameState.getLegalActions() # possible moves\n successors = [currentGameState.generatePacmanSuccessor(action) for action in legalMoves]\n successorPos = [currentGameState.generatePacmanSuccessor(action).getPacmanPosition() for action in legalMoves] # position of possible next state\n\n sFood = [s.getNumFood() for s in successors]\n if sFood:\n avgFood = currentGameState.getNumFood() - float(sum(sFood))/len(sFood)\n else:\n avgFood = 0\n\n numFood = 0\n for s in successorPos:\n if food[s[0]][s[1]]:\n numFood += 1\n # counts food pellets around current position\n\n pellets = currentGameState.getCapsules() # positions of power 
pellets\n if pellets:\n pelletDistance = [util.manhattanDistance(pos, d) for d in pellets]\n closestPellet = min(pelletDistance)\n else:\n closestPellet = 0\n\n minDist = 9999\n total = 0\n n = 0\n for x in range(food.width):\n for y in range(food.height):\n if food[x][y]:\n dist = util.manhattanDistance((x,y), pos)\n total += dist\n n += 1\n if dist < minDist: minDist = dist # returns distance to closest food, want to min\n if n != 0:\n avgDist = total/n\n else:\n avgDist = 0\n\n newGhostStates = currentGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n newGhostPositions = currentGameState.getGhostPositions() # list of ghost positions\n disList = [util.manhattanDistance((x,y), pos) for x,y in newGhostPositions]\n ghostHeuristic = min(disList) # returns distance from closest ghost, want to max\n\n score = currentGameState.getScore()\n numMoves = len(legalMoves) # number of available moves\n if currentGameState.getNumFood() == 0:\n return 999 + score\n if ghostHeuristic == 0:\n return -999\n if newScaredTimes[0] > ghostHeuristic + 1: # if ate pellet, chase ghost\n return score + newScaredTimes[0] - ghostHeuristic\n if ghostHeuristic > 4: # if far away from ghost\n return score + avgFood*20 + numMoves + numFood - minDist*4 - closestPellet/2 # 10/(minDist)\n return score + ghostHeuristic**2 + numMoves/2 + avgFood*10 + numFood/2 - minDist - closestPellet/4", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n locations = student_utils.convert_locations_to_indices(list_of_locations, list_of_locations)\n homes = student_utils.convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n\n start_time = time.time()\n\n if params[0] == 'naive':\n car_path, drop_off = naive_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy':\n car_path, drop_off = greedy_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'three_opt':\n car_path, drop_off = three_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'ant_colony':\n car_path, drop_off = ant_colony(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_three_opt':\n car_path, drop_off = greedy_clustering_three_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'mst':\n car_path, drop_off = mst_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'two_opt':\n car_path, drop_off = two_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_two_opt':\n car_path, drop_off = greedy_clustering_two_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n else:\n pass", "def evaluationFunction(self, currentGameState, 
action):\n\t\t# Useful information you can extract from a GameState (pacman.py)\n\t\tsuccessor_game_state = currentGameState.generatePacmanSuccessor(action)\n\t\tnext_pacman_pos = successor_game_state.getPacmanPosition()\n\t\tnext_food_list = successor_game_state.getFood().asList()\n\t\tnext_ghost_states = successor_game_state.getGhostStates()\n\t\tnext_scared_times = [ghostState.scaredTimer for ghostState in next_ghost_states]\n\n\t\t\"*** YOUR CODE HERE ***\"\n\t\tres = successor_game_state.getScore()\n\t\tmin_ghost_dist = min([manhattanDistance(ghost_state.getPosition(), next_pacman_pos) for ghost_state in next_ghost_states])\n\t\tmax_ghost_dist = max([manhattanDistance(ghost_state.getPosition(), next_pacman_pos) for ghost_state in next_ghost_states])\n\t\tif min(next_scared_times) > 2:\n\t\t\tif min_ghost_dist <= 1:\n\t\t\t\treturn CONST_MAX\n\t\t\tif len(next_food_list) == 0:\n\t\t\t\treturn res\n\t\t\tbonus = max([manhattanDistance(food, next_pacman_pos) / max_ghost_dist for food in next_food_list])\n\t\t\tres = res + bonus\n\t\telse:\n\t\t\tif min_ghost_dist <= 1:\n\t\t\t\treturn -CONST_MAX\n\t\t\tif len(next_food_list) == 0:\n\t\t\t\treturn res\n\t\t\tbonus = max([min_ghost_dist / manhattanDistance(food, next_pacman_pos) for food in next_food_list])\n\t\t\tres = res + bonus\n\t\treturn res\n\t\t# return successorGameState.getScore()", "def obtain_results(self,assignment,show=False):\n\t\t# logger.debug('Objective: {} meters'.format(assignment.ObjectiveValue()))\n\t\tindex=self.routing.Start(0)\n\t\tplan_output='Route for vehicle 0:\\n'\n\t\troute_distance=0\n\t\tnodes=list()\n\t\twhile True:\n\t\t\tnode=self.manager.IndexToNode(index)\n\t\t\tnodes.append(node)\n\t\t\tplan_output+=' {} ->'.format(node)\n\t\t\tprevious_index=index\n\t\t\tif self.routing.IsEnd(index): break\n\t\t\tindex=assignment.Value(self.routing.NextVar(index))\n\t\t\troute_distance+=self.routing.GetArcCostForVehicle(previous_index,index,0)\n\t\t# plan_output+=' {}\\n'.format(manager.IndexToNode(index))\n\t\t# plan_output+='Route distance: {} meters\\n'.format(route_distance)\n\t\t# logger.debug('plan_output:\\n%s',plan_output)\n\t\t# logger.debug('nodes:\\n%s',nodes)\n\t\tif show:\n\t\t\tplt.scatter(self.city_coors[:,0],self.city_coors[:,1])\n\t\t\t# plt.scatter(*depot_point)\n\t\t\tplt.plot(self.city_coors[nodes][:,0],self.city_coors[nodes][:,1])\n\t\t\tplt.show()\n\t\treturn nodes", "def calculate_path(self):\n\n mid_states = []\n\n # Add in between states\n for i in range(Constants.NUMBER_LAPS):\n mid_states = mid_states + Constants.LAP_STATES\n\n # Concatenate beginning, middle and end states to obtain full path of states\n self.path_states = Constants.BEGINNING_STATES + mid_states + Constants.END_STATES\n\n # Determine the amount of times that the smallbot will drive forward during the path\n self.times_driven_forward = self.path_states.count('CREEP_FORWARD')\n\n print(\"Calculated path: \", self.path_states)", "def algorithm(self):\n t = time.clock()\n self.calculateFirstPath()\n improve = True\n while improve and (self.allowedTime > (time.clock() - t)):\n improve = False\n\n for i in range(self.NB_OF_NODES):\n for j in range(self.NB_OF_NODES):\n if j in [(i - 1) % self.NB_OF_NODES, i, (i + 1) % self.NB_OF_NODES]:\n continue\n\n if self.getDistance(i, i + 1) + self.getDistance(j, j + 1) > self.getDistance(i, j) + self.getDistance(i + 1, j + 1):\n self.exchange(i, j)\n improve = True", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in 
permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def two_opt_annealing(T, scheme, route, adjacency_matrix, max_chain_length, c):\n best = route.copy()\n cost_list, T_list = [], []\n accept_list = [[],[]]\n\n chains, iterations = 0, 0\n T_0 = T\n iterations = 0\n\n while T > 0:\n for i in range(1, len(route) - 2):\n # test\n # Adjust temperature\n if scheme == \"exp\":\n T = T*c\n if scheme == \"log\":\n alpha = 50\n T = T_0/(1+alpha*np.log(1+iterations))\n # if scheme == \"std\":\n # delta = .01\n # T = T / (1 + ((np.log(1+delta)* T) / (3 * sd0)))\n if scheme == \"quad\":\n alpha = 1\n T = T_0/(1+alpha*iterations**2)\n\n iterations += 1\n \n for j in range(i + 1, len(route)):\n chains += 1\n\n if j - i == 1: continue\n\n cost_list.append(calculate_cost(best,adjacency_matrix)[1])\n T_list.append(T)\n\n if cost_change(adjacency_matrix, best[i - 1], best[i], \\\n best[j - 1], best[j]) < 0:\n best[i:j] = best[j - 1:i - 1:-1]\n else:\n temp = best.copy()\n sd0, cost0 = calculate_cost(temp,adjacency_matrix)\n temp[i:j] = temp[j - 1:i - 1:-1]\n _, cost1 = calculate_cost(temp,adjacency_matrix)\n\n U = rs.uniform()\n Z = np.exp((cost0-cost1)/T)\n\n if U < Z:\n accept_list[1].append(Z)\n accept_list[0].insert(0,T)\n\n best[i:j] = best[j - 1:i - 1:-1]\n\n if chains > max_chain_length:\n print(\"End by chainlength: \",chains,\"T =\",T,\"Cost:\",cost0)\n print(\"route\",best)\n return best, cost_list, accept_list\n\n route = best.copy()\n print(\"End by T: \",T,\"Chains: \",chains,\"Cost:\",cost0)\n print(\"route\",best)\n return best, cost_list, accept_list", "def pathways(self) -> str:\n return self._pathways", "def betterEvaluationFunction(currentGameState):\n pos = currentGameState.getPacmanPosition()\n foodList = currentGameState.getFood().asList()\n ghostStates = currentGameState.getGhostStates()\n scaredTimes = [ghostState.scaredTimer for ghostState in ghostStates]\n pellets = currentGameState.getCapsules()\n #moving onto a food pellet is preferable to not moving onto a food pellet\n #moving onto a line of food pellets is even more preferable\n #being far way from a ghost is preferable\n #unless the ghost is scared, then being close to the ghost is preferable because we might eat\n \"*** YOUR CODE HERE ***\"\n directions = [-1, 0, 1]\n #moving decreases the score by 1\n #eating increase the score by 10\n # score = 10 * (oldFood[newPos[0]][newPos[1]] or newFood[newPos[0]][newPos[1]]) #start off with a score of 10 if we just ate a food pellet\n if currentGameState.isWin():\n return float('inf')\n elif currentGameState.isLose():\n return float('-inf')\n\n closestDist = float('inf')\n score = currentGameState.getScore()\n for food in foodList:\n dist = util.manhattanDistance(food, pos)\n if dist < closestDist:\n closestDist = dist\n if (10 - closestDist) > 0:\n score += (10 - closestDist)\n\n for ghostState in ghostStates:\n ghostPos = ghostState.getPosition()\n for dx in directions:\n for dy in directions:\n if (ghostPos[0]+dx, ghostPos[1]+dy) == pos:\n if ghostState.scaredTimer == 0:\n score += -500\n else:\n score += 500 \n\n score -= 100 * len(pellets)\n return score\n util.raiseNotDefined()", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def evaluationFunction(self, currentGameState, action):\n # Useful 
information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n pos = currentGameState.getPacmanPosition()\n newPos = successorGameState.getPacmanPosition()\n food = currentGameState.getFood()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n minDist = 9999\n for x in range(newFood.width):\n for y in range(newFood.height):\n if newFood[x][y]:\n dist = util.manhattanDistance((x,y), newPos)\n if dist < minDist: minDist = dist # returns distance to closest food, want to min\n # total distance from food, want to min\n newGhostPositions = successorGameState.getGhostPositions() # list of ghost positions\n disList = [util.manhattanDistance((x,y), newPos) for x,y in newGhostPositions]\n ghostHeuristic = min(disList) # returns distance from closest ghost, want to max\n\n handicap = 0\n if newScaredTimes[0] > ghostHeuristic + 1: # if ate pellet, chase ghost\n return newScaredTimes[0]-ghostHeuristic\n if newPos == pos:\n handicap = -40\n if food[newPos[0]][newPos[1]] == True:\n handicap += 60\n if ghostHeuristic > 4: # if far away from ghost\n return 10/(minDist) + handicap # - min **2\n return ghostHeuristic**2", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [\n ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n # print \"newScaredTimes\", newScaredTimes\n # print successorGameState.getCapsules()\n\n newGhostPos = newGhostStates[0].getPosition()\n ghost_dist = ghost_distance(newPos, newGhostPos)\n capsules = successorGameState.getCapsules()\n # food_dist = food_distance(newPos, newFood)\n\n # approach 1: 2/4 win = 10, average < 500\n # if ghost_dist <= 1:\n # return -999999\n # return -food_num(newFood)\n\n # approach 2: 2/4 win = 10, average < 500 but close to 500\n # if newScaredTimes[0] == 0:\n # if ghost_dist <= 1:\n # return -999999\n # return -food_num(newFood) -capsule_distance(newPos, capsules)\n\n # final approach: 4/4 win = 10, average = 1310.5\n if newScaredTimes[0] == 0:\n if ghost_dist <= 1:\n return -999999\n return -food_distance(newPos, newFood) * .01 - food_num(newFood) - capsule_distance(newPos, capsules)", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n \n #find the closest food\n foodDistance = float(\"inf\") #set food distance as maximum value\n for food in newFood.asList():\n \n #get distance from current pacman position to food using manhattan distance\n distance = util.manhattanDistance(newPos, food)\n \n #find the closest food distance\n if distance <= foodDistance:\n foodDistance = distance\n \n ghostDistance = 1\n nearbyDistance = 0\n \n #find the distance from current pacman position to ghosts using manhattan distance\n for ghost in 
successorGameState.getGhostPositions():\n distance = util.manhattanDistance(newPos, ghost)\n \n #if the ghost is close to pacman increase the nearby distance\n if distance <= 1:\n nearbyDistance += distance\n \n #add the distance to the ghost distance\n ghostDistance += distance\n \n #subtract the nearby distance from the total ghost distance\n ghostDistance = ghostDistance - nearbyDistance\n \n #use the reciprocal values of the food and ghost distances\n updatedScore = (1/foodDistance - 1/ghostDistance)\n \n #add updated score to the successor game state score\n return successorGameState.getScore() + updatedScore", "def evaluate(self):\r\n\r\n fitness = 0\r\n\r\n for i in range(len(self.representation)):\r\n # Calculates full distance, including from last city\r\n # to first, to terminate the trip\r\n fitness += distance_matrix[self.representation[i - 1]][self.representation[i]]\r\n\r\n return int(fitness)", "def Optimum_prun_based_routing(self, S, D, L):\n if self.has_path(S, D):\n \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n if path_cost <= L:\n \"\"\"go to concave cost\"\"\"\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n self.G = self.rm_edge_constraint(PathConcave_cost) # remove all links where the concave link is greater than PathConcave_cost\n \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n PathConcave_cost = 0\n Opt_path = []\n return PathConcave_cost, Opt_path", "def evaluationFunction(self, currentGameState, action):\r\n # Useful information you can extract from a GameState (pacman.py)\r\n successorGameState = currentGameState.generatePacmanSuccessor(action)\r\n newPos = successorGameState.getPacmanPosition()\r\n newFood = successorGameState.getFood()\r\n newGhostStates = successorGameState.getGhostStates()\r\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\r\n\r\n \"*** YOUR CODE HERE ***\"\r\n #evaluation function - assigning weights to important aspects of the game state(good food, ghosts, high scaredTimes of ghosts)\r\n #retVal - value to be returned after calculating the weights\r\n retVal = 0\r\n \r\n #Getting the distance of pacman from food\r\n foodDist = 'Inf'\r\n for i in newFood.asList():\r\n #Need to get the closest food \r\n foodDist = min(foodDist, manhattanDistance(i, newPos))\r\n \r\n #weight for min food dist\r\n if(foodDist != 0):\r\n retVal += 1.0/(1000*float(foodDist))\r\n else:\r\n retVal = 0\r\n \r\n #Getting the distance of pacman from ghosts\r\n ghostDist = 0\r\n for j in newGhostStates:\r\n #max dist from ghosts\r\n ghostDist = max(ghostDist, manhattanDistance(j.getPosition(), newPos))\r\n #min dist from ghosts\r\n ghostMinDist = min('Inf', manhattanDistance(j.getPosition(), newPos))\r\n \r\n #weight for min dist from ghost\r\n if ghostMinDist < 2:\r\n retVal -= 1000\r\n \r\n #Getting the scaredTimes of the ghosts and adding weights\r\n for k in newScaredTimes:\r\n retVal += k \r\n \r\n #Final retVal\r\n retVal = retVal + successorGameState.getScore()\r\n \r\n return retVal", "def get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths,\n source_in_clusters = False, christofides = False):\n \n if 
source_in_clusters:\n add_vertex_to_clusters(home_clusters,source)\n\n dropoff_vertices = get_dropoff_vertices_efficient(graph, home_clusters, all_pairs_distances)\n\n # Add the source to the dropoff vertices\n dropoff_vertices.append(source)\n # Get rid of any repeating entries in the dropoff vertices\n dropoff_vertices = list(set(dropoff_vertices))\n # Construct the fully connected sub-graph with the dropoff vertices\n # on which TSP is computed\n dropoff_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,dropoff_vertices,all_pairs_distances)\n \n if christofides:\n tsp_route = tsp_routines.metric_christofides_tsp(dropoff_subgraph,source)\n else:\n tsp_route = tsp_routines.metric_mst_tsp(dropoff_subgraph,source)\n\n final_path = tsp_routines.tsp_solution_to_path(graph,tsp_route,all_pairs_shortest_paths)\n return final_path", "def evaluate(self, gstate: gamestate.Gamestate, move: util.Move):\n closed_set = []\n open_set = [gstate]\n came_from = parent\n gScore = float(\"inf\")\n gScore[0] = 0\n fScore = float(\"inf\")\n fScore[0] = heurtistic(gstate.pacman, gstate.win)\n while open_set:\n current = min(fScore)\n if current == gstate.win:\n return path(came_from, current)\n\n open_set.remove(current)\n closed_set.append(current)\n for neighbor in current:\n if neighbor in closed_set:\n continue\n if neighbor not in open_set:\n open_set.append(neighbor)\n tentative_gScore = gScore[current] + util.manhattan(current, neighbor)\n if tentative_gScore >= gScore[neighbor]:\n continue\n came_from[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + heuristic(neighbor, goal)\n return failure", "def cost(self):\n lg = len(self.guarantees.cnf)\n la = len(self.assumptions.cnf)\n\n \"\"\"heuristic\n Low: guarantees while assuming little (assumption set is bigger)\n High: guarantees while assuming a lot (assumption set is smaller)\"\"\"\n\n return la / lg", "def action(self):\n\n self.start_timer()\n\n minimax_probability = self.norm.cdf(self.root.branching)\n use_minimax = boolean_from_probability(minimax_probability)\n if self.time_consumed > 53:\n # Time is starting to run low, use the faster option\n use_minimax=True\n\n if self.time_consumed < 59:\n if self.root.turn < 4:\n result = book_first_four_moves(self.root)\n elif use_minimax:\n result = minimax_paranoid_reduction(self.root)\n else:\n result = monte_carlo_tree_search(\n self.root,\n playout_amount=3,\n node_cutoff=4,\n outer_cutoff=4,\n num_iterations=1200,\n turn_time=0.75,\n exploration_constant=1.7,\n use_slow_culling=False,\n verbosity=0,\n use_prior=True,\n num_priors=4,\n use_fast_prune_eval=False,\n use_fast_rollout_eval=False,\n )\n else:\n result = greedy_choose(self.root)\n\n self.end_timer()\n\n return result", "def compute_social_welfare(self):\r\n #self.social_welfare = 0\r\n #for i in range(self.num_routes):\r\n # self.social_welfare += self.routes[i].flow * self.routes[i].get_route_utility()\r", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n \n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n GhostLocs = currentGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = currentGameState.getCapsules()\n Hueristic = 0.0\n \n if currentGameState.isWin():\n return 10000\n if currentGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n 
for food in foodLocations:\n FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)]\n\n if newPos in currentGameState.getCapsules():\n capsule = 100\n else: \n capsule = 0\n \n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 4:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*50\n else:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n return Hueristic", "def edge_direction_evaluation(direction):\n results = {'_edge-direction':direction}\n\n print '------ CLASSIFICATION EVALUATION --------'\n\n print '> Reading cases..'\n descriptions_path = '../data/tasa/TASA900_dependencies'\n texts, labels = data.read_files(descriptions_path)\n\n print '> Creating representations..'\n rep = []\n for i, text in enumerate(texts):\n if i%100==0: print ' ',str(i)+'/'+str(len(texts))\n g = graph_representation.construct_dependency_network(text, direction=direction)\n metric = graph.GraphMetrics.CLOSENESS\n d = graph_representation.graph_to_dict(g, metric)\n rep.append(d)\n g = None # just to make sure..\n rep = graph_representation.dicts_to_vectors(rep)\n\n print '> Evaluating..'\n score = evaluation.evaluate_classification(rep, labels)\n print ' score:', score\n results['classification'] = score\n\n print '------ RETRIEVAL EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/air/problem_descriptions_dependencies'\n description_texts, labels = data.read_files(descriptions_path)\n solutions_path = '../data/air/solutions_preprocessed'\n solution_texts, labels = data.read_files(solutions_path)\n solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)\n\n print '> Creating representations..'\n rep = []\n for i, text in enumerate(description_texts):\n if i%100==0: print ' ',str(i)+'/'+str(len(description_texts))\n g = graph_representation.construct_dependency_network(text, direction=direction)\n metric = graph.GraphMetrics.EIGENVECTOR\n d = graph_representation.graph_to_dict(g, metric)\n rep.append(d)\n g = None # just to make sure..\n rep = graph_representation.dicts_to_vectors(rep)\n\n print '> Evaluating..'\n score = evaluation.evaluate_retrieval(rep, solution_vectors)\n print ' score:', score\n results['retrieval'] = score\n\n data.pickle_to_file(results, 'output/dependencies/stop_words_retr_'+direction)\n\n pp.pprint(results)\n return results", "def decision_process(self) -> None:\n # order routes by preference\n self.adj_rib_in.preference_ordering()\n # for each route insert the best in the loc_rib\n for destination in self.adj_rib_in:\n best_route = destination[0]\n # if there as been a change insert the new route in the adj-rib-out\n old_best = None\n if self.loc_rib.exists(best_route):\n old_best = self.loc_rib[best_route]\n if self.loc_rib.insert(best_route) is not None:\n for neigh in self.nodes_rib_out:\n # Case 1, the RIB out doesn't contains a route for the destination\n if not self.nodes_rib_out[neigh].exists(best_route):\n # 
Insert the new best as an Advertisement\n self.nodes_rib_out[neigh].insert(best_route)\n # If the Old best is not none insert it as a withdraw\n if old_best is not None and \\\n not self.nodes_rib_out[neigh].exists_withdraws(best_route):\n self.nodes_rib_out[neigh].insert_withdraw(old_best)\n # Case 2, The Rib contains a Route for the detination\n else:\n # Remove the route from the advertisements\n self.nodes_rib_out[neigh].remove(old_best)\n if len(self.nodes_rib_out[neigh][old_best]) == 0:\n del self.nodes_rib_out[neigh][old_best]\n # If the route in the withdraws is equal to the new best don't do anything\n # Otherwise insert the new route as an advertisement\n if self.nodes_rib_out[neigh].exists_withdraws(best_route) and \\\n best_route in self.nodes_rib_out[neigh].get_withdraws(best_route):\n self.nodes_rib_out[neigh].remove_from_withdraws(best_route)\n else:\n self.nodes_rib_out[neigh].insert(best_route)\n # Evaluation if something has to be removed from the LOC rib and withdrawd\n for destination in self.loc_rib:\n if not self.adj_rib_in.exists(destination):\n del self.loc_rib[destination]\n for neigh in self.nodes_rib_out:\n # if self.nodes_rib_out[neigh].exists(destination):\n # del self.nodes_rib_out[neigh][destination]\n self.nodes_rib_out[neigh].insert_withdraw(destination)", "def print_solution(self):\n print(f'Objective: {self.solution.ObjectiveValue()}')\n total_distance = 0\n total_load = 0\n max_route_distance = 0\n for vehicle_id in range(self.data['num_vehicles']):\n index = self.routingManager.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not self.routingManager.IsEnd(index):\n node_index = self.manager.IndexToNode(index)\n route_load += self.data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(self.data['names'][node_index], route_load)\n\n previous_index = index\n index = self.solution.Value(self.routingManager.NextVar(index))\n route_distance += self.routingManager.GetArcCostForVehicle(\n previous_index, index, vehicle_id\n )\n print(route_distance)\n\n plan_output += '{0}, Load({1}) \\n '.format(self.data['names'][self.manager.IndexToNode(index)], route_load)\n\n plan_output += 'Distance of the route: {}\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n\n print('Total distance of all routes: {}km'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))", "def cost(distance, highway, bicycle, incline, preferences):\n\n #unpack preferences\n (flatness_pref, bicycle_pref, distance_pref,\n motorway_pref, highway_pref, residential_pref) = preferences\n multiplier = 1 + bike_multiplier(bicycle, bicycle_pref) + road_multiplier(highway, bicycle_pref, motorway_pref, highway_pref, residential_pref)\n if multiplier <= 0:\n multiplier = 0.01\n incl = incline_multiplier(float(incline))*flatness_pref\n cost = float(distance) * multiplier + incl\n if cost <= 0:\n cost = 0.01\n return cost", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n numFood = successorGameState.getNumFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in 
newGhostStates]\n ghostPositions = successorGameState.getGhostPositions()\n\n \"*** YOUR CODE HERE ***\"\n\n #print type(ghostState), dir(ghostState)\n totalScaredTimes = reduce(lambda x,y: x+y , newScaredTimes)\n foodDistances = helper(newPos,newFood.asList())\n capsuleDistances = helper(newPos,successorGameState.getCapsules())\n ghostDistances = helper(newPos,ghostPositions) \n if numFood is 0:\n foodUtility = 1000 \n else:\n foodUtility = (1/numFood)\n distanceToClosestFood = 1\n distanceToClosestGhost = 1\n distanceToClosestCapsule = 1 \n if (foodDistances and min(foodDistances) != 0):\n distanceToClosestFood = min(foodDistances) \n if (ghostDistances and min(ghostDistances) != 0):\n distanceToClosestGhost = min(ghostDistances) \n if (capsuleDistances and min(capsuleDistances) == 0):\n distanceToClosestCapsule = min(capsuleDistances)\n arg11 = 1/distanceToClosestFood - 1/distanceToClosestGhost\n arg22 = successorGameState.getScore() + totalScaredTimes + 1/distanceToClosestCapsule\n result = arg11 + arg22\n return result", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n foodPellets = newFood.asList()\n currPos = currentGameState.getPacmanPosition()\n closestFood, closestFoodPos = self.getClosestFood(foodPellets, newPos) \n closestGhostDist = 1000000\n closestGhost = ()\n for ghost in newGhostStates:\n distToGhost = manhattanDistance(newPos, ghost.getPosition())\n if distToGhost < closestGhostDist:\n closestGhostDist = distToGhost\n closestGhost = ghost\n\n if closestGhostDist > 0:\n if closestGhost.scaredTimer / closestGhostDist > 1:\n return successorGameState.getScore() + 25 / closestGhostDist + 1 / closestFood\n\n if closestGhostDist <= 2:\n return -len(foodPellets) - 5 / closestGhostDist + 1 / closestFood\n\n if closestFoodPos != ():\n if newPos == self.previousLoc: \n return -len(foodPellets) - 1 + 1 / closestFood\n\n if currPos == newPos:\n return -len(foodPellets) - .5 + 1 / closestFood\n\n return -len(foodPellets) + 1 / closestFood", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n\n # Keeping track of all parameters\n score = currentGameState.getScore()\n curfood = currentGameState.getFood()\n foodlist = curfood.asList()\n curcapsules = currentGameState.getCapsules()\n ghostposition = currentGameState.getGhostPositions()\n pacmanposition = currentGameState.getPacmanPosition()\n\n # Distance between ghost and pacman\n distx = abs(ghostposition[0][0] - pacmanposition[0])\n disty = abs(ghostposition[0][1] - pacmanposition[1])\n\n # If we are on a safe zone : we can move freely without being afraid to die\n if distx > 1 or disty > 1:\n score += 30\n foods = []\n caps = []\n # Tries to return the best score , for the closest food\n for f in foodlist:\n dstfx = abs(pacmanposition[0] - f[0])\n dstfy = abs(pacmanposition[1] - f[1])\n # distance factor for pac's - food distance\n foods.append(dstfx + dstfy)\n if foods != []:\n # return score for closest food , which will outtake the other food dots\n score *= (50 - min(foods))\n elif distx == 1 and disty == 1:\n score += 20.0\n elif distx == 0 and disty == 1:\n score += 10.0\n elif distx == 1 and disty == 
0:\n score += 10.0\n else:\n score -= 10\n\n return score\n util.raiseNotDefined()", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n # Volem que s'apropi a les fruites i s'allunyi dels fantasmes\n\n foodDistance = [util.manhattanDistance(newPos, food) for food in newFood.asList()]\n if foodDistance:\n foodMinima = min(foodDistance)\n else:\n foodMinima = -1 # perque si la llista esta buida vol dir que hem hem d'anar cap aquesta direcció, i per tant necessitem un valor molt gran.\n ghostDistance = [util.manhattanDistance(newPos, ghostState.getPosition()) for ghostState in newGhostStates]\n fantasmaMoltAprop = 0\n for i in ghostDistance:\n if i <= 1:\n fantasmaMoltAprop += 1\n distanciaFantasmes = sum(ghostDistance)\n if distanciaFantasmes == 0:\n distanciaFantasmes = -1 # perque aixo voldra dir que tenim els fantasmes al voltant, i per tant ens en volem allunyar si o si d'aquesta direcció\n #print(foodMinima, distanciaFantasmes, fantasmaMoltAprop)\n\n result = successorGameState.getScore() + 1 / float(foodMinima) - 1 / float(\n distanciaFantasmes) - fantasmaMoltAprop\n\n return result", "def main():\n # create graph\n city = create_city()\n\n # create costs cities\n costs = create_costs()\n\n # create min path to city\n path = create_min_path()\n\n # create list not check city\n list_cities = ['biysk', 'barnaul', 'novosibirsk', 'belokurikha',\n 'tomsk', 'krasnoyarsk', 'omsk']\n\n used_city = list_cities.pop(0) # Сity that we processing\n while list_cities:\n used_costs = costs[used_city] # Cost of the current city\n used_path = path[used_city]\n for neighbor in city[used_city]:\n costs_neighbor = costs[neighbor]\n path_to_neighbor = city[used_city][neighbor]\n\n # If path on current node less then rewrite the neighbor node\n if used_costs + path_to_neighbor < costs_neighbor:\n costs[neighbor] = used_costs + path_to_neighbor\n path[neighbor] = used_path + [neighbor]\n\n # Finding the min path to the neighbor\n min_path = min(city[used_city].values())\n # Entry in used_city the city with min path to it\n used_city = find_city(city, used_city, min_path)\n # Deleting city from the uninitiated cities\n list_cities.remove(used_city)\n\n for city, value in costs.items():\n print(f'{city:13} {value:2}', end=\" \")\n print(*path[city], sep=\", \")", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n\n score = 10000\n if successorGameState.isWin():\n return 100000000\n for ghost in newGhostStates:\n ghostPos = ghost.getPosition()\n if util.manhattanDistance(ghostPos, newPos) < 2:\n score -= 10000\n else:\n score += util.manhattanDistance(ghostPos, newPos) * 1\n \n nearFood = 1000\n farFood = 1000\n for foodPos in oldFood.asList():\n dist = util.manhattanDistance(foodPos, newPos)\n if (dist < nearFood):\n 
nearFood = dist\n if (dist > farFood):\n farFood = dist\n if (currentGameState.getNumFood() < successorGameState.getNumFood()):\n score += 5\n\n if action == Directions.WEST:\n score -= 1\n if action == Directions.STOP:\n score -= 2\n \n for scareTime in newScaredTimes:\n score += scareTime * 1\n\n score -= 2 * farFood\n score -= 5 * nearFood\n capsuleplaces = currentGameState.getCapsules()\n if successorGameState.getPacmanPosition() in capsuleplaces:\n score += 5\n return max(score, 0)\n \n #their original return\n #return successorGameState.getScore()" ]
[ "0.5988142", "0.5983045", "0.5924726", "0.5891215", "0.58438075", "0.5810156", "0.5804012", "0.5724945", "0.56719637", "0.56543106", "0.56524086", "0.5631604", "0.5616083", "0.5610215", "0.5607715", "0.5578848", "0.5551957", "0.5545246", "0.55360115", "0.55338514", "0.5530114", "0.5529325", "0.5524376", "0.552436", "0.5511657", "0.5500719", "0.5476586", "0.54696506", "0.5465611", "0.5457232", "0.5457232", "0.5452609", "0.5448495", "0.54438996", "0.54400593", "0.5432577", "0.540746", "0.540266", "0.5397914", "0.5390129", "0.53864384", "0.5377698", "0.5375581", "0.53695476", "0.536922", "0.5359443", "0.5355248", "0.5340143", "0.53391", "0.53340924", "0.53328204", "0.5329491", "0.53270215", "0.5321414", "0.5317331", "0.53066903", "0.53044546", "0.529919", "0.5296985", "0.52964556", "0.52959466", "0.5291856", "0.5271938", "0.52694404", "0.5268364", "0.5265526", "0.5263583", "0.52586627", "0.52565664", "0.5255726", "0.52526045", "0.5250287", "0.52385354", "0.5238441", "0.5237494", "0.52296215", "0.52296215", "0.52296215", "0.52257377", "0.52242583", "0.5222649", "0.52212965", "0.5219037", "0.52153516", "0.52117544", "0.5209612", "0.5206975", "0.52036244", "0.5201714", "0.51986474", "0.51969254", "0.5192116", "0.5190449", "0.51888245", "0.5187549", "0.5186318", "0.5174906", "0.51690215", "0.5165995", "0.5164947" ]
0.5730355
7
Compute the percentage of times the recommended pathway is shorter than the user's one.
def metric_path_length(pathways):
    num_users = len(pathways)
    num_good_recommendations = 0
    sum_u_path_len = 0
    sum_r_path_len = 0
    career_goal_reached = 0
    for user, pathway_tuple in pathways.items():
        u_path = pathway_tuple[0]
        r_path = pathway_tuple[1]
        sum_u_path_len += len(u_path)
        sum_r_path_len += len(r_path)
        if r_path[-1] == u_path[-1]:
            career_goal_reached += 1
        if len(r_path) < len(u_path):
            num_good_recommendations += 1
    return 100.0 * career_goal_reached / num_users, 100.0 * num_good_recommendations / num_users, sum_u_path_len / num_users, sum_r_path_len / num_users
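A minimal usage sketch (illustrative only, not part of the source row): the unpacking in the loop above implies that pathways maps each user to a (user_path, recommended_path) tuple of step lists; the sample data below is hypothetical.

# Hypothetical input: each value is (user_path, recommended_path)
pathways = {
    "user_1": (["analyst", "senior analyst", "manager"], ["analyst", "manager"]),
    "user_2": (["engineer", "lead", "architect"], ["engineer", "lead", "architect"]),
}
goal_pct, shorter_pct, avg_user_len, avg_rec_len = metric_path_length(pathways)
# goal_pct == 100.0 (both recommendations end at the user's goal)
# shorter_pct == 50.0 (only user_1's recommended path is shorter)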
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def h_score(self):\n if self.estimated_moves_to_goal is None:\n self.estimated_moves_to_goal = \\\n max(nx.single_source_shortest_path_length(self.graph, self.head_node).items(), key=lambda x: x[1])[1]\n return self.estimated_moves_to_goal", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def _fraction_latency(self, users_distances):\n\n users_desired_latency = np.array(list(map(lambda a: self.services_desired_latency[a],\n self.users_services)))\n check = users_distances < users_desired_latency\n fraction = np.count_nonzero(check==True) / self.num_of_users\n return fraction", "def computeShortestPathCoherence(node1, node2, w):\n\n\tif node1.strip()==node2.strip():\n\t\treturn w\n\n\tfromCache=rds.get(\"%s:%s\" % (node1, node2))\n\tif fromCache:\n\t\treturn float(fromCache)*w\n\telse:\n\t\tg = Graph()\n\t\tq=\"MATCH path=shortestPath((m:Page {name:\\\"%s\\\"})-[LINKS_TO*1..10]-(n:Page {name:\\\"%s\\\"})) RETURN LENGTH(path) AS length, path, m, n\" % (node1, node2)\n\n\t\tcursor=g.run(q)\n\t\tpath=None\n\t\tfor c in cursor:\n\t\t\tpath=c\n\n\t#\n\t\tif path:\n\t\t\trds.set(\"%s:%s\" % (node1, node2), 1/path[\"length\"])\n\t\t\trds.set(\"%s:%s\" % (node2, node1), 1/path[\"length\"])\n\t\t\treturn w/path[\"length\"]\n\t\telse:\n\t\t\trds.set(\"%s:%s\" % (node1, node2), 0.0)\n\t\t\trds.set(\"%s:%s\" % (node2, node1), 0.0)\n\t\t\treturn 0.0", "def _calculate_hours_percent(used_hours, estimated_hours):\n percent = (used_hours * 100) / estimated_hours\n return percent", "def percent_left(self):\n return 100 - self.percent_complete", "def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent", "def tunnel2_rekey_fuzz_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tunnel2_rekey_fuzz_percentage\")", "def tunnel2_rekey_fuzz_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tunnel2_rekey_fuzz_percentage\")", "def tunnel1_rekey_fuzz_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tunnel1_rekey_fuzz_percentage\")", "def tunnel1_rekey_fuzz_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tunnel1_rekey_fuzz_percentage\")", "def calc_slow(self, demands: Demand, routing: Routing) -> float:\n edge_capacities = [e[2]['weight'] for e in\n sorted(self.graph.edges(data=True))]\n link_utilisation = self.calc_overall_link_utilisation(demands, routing)\n # Because utilisation compared to link width is what we care about here\n ratio_capacities = np.divide(link_utilisation, edge_capacities)\n\n return np.max(ratio_capacities)", "def tunnel1_rekey_fuzz_percentage(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"tunnel1_rekey_fuzz_percentage\")", "def tunnel2_rekey_fuzz_percentage(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"tunnel2_rekey_fuzz_percentage\")", "def evaluate_metrics(pathways, debug):\r\n __print_msg('Evaluating metrics...', debug)\r\n metrics = {}\r\n metrics['CareerGoalReached'], metrics['ShorterRecommendedPath'], metrics['UserPathAvgLength'], metrics['RecPathAvgLength'] = metric_path_length(pathways)\r\n __print_msg('Career goal reached: 
{}'.format(metrics['CareerGoalReached']), debug)\r\n __print_msg('Recommended path shorter: {}'.format(metrics['ShorterRecommendedPath']), debug)\r\n __print_msg('User pathway average length: {}'.format(metrics['UserPathAvgLength']), debug)\r\n __print_msg('Recommended pathway average length: {}'.format(metrics['RecPathAvgLength']), debug)", "def path_cost(path):\n return len(path)", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def get_estimated_percentage(self):\n now_id = now_as_id()\n message_id = self.last_message_id\n if message_id >= now_id:\n return 100.0\n \n channel_id = self.source_channel.id\n if channel_id >= message_id:\n return 0.0\n \n if self.is_polling_done():\n return 100.0\n \n return (1.0 - (now_id - message_id) / (now_id - channel_id)) * 100.0", "def compute_path_metric(self, sw, path, util, time_now):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n #DESIGN CHOICE: Should we 1) always include extra-domain state, 2)\n #only include extra-domain state when not stale (timestamp), 3) always exclude\n #extra-domain state when calculating the path metric? Here we do (1)\n used = self.graph[u][v]['used'] + util\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def compute_path_metric(self, sw, path, util, time_now, local_contrib):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n # Use the last-learned-via-sync value for a link\n if (not local_contrib) and 'sync_learned' in self.graph[u][v]:\n used1 = self.graph[u][v]['sync_learned'] + util\n used2 = self.graph[u][v]['used'] + util\n # ['used'] is a strict lower bound for ['sync_learned']\n if used1 > used2: \n used = used1\n logging.debug(\"CS [%s] using sync_learned value 1 [%f]\", str(self.name), used1)\n else:\n used = used2\n logging.debug(\"CS [%s] using sync_learned value 2 [%f]\", str(self.name), used2)\n else:\n logging.debug(\"CS [%s] using tracking value\", str(self.name))\n used = self.graph[u][v]['used'] + util\n\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in 
path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))", "def suspected_per_hour(self):\r\n return (3600.*(self.circ_suspected+self.strm_suspected\r\n +self.circ_failed+self.strm_failed))/self.current_uptime()", "def look_ahead_heuristic(game, player):\n if game.is_loser(player):\n return float('-inf')\n\n if game.is_winner(player):\n return float('inf')\n\n own_legal_moves = game.get_legal_moves(player)\n own_moves = len(own_legal_moves)\n for m in own_legal_moves:\n own_moves += len(game.__get_moves__(m))\n\n opp_legal_moves = game.get_legal_moves(game.get_opponent(player))\n opp_moves = len(opp_legal_moves)\n for m in opp_legal_moves:\n opp_moves += len(game.__get_moves__(m))\n\n return float(own_moves - opp_moves)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opponent = game.get_opponent(player)\n\n # At the start of the game when there are lots of options,\n # use a simple heuristic.\n if len(game.get_blank_spaces()) > 25:\n opp_moves = game.get_legal_moves(opponent)\n own_moves = game.get_legal_moves(player)\n return len(own_moves) / max(len(opp_moves), 1e-6)\n # Once the board starts to fill up, use the difference between longest paths.\n else:\n return longest_path(game, player) - longest_path(game, opponent)", "def total_cost(path: Path) -> float:\n\t\n\tdistance = calc_total_dist(path)\n\tavg_speed = calc_average_speed(path)\n\t\n\t# Speed is less important, but gets a huge multiplier, because speed and\n\t# \tdistance are in different units. Speed requires a high ratio to have\n\t# \tsimilar amounts of variation.\n\tSPEED_DISTANCE_COST_RATIO = 7865.099\n\t\n\treturn (\n\t\t(distance * 1) +\n\t\t(-avg_speed * SPEED_DISTANCE_COST_RATIO)\n\t)", "def usage_percent(used, total, _round=None):\r\n try:\r\n ret = (used / total) * 100\r\n except ZeroDivisionError:\r\n ret = 0\r\n if _round is not None:\r\n return round(ret, _round)\r\n else:\r\n return ret", "def get_strength(self):\n return 10 - self.get_agility()", "def pct(self):\n\t\treturn self.bottle.pct()", "def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus", "def remaining_percent(self):\n return (self.remaining_words / self.total_words) * 100", "def percentage_complete(self) -> float:\n return self.__percentage_complete", "def _cost_route_fine(self):\n return self.fine", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Longest Path Heuristic (used towards end game)\n\n game_phase = len(game.get_blank_spaces()) # high if early, low if late in game\n max_phase = game.width*game.height\n\n def longestPath(player,game,path=0,longest=0):\n moves = game.get_legal_moves(player)\n if path > longest:\n longest = path\n if len(moves) == 0:\n path = 0\n for move in moves:\n new_board = game.forecast_move(move)\n longestPath(player,new_board,path+1,longest)\n return longest\n\n if (game_phase<15): # only feasible to calculate late-game\n game_phase = abs(game_phase-max_phase) # low if early, high if late in game\n return (longestPath(player,game)-longestPath(game.get_opponent(player),game))\n else:\n opponent = game.get_opponent(player)\n return 
float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))", "def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def cost(self):\n lg = len(self.guarantees.cnf)\n la = len(self.assumptions.cnf)\n\n \"\"\"heuristic\n Low: guarantees while assuming little (assumption set is bigger)\n High: guarantees while assuming a lot (assumption set is smaller)\"\"\"\n\n return la / lg", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def pulse_width_percent(self) -> float:", "def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100", "def timeLeft(minUsed):\n\n\n h = (input(\"How many hours does the battery last?\"))\n hours = int(h)\n minutes = float(hours * 60)\n\n minsLeft = (minutes - minUsed)\n\n p = (minsLeft / minutes) * 100\n percent = int(p)\n\n print (percent)\n\n return minsLeft", "def getGCpercentage(DNA):\n dnaLength = len(DNA) #counts the length of the DNA string\n findG = DNA.count(\"G\") #finds the letter G in DNA string\n findC = DNA.count(\"C\") #finds the letter C in DNA string\n print(findG)\n print(findC)\n print(dnaLength)\n GCpercent = ((findC + findG)/dnaLength) * 100 #calculates percentage of Gs and Cs\n print(\"Percentage of G and C:\",\" %6.2f\" % GCpercent)\n \n return getGCpercentage", "def percent_complete(self) -> int:\n return pulumi.get(self, \"percent_complete\")", "def player_goal_distance(self) -> float:\n route = self.best_route\n return sum(route.values())", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def compute_uct(self):\n if self.visits != 0:\n return - self.reward / self.visits + self.C * math.sqrt(math.log(self.parent.visits) / self.visits)\n else:\n return float('inf')", "def fs_percent_used_capacity(self):\n return self._fs_percent_used_capacity", "def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0", "def rate(way):\n cost = 0\n for i in range(len(way)-1):\n cost += DISTANCES[way[i]][way[i+1]]\n return cost", "def calculate_best_way(self) -> int:\n node = self._find_lowest_cost_node()\n while node:\n cost = self.costs[node]\n neighbors = self.graph[node]\n for neighbor in neighbors.keys():\n node_cost = cost + neighbors[neighbor]\n if self.costs[neighbor] > node_cost:\n self.costs[neighbor] = node_cost\n self.parents[neighbor] = node\n self.closed_nodes.append(node)\n node = self._find_lowest_cost_node()\n\n return self.costs[\"fin\"]", "def overall_progress(app_id):\r\n sql = text('''SELECT task.id, n_answers,\r\n COUNT(task_run.task_id) AS n_task_runs\r\n FROM task LEFT OUTER JOIN task_run ON task.id=task_run.task_id\r\n WHERE task.app_id=:app_id GROUP BY task.id''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n n_expected_task_runs = 0\r\n n_task_runs = 0\r\n for row in results:\r\n tmp = row[2]\r\n if row[2] > row[1]:\r\n tmp = row[1]\r\n n_expected_task_runs += row[1]\r\n n_task_runs += tmp\r\n pct = float(0)\r\n if n_expected_task_runs != 0:\r\n pct = float(n_task_runs) / float(n_expected_task_runs)\r\n return (pct * 100)", "def as_percent(self):\n if 
self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def part_1(distances: Distances) -> int:\n\n result, _ = min(generate_routes(distances))\n print(f\"part 1: shortest route has distance {result}\")\n return result", "def calculate_made_up_dist(self):\n\n # Ensure if current state equals goal, cost is only the current cost\n if self._goal_loc == self._current_loc:\n return self._current_cost\n\n # Distance is at least the Manhattan distance as cannot move diagonal\n estimated_distance = self.calculate_manhattan_dist()\n\n # Assume two board parts in the priority queue have the same weight.\n # For those board paths with higher actual cost and lower heuristic\n # cost, there is more assurance in the accuracy of the actual cost\n # than in the heuristic cost. Give a very small penalty (i.e. less\n # than one step) to prefer a path with a higher known cost than a\n # path with a higher heuristic cost.\n # Extract the number of portion of the move cost from the heuristic\n heuristic_cost = estimated_distance - self._current_cost\n # Heuristic cost penalty is normalized to a maximum of 0.1 steps\n # This is achieved by dividing the heuristic cost by the size of the\n # board. Since the heuristic cost can never be larger than the board\n # size, this quotient is less than or equal to 1. To normalize to a\n # maximum of 0.1, just multiply the number by 0.1. This is than added\n # to the estimated distance determined so far.\n heuristic_cost_penalty = 0.1 * heuristic_cost\n heuristic_cost_penalty /= BoardPath._traversed_board_size\n # Add what is essentially an \"uncertainty penalty\"\n estimated_distance += heuristic_cost_penalty\n\n # In case where all neighboring spaces are blocked or already\n # traversed, then set the path cost prohibitively large so it is\n # given minimum priority.\n if not (self.is_move_valid(\"d\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"u\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"l\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"r\", BoardPath._traversed_board)):\n # Total board area is sufficient as a prohibitive distance\n estimated_distance += BoardPath._traversed_board_size\n return estimated_distance\n\n # If all next steps that load directly to the goal are blocked, then\n # it takes at least two additional moves to get around the blocked\n # paths it (due to an obstacle or already traversed square) so add\n # two to the estimated distance to include that cost.\n if self._is_all_direct_next_moves_blocked(BoardPath._traversed_board):\n estimated_distance += 2\n\n # In a heap, if two nodes have the same cost, the object that was\n # put into the heap first in many implementations will be on top of the\n # heap. To make the algorithm more efficient, apply a slight penalty to\n # a non valid solution to ensure if an invalid solution and a valid\n # solution have the same cost that the valid solution would always be\n # on top of the heap. This is done by giving all non-valid solutions a\n # penalty term that is greater than zero and less than the minimum step\n # size (e.g. 
in this case 0 < 0.1 < 1).\n estimated_distance += 0.1\n\n # Return estimated distance\n return estimated_distance", "def percent_busy(self):\n return self._percent_busy", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def displayed_percent(self):\n return (self.displayed_words / self.total_words) * 100", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def calculate_time_percentage_left(self):\n time_left = self.calculate_time_left()\n return time_left / self.attributes[AT.TIME_TO_EXPIRE]", "def get_crawlera_incapsula_percent(crawlera_user):\n if crawlera_user:\n return 0\n else:\n return 100", "def averageExcessCost(self):\n TSTT = 0\n for l in self.link:\n if self.link[l].flow <0:\n print(\"what in tarnation?\") #this really shouldn't happen, but the sheer number of leetcode problems I've been doing make me want to check against it anyway\n break\n if self.link[l].flow >0:\n contribution = self.link[l].flow * self.link[l].cost\n TSTT += contribution\n SPTT = 0\n TotalDemand = 0\n for od in self.ODpair:\n if self.ODpair[od].demand >0:\n O = self.ODpair[od].origin\n D = self.ODpair[od].destination\n BL,C = self.shortestPath(O)\n SPTT += C[D]*self.ODpair[od].demand\n TotalDemand+=self.ODpair[od].demand\n AEC = (TSTT - SPTT)/TotalDemand\n \n return AEC", "def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0", "def pct_status(self):\r\n # DEPRECATED: self.info.n_answers will be removed\r\n # DEPRECATED: use self.t.n_answers instead\r\n if (self.info.get('n_answers')):\r\n self.n_answers = int(self.info['n_answers'])\r\n if self.n_answers != 0 and self.n_answers != None:\r\n return float(len(self.task_runs)) / self.n_answers\r\n else: # pragma: no cover\r\n return float(0)", "def completion_percent(self) -> Optional[float]:\n return pulumi.get(self, \"completion_percent\")", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def percent_waiting(self):\n return 
self._percent_waiting", "def get_reached(g, source, hops, timeout):\n\n paths = nx.single_source_shortest_path(g, source, hops)\n total = len(paths)\n reached = total\n\n for v in paths.itervalues():\n for i in range(1, len(v)):\n lat = g[v[i-1]][v[i]][\"latency\"]\n if lat > timeout:\n reached -= 1\n break\n\n return float(reached)/float(total)", "def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def heuristic(self):\n game_score = (self.get_game_score(), 0.85)\n road_score = (self.get_longest_road_score(), 0.05)\n steps_score = (self.get_steps_available_score(), 0.05)\n reachable_nodes_score = (self.get_reachable_nodes_score(), 0.05)\n heuristics = [game_score, road_score, steps_score, reachable_nodes_score]\n result = 0\n for score, weight in heuristics:\n result += score * weight\n if DEBUG_PRINT:\n print(f\"Heuristic value for location {self.loc} is {result}\")\n print(f\"\\treachable score: {reachable_nodes_score[0] * reachable_nodes_score[1]}\")\n print(f\"\\tsteps score: {steps_score[0] * steps_score[1]}\")\n print(f\"\\tlongest road score: {road_score[0] * road_score[1]}\")\n print(f\"\\tgame score: {game_score[0] * game_score[1]}\")\n return result", "def local_efficiency(self, node_list1, node_list2, link_attribute=None):\n path_lengths = self.cross_path_lengths(node_list1, node_list2,\n link_attribute)\n return np.mean(1/path_lengths, axis=1)", "def penalty(self):\n return 0", "def disk_usage(path):\n st = os.statvfs(path)\n free = (st.f_bavail * st.f_frsize)\n total = (st.f_blocks * st.f_frsize)\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n try:\n percent = ret = (float(used) / total) * 100\n except ZeroDivisionError:\n percent = 0\n # NB: the percentage is -5% than what shown by df due to\n # reserved blocks that we are currently not considering:\n # http://goo.gl/sWGbH\n #return usage_ntuple(total, used, free, round(percent, 1))\n return round(percent,1)", "def KPI(self, total=True):\n \n data = self.select_table('ChordLog')\n correct = data[data['PredictedLabel'] == data['ActualLabel']]\n\n # % correctly predicted in chord net\n human_level_performance = (len(correct) / len(data)) * 100\n \n # round value\n human_level_performance = round(human_level_performance, 4) \n \n return human_level_performance", "def task_progress(project):\n complete = Task.objects.filter(project=project, status='C').count()\n total = Task.objects.filter(project=project).count()\n if total == 0:\n return 0\n\n return round(complete/total * 100, 2)", "def calculate_task_potential(self) -> float:\n cur_xy = self.agent.get_position()[:2]\n goal_xy = np.array([1e3, 0])\n return -np.linalg.norm(cur_xy - goal_xy) * 60", "def percent_of(part, whole):\n return part * 100 / whole", "def 
determine_spammer_by_percentage(self, reviewer_id):\n cut_value = 0.8\n\n fake_sql = \"select count(*) from reviews_simple where reviewerID = '%s' and fake = 1\" % reviewer_id\n legitimate_sql = \"select count(*) from reviews_simple where reviewerID = '%s' and fake = 0\" % reviewer_id\n\n self.cursor.execute(fake_sql)\n fake_num = self.cursor.fetchone()[0]\n self.cursor.execute(legitimate_sql)\n legitimate_num = self.cursor.fetchone()[0]\n\n total_num = float(fake_num + legitimate_num)\n if total_num == 0:\n return 2 # 2 represents unknown label\n else:\n\n if fake_num/total_num > cut_value:\n return 1\n else:\n return 0", "def path_lengths(self):\n trip_id2length = defaultdict(float)\n prev_id = 0\n cur_id = 0\n prev_lat = 0\n prev_lon = 0\n num_big_hops = 0\n big_hops = {}\n print \"Bad Distances\"\n for line in self.lines:\n #normalized = dg.normalize(line)\n normalized = normalize_simple(line)\n cur_id = normalized[0]\n lat = normalized[1]\n lon = normalized[2]\n if cur_id == prev_id:\n distance = gps_dist_miles(prev_lat,prev_lon,lat,lon)\n if distance > 1:\n big_hops[cur_id] = 1\n num_big_hops += 1\n print cur_id\n trip_id2length[cur_id] += distance \n prev_lat = lat\n prev_lon = lon\n prev_id = cur_id\n\n print len(trip_id2length.keys())\n #for bad_id in big_hops.keys():\n # del trip_id2length[bad_id]\n\n for i in (15,18,333,24,12345):\n print \"%d: %f\" % (i,trip_id2length[i])\n\n #for i in range(1,25001):\n # if i not in trip_id2length.keys():\n # print i\n num_trips = len(trip_id2length.keys())\n print num_trips\n total_len = 0.0\n for i in trip_id2length.keys():\n if trip_id2length[i] > 50:\n print \"Big trip: %d\" % i\n #del trip_id2length[i]\n total_len += trip_id2length[i]\n heap = []\n for i in trip_id2length.keys():\n heapq.heappush(heap,trip_id2length[i])\n quarter_len = num_trips/4\n for i in range(quarter_len):\n heapq.heappop(heap)\n print \"25th percentile: %f\" % heapq.heappop(heap)\n for i in range(quarter_len):\n heapq.heappop(heap)\n print \"median: %f\" % heapq.heappop(heap)\n for i in range(quarter_len):\n heapq.heappop(heap)\n print \"75th percentile: %f\" % heapq.heappop(heap)\n\n num_trips = len(trip_id2length.keys())\n print num_trips\n avg_len = total_len/num_trips\n print \"average length: %f\" % avg_len \n print \"total length %f\" % total_len\n print \"number of big hops: %d\" % num_big_hops\n return trip_id2length,avg_len", "def get_online_price_diff_percent_method(self):\n try:\n if self.overclockerskz and self.overclockerskz.online_price:\n return int((self.get_online_price_diff_method() / self.overclockerskz.online_price) * 100)\n else:\n return 0\n except (TypeError, ValueError):\n return 0", "def get_percentComplete(self):\n val = self.resource.get_cdmi_sys_meta().get(\"cdmi_percentComplete\",\n \"100\")\n return val", "def state_score_naive(self, game_state, player, weights):\n # walls score\n other_players = [p for p in game_state.players if p != player]\n my_walls = player.num_walls\n their_walls = max([p.num_walls for p in other_players])\n walls_diff = (my_walls - their_walls)\n # path length score\n my_path = len(game_state.get_shortest_path_player(player))\n their_path = min([len(game_state.get_shortest_path_player(p)) for p in other_players])\n paths_diff = their_path - my_path\n \n return weights[0]*walls_diff + weights[1]*paths_diff", "def heuristic_cost_estimate(self, current):\n relevants = 0\n accurate_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], current.anchor):\n relevants += 1\n if 
self.pred_sample.iloc[i] == self.pred_example:\n accurate_relevants += 1\n accuracy = accurate_relevants/relevants\n if self.threshold-accuracy <= 0:\n x = 5\n return max(0, self.threshold - accuracy)", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def auto_headroom_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"auto_headroom_percentage\")", "def auto_headroom_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"auto_headroom_percentage\")", "def __heuristic1__(game, player):\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n if game.move_count < ((game.height * game.width)/2):\n return float(own_moves - 3 * opp_moves)\n else:\n return float(own_moves - opp_moves)", "def self_loop_proportion(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return sum(u == v for u, v, k in Gu.edges) / len(Gu.edges)", "def completion_proximity_score(prefix, completion):\n if prefix == completion:\n return float(\"inf\")\n else:\n return 1.0 / float(len(completion))", "def max_occupancy_percent_for_deferred_work(self):\n return self._max_occupancy_percent_for_deferred_work", "def get_song_percent_remaining(result):\n return int((1 - (get_song_elapsed_milliseconds(result) / get_song_length_milliseconds(result))) * 100)", "def calc_total_dist(path: Path) -> float:\n\t\n\treturn sum(\n\t\tcalc_dist(*cs)\n\t\tfor cs in iter_n(path, 2)\n\t)", "def ratio_local_cons(self):\n if self.current_energy_consumed == 0.0:\n return 1.0\n else:\n return self.local_cons / self.current_energy_consumed", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def pct_match(self, s1, s2, comp_length):\n\n matches = self.max_freq[s1:s1+comp_length] \\\n == self.max_freq[s2:s2+comp_length]\n return np.ma.sum(matches) / np.ma.count(matches)" ]
[ "0.62298673", "0.61328906", "0.6114768", "0.6096005", "0.6058874", "0.60417265", "0.5953899", "0.59453714", "0.59453714", "0.59438455", "0.59438455", "0.59332836", "0.5882768", "0.58660126", "0.5852752", "0.5846534", "0.58149326", "0.58115447", "0.57984257", "0.57840717", "0.5775737", "0.5768955", "0.5762734", "0.5759235", "0.5748409", "0.5746831", "0.56966144", "0.567402", "0.5670847", "0.5670203", "0.5657068", "0.5637864", "0.5633915", "0.56336296", "0.56293994", "0.5627931", "0.5614011", "0.5605996", "0.560176", "0.5593335", "0.5575713", "0.5570689", "0.55568844", "0.5541081", "0.5524674", "0.55172276", "0.5513114", "0.5502809", "0.55024004", "0.54951715", "0.5487412", "0.54809296", "0.5479067", "0.54780436", "0.5472175", "0.5463939", "0.5452674", "0.54395914", "0.54360366", "0.54313576", "0.5430557", "0.54272974", "0.54238605", "0.5419484", "0.5415722", "0.5414998", "0.5409635", "0.54075366", "0.5403882", "0.5399788", "0.53969747", "0.5395292", "0.53874546", "0.53874445", "0.5386219", "0.5385526", "0.53837", "0.5381428", "0.5380509", "0.5375892", "0.53750634", "0.53739685", "0.5365964", "0.53641593", "0.5363473", "0.53628516", "0.53567034", "0.535288", "0.53504", "0.53443635", "0.53443635", "0.53421414", "0.53412294", "0.53407884", "0.5337724", "0.5329437", "0.5316692", "0.53141004", "0.53109294", "0.5307621" ]
0.76785594
0
Do not return anything, modify nums inplace instead.
def rotate(self, nums, k):
    k = k % len(nums)
    num = nums[:]
    num1 = num[-k:]
    del num[-k:]
    nums[:] = num1 + num
    print(nums)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fn(i):\n if i == len(nums): ans.append(nums.copy())\n for j in range(i, len(nums)): \n nums[i], nums[j] = nums[j], nums[i]\n fn(i+1)\n nums[i], nums[j] = nums[j], nums[i]", "def double_nums(num_list):", "def remove_dups(nums):\r\n nums[:] = sorted(list(set(nums)))\r\n return nums", "def _permuteUnique(self, curr_arr, nums):\r\n mem = dict()\r\n\r\n if not nums:\r\n self.output.append(curr_arr)\r\n return\r\n\r\n for i, v in enumerate(nums):\r\n if v in mem:\r\n continue\r\n else:\r\n mem[v] = 1\r\n new_arr = list(curr_arr)\r\n new_arr.append(v)\r\n self._permuteUnique(new_arr, nums[:i] + nums[i+1:])", "def moveZeroes2(self, nums: List[int]) -> None:\n i = 0\n for num in nums:\n if num != 0:\n nums[i] = num\n i += 1\n\n while i < len(nums):\n nums[i] = 0\n i += 1", "def removeDuplicates(self, nums: List[int]) -> int:\n trail = 0\n \n for i, n in enumerate(nums):\n if nums[trail] != nums[i]:\n trail += 1\n nums[trail] = nums[i]\n\n return trail + 1", "def fn(i):\n if i == len(nums): ans.append(nums.copy())\n seen = set()\n for k in range(i, len(nums)):\n if nums[k] not in seen:\n seen.add(nums[k])\n nums[i], nums[k] = nums[k], nums[i]\n fn(i+1)\n nums[i], nums[k] = nums[k], nums[i]", "def single_number(nums):\n i = 0\n for num in nums:\n i ^= num\n return i", "def moveZeroes(self, nums: List[int]) -> None:\n zero_count = Counter(nums)\n for num in range(zero_count[0]):\n nums.remove(0)\n nums.extend([0]*zero_count[0])\n print(nums)", "def moveZeroes(self, nums: List[int]) -> None:\n j = 0\n for i, value in enumerate(nums):\n if value != 0:\n nums[j], nums[i] = nums[i], nums[j]\n j += 1\n return nums", "def moveZeroes(self, nums: List[int]) -> None:\n i = 0\n while i != len(nums) and nums[i]:\n i += 1\n j = i\n while j != len(nums):\n if not nums[j]:\n j += 1\n else:\n nums[i] = nums[j]\n i += 1\n j += 1\n while i != len(nums):\n nums[i] = 0\n i += 1", "def moveZeroes(self, nums: [int]) -> None:\n count = 0\n try:\n while 1:\n nums.remove(0)\n count += 1\n except:\n pass\n if count:\n nums.extend([0] * count)", "def reset(self) -> List[int]:\n return self.nums", "def removeDuplicates(nums):\n i = 0\n j = 1\n for r in range(len(nums)):\n if j >= len(nums):\n print(i + 1, nums)\n return i + 1\n if nums[i] != nums[j]:\n i = i + 1\n nums[i] = nums[j]\n j = j + 1", "def moveZeroes(self, nums):\n count = 0 \n for i, val in enumerate(nums):\n if val != 0:\n nums[count] = val\n count += 1\n \n while(count < len(nums)):\n nums[count] = 0\n count += 1 #increment the count\n \n return nums", "def moveZeroes(self, nums: List[int]) -> None:\n # 双指针操作:\n # 记录非零元数下标:\n i = 0\n for j in range(len(nums)):\n # 遇到非零元素时,i加1:\n if nums[j] != 0:\n # 如果i与j不等时,才调换:\n if i != j:\n nums[i] , nums[j] = nums[j] , nums[i]\n i += 1", "def moveZeroes(self, nums: List[int]) -> None:\n \n store = []\n zero_cnt = 0\n \n for i in nums:\n if i == 0:\n zero_cnt += 1\n else:\n store.append(i)\n \n store += [0 for _ in range(zero_cnt)]\n\n for i in range(len(nums)):\n nums[i] = store[i]", "def permuteClever(self, nums):\n self.visited = [False] * len(nums)\n results = []\n tmp = []\n step = 0\n \n self.helper(results, tmp, nums, step)\n \n return results", "def moveZeroes(self, nums: List[int]) -> None:\n\n i, count = 0, 0\n while i < len(nums):\n if not nums[i]:\n nums.pop(i)\n count+=1\n else:\n i += 1\n \n nums.extend([0]*count)", "def moveZeroes(self, nums: [int]) -> None:\n for num in nums:\n if num == 0:\n nums.remove(num)\n nums.append(num)\n # print(nums)", "def moveZeroes(self, nums):\r\n\r\n pos = 0\r\n for i in 
range(len(nums)):\r\n if nums[i]:\r\n nums[pos] = nums[i]\r\n pos += 1\r\n # print(nums[i])\r\n # print(pos)\r\n for i in range(pos, len(nums)):\r\n nums[i] = 0", "def moveZeroes(self, nums: List[int]) -> None:\n\n i = 0\n for cur in range(len(nums)):\n if nums[cur]:\n temp = nums[i], nums[cur]\n nums[cur], nums[i] = temp\n i += 1", "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort()\n for i in range(len(nums) // 2):\n nums.insert(i*2+1, nums.pop())", "def remove_element(self, nums, val):\n\n storeIndex = 0\n\n for num in nums:\n if num != val:\n nums[storeIndex] = num\n storeIndex += 1\n\n return storeIndex", "def moveZeroes(self, nums: List[int]) -> None:\n index = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[index] = nums[i]\n index += 1\n \n for i in range(index, len(nums)):\n nums[i] = 0", "def moveZeroes(self, nums) -> None:\n start, end = 0, 0\n while end < len(nums):\n if nums[end] != 0:\n nums[start], nums[end] = nums[end], nums[start]\n start += 1\n end += 1\n else:\n end += 1", "def moveZeroes(self, nums: List[int]) -> None:\n pos = 0\n for i, x in enumerate(nums):\n if x:\n nums[pos], nums[i] = x, nums[pos]\n pos += 1", "def moveZeroes3(self, nums: List[int]) -> None:\n pos = 0 \n for i in range(len(nums)):\n if nums[i] != 0 :\n nums[i], nums[pos] = nums[pos], nums[i]\n pos += 1", "def moveZeroes(self, nums):\n if len(nums)<2:\n return nums\n left = 0\n right = 1\n while right < len(nums):\n if nums[left] == 0:\n if nums[right] == 0:\n right += 1\n else:\n nums[left], nums[right] = nums[right], nums[left]\n left += 1\n right += 1\n else:\n left += 1\n right += 1\n return nums", "def wiggleSort(self, nums: List[int]) -> None:\n # todo: do this problem again later\n nums.sort(reverse=True)\n l = len(nums) // 2\n nums[::2],nums[1::2] = nums[l:],nums[:l]", "def square_numbers_1(nums):\n result = []\n for i in nums:\n result.append(i*i)\n return result", "def nextPermutation(self, nums: List[int]) -> None:\n if len(nums)==1: return\n \n replace= len(nums)-2\n while nums[replace]>= nums[replace+1] and replace>=0:\n replace-=1\n if replace==-1:\n nums.sort()\n return\n else:\n temp= len(nums)-1 ##temp=2; replace=0\n while nums[temp]<=nums[replace] and temp>replace:\n temp-=1\n # temp-=1\n # print(temp, replace)\n nums[temp], nums[replace]=nums[replace], nums[temp]\n for i in range((len(nums)-replace-1)//2):\n nums[i+ replace+1], nums[-(i+1)] =nums[-(i+1)], nums[i+ replace+1]\n return", "def moveZeroes(self, nums: List[int]) -> None:\n i = 0\n for j in range(len(nums)):\n if nums[j] != 0:\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j += 1", "def remove_duplicates(nums: List[int]) -> int:\n\n if not nums:\n return 0\n\n slow = 0\n for fast in range(1, len(nums)):\n # compare element with a next one in order to find a duplicate in a non-decreasing array\n # if current element is unique,\n # slow runner grows one step and copys the current value\n if nums[slow] != nums[fast]:\n slow += 1\n nums[slow] = nums[fast]\n return slow + 1", "def moveZeroes(self, nums: List[int]) -> None:\n try:\n j = nums.index(0)\n except ValueError:\n return\n nums_len = len(nums)\n i = j + 1\n while i < nums_len and nums[i] == 0:\n i += 1\n\n while i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1", "def moveZeroes1(self, nums: List[int]) -> None:\n count = 0 \n for i in range(len(nums)-1) :\n i = i - count\n if nums[i] == 0 :\n count += 1\n nums.pop(i)\n \n nums.extend([0]*count)", "def removeDuplicates(self, nums: List[int]) -> 
int:\n lead = 0\n trail = 0\n \n nums_len = len(nums)\n\n while lead < nums_len - 1:\n while nums[lead] <= nums[trail]:\n lead += 1\n if lead >= nums_len:\n break\n else:\n trail += 1\n nums[trail] = nums[lead]\n\n return trail + 1", "def moveZeroes(self, nums: List[int]) -> None:\n l,r=0,len(nums)-1\n while l<r:\n if nums[l]==0:\n nums.append(nums.pop(l))\n r-=1\n continue\n l+=1\n return nums", "def moveZeroes(self, nums: [int]) -> None:\n if len(nums) < 2 :\n return\n point = 0\n for fast_point in range(0,len(nums)):\n if nums[fast_point] != 0:\n nums[point],nums[fast_point] = nums[fast_point],nums[point]\n point += 1\n pass\n pass", "def moveZeroes(self, nums: List[int]) -> None:\n ##brute force\n # counts= nums.count(0)\n # while 0 in nums: nums.remove(0)\n # nums+=[0]*counts\n \n ## two pointer (swapping)\n if not nums: return None\n anchor, explore= 0, 0\n while explore <len(nums):\n if nums[explore]!=0 and explore!=anchor:\n temp= nums[anchor]\n nums[anchor]=nums[explore]\n nums[explore]=temp\n if nums[anchor]!= 0:\n anchor+=1\n explore+=1", "def moveZeroes(self, nums: List[int]) -> None:\n i = j = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[j] , nums[i]= nums[i] , nums[j]\n j += 1", "def moveZeroes(self, nums: List[int]) -> None:\n i = j = 0\n N = len(nums)\n while j < N:\n while j < N and nums[j] == 0:\n j += 1\n if j >= N:\n break\n nums[i] = nums[j]\n i += 1\n j += 1\n while i < N:\n nums[i] = 0\n i += 1", "def moveZeroes(self, nums: List[int]) -> None:\n n = len(nums)\n non0 = 0\n p = 0\n while (p < n):\n if nums[p] != 0:\n nums[p], nums[non0] = nums[non0], nums[p]\n non0 +=1\n \n p +=1", "def moveZeroes(self, nums: [int]) -> None:\n length = len(nums)\n i = 0\n ctr = 0\n while i < length:\n if ctr == length - 1:\n break\n if nums[i] == 0:\n nums.append(0)\n nums.pop(i)\n else:\n i += 1\n ctr += 1", "def moveZeroes(self, nums: List[int]) -> None:\n count = 0\n ans = []\n for num in nums:\n if num != 0:\n ans.append(num)\n else:\n count += 1\n for zero in range(count):\n ans.append(0)\n return ans", "def singleNumber2(self, nums):\n hash_table={}\n \n for i in nums:\n try:\n hash_table.pop(i)\n except:\n hash_table[i] = 1\n \n return hash_table.popitem()[0]", "def moveZeroes(self, nums: List[int]) -> None:\n pos =0\n for i in nums:\n if i==0:\n continue\n else:\n nums[pos]=i\n pos+=1\n nums[pos:] = [0] * (len(nums)-pos)", "def moveZeroes(self, nums: 'List[int]') -> 'None':\n\n i = 0\n len_nums = len(nums)\n while i < len_nums:\n if nums[i] == 0:\n nums.append(nums.pop(i))\n len_nums -= 1\n else:\n i += 1", "def fn(nums):\n ans, vals = [], []\n for i, x in enumerate(nums): \n k = bisect_left(vals, x)\n if k == len(vals): vals.append(x)\n else: vals[k] = x\n ans.append(k)\n return ans", "def moveZeroes(self, nums: List[int]) -> None:\n # 循环记录0元素的个数,并且遇到非0元素时候,将非0元素替换到0元素的位置\n # count 记录0元素的个数, i - count实际上是记录了零元素的位置。\n count = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n count += 1\n elif count > 0:\n nums[i - count], nums[i] = nums[i], 0\n return nums", "def _one_pass(nums):\n pattern = [0, 1, 0, -1]\n return [\n int(str(sum(\n v * pattern[(i // n) % len(pattern)]\n for i, v in enumerate(nums, start=1)\n ))[-1])\n for n in range(1, len(nums) + 1)\n ]", "def moveZeroes(self, nums: List[int]) -> None:\n index = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n index += 1\n else:\n nums[i - index] = nums[i]\n\n index0 = len(nums) - index\n while index0 <= len(nums) -1:\n nums[index0] = 0\n index0 +=1", "def permutation(nums):\n list = []\n temp = []\n 
backtrack(list, temp, nums)\n return list", "def moveZeroes(nums):\n \n zero_count = nums.count(0)\n \n for x in range(zero_count):\n nums.remove(0)\n nums.append(0)", "def moveZeroes(self, nums: List[int]) -> None:\n \n zero_cnt = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n nums.pop(i)\n nums.insert(0, 0)\n zero_cnt += 1\n \n while zero_cnt > 0:\n nums.pop(0)\n nums.append(0)\n zero_cnt -= 1", "def fn(nums):\n if len(nums) == 1: return nums\n return fn(nums[::2]) + fn(nums[1::2])", "def single_number(nums):\n tmp = 0\n for num in nums:\n tmp ^= num\n marker = 1\n while marker & tmp != marker:\n marker <<= 1\n a = 0\n for num in nums:\n if marker & num:\n a ^= num\n b = tmp ^ a\n return [a, b]", "def __init__(self, nums):\n acc = 0\n for i in range(len(self.sumArr)):\n self.sumArr.pop()\n for i in nums:\n acc += i\n self.sumArr.append(acc)\n print self.sumArr", "def moveZeroes(self, nums: List[int]) -> None:\n s = 0\n \n while nums.count(0) > 0:\n nums.remove(0)\n s += 1\n \n for i in range(s):\n nums.append(0)", "def wiggleSort(self, nums: List[int]) -> None:\n newlist=sorted(nums)\n nums[::2]=newlist[:int(len(nums)/2)+len(nums)%2]\n nums[1::2]=newlist[int(len(nums)/2)+len(nums)%2:]", "def moveZeroes(self, nums: List[int]) -> None:\n\n pos = 0\n \n for i in range(len(nums)):\n element = nums[i]\n \n if element != 0:\n nums[pos],nums[i] = nums[i], nums[pos]\n pos +=1", "def sort(self, nums: List[int]) -> None:\n n = len(nums)\n\n # Traverse through th list\n for i in range(n):\n for j in range(n-i-1):\n\n # Swap adjacent elements if they are out of order\n if nums[j] > nums[j+1]:\n nums[j], nums[j+1] = nums[j+1], nums[j]", "def moveZeroes(self, nums: List[int]) -> None:\n N = len(nums)\n l = 0\n r = 0\n while r < N:\n if nums[l] == 0 and nums[r] != 0:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n elif nums[r] == 0:\n r += 1\n else:\n l += 1\n r += 1", "def moveZeroes(self, nums: list) -> None:\n# p, q = 0, 1\n# for p in range(len(nums)):\n# if nums[p] == 0:\n# for q in range(p+1, len(nums)):\n# if nums[q] != 0:\n# nums[p], nums[q] = nums[q], nums[p]\n# break\n q = 0\n for p in range(len(nums)):\n if nums[p] != 0:\n nums[q] = nums[p] #直接把非零数塞到前面,从而避免了又一重循环\n q += 1\n for k in range(q, len(nums)):\n nums[k] = 0\n k += 1", "def wiggleSort(self, nums: List[int]) -> None:\n temp = sorted(nums)\n s, t = (len(nums) + 1) >> 1, len(nums)\n for i in range(len(nums)):\n if i & 1 == 0:\n s -= 1\n nums[i] = temp[s]\n else:\n t -= 1\n nums[i] = temp[t]", "def wiggleSort(self, nums: List[int]) -> None:\n for ind in range(len(nums) - 1):\n\n if ind % 2 == 0:\n if nums[ind] > nums[ind + 1]:\n nums[ind], nums[ind + 1] = nums[ind + 1], nums[ind]\n\n else:\n if nums[ind] < nums[ind + 1]:\n nums[ind], nums[ind + 1] = nums[ind + 1], nums[ind]", "def moveZeroes(self, nums: List[int]) -> None:\n count = 0 # Shift count variable\n for i in range(len(nums)):\n if nums[i] == 0:\n count += 1\n else:\n nums[i-count], nums[i] = nums[i], nums[i-count] # Swap the nubmer and zero\n\n return", "def moveZeroes(self, nums) -> None:\n zero_count = 0\n for index, each_data in enumerate(nums):\n if each_data == 0:\n zero_count += 1\n continue\n nums[index - zero_count] = each_data\n start = len(nums) - zero_count\n while start < len(nums):\n nums[start] = 0\n start += 1", "def merge(self, nums1, m, nums2, n):\n nums1.extend([0]*n)\n j=0\n for i in range(len(nums1)):\n if nums2[j]<nums1[i]:\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1", "def reset(self):\n return self.nums", "def moveZeroes(self, nums: 
List[int]) -> None:\n pos=0\n for i in range(len(nums)):\n if(nums[i]!=0):\n nums[pos],nums[i]= nums[i],nums[pos]\n pos+=1", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n for i in range(n):\n nums1[m + i] = nums2[i]\n nums1.sort()", "def moveZeroes(nums):\n for i in nums:\n if (i == 0):\n nums.pop(nums.index(i))\n nums.append(i)", "def merge(self, nums1, m, nums2, n):\n n=len(nums1)\n j=0\n for i in range(n):\n if nums2[j]<nums1[i] and nums2[j]<len(nums2):\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1\n if nums1[i]==0 and nums2[j]<len(nums2):\n nums1[i]=nums2[j]", "def sort(self, nums: List[int]) -> None:\n\n # Seperates negative and positive integers\n neg, pos = [], []\n for num in nums:\n if num < 0:\n neg.append(-num)\n else:\n pos.append(num)\n\n # Sorts the negative numbers\n self._sort(neg)\n neg.reverse()\n\n # Sorts the positiv numbers\n self._sort(pos)\n \n # Remerges the sorted subarrays back into the original array.\n i = j = k = 0\n while j < len(neg):\n nums[i] = -neg[j]\n j += 1\n i += 1\n\n while k < len(pos):\n nums[i] = pos[k]\n k += 1\n i += 1", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n j = 0\n i = 0\n while i < m:\n if j >= n:\n break\n if nums1[i] == 0 and i >= m:\n \n nums1[i:] = nums2[j:]\n break\n else:\n if nums1[i] < nums2[j]:\n i+= 1\n else:\n\n nums1[i:] = [nums2[j]]+nums1[i:-1]\n j+=1\n i+=1\n m+=1", "def reset(self) -> List[int]:\n return self.nums", "def moveZeroes2(self, nums) -> None:\n p1, p2 = 0, 0\n N = len(nums)\n\n while p2 < N:\n while p1 < N and nums[p1] != 0:\n p1 += 1\n\n if p2 <= p1:\n p2 = p1 + 1\n\n if p2 >= N:\n break\n\n if nums[p2] != 0:\n nums[p1], nums[p2] = nums[p2], nums[p1]\n p1 += 1\n p2 += 1\n else:\n p2 += 1", "def merge(self, nums1, m, nums2, n):\n nums1.extend([0]*len(nums2))\n j=0\n for i in range(len(nums2)):\n if nums2[i]<nums1[j]:\n nums1.pop()\n print(nums1)\n nums1.insert(j,nums2[i])\n j=j+1", "def nextPermutation(self, nums: List[int]) -> None:\n pass", "def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)", "def reset(self):\n return self.nums", "def nextPermutation(self, nums) -> None:\n\n def helper(a, i):\n while i > 0:\n for j in range(i - 1, a - 1, - 1):\n if nums[j] < nums[i]:\n nums[j], nums[i] = nums[i], nums[j]\n nums[j + 1:] = reversed(nums[j + 1:])\n return\n elif nums[j] == nums[i]:\n helper(j, i - 1)\n i -= 1\n nums.reverse()\n\n helper(0, len(nums) - 1)", "def square_nums(number_list):", "def fn(i):\n if len(nums) == i: return ans.append(stack.copy())\n fn(i+1)\n stack.append(nums[i])\n fn(i+1)\n stack.pop()", "def nextPermutation(self, nums: List[int]) -> None:\n n = len(nums)\n i = n-2\n while i >= 0 and nums[i] >= nums[i+1]:\n i -= 1\n j = n-1\n while j > i and nums[j] <= nums[i]:\n j -= 1\n\n nums[i], nums[j] = nums[j], nums[i]\n # 当nums已经是最大的,那么 i=j=-1\n nums[i+1:] = sorted(nums[i+1:])", "def merge(self, nums1: [int], m: int, nums2: [int], n: int) -> None:\n for i in range(m, len(nums1)):\n del nums1[m]\n for i in range(n, len(nums2)):\n del nums2[n]\n\n nums1 += nums2\n nums1.sort()", "def smallerNumbersThanCurrent(nums: List[int]) -> List[int]:\n i, count = 0, 0\n arr = []\n for j in range(len(nums)):\n if nums[i] > nums[j]:\n count += 1\n arr.append(count)\n return arr", "def moveZeroes(self, nums) -> None:\n nums.sort(key=lambda x : x != 0)\n print(nums)\n\n point = 0\n for index, value in enumerate(nums):\n if value != 0:\n point = index\n break\n\n nums[point:] = 
list(reversed(nums[point:]))\n print(nums)\n nums.reverse()\n print(nums)", "def moveZeroes(self, nums: List[int]) -> None:\n zeros=0\n for i in range(len(nums)):\n if nums[i]==0:\n zeros+=1\n else:\n nums[i-zeros]=nums[i]\n for i in range(len(nums)-zeros,len(nums)):\n nums[i]=0", "def delete_numbers(n, nums):\n for i in range(len(nums)):\n if nums[i] % n == 0:\n nums[i] = 0", "def moveZeroes(self, nums: List[int]) -> None:\n # Treating the corner case first:\n if len(nums) == 0 or len(nums) == 1:\n return\n # Treating the general cases...\n counter = 0\n for i in range(0, len(nums)):\n if nums[i] == 0:\n counter += 1\n left = 0\n right = 0\n while right < len(nums):\n if nums[left] == 0 and nums[right] == 0:\n right += 1\n elif nums[left] == 0 and nums[right] != 0:\n nums[left] = nums[right]\n left += 1\n right += 1\n elif nums[left] != 0 and nums[right] == 0:\n right += 1\n else:\n nums[left] = nums[right]\n left += 1\n right += 1\n i = -1\n while counter > 0:\n nums[i] = 0\n counter -=1\n i -= 1\n return", "def sort(self, nums: List[int]) -> None:\n n = len(nums)\n for i in range(n):\n\n # Set the lowest to the beginning of the unsorted subarray\n low = i\n for j in range(i+1,n):\n\n # Find the lowest in the unsorted array\n if nums[j] < nums[low]:\n low = j\n \n # Swap the beginning of the unsorted subarray and the lowest.\n # The beginning of the unsorted subarray now becomes the end of the sorted subarray\n nums[i], nums[low] = nums[low], nums[i]", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n nums3 = nums1[:m]\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums3[i] < nums2[j]:\n nums1[k] = nums3[i]\n i += 1\n else:\n nums1[k] = nums2[j]\n j += 1\n k += 1\n \n for v in range(i, m):\n nums1[k] = nums3[v]\n k += 1\n \n for v in range(j, n):\n nums1[k] = nums2[v]\n k += 1", "def moveZeroes(self, nums: List[int]) -> None:\n left = 0\n zero = 0\n right = len(nums) - 1\n while left <= right:\n if nums[left] == 0:\n nums.pop(left)\n nums.append(0)\n right -= 1\n else:\n left += 1", "def nextPermutation(self, nums: list[int]) -> None:\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] < nums[i + 1]: break\n firstGreater = self.findFirstGreater(nums, nums[i], i + 1, len(nums) - 1)\n nums[i], nums[firstGreater] = nums[firstGreater], nums[i]\n nums[i + 1:] = sorted(nums[i + 1:])", "def task4(nums):\n if len(nums) > 1:\n mid = len(nums) // 2\n left = nums[:mid]\n right = nums[mid:]\n\n task4(left)\n task4(right)\n\n i = j = k = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n nums[k] = left[i]\n i += 1\n else:\n nums[k] = right[j]\n j += 1\n k += 1\n\n while i < len(left):\n nums[k] = left[i]\n i += 1\n k += 1\n\n while j < len(right):\n nums[k] = right[j]\n j += 1\n k += 1\n\n return nums", "def moveZeroes(self, nums: List[int]) -> None:\n count=0\n i=0\n while i<len(nums)-count:\n if nums[i]==0: \n count+=1 \n x = i\n y = x+1\n while y <= len(nums) - count:\n nums[x] = nums[y]\n x = y\n y = y+1\n \n nums[len(nums) - count] = 0\n else:\n i = i+1 \n \n print(nums)", "def merge1(self, nums1, m, nums2, n): \n nums1[:m].extend(nums2[:n]) # 此方法改变了nums1的指向,无效\n # extend没改变指向,但是切片改了 \n nums1[:m] + nums2[:n] # 此方法改变了nums1的指向,无效\n # +号改变了指向\n\n nums1.sort()", "def __init__(self, nums):\n d = [0] if nums else []\n for i in xrange(len(nums)):\n d.append(d[i] + nums[i])\n self.d = d", "def single_number(self, nums: List[int]) -> int:\n if not nums:\n return False\n if len(nums) == 1:\n return nums[0]\n\n visited = set()\n\n for i in 
nums:\n if i not in visited:\n visited.add(i)\n else:\n visited.remove(i)\n\n return visited.pop()" ]
[ "0.70469916", "0.67161703", "0.66934896", "0.6586775", "0.6501143", "0.6482345", "0.6442288", "0.6407945", "0.6376896", "0.6372343", "0.63671577", "0.6365932", "0.63512594", "0.6328759", "0.6298402", "0.62855035", "0.62671727", "0.62472045", "0.62221444", "0.6193869", "0.6188269", "0.6184157", "0.61814266", "0.6180073", "0.6167664", "0.6158324", "0.6148441", "0.61476713", "0.613699", "0.6119307", "0.61044604", "0.60979587", "0.6087138", "0.60849226", "0.60818934", "0.6079748", "0.607389", "0.6066264", "0.6053259", "0.6053041", "0.6046262", "0.60457736", "0.60294896", "0.6022872", "0.60060567", "0.59951115", "0.59945714", "0.5977443", "0.596527", "0.5964904", "0.59526557", "0.5948315", "0.5936405", "0.59317774", "0.5928145", "0.592811", "0.59255713", "0.59133506", "0.591277", "0.59082997", "0.59076744", "0.59042895", "0.590009", "0.5897842", "0.58946174", "0.58878344", "0.58807635", "0.587345", "0.5870372", "0.5865296", "0.5863181", "0.58609647", "0.5850446", "0.5843022", "0.5833699", "0.581921", "0.5814978", "0.5814781", "0.5811194", "0.5795042", "0.5785132", "0.5777276", "0.5764196", "0.57611984", "0.5755437", "0.5752843", "0.57499003", "0.5747163", "0.57464033", "0.57400894", "0.5727857", "0.5726958", "0.5722932", "0.57216763", "0.5717795", "0.5711458", "0.57025313", "0.56988806", "0.5677013", "0.5675979", "0.56753594" ]
0.0
-1
Executes 'num_steps' number of generations for each island in the archipelago's list of islands
def step_through_generations(self, num_steps):
    for island in self._islands:
        for _ in range(num_steps):
            island.execute_generational_step()
    self.archipelago_age += num_steps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def generate(self, num_timesteps):\n self.north_arrivals = []\n self.south_arrivals = []\n self.east_arrivals = []\n self.west_arrivals = []\n self.total_cars = 0\n\n north_south = np.random.poisson(15)/50\n east_west = .5-north_south\n\n for i in range(num_timesteps):\n if i% 10==0:\n north_south = np.random.poisson(15)/50\n east_west = .5-north_south\n\n # Used to determine if a new car is added\n chance_token = random.random() \n\n # North South\n if chance_token <= north_south:\n self.north_arrivals.append(1)\n self.south_arrivals.append(1)\n self.total_cars += 2\n else:\n self.north_arrivals.append(0)\n self.south_arrivals.append(0)\n\n # East West\n if chance_token <= east_west:\n self.east_arrivals.append(1)\n self.west_arrivals.append(1)\n self.total_cars += 2\n else:\n self.east_arrivals.append(0)\n self.west_arrivals.append(0)", "def startGeneration(variant, resolution, loops):\n # Check for valid resolution\n if resolution % 2 != 0:\n print (\"Resolution should be an even integer.\")\n return\n\n # Set high score:\n if variant == 20:\n high_score = 11365950\n if variant == 40:\n high_score = 17858670\n if variant == 60:\n high_score = 24239310\n\n # House distirbution:\n familyHome_count = 0.6 * variant\n bungalow_count = 0.25 * variant\n maison_count = 0.15 * variant\n\n for loops in range(loops):\n\n # Initialize Classlist\n placed_houses = []\n placed_water = []\n\n # Initialize values\n gr = generic.genMap(180 * resolution, 160 * resolution)\n\n # Set length and width based on resultion.\n fam_length = int(resolution * 8)\n fam_width = int(resolution * 8)\n fam_freespace = int(resolution * 2)\n\n bung_length = int(resolution * 7.5)\n bung_width = int(resolution * 10)\n bung_freespace = int(resolution * 3)\n\n mais_length = int(resolution * 10.5)\n mais_width = int(resolution * 11)\n mais_freespace = int(resolution * 6)\n\n # Water\n # Generate water parts\n water_parts = genWater(gr, resolution)\n\n # Place water parts in grid:\n for part in range(len(water_parts)):\n W = 0\n\n # Loop until correctly placed.\n while W != 1:\n\n # Define class instance\n Water = class_house.House(water_parts[part][1], water_parts[part][0],\n 1, 0, 0, 4, \"W\", resolution)\n\n ngrid = genHome(gr, Water)\n\n # Check for success:\n if ngrid == False:\n print (\"No succesfull placement Water\")\n else:\n print (\"Water {0} placed!\".format(W))\n gr = list(ngrid)\n\n # Add water to list\n placed_houses.append(Water)\n\n W = 1\n\n # Maisons\n M = 0\n while M != maison_count:\n\n # Define class instance\n Maison = class_house.House(mais_length, mais_width,\n mais_freespace, 610000, 6, 1, \"M\", resolution)\n\n ngrid = genHome(gr, Maison)\n\n # Check if house succsfully placed:\n if ngrid == False:\n print (\"No succesfull placement Maison\")\n else:\n print (\"Maison {0} placed!\".format(M))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Maison)\n\n M += 1\n\n # Then bungalows\n B = 0\n while B != bungalow_count:\n\n # Define class instance\n Bungalow = class_house.House(bung_length, bung_width,\n bung_freespace, 399000, 4, 2, \"B\", resolution)\n\n ngrid = genHome(gr, 
Bungalow)\n\n # Check for succes:\n if ngrid == False:\n print (\"No succesfull placement Bungalow\")\n else:\n print (\"Bungalow {0} placed!\".format(B))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Bungalow)\n\n B += 1\n\n # Then Family homes\n F = 0\n while F != familyHome_count:\n\n # Define class instance\n Familyhome = class_house.House(fam_length, fam_width,\n fam_freespace, 285000, 3, 3, \"F\", resolution)\n\n ngrid = genHome(gr, Familyhome)\n\n # Check for succes:\n if ngrid == False:\n print (\"No succesfull placement Family Home\")\n else:\n print (\"Family home {0} placed!\".format(F))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Familyhome)\n\n F += 1\n\n # Calculate score using Placed houses\n sc = generic.calculateScore(gr, placed_houses)\n name = (\"Score: \" + str(sc))\n\n # Only save to file when new record.\n fname = \"Type{0} - {1}\".format(variant, sc)\n\n\n if sc > high_score:\n #read_write.write(fname, placed_houses)\n high_score = sc\n print (\"New high score ({0}) in loop: {1}\".format(sc, loops))\n print (\"Writing to file..\")\n\n return gr, placed_houses, sc", "def compute_map(current_agent_id,agent_order,number_of_timestep,state_schedules, conv :StateConverter):\r\n #Find the agent has the highest number of time steps\r\n highest_timestep = 0\r\n # Find the highest time step\r\n if len(number_of_timestep) >0:\r\n highest_timestep = np.max(number_of_timestep)\r\n occupancy_map = []\r\n # Since we don't know yet how many time step of the current id so\r\n # the number of time steps of the occupancy map == highest number of time step\r\n # of the current schedule\r\n for time_step in range(int(highest_timestep)):\r\n # Initialize the occupancy for current time step\r\n current_occupancy_map = np.zeros(conv.num_tiles)\r\n # We loop through schedule of each agent at current time step\r\n for i in range(len(state_schedules)):\r\n # Get the agent id of current schedule\r\n agent_of_schedule = agent_order[i]\r\n if time_step < len(state_schedules[i]):\r\n # The first case when the agent of current schedule is executed after the current agent\r\n if agent_of_schedule > current_agent_id:\r\n # Get the current state\r\n current_state = state_schedules[i][time_step]\r\n # Convert the current state to tile index\r\n current_tile = conv.state_to_tile(current_state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[current_tile] = 1\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n # The second case when the agent of current schedule is executed before the current agent\r\n else:\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n if time_step + 2 < len(state_schedules[i]):\r\n # Get the next 2 state\r\n next_2state = state_schedules[i][time_step+2]\r\n # Convert the current state to tile index\r\n next_2tile = conv.state_to_tile(next_2state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[next_2tile] = 1\r\n 
occupancy_map.append(current_occupancy_map)\r\n return occupancy_map", "def test_can_traverse_tall_grid(self):\n grid = [\n [\"0\"],\n [\"1\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def main():\n\n rules = parse_input(get_input())\n for part in [5, 18]:\n image = np.array(START_PATTERN).astype(bool)\n for i in range(part):\n image = enlarge(image, rules)\n count = sum(sum(ch for ch in row) for row in image)\n\n print(\"Number of # in the final matrix after {} iterations is {}.\".format(part, count))\n return", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def steps(self,num_steps):\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()\n for step in range(num_steps):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, self.next_action, reward, next_sensation)\n self.next_action = self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()", "def test_can_traverse_wide_grid(self):\n grid = [[\"1\", \"0\", \"1\", \"1\", \"0\", \"1\", \"0\", \"0\", \"1\", \"0\"]]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def count_islands(grid):\n grid_copy = list(grid)\n count = 0\n for i in range(0, len(grid_copy)):\n for j in range (0, len(grid_copy[0])):\n if grid[i][j] and grid_copy[i][j]:\n 
_dfs(grid_copy, i, j)\n count += 1\n return count", "def n_steps(self, actions):\n return [self.step(action) for action in actions]", "def run_lots(num_rooms, max_paths, valid_puzzle_found_callback, min_difficulty=6):\n for _ in range(num_rooms):\n make_room(max_paths, valid_puzzle_found_callback, min_difficulty=min_difficulty)", "def _insertAllSteps(self): \n self.uMics = self.inputCoordinatesTiltedPairs.get().getUntilted().getMicrographs()\n self.tMics = self.inputCoordinatesTiltedPairs.get().getTilted().getMicrographs()\n\n self.inputMics = self._createSetOfParticles('auxMics')\n self.inputMics.copyInfo(self.uMics)\n self.inputMics.setStore(False)\n \n for micU, micT in izip(self.uMics, self.tMics):\n micU.cleanObjId()\n micT.cleanObjId()\n self.inputMics.append(micU)\n self.inputMics.append(micT)\n\n self.samplingInput = self.uMics.getSamplingRate()\n \n\n if self.downsampleType.get() != OTHER:\n # If 'same as picking' or 'original' get sampling rate from input micrographs\n #TODO: Review this when downsampling before picking is possible\n self.samplingFinal = self.samplingInput\n else:\n # If 'other' multiply the input sampling rate by the factor provided\n self.samplingFinal = self.samplingInput*self.downFactor.get()\n \n # Write pos files for each micrograph\n firstStepId = self._insertFunctionStep('writePosFilesStep')\n \n # For each micrograph insert the steps\n #run in parallel\n \n deps = []\n for mic in self.inputMics:\n localDeps = [firstStepId]\n micrographToExtract = mic.getFileName()\n micName = removeBaseExt(mic.getFileName())\n micId = mic.getObjId()\n\n # If downsample type is 'other' perform a downsample\n if self.downsampleType == OTHER:\n fnDownsampled = self._getTmpPath(micName+\"_downsampled.xmp\")\n downFactor = self.downFactor.get()\n args = \"-i %(micrographToExtract)s -o %(fnDownsampled)s --step %(downFactor)f --method fourier\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_downsample\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnDownsampled\n \n # If remove dust \n if self.doRemoveDust:\n fnNoDust = self._getTmpPath(micName+\"_noDust.xmp\")\n \n thresholdDust = self.thresholdDust.get() #TODO: remove this extra variable\n args=\" -i %(micrographToExtract)s -o %(fnNoDust)s --bad_pixels outliers %(thresholdDust)f\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_filter\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnNoDust\n \n #self._insertFunctionStep('getCTF', micId, micName, micrographToExtract)\n micName = removeBaseExt(mic.getFileName())\n \n # Actually extract\n deps.append(self._insertFunctionStep('extractParticlesStep', micId, micName, \n None, micrographToExtract, prerequisites=localDeps))\n # TODO: Delete temporary files\n \n # Insert step to create output objects \n self._insertFunctionStep('createOutputStep', prerequisites=deps)", "def iterativeDeepeningGeneration(self, param_dict):\n ans_set_manager_list = []\n for num_steps in range(1, int(param_dict['maxSteps']) + 1):\n param_dict['maxSteps'] = str(num_steps)\n ans_set_manager_list.append(self.computeAnsSets(param_dict))\n return ans_set_manager_list", "def iterate(self):\n for i in range(self.generations):\n sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print(\n [item.to_string() for item in sorted_polulation[:8]],\n [round(item.fitness_function(item),2) for item in sorted_polulation]\n )\n\n # print([item.to_string() for item in self.data])\n\n self.step()\n print(\"result\")\n 
sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print([str(item) for item in sorted_polulation])", "def step(self):\n for c in self.spill_list:\n \n self._schedule.step()", "def gen_tasks(self):\n for zoom in range(MIN_ZOOM, MAX_ZOOM + 1):\n seen = set() # (x, y)\n M = 2 ** zoom - 1\n # Find all areas suitable for zoom\n for area in Area.objects.filter(is_active=True,\n min_zoom__lte=zoom,\n max_zoom__gte=zoom):\n # Get area tiles\n SW = ll_to_xy(zoom, area.SW)\n NE = ll_to_xy(zoom, area.NE)\n left = max(SW[0] - PAD_TILES, 0)\n right = min(NE[0] + PAD_TILES, M)\n top = max(NE[1] - PAD_TILES, 0)\n bottom = min(SW[1] + PAD_TILES, M)\n a_size = (right - left + 1) * (bottom - top + 1)\n self.log(\"Checking area '%s' at zoom level %d \"\\\n \" (%d x %d = %d tiles)\" % (area.name, zoom,\n right - left + 1,\n bottom - top + 1,\n a_size))\n seen |= set((tc.x, tc.y) for tc in TileCache.objects.filter(\n map=self.map.id, zoom=zoom).only(\"x\", \"y\"))\n for x in range(left, right + 1):\n for y in range(top, bottom + 1):\n c = (x, y)\n if c in seen:\n continue\n seen.add(c)\n if not self.force:\n # Check tile is ready\n tc = TileCache.objects.filter(map=self.map.id,\n zoom=zoom, x=x,\n y=y).first()\n if tc and tc.ready:\n continue\n yield (zoom, x, y)", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def run(self, n):\n new_trajectories = self.enumerate_trajectories(self.gpm.Graph, n, self.source, self.target, max_iter=self.max_iter)\n self._trajectories += new_trajectories", "def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")", "def test_returns_correct_number_of_islands(self):\n grid = [\n [\"1\", \"1\", \"1\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"1\"],\n [\"1\", \"0\", \"1\", \"0\", \"1\"],\n [\"0\", \"1\", \"1\", \"1\", \"1\"],\n [\"1\", \"0\", \"1\", \"1\", \"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 3)", "def run(\n self, num_actions: int, debug: bool = False, step_num: int = 0\n ) -> list:\n overlap_values = []\n for zone_list in self.area_strategy:\n for _ in range(0, num_actions):\n overlap = self._call_object(object=random.choice(zone_list))\n overlap_values.append(overlap)\n\n 
self.area_strategy.reset()\n\n return (\n zone_list,\n round(sum(overlap_values[0:9]) / 10, 2),\n round(sum(overlap_values[-10:-1]) / 10, 2),\n )", "def step(self, n, dlist):\n pass", "def _generate_walks(self):\n\n flatten = lambda l: [item for sublist in l for item in sublist]\n\n # Split num_walks for each worker\n num_walks_lists = np.array_split(range(self.num_walks), self.workers)\n\n walk_results = Parallel(n_jobs=self.workers, temp_folder=self.temp_folder, require=self.require)(\n delayed(self.parallel_generate_walks)(self.d_graph,\n self.walk_length,\n len(num_walks),\n idx,\n self.sampling_strategy,\n self.NUM_WALKS_KEY,\n self.WALK_LENGTH_KEY,\n self.NEIGHBORS_KEY,\n self.PROBABILITIES_KEY,\n self.FIRST_TRAVEL_KEY,\n self.quiet) for\n idx, num_walks\n in enumerate(num_walks_lists, 1))\n\n walks = flatten(walk_results)\n\n return walks", "def grid_frame(self, steps, figure_size=(12, 12)):\r\n\r\n x = self.seed\r\n counts = []\r\n for n in np.arange(0, steps):\r\n x, stats = self.update_grid(x)\r\n counts.append(stats)\r\n\r\n counts = np.array(counts)\r\n\r\n fig, ax = plt.subplots(figsize=figure_size)\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n color_map = matplotlib.colors.ListedColormap(['white', 'black'])\r\n img = plt.imshow(x, interpolation='nearest', cmap=color_map)\r\n img.axes.grid(False)\r\n plt.title(self.title + ' | Step ' + str(steps))\r\n plt.show()\r\n\r\n return x, counts", "def print_generations(start, n):\n pb(start)\n for c in range(n):\n print(\"---\")\n start = life_generation(start)\n pb(start)\n return start", "def island_cycle(self):\n self.island_fodder_growth()\n self.island_feeding()\n self.island_procreate()\n self.island_migration()\n self.island_aging()\n self.island_loss_of_weight()\n self.island_deaths()", "def generate_all_locations(grid, shape):", "def add_building_output_locations2(self,areasList,start,end,step): \n print \"Getting buildings locations...\"\n \n dictionaries = []\n dictionary = {}\n \n for a in areasList:\n \n dictionaries.append(self.grid.get_building_output_locations(a[0],a[1]))\n \n for dict in dictionaries:\n for row in dict.iteritems(): \n dictionary[row[0]] = row[1] \n\n print \"Number of buildings = %s\" % (len(dictionary))\n\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def run(self, generations=1000):\n gcount = 0\n \n while gcount<=generations:\n try:\n print \"Gen: \"+str(gcount),\n self.population = zip (self.population, [self.target]*len(self.population))\n self.population = self.pool.map(f, self.population)\n except:\n pass\n for i in self.population:\n print i[0],i[1]\n self.population = [organism.Organism(x[0], x[1]) for x in self.population]\n self.population.sort()\n print \" Max fitness: \"+str(self.population[::-1][1].fitness)\n try:\n if self.population[0] <= self.ppop[0]:\n self.ppop = self.population[::-1][0:10] # The top ten organisms\n else:\n self.population = self.ppop # We got worse! 
go back!\n except:\n self.ppop = self.population\n self.population = self.population[::-1][0:10]\n try:\n self.breed()\n except:\n print \"Breeding error\"\n gcount+=1", "def siftRegionsOfInterest(options,mapped_data_per_size_per_register,phase,cycle):\n for chromosome in sorted(mapped_data_per_size_per_register):\n # Make separate files for each chromosome\n output_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\"\n fhw=open(output_filename,\"w\")\n for register in sorted(mapped_data_per_size_per_register[chromosome]):\n start,end=0,0\n for coordinate in sorted(mapped_data_per_size_per_register[chromosome][register]):\n if start == 0:\n start = coordinate\n elif end == 0:\n if coordinate-start < phase*cycle:\n end = coordinate\n else:\n start = coordinate\n else:\n if coordinate-end < phase*cycle:\n end = coordinate\n else:\n fhw.write(str(register)+\"\\t\"+str(start)+\"\\t\"+str(end+phase-1)+\"\\n\")\n end=0\n start=coordinate\n if end!=0:\n fhw.write(str(register)+\"\\t\"+str(start)+\"\\t\"+str(end+phase-1)+\"\\n\")\n fhw.close()", "def iterate_paths_map(riv_dirs,paths_map,nlat=360,nlong=720):\n\n if np.count_nonzero(paths_map) == paths_map.size:\n return False\n for i in range(nlat+2):\n for j in range(nlong):\n if i == 0 or i == nlat+1:\n paths_map[i,j] = 1\n elif j == 0:\n paths_map[i,j] = count_accumulated_inflow(np.append(riv_dirs[i-1:i+2,nlong-1:nlong],riv_dirs[i-1:i+2,j:j+2],axis=1),\n np.append(paths_map[i-1:i+2,nlong-1:nlong],paths_map[i-1:i+2,j:j+2],axis=1))\n elif j == nlong-1:\n paths_map[i,j] = count_accumulated_inflow(np.append(riv_dirs[i-1:i+2,j-1:j+1],riv_dirs[i-1:i+2,0:1],axis=1),\n np.append(paths_map[i-1:i+2,j-1:j+1],paths_map[i-1:i+2,0:1],axis=1))\n else:\n paths_map[i,j] = count_accumulated_inflow(riv_dirs[i-1:i+2,j-1:j+2],\n paths_map[i-1:i+2,j-1:j+2])\n return True", "def spawn_visitors(self, n):\n spawnable_positions = self.get_all_spawnable_cells()\n for _ in range(n):\n\n visitor = Visitor(self.next_id(), self, female_ratio=self.female_ratio, adult_ratio=self.adult_ratio,\n familiarity=self.familiarity)\n\n pos = random.choice(spawnable_positions)\n\n self.grid.place_agent(agent=visitor, pos=pos)\n self.schedule.add(visitor)", "def runSimulation(numSteps):\n\n rabbit_pop = []\n fox_pop = [] \n \n for steps in range(numSteps):\n rabbitGrowth()\n foxGrowth()\n rabbit_pop.append(CURRENTRABBITPOP)\n fox_pop.append(CURRENTFOXPOP)\n \n return (rabbit_pop, fox_pop)", "def runSimulation(numSteps):\r\n rabbit_populations = []\r\n fox_populations = []\r\n for step in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbit_populations.append(CURRENTRABBITPOP)\r\n fox_populations.append(CURRENTFOXPOP)\r\n return (rabbit_populations, fox_populations)", "def stage_1_generator(option):\n # Stage Size + Player Starting Position\n STAGE_1 = ([10,10], [1,1])\n\n # Non-Ocean tiles\n STAGE_1_TILES = { \n \"1,2\":\"rock\",\n \"1,3\":\"mountain\",\n \"2,4\":\"rock\",\n \"2,7\":\"rock\",\n \"2,8\":\"rock\",\n \"3,3\":\"rock\",\n \"3,4\":\"rock\",\n \"3,8\":\"mountain\",\n \"3,9\":\"rock\",\n \"3,10\":\"rock\",\n \"4,4\":\"rock\",\n \"4,5\":\"mountain\",\n \"5,6\":\"rock\",\n \"6,1\":\"rock\",\n \"6,2\":\"rock\",\n \"6,7\":\"rock\",\n \"6,10\":\"rock\",\n \"7,1\":\"rock\",\n \"7,2\":\"rock\",\n \"7,6\":\"rock\",\n \"7,10\":\"rock\",\n \"8,5\":\"rock\",\n \"8,6\":\"rock\",\n \n \"8,10\":\"rock\",\n \"9,1\":\"sign\",\n \"9,3\":\"rock\",\n \"9,4\":\"rock\",\n \"9,9\":\"rock\",\n 
\"9,10\":\"rock\",\n \"10,1\":\"cave\",\n \"10,3\":\"rock\",\n \"10,4\":\"cave\",\n \"10,8\":\"rock\",\n \"10,9\":\"rock\",\n \"10,10\":\"rock\",\n \n \"1,10\":\"end\",\n }\n\n # Special Tiles that trigger an event\n STAGE_1_SPECIAL = { \n \"1,10\":\"end\",\n \"9,1\":\"sign_cave\",\n \"10,1\":\"cave_entrance_1\",\n \"10,4\":\"cave_entrance_2\",\n \"1,9\":\"dark_water\",\n \"2,9\":\"dark_water\",\n \"2,10\":\"dark_water\"\n }\n\n # Decide what data to return\n if option == \"stage\":\n return STAGE_1\n elif option == \"tiles\":\n return STAGE_1_TILES\n elif option == \"special\":\n return STAGE_1_SPECIAL\n else:\n print(\"Something Broke! map_generator_1\")", "def tsp_walk(n, op, nsteps):\n result = []\n t = list(range(n))\n result.append(tuple(t))\n for i in range(nsteps):\n t = op(t)\n result.append(tuple(t))\n return result", "def take_a_walk(num_steps, start_step, seq_per_set, num_sets, seed=-1):\n # Set the random seed, if supplied\n if seed > 0:\n np.random.seed(seed)\n # Preallocate the entire training data list of lists of NumPy arrays\n training = num_sets * [seq_per_set * [None]]\n # Iterate to build the training data randomly\n for this_set in range(num_sets): # Each set\n for seq in range(seq_per_set): # Each sequence\n if start_step == -1: # Random start location\n start_step = np.random.randint(1, num_steps)\n # Initialize the sequence\n step = start_step\n sequence = np.zeros(num_steps).astype(int)\n sequence[step] = 1\n while (step != 0 and step != num_steps - 1): # While not in absorbing state\n if np.random.uniform() >= 0.5: # Uniformly random L v R step\n step += 1 # Go right\n else:\n step -= 1 # Go left\n # Generate the vector representing this step\n this_sequence = np.zeros(num_steps).astype(int)\n # Set the appropriate element to 1\n this_sequence[step] = 1\n # Add this step to the sequence\n sequence = np.vstack((sequence, this_sequence))\n # Assign the sequence to its position in the training data\n training[this_set][seq] = sequence\n return training", "def _compute_step_sizes(self, number_knots, knots):\n # expected bounds on the knots sequence\n x_lower = self.model.workers.lower\n x_upper = self.model.workers.upper\n\n if (number_knots is not None) and (knots is None):\n step_size = (x_upper - x_lower) / (number_knots - 1)\n step_sizes = np.repeat(step_size, number_knots - 1)\n elif (number_knots is None) and (knots is not None):\n #assert knots[0] == x_lower\n #assert knots[-1] == x_upper\n step_sizes = np.diff(knots, 1)\n else:\n raise ValueError(\"Either 'number_knots' or 'knots' must be specified!\")\n \n if self.model.assortativity == 'positive':\n step_sizes = step_sizes[::-1]\n\n return step_sizes", "def generate_population(size, w, h, N):\r\n population = []\r\n for _ in range(size):\r\n entity = gen_mines(w, h, randint(0, w*h))\r\n entity = (entity[:], count_numbers(gen_board(w, h, entity), N))\r\n population.append(entity)\r\n \r\n return population", "def all_pairs_number_of_walks(G, walk_length):\n # TODO This algorithm can certainly be parallelized.\n return {v: single_source_number_of_walks(G, v, walk_length) for v in G}", "def ordinary_is(self, n_episodes, start_state=None, step_list=None):\n step_list = [] if step_list is None else step_list\n q_steps = []\n for episode in range(n_episodes + 1):\n trajs = self.generate_trajectory(start_state=start_state, det=False)\n G = 0\n W = 1\n for (i, (s, a, r)) in enumerate(trajs[::-1]):\n G = self.gamma * G + r\n self.is_returns[(s, a)].append(W * G)\n W *= self.target[(a, s)] / self.b[(a, s)]\n if W == 
0:\n break\n if episode in step_list:\n for a in self.env.moves:\n self.Q[(start_state, a)] = np.sum(self.is_returns[(s, a)]) / episode\n self.estimates.append(self.target_estimate(start_state))", "def collect_all_gw(season, gameweek_path, data_path, player_path): \n if season == '2019-20':\n max_gw = 47\n else:\n max_gw = 38\n for i in list(range(1, max_gw + 1)): # Check here\n collect_gw(i, gameweek_path=gameweek_path, data_path=data_path, player_path=player_path)\n merge_gw(type='FPL', gameweek_path=gameweek_path)", "def make_land_agents_2016(self):\r\n # add non-gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n\r\n landposlist = self.determine_landpos(hh_row, 'non_gtgp_latitude', 'non_gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.land_time = return_values(hh_row, 'non_gtgp_travel_time')[landposlist.index(landpos)]\r\n try:\r\n self.plant_type = return_values(hh_row, 'non_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'non_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 0\r\n lp = LandParcelAgent(hh_row, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp, landpos)\r\n self.schedule.add(lp)\r\n if self.gtgp_enrolled == 0 and landpos not in nongtgplist and landpos not in gtgplist:\r\n nongtgplist.append(landpos)\r\n # except:\r\n # pass\r\n\r\n # add gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n landposlist = self.determine_landpos(hh_row, 'gtgp_latitude', 'gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = 
return_values(hh_row, 'education')[0]\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_time = return_values(hh_row, 'gtgp_travel_time')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.plant_type = return_values(hh_row, 'pre_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'pre_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 1\r\n\r\n lp_gtgp = LandParcelAgent(hh_id, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp_gtgp, landpos)\r\n self.schedule.add(lp_gtgp)\r\n if self.gtgp_enrolled == 1 and landpos not in gtgplist and landpos in nongtgplist:\r\n gtgplist.append(landpos)", "def generate_cubes(num_states, distance, startstate='RRRRRRRRRBBBBBBBBBOOOOOOOOOGGGGGGGGGWWWWWWWWWYYYYYYYYY'):\n terminal_states = {'RRRRRRRRRYYYYYYYYYOOOOOOOOOWWWWWWWWWBBBBBBBBBGGGGGGGGG',\n 'BBBBBBBBBYYYYYYYYYGGGGGGGGGWWWWWWWWWOOOOOOOOORRRRRRRRR',\n 'YYYYYYYYYBBBBBBBBBWWWWWWWWWGGGGGGGGGRRRRRRRRROOOOOOOOO',\n 'BBBBBBBBBWWWWWWWWWGGGGGGGGGYYYYYYYYYRRRRRRRRROOOOOOOOO',\n 'OOOOOOOOOGGGGGGGGGRRRRRRRRRBBBBBBBBBWWWWWWWWWYYYYYYYYY',\n 'GGGGGGGGGOOOOOOOOOBBBBBBBBBRRRRRRRRRYYYYYYYYYWWWWWWWWW',\n 'GGGGGGGGGRRRRRRRRRBBBBBBBBBOOOOOOOOOWWWWWWWWWYYYYYYYYY',\n 'GGGGGGGGGYYYYYYYYYBBBBBBBBBWWWWWWWWWRRRRRRRRROOOOOOOOO',\n 'RRRRRRRRRWWWWWWWWWOOOOOOOOOYYYYYYYYYGGGGGGGGGBBBBBBBBB',\n 'OOOOOOOOOWWWWWWWWWRRRRRRRRRYYYYYYYYYBBBBBBBBBGGGGGGGGG',\n 'WWWWWWWWWRRRRRRRRRYYYYYYYYYOOOOOOOOOBBBBBBBBBGGGGGGGGG',\n 'BBBBBBBBBOOOOOOOOOGGGGGGGGGRRRRRRRRRWWWWWWWWWYYYYYYYYY',\n 'BBBBBBBBBRRRRRRRRRGGGGGGGGGOOOOOOOOOYYYYYYYYYWWWWWWWWW',\n 'RRRRRRRRRGGGGGGGGGOOOOOOOOOBBBBBBBBBYYYYYYYYYWWWWWWWWW',\n 'YYYYYYYYYRRRRRRRRRWWWWWWWWWOOOOOOOOOGGGGGGGGGBBBBBBBBB',\n 'YYYYYYYYYOOOOOOOOOWWWWWWWWWRRRRRRRRRBBBBBBBBBGGGGGGGGG',\n 'GGGGGGGGGWWWWWWWWWBBBBBBBBBYYYYYYYYYOOOOOOOOORRRRRRRRR',\n 'WWWWWWWWWGGGGGGGGGYYYYYYYYYBBBBBBBBBRRRRRRRRROOOOOOOOO',\n 'OOOOOOOOOYYYYYYYYYRRRRRRRRRWWWWWWWWWGGGGGGGGGBBBBBBBBB',\n 'RRRRRRRRRBBBBBBBBBOOOOOOOOOGGGGGGGGGWWWWWWWWWYYYYYYYYY',\n 'WWWWWWWWWBBBBBBBBBYYYYYYYYYGGGGGGGGGOOOOOOOOORRRRRRRRR',\n 'WWWWWWWWWOOOOOOOOOYYYYYYYYYRRRRRRRRRGGGGGGGGGBBBBBBBBB',\n 'OOOOOOOOOBBBBBBBBBRRRRRRRRRGGGGGGGGGYYYYYYYYYWWWWWWWWW',\n 'YYYYYYYYYGGGGGGGGGWWWWWWWWWBBBBBBBBBOOOOOOOOORRRRRRRRR'}\n states = []\n while len(states) < num_states:\n x = RubiksCubeOld(startstate)\n for j in range(distance):\n x.apply_move(np.random.randint(0,18))\n newstate = x.get_state()\n if newstate not in terminal_states: states.append(newstate)\n states = list(set(states))\n\n return states", "def iterations(num_range, sub):\n\t\n\ti=1\n\t\n\twhile i<len(all_states_explored):\n\t\t\n\t\tdo(num_range,i, sub)\n\t\t\n\t\t#Incrementing iterator. 
\t\n\t\ti+=1", "def next_life_generation(a):\n w = len(a[0])\n h = len(a)\n new_a = create_board(w, h)\n\n for n in range(h):\n for m in range(w):\n if 0 < n < h - 1 and 0 < m < w - 1:\n count = count_neighbours(n, m, a)\n if count < 2 or count > 3:\n new_a [n][m] = 0\n elif count == 3:\n new_a[n][m] =1\n else:\n new_a[n][m] = a[n][m]\n else:\n new_a[n][m] = 0\n \n return new_a", "def number_of_iterations(self) -> int:\n pass", "def island_procreate(self):\n for y in self.island_map:\n for cell in y:\n cell.procreate()", "def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return", "def populate_tiles(self):\n\n # grid format :\n # grid(x,y,z)[0]: A valid WorldTile type (i.e. WorldTile.door)\n # grid(x,y,z)[1]: A list of ASCII color or format codes for ColorIze\n # grid(x,y,z)[2]: The tile object\n\n self.t_count = 0 # Tile count, increment for each tile added\n self.build_start = time.clock()\n self.logger.info(\"[*] Starting world building script\")\n\n script_list = [\n self.build_boss_room,\n self.build_rooms,\n self.build_halls,\n self.build_doors,\n self.build_chests,\n self.build_traps,\n self.build_mobs,\n self.build_npcs\n ]\n for func in script_list:\n self.logger.debug(\"\\tRunning {}\".format(func.__name__))\n if not func():\n e_text = \"Build script failed : {}\".format(func.__name__)\n raise AssertionError(e_text)\n\n self.logger.info(\"[*] World building script completed\")\n self.logger.debug(\"\\tTiles Placed : {}\".format(self.t_count))\n build_time = time.clock()-self.build_start\n self.logger.debug(\"\\tTook {}s\".format(build_time))\n self.logger.debug(\"\\tTiles/s : {}\".format(t_count/build_time))", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))", "def run_iterations(self, n, verbose = False):\n for i in range(n):\n # Calculate total number of neighbors for each cell\n all_neighbors = self.get_all_neighbors()\n all_num_neighbors = np.sum(all_neighbors, axis = (-2,-1)) - self.board\n # Determine new state for each cell using lookup table and number of neighbors\n self.board[:] = np.where(self.board, \n self.lookup[1][all_num_neighbors], \n self.lookup[0][all_num_neighbors])\n # Verbosity check\n if verbose:\n print(self.board)", "def make_times_with_n_step(self, start, end, n):\n self.target_times = []\n step = start\n delta = old_div((end - start), float(n))\n while step <= end:\n self.target_times.append(step)\n step += delta", "def island_migration(self):\n for y in self.island_map:\n for cell in y:\n cell.migration()\n\n for y in self.island_map:\n for cell in y:\n for animal in cell.population:\n animal.has_moved = False", "def get_next_steps(self, steps):\n for step in range(steps):\n # Actual calulation: Runge-Kutta 2\n\n # Step 1\n k1 = [\n self.vel * self.dt,\n self.get_next_acc() * self.dt\n ]\n\n # Step 2\n next_pos = self.pos + k1[0] * 0.5\n next_vel = self.vel + k1[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k2 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 3\n next_pos = self.pos + k2[0] * 0.5\n next_vel = self.vel + k2[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k3 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 4\n next_pos = self.pos + k3[0]\n next_vel = self.vel + k3[1]\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k4 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # 
Move forward\n self.pos = self.pos + 1/6 * (k1[0] + 2*k2[0] + 2*k3[0] + k4[0])\n self.vel = self.vel + 1/6 * (k1[1] + 2*k2[1] + 2*k3[1] + k4[1])\n\n # Saving of statistics\n self.save_system_information(self.pos, self.vel)", "def generate_population(population_size, nn_architecture):\n population = []\n for _ in range(population_size):\n population.append(nn.create_nn_from_arch(nn_architecture))\n\n return population", "def increment_time_step(self):\n for grid in self.get_grid_list():\n try:\n self[grid].increment_time_step()\n except AttributeError:\n pass", "def initialize_states(templates, number_of_templates, number_of_states=5):\n number_of_frames_in_each_state_for_each_template = []\n for i in xrange(number_of_templates):\n # get number_of_frames_in_each_state_for_each_template\n length = len(templates[i])\n small_number_of_elements_in_current_state = length / number_of_states # if length is 12,\n # then there are 3 states have 2 frames and 2 states have 3 frames,we call 2 small number and 3 big number\n number_of_big_number = length % number_of_states\n number_of_frames_in_each_state = [small_number_of_elements_in_current_state for j in \\\n xrange(number_of_states - number_of_big_number)]\n number_of_frames_in_each_state.extend \\\n ([small_number_of_elements_in_current_state + 1 for j in xrange(number_of_big_number)])\n number_of_frames_in_each_state_for_each_template.append(number_of_frames_in_each_state)\n # print number_of_frames_in_each_state_for_each_template\n return number_of_frames_in_each_state_for_each_template", "def makeGrid(self, width, height, rewardLocs, exit, nPick=1, nAux=1, walls=[]):\n # Make mapping from coordinate (x, y, (takenreward1, takenreward2, ...))\n # to state number, and vice-versa.\n rTaken = iter([(),])\n for nPicked in range(1, nPick+1):\n rTaken = itertools.chain(rTaken, \n myCombinations(rewardLocs, r=nPicked)\n )\n # Iterators are hard to reset, so we list it.\n rTaken = list(rTaken)\n\n # Mappings from state to coordinates, vice-versa\n coordToState = {}\n stateToCoord = {}\n stateIdx = 0\n for x in range(width):\n for y in range(height):\n for stuff in rTaken:\n for holding in self.holdingPossibilities:\n coordToState[(x, y, stuff, holding)] = stateIdx\n stateToCoord[stateIdx] = (x, y, stuff, holding)\n stateIdx += 1\n self.deadEndState = stateIdx\n\n # Actually make the transition function\n def trans(f, p): \n aux = p\n (x, y, stuff, holding) = stateToCoord[f]\n actionMap = {}\n default = {(f, aux): 1}\n # Make the transition dictionary if the dead-end state (state width*height)\n if f == self.F-1:\n for action in range(5):\n actionMap[action] = default\n return actionMap\n\n # Otherwise, determine directions of motion, etc. \n for i in range(4):\n actionMap[i] = default\n if x != 0 and ((x-1, y) not in walls):\n actionMap[0] = {(coordToState[(x-1,y,stuff, holding)], aux): 1}\n if x < width-1 and ((x+1, y) not in walls):\n actionMap[1] = {(coordToState[(x+1,y,stuff, holding)], aux): 1}\n if y != 0 and ((x, y-1) not in walls):\n actionMap[2] = {(coordToState[(x,y-1,stuff, holding)], aux): 1}\n if y < height-1 and ((x, y+1) not in walls):\n actionMap[3] = {(coordToState[(x,y+1,stuff, holding)], aux): 1}\n # What happens when the agent uses action 4?\n if (x, y) == exit:\n # Some cases, depending on self.oneAtATime\n if not self.oneAtATime:\n # The agent is leaving.\n actionMap[4] = {(self.deadEndState, aux): 1}\n else:\n # The agent is dropping off a reward. 
holeFiller will\n # take care of the reward value.\n if len(stuff) >= nPick:\n # The agent is not allowed to pick up more stuff\n actionMap[4] = {(self.deadEndState, aux): 1}\n else:\n # The agent drops off the object.\n actionMap[4] = {(coordToState[(x,y,stuff, -1)], aux): 1}\n elif (x, y) not in rewardLocs:\n # No reward to pick up. Do nothing.\n actionMap[4] = default\n elif (x, y) in stuff:\n # This reward has already been used. Do nothing.\n actionMap[4] = default\n elif len(stuff) >= nPick or (holding != -1 and holding < len(stuff)\n and self.oneAtATime):\n # The agent has its hands full.\n actionMap[4] = default\n else:\n # The agent is allowed to pick up an object.\n newStuff = tuple(sorted(list(stuff) + [(x, y)]))\n if self.oneAtATime:\n newHoldingIdx = newStuff.index((x, y))\n else:\n newHoldingIdx = -1\n actionMap[4] = {(coordToState[(x, y, newStuff, newHoldingIdx)], aux): 1}\n return actionMap\n\n # Man, I'm outputting a lot of stuff.\n # coordToState[(x, y, rewardsLeft, holding)] -> index of this state\n # stateToCoord[index] -> (x, y, rewardsLeft, holding)\n # rTaken is a list of all possible combinations of leftover rewards.\n return (trans, coordToState, stateToCoord, rTaken)", "def dynamic_programming_path_counter(grid_size):\n G = [1] * grid_size\n for i in range(grid_size):\n for j in range(i):\n G[j] = G[j] + G[j-1]\n G[i] = 2 * G[i - 1]\n return G[grid_size - 1]", "def fixed_steps_trajectories(self, noise=0, nt=1, ll=0.1, limit=None):\n\n print('Generating Trajectories...')\n for i in tqdm.tqdm(range(self.ntraj)):\n\n if self.hop_distribution == 'gaussian' or self.hop_distribution == 'Gaussian':\n z_position = np.cumsum(\n np.random.normal(loc=0, scale=self.hop_sigma, size=self.nsteps)) # accumulate gaussian steps\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n self.trajectories[i, :, 1] = z_position - z_position[0] # make initial z equal to 0\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n time = sampling.random_exponential_dwell(self.lamb, size=self.nsteps)\n elif self.dwell_distribution == 'power':\n time = sampling.random_power_law_dwell(1 + self.alpha, size=self.nsteps, ll=ll, limit=limit)\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n\n time = np.cumsum(time) # accumulate dwell times\n time -= time[0]\n\n self.trajectories[i, :, 0] = time\n\n # Add to array with all corners of hop distribution for visualization purposes\n self.trajectory_hops[i, 1::2, 0] = time[1:]\n self.trajectory_hops[i, 2::2, 0] = time[1:]\n\n self.trajectory_hops[i, ::2, 1] = self.trajectories[i, :, 1]\n self.trajectory_hops[i, 1:-1:2, 1] = self.trajectories[i, :-1, 1]\n self.trajectory_hops[i, -1, 1] = self.trajectories[i, -1, 1]\n\n print('Interpolating Trajectories...')\n # make uniform time intervals with the same interval for each simulated trajectory\n max_time = np.min(self.trajectories[:, -1, 0])\n self.time_uniform = np.linspace(0, max_time, self.nsteps*10)\n\n if nt > 1:\n # self.pbar = tqdm.tqdm(total=self.ntraj)\n pool = Pool(nt)\n for i, t in enumerate(pool.map(self.interpolate_trajectories, range(self.ntraj))):\n self.z_interpolated[i, :] = t\n else:\n for t in tqdm.tqdm(range(self.ntraj)):\n self.z_interpolated[t, :] = self.trajectories[t, np.digitize(self.time_uniform,\n self.trajectories[t, :, 0], right=False) - 1, 1]\n #self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)", "def generate_goal(self):\r\n\t\t# 
Creates a flat list of correct values\r\n\t\ttempList = [x for x in range(self.n**2)]\r\n\r\n\t\t# Nests those lists into a NxN\r\n\t\tBoardClass.goal = [tempList[self.n*i:self.n*(i+1)] for i in range(self.n)]\r\n\r\n\t\t# Creates a dictionary for the intended location of any specific tile. Used in\r\n\t\t# Manhatten Distance calculation.\r\n\t\tfor i in range(self.n**2):\r\n\t\t\trow = i // self.n\r\n\t\t\tcol = i % self.n\r\n\t\t\tBoardClass.goalTileLocations[i] = [row, col]", "def _generate_walks(self):\n return parallel_generate_walks(\n self.d_graph,\n self.walk_length,\n self.num_walks,\n 'Single process!',\n self.sampling_strategy,\n self.NUM_WALKS_KEY,\n self.WALK_LENGTH_KEY,\n self.NEIGHBORS_KEY,\n self.PROBABILITIES_KEY,\n self.FIRST_TRAVEL_KEY,\n self.quiet\n )", "def simulate_memories(simulation_length):\n \n \n pass", "def countSimulationEventGenerators(self):\r\n raise NotImplementedError()", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")", "def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n trialsRecord = []\n for trail in range(num_trials):\n #VISUALIZING ROBOTS - refer course pdf note 'Optional_Visualizing Robots Problem Set 2.pdf'\n #anim = ps2_visualize.RobotVisualization(num_robots, width, height)\n #create room\n room = RectangularRoom(width, height)\n #create robots & store in array\n robots = []\n count = 0\n for i in range(num_robots):\n robots.append(robot_type(room, speed))\n #NB: how does robot_type(room, speed) create a robot object???? 
what magic is this???\n #while calcualted coverage is < min_coverage, update positions & repeat\n while float(room.getNumCleanedTiles()) / room.getNumTiles() < min_coverage:\n #anim.update(room, robots)\n #do more cleaning - update robot positions\n for robot in robots:\n robot.updatePositionAndClean()\n count += 1\n trialsRecord.append(count)#record number of steps to achieve min_coverage in this trial.\n #after loop, close animation\n #anim.done()\n #calculate average number of steps over trials.\n return sum(trialsRecord)/float(len(trialsRecord))\n #raise NotImplementedError", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())", "def add_building_output_locations(self,dictionary, start,end,step): \n \"\"\"\n Given a dictionary of building footprints and associated nodes,element and sides, add the values \n to the netcdf grid file.\n \n The nodes, elements and sides associated with each footprint correspond to the there index in the RiCOM grid file\n \n Dictionary format:\n {id1: {'nodes': [n1, n2,...nn] }, {'elements': [e1,e2,...,en] },{'sides': [s1,s2,...,sn]}, id2: {}, id3 {}, ...., idn {} } \n \n idn = the id of the building footprint that the node, elements and sides belong to\n \n \"\"\"\n \n if (dictionary != {}):\n maxNodes = 0\n maxElements = 0\n maxSides = 0\n nodesAll = []\n elementsAll = []\n sidesAll = []\n id = []\n perimeter = []\n type = []\n for row in dictionary.iteritems(): \n id.append(row[0]) \n n = row[1]['nodes'] \n e = row[1]['elements']\n s = row[1]['sides']\n perimeter.append(row[1]['perimeter'])\n \n if row[1]['type'] == \"BUILDINGS_AS_HOLES\":\n typeNUM = 1\n elif row[1]['type'] == \"BUILDINGS_GRIDDED\":\n typeNUM = 2\n\n elif row[1]['type'] == \"BUILDINGS_AS_POINTS\":\n typeNUM = 3\n else:\n typeNUM = 0\n type.append(typeNUM)\n \n nodesAll.extend(n)\n elementsAll.extend(e)\n sidesAll.extend(s)\n if maxNodes < len(n): maxNodes = len(n)\n if maxElements < len(e): maxElements = len(e)\n if maxSides < len(s): maxSides = len(s)\n \n \n #remove repeated elements, sides and nodes\n nodesAll = list(set(nodesAll))\n elementsAll = list(set(elementsAll))\n sidesAll = list(set(sidesAll))\n \n print \"# elements = %s\" % len(elementsAll)\n print \"# sides = %s\" % len(sidesAll)\n print \"# nodes = %s\" % len(nodesAll)\n\n \n #initialise arrays for entry into netcdf file\n nodes = zeros((len(dictionary),maxNodes))\n elements = zeros((len(dictionary),maxElements))\n sides = zeros((len(dictionary),maxSides)) \n \n i = 0\n for row in dictionary.iteritems(): \n nodes[i,0:(len(row[1]['nodes']))] = row[1]['nodes']\n elements[i,0:(len(row[1]['elements']))] = row[1]['elements']\n sides[i,0:(len(row[1]['sides']))] = row[1]['sides']\n i+=1 \n \n #create dimensions\n try: self.buildings.createDimension('max_number_nodes',maxNodes)\n except Exception, e: print \"WARNING: %s\" % e\n try: 
self.buildings.createDimension('max_number_elements',maxElements)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('max_number_sides',maxSides)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('number_of_buildings',len(dictionary))\n except Exception, e: print \"WARNING: %s\" % e \n try: self.building_nodes.createDimension('number_of_nodes',len(nodesAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_elements.createDimension('number_of_elements',len(elementsAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',len(sidesAll))\n except Exception, e: print \"WARNING: %s\" % e\n \n \n #create variables\n try: building_id = self.buildings.createVariable(varname = 'building_id',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_id = self.buildings.variables['building_id']\n print \"WARNING: %s\" % e\n \n try: building_wkt = self.buildings.createVariable(varname = 'building_wkt',datatype = str, dimensions=('number_of_buildings',)) \n except Exception, e:\n building_wkt = self.buildings.variables['building_wkt'] \n print \"WARNING: %s\" % e\n\n try: building_perimeter = self.buildings.createVariable(varname = 'building_perimeter',datatype = 'd', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_perimeter = self.buildings.variables['building_perimeter'] \n print \"WARNING: %s\" % e\n\n\n try: building_type = self.buildings.createVariable(varname = 'building_type',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_type = self.buildings.variables['building_type'] \n print \"WARNING: %s\" % e\n\n try: building_nodes = self.buildings.createVariable(varname = 'building_nodes',datatype = 'i', dimensions=('number_of_buildings','max_number_nodes',)) \n except Exception, e:\n building_nodes = self.buildings.variables['building_nodes'] \n print \"WARNING: %s\" % e\n \n try: building_elements = self.buildings.createVariable(varname = 'building_elements',datatype = 'i', dimensions=('number_of_buildings','max_number_elements',)) \n except Exception, e:\n building_elements = self.buildings.variables['building_elements']\n print \"WARNING: %s\" % e\n \n try: building_sides = self.buildings.createVariable(varname = 'building_sides',datatype = 'i', dimensions=('number_of_buildings','max_number_sides',)) \n except Exception, e:\n building_sides = self.buildings.variables['building_sides']\n print \"WARNING: %s\" % e\n \n building_nodes[:] = nodes\n building_elements[:] = elements\n building_sides[:] = sides\n building_id[:] = array(id) \n building_perimeter[:] = array(perimeter)\n building_type[:] = array(type)\n #Set the attributes\n self.building_nodes.start = start\n self.building_nodes.finish = end\n self.building_nodes.step = step\n self.building_elements.start = start\n self.building_elements.finish = end\n self.building_elements.step = step\n self.building_sides.start = start\n self.building_sides.finish = end\n self.building_sides.step = step\n \n #assign the data\n output_ids = {'nodes': [], 'elements': [], 'sides': []}\n try: output_ids['nodes'] = self.building_nodes.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_nodes',))\n except Exception, e:\n output_ids['nodes'] = self.building_nodes.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['elements'] = self.building_elements.createVariable(varname = 'id',datatype = 'i', 
dimensions=('number_of_elements',))\n except Exception, e:\n output_ids['elements'] = self.building_elements.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['sides'] = self.building_sides.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_sides',))\n except Exception, e:\n output_ids['sides'] = self.building_sides.variables['id']\n print \"WARNING: %s\" % e\n \n \n output_ids['nodes'][:] = array(nodesAll)\n output_ids['elements'][:] = array(elementsAll)\n output_ids['sides'][:] = array(sidesAll)\n \n \n self.buildingsAdded = True\n else:\n #create dimensions\n try: self.buildings.createDimension('number_of_buildings',0)\n except Exception, e: print \"WARNING: %s\" % e \n try: self.building_nodes.createDimension('number_of_nodes',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_elements.createDimension('number_of_elements',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',0)\n except Exception, e: print \"WARNING: %s\" % e \n self.buildingsAdded = True", "def populate_a_matrix_per_schedule(self):\n self.matrixes = []\n for i in range(self.num_schedules):\n m = np.zeros((2048, 20))\n self.matrixes.append(m)\n for i, each_matrix in enumerate(self.matrixes):\n # lets look at elements of schedule 1\n for j in range(self.schedule_array_train_naive[i][0], self.schedule_array_train_naive[i][1] + 1):\n binary_embedding = self.total_binary_embeddings[j]\n index = self.pass_in_embedding_out_state_ID(binary_embedding)\n # action taken at this instance\n action = self.Y_train_naive[j]\n each_matrix[index][action] += 1\n total_sum = each_matrix.sum()\n self.matrixes[i] = np.divide(each_matrix, total_sum)\n\n print('n matrices have been generated')", "def generate_moves(self):\n if self.begin:\n self.init_population()\n self.begin = False\n self.decoded_population = list()\n for chromosome in self.population:\n # print(chromosome)\n network_info = self.nn_data(chromosome)\n # print(network_info)\n artificial_neural_network = net.NeuralNetwork(network_info)\n x, y = self.decode_network_output(artificial_neural_network.out())\n self.decoded_population.append((x, y))\n print(self.decoded_population)\n self.clear()", "def island_feeding(self):\n for y in self.island_map:\n for cell in y:\n cell.feeding()", "def sim_walks(num_steps, num_trials, d_class):\n Homer = d_class()\n origin = Location(0, 0)\n distances = []\n for t in range(num_trials):\n f = Field()\n f.add_drunk(Homer, origin)\n distances.append(round(walk(f, Homer, num_trials), 1))\n return distances", "def simulate(self, number_of_simulations):\n self.number_of_simulations = number_of_simulations\n\n for iteration_num in range(0, number_of_simulations, 1):\n self.add_grain(0)\n self.check_pile(iteration_num)\n self.mass_when_iteration.append(self.mass_count - self.mass_fallen_count)\n self.plot_iteration(self.angles_array, self.radial_array, self.array, iteration_num)\n print(self.array)", "def _project_loops(self):\n\n self._create_projection_datasets()\n self._get_sho_chunk_sizes(10)\n\n '''\n Loop over the FORCs\n '''\n for forc_chunk_index in range(self._num_forcs):\n pos_chunk_index = 0\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n dc_vec = self._get_dc_offset()\n '''\n 
Loop over positions\n '''\n while self._current_pos_slice.stop < self._end_pos:\n loops_2d, nd_mat_shape_dc_first, order_dc_offset_reverse = self._get_projection_data(pos_chunk_index)\n\n # step 8: perform loop unfolding\n projected_loops_2d, loop_metrics_1d = self._project_loop_batch(dc_vec, np.transpose(loops_2d))\n\n # test the reshapes back\n projected_loops_2d = self._reshape_projected_loops_for_h5(projected_loops_2d,\n order_dc_offset_reverse,\n nd_mat_shape_dc_first)\n self.h5_projected_loops[self._current_pos_slice, self._current_sho_spec_slice] = projected_loops_2d\n\n metrics_2d = self._reshape_results_for_h5(loop_metrics_1d, nd_mat_shape_dc_first)\n\n self.h5_loop_metrics[self._current_pos_slice, self._current_met_spec_slice] = metrics_2d\n\n # Reset the position slice\n self._current_pos_slice = slice(None)\n\n pass", "def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)", "def windows_of_permutations(n, step):\n def gen(p):\n for i in range(0, NB_AVIONS-n, step):\n for perm in all_permutations(range(i, i+n))(p):\n yield perm\n return gen", "def next_generation(self, population):\n pass", "def generateNumsets(G):\n # paths = []\n #\n # path = [0]\n # for edge in nx.dfs_edges(G, 0):\n # if edge[0] == path[-1]:\n # path.append(edge[1])\n # else:\n # paths.append(path)\n # search_index = 2\n # while search_index <= len(path):\n # if edge[0] == path[-search_index]:\n # path = path[:-search_index + 1] + [edge[1]]\n # break\n # search_index += 1\n # else:\n # raise Exception(\"Wrong path structure?\", path, edge)\n # paths.append(path)\n # return paths\n\n \"\"\"\n Trying to use itertools LMAO\n \"\"\"\n # paths = []\n #\n # for path in itertools.combinations(G.nodes, 5):\n # paths.append(path)\n # return paths\n\n \"\"\"\n Generating paths using graph\n \"\"\"\n paths = []\n n = len(G.nodes)\n for source in range(n):\n for target in range(source+1, n):\n paths.extend([path for path in nx.all_simple_paths(G, source=source, target=target)])\n return paths\n\n # return paths", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def getNumTiles(self):\n\t\treturn self.numTiles", "def sim_walks(num_steps, num_trials, d_class):\n Homer = d_class()\n origin = Location(0, 0)\n distances = []\n for t in range(num_trials):\n f = Field()\n f.add_drunk(Homer, origin)\n distances.append(round(walk(f, Homer, num_steps), 1))\n return distances", "def make_executions(numbers):\n executions = []\n _numbers = numbers.copy()\n orig_len = len(numbers)\n for i in range(len(_numbers)):\n print(f\"Nbs left == {len(_numbers)} / {orig_len}\")\n executions += splice(_numbers)\n _numbers = _numbers[:-1]\n return executions", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if 
(self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _generate_iterator(self) -> Iterable:\n params: List[Tensor] = []\n for angle_range in self._ranges:\n lin_space: Tensor = linspace(angle_range[0], angle_range[1], steps=self._num_steps)\n params.append(lin_space)\n power: int\n dims: int\n for i in range(0, self._num_params):\n power = len(self._ranges) - 1 - i\n dims = i\n params[i] = params[i].repeat_interleave(self._num_steps ** power)\n params[i] = params[i].broadcast_to((self._num_steps ** dims, self._num_steps ** (power + 1))).flatten()\n return zip(*params)", "def iter_fun(self):\n\n run_id = self._run_id\n etopo_dir = driver_home\n topodir = driver_home\n\n # load input info\n if self._input_info == None:\n scn_fname = os.path.join(self._run_home,'scenario_pts.txt') \n scn = np.loadtxt(scn_fname)\n scn_list = scn.tolist()\n else:\n scn_list = self._input_info\n \n # total number of runs\n M = len(scn_list)\n N = 8*M + 2 # 8*M runs plus two empty bathymetry runs\n\n if run_id == N:\n raise StopIteration()\n\n else:\n \n #=========================\n # set coarse and fine grids\n #\n t_shelf = 0. # time approaching continental slope\n t_harbor = 0. # time approaching harbor\n\n if ((run_id >= 0) and (run_id < 4*M)) or (run_id == 8*M):\n #------------------\n # setrun for coarse\n #\n grid = 'coarse'\n \n self._rundata.amrdata.amr_levels_max = 4\n # coarse grid run = 10\"\n # dx = 30', 5', 1', 10\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6]\n\n\n # add topography (coarse)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 4, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 3, 4, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n\n # add regions\n regions = self._rundata.regiondata.regions \n # between shelf and CC \n regions = []\n regions.append(\\\n [2, 3, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [3, 4, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [4, 4, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_coarse.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_coarse.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_coarse.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n \n elif ((run_id >= 4*M) and (run_id < 8*M)) or (run_id == 8*M+1):\n #----------------\n # setrun for fine\n #\n grid = 'fine'\n \n self._rundata.amrdata.amr_levels_max = 6\n\n ## fine grid run = 2/3\"\n ## dx = 30', 5', 1', 10\", 2\", 2/3\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6, 5, 3]\n\n regions = self._rundata.regiondata.regions \n regions = []\n # between shelf and CC\n regions.append(\\\n [2, 4, t_shelf, 1e9, -125, 
-124.05, 40.5, 43]) \n regions.append(\\\n [4, 5, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [6, 6, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # add topography (fine)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 6, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 4, 6, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n topofiles.append([3, 6, 6, 0., 1.e10, \\\n os.path.join(topodir,'cc-1_3sec-c_pierless.asc')])\n \n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_fine.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_fine.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_fine.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n\n\n #\n # set desired magnitude\n #\n if ((run_id >= 0) and (run_id < M)) \\\n or ((run_id >= 4*M) and (run_id < 5*M)):\n self.KL_Mw_desired = 8.6\n elif ((run_id >= M) and (run_id < 2*M)) \\\n or ((run_id >= 5*M) and (run_id < 6*M)):\n self.KL_Mw_desired = 8.8\n elif ((run_id >= 2*M) and (run_id < 3*M)) \\\n or ((run_id >= 6*M) and (run_id < 7*M)):\n self.KL_Mw_desired = 9.0\n elif ((run_id >= 3*M) and (run_id < 4*M)) \\\n or ((run_id >= 7*M) and (run_id < 8*M)):\n self.KL_Mw_desired = 9.2\n \n #\n # set slip distribution\n #\n run_id_mod = run_id - 100*(run_id/100)\n m = scn_list[run_id_mod]\n self.set_KL_slip(m)\n \n if run_id < 8*M:\n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_' + str(self.KL_Mw_desired)\n self._rundir = os.path.join(dir_grid_Mw, 'run_' + str(run_id_mod))\n else:\n # empty runs to obtain bathymetry\n \n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_B0'\n self._rundir = dir_grid_Mw\n self.KL_Mw_desired = 0.0\n self.set_KL_slip([0.]*len(m)) # set output\n self._rundata.clawdata.output_times = [1.0, 3.0]\n \n self._run_id += 1\n \n return self", "def get_steps_num():\n return 0", "def simulation_run_v4(num_demand_nodes,num_nurses,time_horizon, locations, fixed_service_time=True, shifts=False, restrictions=False):\n\n # generate demand nodes, customers, and nurses\n node = True\n node_type = input('Input \"actual\" for nodes to be actual locations, \"random\" for nodes to be randomly generated locations: ')\n customer_type = input('Input \"random\" for random arrival rate, \"rate from data\", or \"actual\" for actual data: ')\n if node_type == \"actual\":\n demand_node_list = generate_demand_nodes_from_data(locations, customer_type)\n elif node_type == \"random\":\n radius = float(input(\"Input a radius for the demand nodes: \"))\n demand_node_list = generate_demand_nodes_in_zip(locations, num_demand_nodes, radius, customer_type)\n else:\n raise InputError\n file_name = write_file(demand_node_list,demand_node_list)\n distance_matrix = distance_between_nodes_api(demand_node_list)\n if customer_type == \"random\" or \"random from data\":\n customer_list = generate_customers(demand_node_list, time_horizon, fixed_service_time)\n elif customer_type == \"actual\":\n start = input(\"Input a start time (hour in military time): \")\n stop = start+(time_horizon/60)\n customer_list = 
generate_customers_from_data(node_list, fixed_service_time, start, stop)\n day = input(\"Input a number 0-6 corresponding to a day Sun-Sat: \")\n customer_list = customer_list[day]\n if shifts is True:\n shift_lower_bound = input('Input the minimum shift length: ')\n shift_upper_bound = input('Input the maximum shift length: ')\n else:\n shift_lower_bound = 0\n shift_upper_bound = time_horizon\n if restrictions is True:\n # build probabilities dictionary\n probabilities = {}\n for node in demand_node_list:\n print(\"Latitude: \" % s) % node.lat\n print(\"Longitude: \" % s) % node.lon\n prob = input('Input the probability of a nurse being restricted from this node: ')\n probabilites[node.id_number] = prob\n nurse_list = generate_nurses_with_restrictions(probabilities, node_list,num_nurses, time_horizon,\n shift_lower_bound, shift_upper_bound, node)\n else:\n nurse_list = generate_nurses(num_nurses, num_demand_nodes, demand_node_list, time_horizon, shift_lower_bound, shift_upper_bound,node)\n\n # customers are served in the order they arrive (at the moment)\n current_time = 0\n for customer in customer_list:\n # next event dispatch after a customer arrives and at least one nurse is available\n current_time = max(current_time, customer.arrival_time)\n # choose which nurse to dispatch\n nurse_to_dispatch, dispatch_time, nurses_working = updated_dispatch_nurse(nurse_list, customer,\n current_time, distance_matrix,\n fixed_service_time,\n time_horizon)\n # serve customer and update metrics\n current_time = updated_serve_customer(nurse_to_dispatch, customer, distance_matrix, dispatch_time,\n nurses_working)\n # report on simulation\n time_varying_system_metrics(nurse_list, customer_list, fixed_service_time, time_horizon)\n aggregate_system_metrics(nurse_list, customer_list)\n return nurse_list, demand_node_list", "def next_generation(self):\n new_population = self.population.copy()\n new_length = self.tour_length.copy()\n for i in range(self.loops):\n order_a = self.pick_one()\n order_b = self.pick_one()\n order = self.crossover(order_a, order_b)\n order_length = self.distance(order)\n new_population[i], new_length[i] = self.mutate(order_length, order)\n if new_length[i] < self.worst:\n self.tour_length[self.worst_pos] = new_length[i]\n self.population[self.worst_pos] = new_population[i]\n self.fitness[self.worst_pos] = 1/new_length[i]\n self.normalise()\n self.worst = 0\n for j in range(self.loops):\n if self.worst < self.tour_length[j]:\n self.worst = self.tour_length[j]\n self.worst_pos = j\n return new_population, new_length", "def runauto(self, istart, nrows, rstep):\n self.ImageSolution=self.arcdisplay.autoidentify(istart=istart, nrows=nrows, rstep=rstep, oneline=False)", "def getSteps():", "def simulation(nepisodes):\n # Initialize robots\n # print('I am inside the simulation')\n agents = [] # List containing all robots\n a1 = Agent(start = [0, 0], end = [grid_size-1, grid_size-1], nr = 1) # Create agent 1\n a2 = Agent(start = [0, grid_size-1], end = [grid_size-1, 0], nr = 2) # Create agent 2\n a3 = Agent(start = [grid_size-1, 0], end = [0, grid_size-1], nr = 3) # Create agent 3\n a4 = Agent(start = [grid_size-1, grid_size-1], end = [0, 0], nr = 4) # Create agent 4\n agents.append(a1)\n agents.append(a2)\n agents.append(a3)\n agents.append(a4)\n\n # for agent in agents:\n # agent.load_target('target_weights_{}.h5'.format(agent.nr))\n # agent.load_policy('policy_weights_{}.h5'.format(agent.nr))\n # print('loaded')\n\n steps_list = [[] for i in range(len(agents))]\n reward_list = [[] for i 
in range(len(agents))]\n cumulative_rewards = [[] for i in range(len(agents))]\n collisions_list = [[] for i in range(len(agents))]\n\n t = 0 # Set time to zero\n for i in range(nepisodes):\n t = episode(agents, t, i+1) # Run one episode\n\n print('End of episode ', i+1)\n agent_index = 0\n for agent in agents:\n steps_list[agent_index].append(agent.steps)\n reward_list[agent_index].append(agent.reward)\n collisions_list[agent_index].append(agent.collisions)\n if i == 0:\n cumulative_rewards[agent_index].append(agent.reward)\n else:\n cumulative_rewards[agent_index].append(agent.reward + cumulative_rewards[agent_index][i-1])\n agent_index += 1\n\n if i % 1000 == 0:\n with open('reward_4_agents_{}'.format(i),'wb') as f:\n pickle.dump(reward_list,f)\n\n with open('steps_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(steps_list, f)\n\n with open('cols_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(collisions_list, f)\n\n\n return steps_list, reward_list, collisions_list, cumulative_rewards", "def increment_time(self, **kwargs):\n \n #Pull all optional keyword arguements\n if 'timerange' in kwargs:\n timerange = kwargs.pop('timerange')\n else:\n timerange = 7\n \n if 'display' in kwargs:\n displayflag = kwargs.pop('display')\n else:\n displayflag = 1\n \n if 'auto' in kwargs:\n autoflag = kwargs.pop('auto')\n else:\n autoflag = 0\n \n if 'triggered' in kwargs:\n triggered_rules = kwargs.pop('triggered')\n else:\n triggered_rules = []\n \n #Run simulation one day at a time until specified end point is reached\n count = range(0,timerange)\n for i in count:\n \n \n #Increment one day if at least one infected person remains. If not, end the simulation\n if self.SD_Map.IPop.value() > 1:\n time = self.timeSeries[-1]\n self.timeSeries.append(time+1)\n self.SD_Map.update_all(self.timestep(), len(self.timeSeries)-2)\n else:\n print('Done!')\n \n #Update the time display\n self.timev.set(self.timeSeries[-1])\n \n #Add any triggered rules to the rule log display\n if triggered_rules != []:\n day_text = self.translate('Day')+' ' + str(self.timeSeries[-1]) \n rule_text = '; ' + self.translate('Rules') + ': ' + str(triggered_rules)[1:-1]\n log_text = day_text + rule_text\n self.list_info_boxes['Log'].insert(tk.END, log_text)\n \n #If appropriate, update all of the graphs\n if displayflag == 1:\n if self.arrangment == ['Map', 'Graph']:\n index = 2\n invertflag = 1\n else:\n index = 0\n invertflag = 0\n \n #Select all of the graphs\n canvaslist = []\n for entrylist in self.graph_canvas_list:\n for entry in entrylist:\n canvaslist.append(entry)\n\n #For each graph, delete it and replace it with an update graph\n for canvas in canvaslist:\n if index < 2:\n col = 0\n inputindex = index\n self.figures[index].clear()\n plt.close(self.figures[index])\n else:\n col = 1\n inputindex = index - 2\n if invertflag:\n self.figures[inputindex].clear()\n plt.close(self.figures[inputindex])\n else:\n self.figures[index].clear()\n plt.close(self.figures[index])\n \n #Make new graph\n framename = canvas.get_tk_widget().master\n canvas.get_tk_widget().destroy()\n graph = self.translate(self.graph_setting_list[col][inputindex].get(),\n input_language=self.language,\n output_language='english')\n canvas,fig = self.make_graph(framename, graph,\n gridpos = inputindex*2+1)\n self.graph_canvas_list[col][inputindex]=canvas\n \n #Update figures list\n if invertflag:\n self.figures[inputindex] = fig\n else:\n self.figures[index] = fig\n index += 1", "def run_experiment(x_loops=15, max_steps=0, display_on=True, max_fps=10,\n 
garden_size=8, tako_number=1, pop_max=30, max_width=1800,\n max_height=900, collect_data=True, export_all=False,\n rand_nets=False, max_gen = 505, genetic_mode=\"Plain\",\n learning_on=False, seeds=None, garden_mode=\"Diverse Static\",\n family_detection=None, family_mod=0, record_inbreeding=True,\n inbreed_lim = 1.1, hla_genes=0, binary_health=0,\n carrier_percentage=40, two_envs=False, diff_envs=False,\n migration_rate=0, phen_pref=False, filename=\"\"): \n #round width/height down to nearest multiple of 50 if need be\n if max_width % 50 != 0:\n max_width = max_width - (max_width % 50)\n if max_height % 50 != 0:\n max_height = max_height - (max_height % 50)\n\n i = 0\n #create csv files if they don't already exist\n if collect_data or export_all:\n if filename == \"\":\n filename = str(int(time.time())) + \".csv\"\n elif len(filename) < 4:\n filename = filename + \".csv\"\n elif filename[-4:] != \".csv\":\n filename = filename + \".csv\"\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n if collect_data:\n if not os.path.exists(os.path.join(\"Data\", filename)):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as\\\n csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(['iteration', 'env #', 'ID', 'parent1',\n 'parent2', 'age', 'generation', '# children',\n 'mating attempts', 'accum pain',\n 'cause of death', 'timestep', 'mutations',\n 'parent_degree', 'parent_genoverlap',\n '# disorders',\n 'health a', 'health b', 'preference'])\n else:\n with open(os.path.join(\"Data\", filename), newline='') as\\\n csvfile:\n reader = csv.DictReader(csvfile)\n row = None\n for row in reader: pass\n if row != None:\n i = int(row[\"iteration\"]) + 1\n\n if export_all:\n h = make_headers()\n f = os.path.join('Data', (filename[:-4] + ' gene data.csv'))\n if not os.path.exists(f):\n with open(f, 'a') as file:\n writ = csv.writer(file)\n writ.writerow(h)\n\n tako.rand_nets = rand_nets\n tako.family_mod = family_mod\n tako.family_detection = family_detection\n gt.family_detection = family_detection\n tako.record_inbreeding = record_inbreeding\n tako.inbreed_lim = inbreed_lim\n tako.hla_genes = hla_genes\n tako.binary_health = binary_health\n tako.carrier_percentage = carrier_percentage\n tako.phen_pref = phen_pref\n gt.phen_pref = phen_pref\n \n loop_limit = x_loops\n if loop_limit < 1:\n loop_limit = 1\n\n if seeds == None:\n seeds = [None for i in range(x_loops)]\n\n while loop_limit > 0:\n #check if seeds is long enough\n if len(seeds) < loop_limit + i:\n for j in range(loop_limit + i - len(seeds)):\n seeds.append(seeds[j])\n if seeds[0] != None:\n tako.set_seed(seeds[i])\n g = garden_game(garden_size, tako_number, pop_max, max_width,\n max_height, display_on, max_fps, learning_on,\n genetic_mode, rand_nets, garden_mode, filename,\n export_all, family_mod, family_detection,\n two_envs, diff_envs, migration_rate,\n seeds[i])\n if display_on:\n main_window = g\n main_window.main_loop(max_steps, max_gen, display_on,\n collect_data, garden_mode, i)\n else:\n g.main_loop(max_steps, max_gen, display_on, collect_data,\n garden_mode, i)\n loop_limit -= 1\n i += 1" ]
[ "0.622001", "0.6160472", "0.5919634", "0.5737377", "0.56605166", "0.5636519", "0.5605472", "0.5555017", "0.5548242", "0.5515285", "0.5510959", "0.5510417", "0.54903746", "0.54616743", "0.54526263", "0.54507804", "0.54276735", "0.5407268", "0.54053366", "0.5392859", "0.5392859", "0.53682655", "0.5362302", "0.5348458", "0.5347966", "0.5346053", "0.5329057", "0.5321828", "0.5318315", "0.529225", "0.52912533", "0.52905256", "0.5289384", "0.52811295", "0.52754134", "0.52721524", "0.5257538", "0.52391803", "0.523655", "0.52207786", "0.51973784", "0.51972187", "0.518673", "0.5185363", "0.516816", "0.5166717", "0.5164196", "0.5161782", "0.5154006", "0.5132263", "0.5129744", "0.51208925", "0.5118085", "0.5116259", "0.51043063", "0.5103527", "0.510255", "0.5087144", "0.508194", "0.5080705", "0.5074644", "0.5063941", "0.50592405", "0.5059187", "0.5049522", "0.5046053", "0.50455666", "0.50416255", "0.5036965", "0.5026586", "0.50265205", "0.5025032", "0.50174755", "0.50144565", "0.50129443", "0.50119394", "0.5011237", "0.5009546", "0.50046974", "0.5003716", "0.5000339", "0.49992552", "0.4993051", "0.4991355", "0.49904847", "0.4990273", "0.49899668", "0.4983679", "0.49829194", "0.49825567", "0.49792296", "0.49730167", "0.49639252", "0.49610588", "0.49593088", "0.4952135", "0.49519572", "0.49485195", "0.4946961", "0.49393559" ]
0.8080125
0
Shuffles island populations for migration and performs migration by swapping pairs of individuals between islands
def coordinate_migration_between_islands(self): island_partners = self._shuffle_island_indices() for i in range(self._num_islands//2): self._shuffle_island_and_swap_pairs(island_partners, i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def island_migration(self):\n for y in self.island_map:\n for cell in y:\n cell.migration()\n\n for y in self.island_map:\n for cell in y:\n for animal in cell.population:\n animal.has_moved = False", "def migrate(pops, fit_pop):\n best_index_d, best_fit_inds_d, best_pop_inds_d = select_best(pops, fit_pop)\n pops_mid, fit_pop_mid = remove_best(best_index_d, pops, fit_pop)\n for i in range(n_islands):\n for j in range(n_migrate):\n if i + 1 > n_islands - 1:\n pops[i] = np.vstack((pops[i], best_pop_inds_d[0][j]))\n fit_pop[i] = np.hstack((fit_pop[i], best_fit_inds_d[0][j]))\n else:\n pops[i] = np.vstack((pops[i], best_pop_inds_d[i+1][j]))\n fit_pop[i] = np.hstack((fit_pop[i], best_fit_inds_d[i+1][j]))\n\n return pops, fit_pop", "def _mix_neighboring_replicas(self):\n\n if self.verbose: print \"Will attempt to swap only neighboring replicas.\"\n\n # Attempt to swap replica 0 with all other replicas.\n istate = 0\n for jstate in range(1, self.nstates):\n # Determine which replicas these states correspond to.\n i = None\n j = None\n for index in range(self.nstates):\n if self.replica_states[index] == istate: i = index\n if self.replica_states[index] == jstate: j = index \n\n # Reject swap attempt if any energies are nan.\n if (numpy.isnan(self.u_kl[i,jstate]) or numpy.isnan(self.u_kl[j,istate]) or numpy.isnan(self.u_kl[i,istate]) or numpy.isnan(self.u_kl[j,jstate])):\n continue\n \n # Compute log probability of swap.\n log_P_accept = - (self.u_kl[i,jstate] + self.u_kl[j,istate]) + (self.u_kl[i,istate] + self.u_kl[j,jstate])\n\n # Record that this move has been proposed.\n self.Nij_proposed[istate,jstate] += 1\n self.Nij_proposed[jstate,istate] += 1\n\n # Accept or reject.\n if (log_P_accept >= 0.0 or (numpy.random.rand() < math.exp(log_P_accept))):\n # Swap states in replica slots i and j.\n (self.replica_states[i], self.replica_states[j]) = (self.replica_states[j], self.replica_states[i])\n # Accumulate statistics\n self.Nij_accepted[istate,jstate] += 1\n self.Nij_accepted[jstate,istate] += 1\n\n # Attempt swaps for all other replicas, choosing either even states or odd states.\n offset = numpy.random.randint(2) # offset is 0 or 1 \n for istate in range(1+offset, self.nstates, 2):\n # Determine phi and psi indices\n phi_index = int((istate-1) / self.nbins)\n psi_index = (istate-1) - phi_index*self.nbins\n \n # Choose direction: [left, right, up, down]\n direction = numpy.random.randint(4) \n if direction == 0: # left\n psi_index -= 1\n if (psi_index < 0): psi_index = self.nbins-1\n if direction == 1: # right\n psi_index += 1\n if (psi_index >= self.nbins): psi_index = 0\n if direction == 2: # up\n phi_index -= 1\n if (phi_index < 0): phi_index = self.nbins-1\n if direction == 3: # down\n phi_index += 1\n if (phi_index >= self.nbins): phi_index = 0\n jstate = 1 + phi_index*self.nbins + psi_index\n \n # Determine which replicas these states correspond to.\n i = None\n j = None\n for index in range(self.nstates):\n if self.replica_states[index] == istate: i = index\n if self.replica_states[index] == jstate: j = index \n\n # Reject swap attempt if any energies are nan.\n if (numpy.isnan(self.u_kl[i,jstate]) or numpy.isnan(self.u_kl[j,istate]) or numpy.isnan(self.u_kl[i,istate]) or numpy.isnan(self.u_kl[j,jstate])):\n continue\n \n # Compute log probability of swap.\n log_P_accept = - (self.u_kl[i,jstate] + self.u_kl[j,istate]) + (self.u_kl[i,istate] + self.u_kl[j,jstate])\n\n # Record that this move has been proposed.\n self.Nij_proposed[istate,jstate] += 1\n self.Nij_proposed[jstate,istate] += 
1\n\n # Accept or reject.\n if (log_P_accept >= 0.0 or (numpy.random.rand() < math.exp(log_P_accept))):\n # Swap states in replica slots i and j.\n (self.replica_states[i], self.replica_states[j]) = (self.replica_states[j], self.replica_states[i])\n # Accumulate statistics\n self.Nij_accepted[istate,jstate] += 1\n self.Nij_accepted[jstate,istate] += 1\n\n return", "def animal_migrates(self, adj_cells):\n for species, animals in self.fauna_list.items():\n for animal in animals:\n if animal.probability_of_move:\n propensity = [cell.propensity_to_move(animal)\n for cell in adj_cells]\n total_propensity = sum(propensity)\n if total_propensity != 0:\n probability = [cell.probability_move_to_cell(\n animal, total_propensity)\n for cell in adj_cells]\n cum_probability = np.cumsum(probability)\n i = 0\n while np.random.random() > cum_probability[i]:\n i += 1\n cell_to_migrate = adj_cells[i]\n if cell_to_migrate.is_migratable:\n if animal.is_animal_moved_already is False:\n cell_to_migrate.add_animal(animal)\n self.remove_animal(animal)\n animal.is_animal_moved_already = True", "def shuffle(self):\r\n #\r\n shuffled_seed = copy.deepcopy(self)\r\n #\r\n # for each location [x0][y0], randomly choose another location\r\n # [x1][y1] and swap the values of the cells in the two locations.\r\n #\r\n for x0 in range(self.xspan):\r\n for y0 in range(self.yspan):\r\n x1 = rand.randrange(self.xspan)\r\n y1 = rand.randrange(self.yspan)\r\n temp = shuffled_seed.cells[x0][y0]\r\n shuffled_seed.cells[x0][y0] = shuffled_seed.cells[x1][y1]\r\n shuffled_seed.cells[x1][y1] = temp\r\n #\r\n return shuffled_seed", "def shuffle_opacities(mutated_genome):\n mutated_genome", "def swapped(N, D, rng, rows, cols, iterations=100, encoders=None):\n \n assert rows*cols == N # make sure the layout has a valid size\n\n if encoders is None: # if we aren't given encoders\n encoders = random(N, D, rng) # make our own\n \n def score(encoders, index, rows, cols):\n \"\"\"Helper function to compute similarity for one encoder.\n \n :param array encoders: the encoders\n :param integer index: the encoder to compute for\n :param integer rows: the width of the 2d grid\n :param integer cols: the height of the 2d grid\n \"\"\"\n i = index % cols # find the 2d location of the indexth element\n j = index / cols\n \n sim = 0 # total of dot products\n count = 0 # number of neighbours\n if i>0: # if we're not at the left edge, do the WEST comparison\n sim += np.dot(encoders[j*cols+i], encoders[j*cols+i-1])\n count += 1\n if i<cols-1: # if we're not at the right edge, do EAST\n sim += np.dot(encoders[j*cols+i], encoders[j*cols+i+1])\n count += 1\n if j>0: # if we're not at the top edge, do NORTH\n sim += np.dot(encoders[j*cols+i], encoders[(j-1)*cols+i])\n count += 1\n if j<rows-1: # if we're not at the bottom edge, do SOUTH \n sim += np.dot(encoders[j*cols+i], encoders[(j+1)*cols+i])\n count += 1\n return sim/count\n \n for k in range(iterations):\n target = rng.randint(0, N, N) # pick random swap targets\n for i in range(N):\n j = target[i]\n if i != j: # if not swapping with yourself\n # compute similarity score how we are (unswapped)\n sim1 = score(encoders, i, rows, cols) + score(encoders, \n j, rows, cols)\n # swap the encoder\n encoders[[i,j],:] = encoders[[j,i],:]\n # compute similarity score how we are (swapped)\n sim2 = score(encoders, i, rows, cols) + score(encoders, \n j, rows, cols)\n \n # if we were better unswapped\n if sim1 > sim2:\n # swap them back\n encoders[[i,j],:] = encoders[[j,i],:]\n \n return encoders", "def 
_move_range_shuffle(self, range_len):\n start = randint(0, len(self.state) - range_len)\n end = start + range_len\n\n # print(\"start: \" + str(start))\n # print(\"end: \" + str(end))\n # print(\"range_len: \" + str(range_len))\n # print(\"prior state: \", self.state)\n # print(\"prior dict: \", self.wiz_to_pos)\n\n copy_state = self.state[start:end]\n\n #for wizard in copy_state:\n # print(wizard)\n\n random.shuffle(copy_state)\n\n for i, wizard in enumerate(copy_state):\n #print(\"wiz1_loop: \" + wizard)\n self.state[start + i] = wizard\n self.wiz_to_pos[wizard] = start + i\n\n # print(\"post state: \", self.state)\n # print(\"post dict: \", self.wiz_to_pos)\n # print('\\n Error:', self.dict_check())\n # print(\"end\\n \\n\")", "def steady_state_replacement(random, population, parents, offspring, args):\r\n population.sort()\r\n num_to_replace = min(len(offspring), len(population))\r\n population[:num_to_replace] = offspring[:num_to_replace]\r\n return population", "def steady_state_replacement(random, population, parents, offspring, args):\n population.sort()\n num_to_replace = min(len(offspring), len(population))\n population[:num_to_replace] = offspring[:num_to_replace]\n return population", "def steady_state_replacement(new_pop, individuals):\n individuals.sort(reverse=True)\n individuals[-1] = max(new_pop + individuals[-1:])\n return individuals", "def knuth_shuffle_backward(arr):\n for i in reversed(range(1, len(arr))): # i from n-1 downto 1\n j = random.randint(0, i) # Pick randomly 0 <= j <= i \n arr[j], arr[i] = arr[i], arr[j] # exchange ", "def switch_opacities(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1][1]\n mutated_genome[index1][1] = mutated_genome[index2][1]\n mutated_genome[index2][1] = temp", "def shuffle(self):\n self.__c_elem().melange()", "def knuth_shuffle_forward(arr):\n for i in range(0, len(arr)-2): # i from 0..n-2 \n j = random.randint(i, len(arr)-1) # Pick randomly i <= j < n\n arr[i], arr[j] = arr[j], arr[i]", "def calc_granger_shuffle(self):\n if not hasattr(self, 'input_data'):\n self.preprocess_and_check_stationarity()\n temp_series = [np.stack([np.random.permutation(x)\n for x in self.input_data.T]).T\n for i in trange(self.n_shuffles)]\n\n outs_temp = parallelize(self.calc_granger, temp_series, n_jobs=30)\n outs_temp = [x[0] for x in outs_temp]\n self.shuffle_outs = np.array(outs_temp)", "def bitFlip_mutation(population, **kwargs):\r\n new_pop = []\r\n for indiv in population:\r\n mutation_mask = np.random.random(size=indiv.shape) < kwargs['mutation_prob']\r\n indiv[mutation_mask] = 1 - indiv[mutation_mask]\r\n new_pop.append(indiv.copy())\r\n return new_pop", "def random_swaps(dict_prefs, initial_guess, iterations=50000, repetitions=3, random_seed=None):\n best=0\n if random_seed is not None:\n random.seed(random_seed)\n np.random.seed(random_seed)\n for rep in range(repetitions):\n ordering=deepcopy(initial_guess)\n for it in range(iterations):\n candidates_ind=random.sample(range(len(ordering)), 2)\n score_as_is,score_rev=_pair_net_effect(ordering,candidates_ind,dict_prefs)\n if score_rev>score_as_is:\n _swap(ordering,candidates_ind)\n score=eval_ordering(ordering,dict_prefs)\n if score>best:\n best=score\n best_ordering=deepcopy(ordering)\n return best_ordering", "def SwapMutaton(item):\n countryNo = len(item)\n [start,end] = sorted(random.sample(range(1,countryNo),2))\n temp = item[start]\n item[start] = item[end]\n item[end] = 
temp\n return item\n # TODO CHECK !!", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _evolve_population(self):\n # save the old population\n self.old_population = self.population.copy()\n self.old_population_errors = self.population_errors.copy()\n\n # index pointers\n rind = numpy.random.permutation(4)+1\n\n # shuffle the locations of the individuals\n ind1 = numpy.random.permutation(self.population_size)\n pop1 = self.old_population[ind1,:]\n \n # rotate for remaining indices\n rot = numpy.remainder(self.rot_ind + rind[0], self.population_size)\n ind2 = ind1[rot,:]\n pop2 = self.old_population[ind2,:]\n\n rot = numpy.remainder(self.rot_ind + rind[1], self.population_size)\n ind3 = ind2[rot,:]\n pop3 = self.old_population[ind3,:]\n\n rot = numpy.remainder(self.rot_ind + rind[2], self.population_size)\n ind4 = ind3[rot,:]\n pop4 = self.old_population[ind4,:]\n\n rot = numpy.remainder(self.rot_ind + rind[3], self.population_size)\n ind5 = ind4[rot,:]\n pop5 = self.old_population[ind5,:]\n \n # population filled with best individual\n best_population = self.best_individual[numpy.newaxis,:].repeat(self.population_size,axis=0)\n\n # figure out the crossover ind\n xold_ind = numpy.random.rand(self.population_size,self.num_params) >= \\\n self.crossover_prob\n\n # get new population based on desired strategy\n # DE/rand/1\n if self.method == DE_RAND_1:\n population = pop3 + self.scale*(pop1 - pop2)\n population_orig = pop3\n # DE/BEST/1\n if self.method == DE_BEST_1:\n population = best_population + self.scale*(pop1 - pop2)\n population_orig = best_population\n # DE/best/2\n elif self.method == DE_BEST_2:\n population = best_population + self.scale * \\\n (pop1 + pop2 - pop3 - pop4)\n population_orig = best_population\n # DE/BEST/1/JITTER\n elif self.method == DE_BEST_1_JITTER:\n population = best_population + (pop1 - pop2) * \\\n ((1.0-0.9999) * \\\n numpy.random.rand(self.population_size,self.num_params) + \\\n self.scale)\n population_orig = best_population\n # DE/LOCAL_TO_BEST/1\n elif self.method == DE_LOCAL_TO_BEST_1:\n population = self.old_population + \\\n self.scale*(best_population - self.old_population) + \\\n self.scale*(pop1 - pop2)\n population_orig = self.old_population\n \n # crossover\n population[xold_ind] = self.old_population[xold_ind]\n\n # apply the boundary constraints\n for p in xrange(self.num_params):\n # get min and max\n min_val = self.param_ranges[p][0]\n max_val = self.param_ranges[p][1]\n\n # find where exceeded max\n ind = population[:,p] > max_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = max_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-max_val)\n\n # find where below min\n ind = population[:,p] < min_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = min_val + 
\\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-min_val)\n\n # set the class members\n self.population = population\n self.population_orig = population", "def shuffle(self):\n self.edges = np.random.permutation(self.edges)\n self.batch_num = 0", "def randomize_by_edge_swaps(G, num_iterations):\n G_copy = G.copy()\n edge_list = list(G_copy.edges())\n num_edges = len(edge_list)\n total_iterations = num_edges * num_iterations\n\n for _ in range(total_iterations):\n i, j = np.random.choice(num_edges, 2, replace=False)\n u, v = edge_list[i]\n x, y = edge_list[j]\n\n if len(set([u, v, x, y])) < 4:\n continue\n\n # Save edge data\n i_data = G_copy[u][v]\n j_data = G_copy[x][y]\n\n if G_copy.has_edge(u, x) or G_copy.has_edge(v, y):\n # Interchange edge data\n G_copy.remove_edges_from(((u, v), (x, y)))\n G_copy.add_edges_from(((u, v, j_data), (x, y, i_data)))\n else:\n # Regular swap\n G_copy.remove_edges_from(((u, v), (x, y)))\n G_copy.add_edges_from(((u, x, i_data), (v, y, j_data)))\n\n edge_list[i] = (u, x)\n edge_list[j] = (v, y)\n\n assert len(G_copy.edges()) == num_edges\n return G_copy", "def shuffle(self): \n for x in range(12):\n self.right(primary=-60, counter=0)\n time.sleep(.1)\n self.left(primary=-60, counter=0)\n time.sleep(.1)\n self.stop()", "def changeState(self, xyPoints):\n nPts = len(xyPoints)\n ind0 = random.randint(1, nPts-1)\n ind1 = random.randint(1, nPts-1)\n while ind1 == ind0:\n ind1 = random.randint(1, nPts-1)\n # make copy of the sources to make sure the swap works correctly\n xyPoints[ind0], xyPoints[ind1] = tuple(xyPoints[ind1]), tuple(xyPoints[ind0])", "def switch_points(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point_index2 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def _step(self):\n self.sort()\n selection = self._select()\n offspring = self._crossover(selection)\n self._mutate(offspring)\n\n self.sort()\n if self.elite_num > 0:\n offspring[:self.elite_num] = self.population[:self.elite_num]\n\n self.population[:] = offspring\n\n self.sort()\n if self.cull_num > 0:\n self.population[-self.cull_num:] = self._initialize(self.cull_num)", "def generate_moves(self):\n if self.begin:\n self.init_population()\n self.begin = False\n self.decoded_population = list()\n for chromosome in self.population:\n # print(chromosome)\n network_info = self.nn_data(chromosome)\n # print(network_info)\n artificial_neural_network = net.NeuralNetwork(network_info)\n x, y = self.decode_network_output(artificial_neural_network.out())\n self.decoded_population.append((x, y))\n print(self.decoded_population)\n self.clear()", "def return_swaps( # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n self, old_mapping, new_mapping, permutation=None\n ):\n if permutation is None:\n permutation = list(range(self.num_rows))\n swap_operations = []\n\n class Position: # pylint: disable=too-few-public-methods\n \"\"\"Custom Container.\"\"\"\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n current_row,\n current_column,\n final_row,\n final_column,\n row_after_step_1=None,\n ):\n self.current_row = current_row\n self.current_column = current_column\n self.final_row = final_row\n self.final_column = final_column\n self.row_after_step_1 = row_after_step_1\n\n # final_positions contains info containers\n # 
final_position[i][j] contains info container with\n # current_row == i and current_column == j\n final_positions = [[None for i in range(self.num_columns)] for j in range(self.num_rows)]\n # move qubits which are in both mappings\n used_mapped_ids = set()\n for logical_id in old_mapping:\n if logical_id in new_mapping:\n used_mapped_ids.add(new_mapping[logical_id])\n old_column = old_mapping[logical_id] % self.num_columns\n old_row = old_mapping[logical_id] // self.num_columns\n new_column = new_mapping[logical_id] % self.num_columns\n new_row = new_mapping[logical_id] // self.num_columns\n info_container = Position(\n current_row=old_row,\n current_column=old_column,\n final_row=new_row,\n final_column=new_column,\n )\n final_positions[old_row][old_column] = info_container\n # exchange all remaining None with the not yet used mapped ids\n all_ids = set(range(self.num_qubits))\n not_used_mapped_ids = list(all_ids.difference(used_mapped_ids))\n not_used_mapped_ids = sorted(not_used_mapped_ids, reverse=True)\n for row in range(self.num_rows):\n for column in range(self.num_columns):\n if final_positions[row][column] is None:\n mapped_id = not_used_mapped_ids.pop()\n new_column = mapped_id % self.num_columns\n new_row = mapped_id // self.num_columns\n info_container = Position(\n current_row=row,\n current_column=column,\n final_row=new_row,\n final_column=new_column,\n )\n final_positions[row][column] = info_container\n if len(not_used_mapped_ids) > 0: # pragma: no cover\n raise RuntimeError('Internal compiler error: len(not_used_mapped_ids) > 0')\n # 1. Assign column_after_step_1 for each element\n # Matching contains the num_columns matchings\n matchings = [None for i in range(self.num_rows)]\n # Build bipartite graph. Nodes are the current columns numbered (0, 1, ...) 
and the destination columns\n # numbered with an offset of self.num_columns (0 + offset, 1+offset, ...)\n graph = nx.Graph()\n offset = self.num_columns\n graph.add_nodes_from(range(self.num_columns), bipartite=0)\n graph.add_nodes_from(range(offset, offset + self.num_columns), bipartite=1)\n # Add an edge to the graph from (i, j+offset) for every element currently in column i which should go to\n # column j for the new mapping\n for row in range(self.num_rows):\n for column in range(self.num_columns):\n destination_column = final_positions[row][column].final_column\n if not graph.has_edge(column, destination_column + offset):\n graph.add_edge(column, destination_column + offset)\n # Keep manual track of multiple edges between nodes\n graph[column][destination_column + offset]['num'] = 1\n else:\n graph[column][destination_column + offset]['num'] += 1\n # Find perfect matching, remove those edges from the graph and do it again:\n for i in range(self.num_rows):\n top_nodes = range(self.num_columns)\n matching = nx.bipartite.maximum_matching(graph, top_nodes)\n matchings[i] = matching\n # Remove all edges of the current perfect matching\n for node in range(self.num_columns):\n if graph[node][matching[node]]['num'] == 1:\n graph.remove_edge(node, matching[node])\n else:\n graph[node][matching[node]]['num'] -= 1\n # permute the matchings:\n tmp = deepcopy(matchings)\n for i in range(self.num_rows):\n matchings[i] = tmp[permutation[i]]\n # Assign row_after_step_1\n for column in range(self.num_columns):\n for row_after_step_1 in range(self.num_rows):\n dest_column = matchings[row_after_step_1][column] - offset\n best_element = None\n for row in range(self.num_rows):\n element = final_positions[row][column]\n if element.row_after_step_1 is not None:\n continue\n if element.final_column == dest_column:\n if best_element is None:\n best_element = element\n elif best_element.final_row > element.final_row:\n best_element = element\n best_element.row_after_step_1 = row_after_step_1\n # 2. Sort inside all the rows\n swaps = self._sort_within_columns(final_positions=final_positions, key=lambda x: x.row_after_step_1)\n swap_operations += swaps\n # 3. Sort inside all the columns\n swaps = self._sort_within_rows(final_positions=final_positions, key=lambda x: x.final_column)\n swap_operations += swaps\n # 4. 
Sort inside all the rows\n swaps = self._sort_within_columns(final_positions=final_positions, key=lambda x: x.final_row)\n swap_operations += swaps\n return swap_operations", "def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j].fitness < self.genepool[0][j-1].fitness:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j].fitness < self.genepool[1][j-1].fitness:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break", "def switch_chromosomes(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1]\n mutated_genome[index1] = mutated_genome[index2]\n mutated_genome[index2] = temp", "def run(self,step=2,\n sizePop=100,\n infoFields=['migrate_to','fitness'],\n recombination_rate = 0.00375,\n migration_rate = 0.01,\n mutation_rate = [0.00000001],\n subPopNames = ['x','y','z','w'],\n alleleNames = ['A','B'],\n s1 = 0.1,\n burnin=50,\n **kargs):\n\n self.reset()\n pop=sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1,self.dist)), subPopNames=subPopNames, infoFields=infoFields)\n \n simu = sim.Simulator(pop)\n print(\"The simulation has started\")\n t1 = time.time()\n\n\n mutate_snps=range(0,50)+range(51,101)\n\n # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions\n snps=[0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]\n self.initFreq=[]\n\n \n for i in range(len(snps)):\n alpha=float(4*sizePop*migration_rate*snps[i])\n bhta=float(4*sizePop*migration_rate*(1-snps[i])) \n p=numpy.random.beta(alpha,bhta)\n while (p>=0.9 or p<=0.1):\n p=numpy.random.beta(alpha,bhta)\n \n print \" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}\".format(snp=i, alpha=alpha, bhta=bhta, p=p)\n self.initFreq.append(p)\n\n simu.evolve(\n \n initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],\n \n\n # initialize the sex and select the 50 loci (parents)\n preOps = [sim.InitSex(maleProp=0.5,at=[0]),\n\n # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)\n sim.PyOperator(self.genotypeBegin,at=[0]),\n \n # Wait 50 generations for the system to reach equilibrium\n # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual \n sim.PyOperator(self.genotypeAfter,at=[50]),\n\n # function that carries out the selection proccess\n sim.MaSelector(loci=50,wildtype=0,fitness=[1+s1, 1+s1/2, 1],begin=50, end=-1,subPops=1)],\n\n # recombination\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=recombination_rate)]),\n \n # mutation and migration 
of offsprings\n postOps = [\n\n \n sim.SNPMutator(u=mutation_rate,loci=mutate_snps),\n \n # call function to calculate Fst and check for equilibrium state\n sim.PyOperator(self.calcFst,step=step),\n\n #migration\n # Here we define an island model, but this can easily be changed.\n # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html\n sim.Migrator(sim.utils.migrIslandRates(migration_rate,self.numPop)),\n \n # call function to save the allele frequencies\n sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),\n \n \n # check if locus 50 is lost due to genetic drift. If yes, we terminate the simulation\n sim.Stat(alleleFreq=50,step=step,subPops=1,begin=50,end=-1),\n sim.TerminateIf('alleleFreq[50][0] == 0',step=step,begin=50,end=-1),\n \n # check the progress of the simulation\n sim.PyEval('\"Gen: %d\" % gen',step=step),\n sim.PyOutput('\\n',step=step),\n \n ],\n gen=self.Gen\n \n )\n \n \n t2 = time.time()\n print \"simulation took\", t2-t1, \"seconds.\"", "def shuffle_list(self, tour_list, pop_size):\n x = np.array(tour_list)\n while len(self.pop_group) < self.shuffle_population:\n y = np.random.permutation(x)\n if not any((y == x).all() for x in self.pop_group):\n self.pop_group.append(y.tolist())", "def migration(self):\n\n coordinates = self.get_random_coordinates()\n for coordinate in coordinates:\n if isinstance(self.cells[coordinate], (Jungle, Savannah, Desert)):\n self.cell_move_herbivores(coordinate)\n self.cell_move_carnivores(coordinate)\n\n for coordinate in coordinates:\n if isinstance(self.cells[coordinate], (Jungle, Savannah, Desert)):\n self.cells[coordinate].move_new_animals()", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def shuffle(self):\n for i in xrange(self.n - 1):\n pos = random.randint(i, self.n - 1)\n self.to[i], self.to[pos] = self.to[pos], self.to[i]\n self.a[i], self.a[pos] = self.a[pos], self.a[i]\n return self.a", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def flip_bits(self, mutation_rate):\r\n num_mutations = 0\r\n for s_x in range(self.xspan):\r\n for s_y in range(self.yspan):\r\n if (rand.uniform(0, 1) < mutation_rate):\r\n # flip cell value: 0 becomes 1 and 1 becomes 0\r\n self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]\r\n # count the number of mutations so far\r\n num_mutations = num_mutations + 1\r\n # force a minimum of one mutation -- there is no value\r\n # in having duplicates in the population\r\n if (num_mutations == 0):\r\n s_x = rand.randrange(self.xspan)\r\n s_y = rand.randrange(self.yspan)\r\n self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]", "def test_swap(self, dim):\r\n graph = nx.complete_graph(dim)\r\n graph.remove_edge(0, dim - 1)\r\n s = list(range(dim - 1))\r\n assert set(clique.swap(s, graph)) == set(range(1, dim))", "def mutatation(self, indiv: Tour) -> Tour:\n n = indiv.size()\n for i in range(n):\n if random.random() < self.mutation_rate:\n j = int(random.random() * n)\n\n # Swap 2 genes (cities)\n indiv.tour_ids[i], indiv.tour_ids[j] = indiv.tour_ids[j], indiv.tour_ids[i]\n\n # Update fitness\n indiv.compute_fitness(self.map.get())\n return indiv", "def test_moves(self):\n self.herb.fitness = 1\n Herbivore.set_parameters({\"mu\": 1})\n 
original_position = (1, 1)\n self.herb.coordinates = self.herb.migrate(self.list)\n nt.assert_not_equal(self.herb.coordinates, original_position)", "def old_mutate(self, offspring):\n # this mutation function will use gray code\n for o in offspring:\n for (idx,_) in enumerate(o):\n before_mutation = o[idx]\n gray = self.binary_to_gray(before_mutation)\n if random.random() < self.mutation_chance:\n gray = gray ^ 1\n if random.random() < self.mutation_chance:\n gray = gray ^ 2\n if random.random() < self.mutation_chance:\n gray = gray ^ 4\n \n o[idx] = self.gray_to_binary(gray)\n \n return offspring", "def shuffle_chromosomes(mutated_genome):\n random.shuffle(mutated_genome)", "def shuffle(self) -> List[int]:", "def swap_cells(state, i1, j1, i2, j2):\n value1 = state[i1][j1]\n value2 = state[i2][j2]\n \n new_state = []\n for row in range(len(state)): \n new_row = []\n for column in range(len(state[row])): \n if row == i1 and column == j1: \n new_row.append(value2)\n elif row == i2 and column == j2:\n new_row.append(value1)\n else: \n new_row.append(state[row][column])\n new_state.append(tuple(new_row))\n return tuple(new_state)", "def int_flip_mutation(individual):\n for i in range(len(individual.genome)):\n if random.random() < MUTATION_PROBABILITY:\n individual.genome[i] = random.randint(0, CODON_SIZE)\n return individual", "def attemptswap(swap_method, replicas):\n N = len(replicas)\n # Attempt a swap between replicas\n if swap_method == 'random pair':\n # pick pair at random\n r = random()\n i = min(int(r * N), (N - 1))\n j = i\n while j == i:\n s = random()\n j = min(int(s * N), (N - 1))\n # make sure j > i (needed for the swap criterion below)\n if j < i:\n tmp = i\n i = j\n j = tmp\n \n elif swap_method == 'neighbors': \n # pick neighboring pair at random \n r = random()\n i = min(int(r * (N - 1)), (N - 2))\n j = i + 1\n \n else:\n print 'Swap method', swap_method, 'unknown.'\n \n randnum = random()\n\n ### if proposing i-->j, \n ### \n ### del = d(beta_j - beta_i) * d(E_j - E_i)\n ###\n ### (see Hansmann 1997)\n\n boltzfactor = _compute_boltz_factor(replicas[i], replicas[j])\n\n if randnum < boltzfactor:\n\n # swap the ***temperatures***\n temp_i = replicas[i].mc.temp\n temp_j = replicas[j].mc.temp\n tempfromrep_i = replicas[i].mc.tempfromrep\n tempfromrep_j = replicas[j].mc.tempfromrep\n replicas[i].mc.temp = temp_j\n replicas[j].mc.temp = temp_i\n replicas[i].mc.tempfromrep = tempfromrep_j\n replicas[j].mc.tempfromrep = tempfromrep_i\n \n swap_success = True\n \n else:\n swap_success = False\n\n return i, j, swap_success", "def shuffle(self):\n\t\t\trandom.seed(231)\n\t\t\trandom.shuffle(self.Ind)\n\t\t\tself.Ind = self.Ind[:int(len(self.Ind)/5)*5].reshape((self.cv_iters, -1))\n\t\t\t#index of valication set\n\t\t\tself.CVindex = 1\n\t\t\tself.Testindex = 0", "def test_swip_swap():\n print('Testing swip_swap')\n\n # Cases given to test this problem\n assert_equals('offbar', hw1.swip_swap('foobar', 'f', 'o'))\n assert_equals('foocar', hw1.swip_swap('foobar', 'b', 'c'))\n assert_equals('foobar', hw1.swip_swap('foobar', 'z', 'c'))\n\n # Additional cases to test this problem\n assert_equals('nesnitg', hw1.swip_swap('testing', 't', 'n'))\n assert_equals('nap', hw1.swip_swap('nap', 'f', 'g'))", "def _fuzz(self):\r\n\r\n # we can calculate which bytes to swap based on the number of tries\r\n # we've made on this seedfile\r\n a = self.sf.tries\r\n b = a + 1\r\n\r\n if b >= len(self.input):\r\n raise FuzzerExhaustedError('Iteration exceeds seed file length')\r\n\r\n logger.debug('%s - swap 
bytes %d <-> %d', self.sf.basename, a, b)\r\n self.input[b], self.input[a] = self.input[a], self.input[b]\r\n self.output = self.input", "def _move_range_mirror(self, range_len):\n #start1 = randint(range_len, len(self.state) - range_len)\n start = randint(0, len(self.state) - range_len)\n #range_list = choice([[start1, start1 - range_len], [start2, start2 + range_len]])\n end = start + range_len\n\n copy_state = self.state[start:end]\n copy_state.reverse()\n self.state[start:end] = copy_state\n\n for wizard in self.state[start:end]:\n self.wiz_to_pos[wizard] = self.state.index(wizard)", "def _next_generation(self, ranks):\n replace = ranks[:int(self.population_size * self.culling)]\n for idx in replace:\n self.population[idx] = self._create_offspring()", "def _evolve_swap_element(self, state, element):\n new_state = PhotonicState()\n for in_modes, amp in state.items():\n out_modes = []\n for in_mode in in_modes:\n offset_in_mode = in_mode - element.offset \n if offset_in_mode in element.in_modes:\n index = element.in_modes.index(offset_in_mode)\n out_mode = element.out_modes[index] + element.offset\n else:\n out_mode = in_mode \n out_modes.append(out_mode)\n new_state[tuple(sorted(out_modes))] = amp \n return new_state", "def InversionMutation(item):\n item=copy.deepcopy(item)\n countryNo = len(item)\n [start,end] = sorted(random.sample(range(1,countryNo+1),2))\n item[start:end] = reversed(item[start:end])\n return item", "def unspool(X):\n # Size of (B,M,18)\n ranks = X[:,:,::2]\n suits = X[:,:,1::2]\n hand_ranks = ranks[:,:,:4]\n hand_suits = suits[:,:,:4]\n board_ranks = ranks[:,:,4:]\n board_suits = suits[:,:,4:]\n # sort by suit\n hand_suit_index = torch.argsort(hand_suits)\n board_suit_index = torch.argsort(board_suits)\n hand_ranks = torch.gather(hand_ranks,-1,hand_suit_index)\n hand_suits = torch.gather(hand_suits,-1,hand_suit_index)\n board_ranks = torch.gather(board_ranks,-1,board_suit_index)\n board_suits = torch.gather(board_suits,-1,board_suit_index)\n # sort by rank\n hand_index = torch.argsort(hand_ranks)\n board_index = torch.argsort(board_ranks)\n ranks = torch.cat((torch.gather(hand_ranks,-1,hand_index),torch.gather(board_ranks,-1,board_index)),dim=-1).long()\n suits = torch.cat((torch.gather(hand_suits,-1,hand_index),torch.gather(board_suits,-1,board_index)),dim=-1).long()\n sequence_ranks = ranks[:,:,UNSPOOL_INDEX]\n sequence_suits = suits[:,:,UNSPOOL_INDEX]\n # sequence_suits = swap_batch_suits(sequence_suits)\n return sequence_ranks,sequence_suits", "def ScrambleMutation(item):\n item=copy.deepcopy(item)\n countryNo = len(item)\n [start,end] = sorted(random.sample(range(1,countryNo+1),2))\n shuffle_slice(item,start,end)\n return item", "def swap_suits(cards):\n cards_need_swap = cards\n new_suit = 5\n while cards_need_swap.shape[0] > 0:\n suit = cards_need_swap[0,1]\n cards[cards[:,1] == suit, 1] = new_suit\n new_suit += 1\n cards_need_swap = cards[cards[:,1] < 5]\n cards[:,1] = cards[:,1] - 4\n return cards", "def topsort_lat(lat, random_shift=False, max_state=None):\n\n V = {arc[STATE_FROM] for arc in lat} | {arc[STATE_TO] for arc in lat}\n A = {i: set() for i in V}\n for arc in lat:\n A[arc[STATE_TO]].add(arc[STATE_FROM])\n newid2oldid = [0]\n while len(newid2oldid) <= len(V):\n vs = [i for i, v in A.items() if len(v) == 0]\n if len(vs) == 0:\n print(f\"Lat: {lat}\")\n print(f\"V: {V}\")\n print(f\"A: {A}\")\n print(f\"newid2oldid: {newid2oldid}\")\n raise RuntimeError(f\"Topsort error.\")\n i = np.random.choice(vs)\n A.pop(i)\n newid2oldid.append(i)\n for a in 
A.values():\n a.discard(i)\n old2new = {i_old: i_new for i_new, i_old in enumerate(newid2oldid)}\n if random_shift:\n shift=0\n max_shift = max_state - len(old2new)\n max_step = max_state // len(old2new)\n for k,v in old2new.items():\n if v == 0 or v == 1:\n continue\n new_shift = random.randint(0, min(max_step, max_shift))\n shift += new_shift\n max_shift -= new_shift\n old2new[k] += shift\n\n sorted_lat = np.array([(arc[0], old2new[arc[1]], old2new[arc[2]]) for arc in lat])\n return sorted_lat", "def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()", "def generational_replacement(new_pop, individuals):\n individuals.sort(reverse=True)\n for ind in individuals[:ELITE_SIZE]:\n new_pop.append(copy.copy(ind))\n new_pop.sort(reverse=True)\n return new_pop[:GENERATION_SIZE]", "def randomize_in_place(list1, list2, init=0):\n np.random.seed(seed=init)\n np.random.shuffle(list1)\n np.random.seed(seed=init)\n np.random.shuffle(list2)", "def replacement(old_pop, new_pop, mode=\"delete-all\", n=None,\n based_on_fitness=True, fitness_old=[], fitness_new=[]):\n\n if mode == \"delete-all\":\n if fitness_old == []:\n raise ValueError(\"[!] 'fitness_old' has to be filled!\")\n\n population = np.empty(old_pop.shape)\n\n # take over all members of new population\n for i in range(len(new_pop)):\n population[i] = new_pop[i]\n\n # list of members sorted from best to worse\n sorted_members = [member for _, member in sorted(zip(fitness_old, old_pop), key=lambda pair: -pair[0])][:n]\n\n # fill up rest with best of old population\n for i in range(len(new_pop), len(old_pop)):\n population[i] = sorted_members[i-len(new_pop)]\n\n return population\n\n # if not, check if n was supplied\n if n is None:\n raise Exception(\"[-] Please supply n when using steady-state modes!\")\n\n # generate list for the resulting population, starting as the old_pop\n population = old_pop[:]\n\n # here we generate lists of indx for the old population and values to\n # replace them with from the new population\n\n # for the \"steady-state mode\" ...\n if mode == \"steady-state\" and not based_on_fitness:\n # choose n random indexes from the old_pop\n # replace=False ensures no duplicates\n indx_list = np.random.choice(range(len(old_pop)), size=n, replace=False)\n # and n random values from new_pop\n value_list = np.random.choice(new_pop, size=n, replace=False)\n\n elif mode == \"steady-state\" and based_on_fitness:\n \"\"\"\n check if fitness lsits are defined\n build two correpsonding lists of indx and values to replace\n \"\"\"\n\n if len(fitness_old) != len(old_pop) or len(fitness_new) != len(new_pop):\n raise Exception(\"\"\"[-] Both 'fitness_old' and 'fitness_new' need to be\n the same length as 'old_pop' and 'new_pop'\"\"\")\n # check if sort the right way\n # sort populations, based on their fitness score as sort key\n # (for old population we want the n worst member, thus default sorted\n # [small to big] does just fine, for the new population we want the\n # n best member, thus we include a -)\n # for old population, sort the indexes to be replace\n # for new population, sort the member to replace with\n indx_list = [indx_old for _, indx_old in \\\n sorted(zip(fitness_old, range(len(old_pop))), \\\n key=lambda pair: pair[0])][:n]\n value_list = [member for _, member in \\\n sorted(zip(fitness_new, new_pop), \\\n key=lambda pair: -pair[0])][:n]\n\n # now that we got our lists, simply replace them\n for indx, val in zip(indx_list, value_list):\n population[indx] 
= val\n\n return population", "def shuffle(self) -> List[int]:\n runs = self.nums.copy()\n # Fisher-Yates Algorithm\n n = len(runs)\n for i in range(n):\n j = random.randint(i, n - 1)\n runs[i], runs[j] = runs[j], runs[i]\n return runs", "def shuffle_pair(x,y):\n xy = list(zip(x,y))\n np.random.shuffle(xy)\n x, y = zip(*xy)\n x = np.array(x)\n y = np.array(y)", "def reshuffle(self):\n place = 0\n saveplace = 0\n\n \"\"\" Now we're going to randomly swap cards to the bottom of the deck until we reach the top \"\"\"\n for counter in reversed(range(self.currentCard + 1, self.deckSize)):\n \n place = self.randomMethod() * (counter + 1 - self.currentCard)\n place = math.floor(place) + self.currentCard\n saveplace = self.shuffledPtrs[counter]\n self.shuffledPtrs[counter] = self.shuffledPtrs[place]\n self.shuffledPtrs[place] = saveplace", "def swap(indiv, Optimizer):\n if 'MU' in Optimizer.debug:\n debug = True\n else:\n debug = False\n Optimizer.output.write('Swap Mutation performed on individual\\n')\n Optimizer.output.write('Index = '+repr(indiv.index)+'\\n')\n if len(indiv[0]) > 4:\n natomsswap=random.randint(1,len(indiv[0])/5)\n else:\n natomsswap=1\n Optimizer.output.write('Number of swaps = '+repr(natomsswap)+'\\n')\n syms=list(set(indiv[0].get_chemical_symbols()))\n if len(syms)<len(Optimizer.atomlist):\n syms=[sym for sym,c,m,u in Optimizer.atomlist]\n if len(syms)==1:\n Optimizer.output.write('WARNING: Swap Mutation attempted on single atom structure system\\n')\n else:\n for i in range(natomsswap):\n if len(indiv[0])>1:\n a1=indiv[0][random.randint(0,indiv[0].get_number_of_atoms()-1)]\n else:\n a1=indiv[0][0]\n osym=a1.symbol\n nsymlist=[sym for sym in syms if sym != osym]\n a1.symbol=random.choice(nsymlist)\n nsym=a1.symbol\n Optimizer.output.write('Swapped '+osym+' atom with '+nsym+'\\n')\n Optimizer.output.write(repr(indiv[0])+'\\n')\n muttype='S'+repr(natomsswap)\n if indiv.energy==0:\n indiv.history_index=indiv.history_index+'m'+muttype\n else:\n indiv.history_index=repr(indiv.index)+'m'+muttype\n return indiv", "def swap_suits(cards):\n cards_need_swap = cards\n new_suit = 5\n while cards_need_swap.shape[0] > 0:\n suit = cards_need_swap[0,1]\n cards[cards[:,1] == suit, 1] = new_suit\n new_suit += 1\n cards_need_swap = cards[cards[:,1] < 5]\n cards[:,1] = cards[:,1] - 4\n return cards", "def shuffle(self) -> List[int]:\n for i in range(len(self.nums) - 1, 0, -1):\n pivot = random.randint(0, i) # 前闭后闭\n self.nums[i], self.nums[pivot] = self.nums[pivot], self.nums[i]\n return self.nums", "def mutation(self):\n\n for r in range(self.pop_num*3, 5): # Mutation.\n for w in range(0,self.length): \n if random.random()<0.2: \n self.par_and_sons[r].A[w] = self.par_and_sons[r].A[w] + np.random.randint(-20, 20) # Offset + -20 pixels.", "def scramble_clause_crossover(ind1, ind2):\n all_clauses = ind1 + ind2\n random.shuffle(all_clauses)\n ind1[0:len(ind1)] = all_clauses[0:len(ind1)]\n ind2[0:len(ind2)] = all_clauses[len(ind1):len(ind1) + len(ind2)]", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], 
state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def run_permute(self):\n # CH5+ = [[1,2,3,4,5]] , or for testing [[1,2,3],[4,5]]\n # H5O2+ = [[2,3],[4,5]] [O_left,O_left,H_left,H_left,H_right,H_right,H_center]\n like_atoms = self.technique_kwargs['like_atoms']\n ensemble = self.technique_kwargs['ensemble']\n\n # Get ensemble size\n if ensemble is None:\n walkers = np.tile(self.coord, (self.num_walkers, 1, 1))\n else:\n walkers = ensemble\n\n # For each tuple of like atoms, we will randomly permute them\n for pair in like_atoms:\n cds_to_randomize = walkers[:, pair]\n [np.random.shuffle(x) for x in cds_to_randomize]\n # Assign the stack of permuted atom coordinates to the appropriate place in the walker array\n walkers[:, pair] = cds_to_randomize\n return walkers", "def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]", "def mutate(ways, multiply):\n mutated=ways[:]\n for way in ways:\n for i in range(multiply):\n shuffle = [random.randrange(len(way)),random.randrange(len(way)-1)]\n shuffle[1] = shuffle[1] if shuffle[1]<shuffle[0] else shuffle[1]+1\n shuffle.sort()\n new_way = way[:shuffle[0]]+[way[shuffle[1]]]+way[shuffle[0]+1:shuffle[1]]+[way[shuffle[0]]]+way[shuffle[1]+1:]\n if new_way not in mutated:\n mutated.append(new_way)\n return mutated", "def generate_final_scoring_tiles(seed=0):\n\n if seed is not 0:\n random.seed(seed)\n all_tiles_list = [1, 2, 3, 4, 5, 6]\n randomized_tiles = list()\n\n for _ in range(2):\n chosen_tile_index = random.randint(0, len(all_tiles_list) - 1)\n randomized_tiles.append(all_tiles_list[chosen_tile_index])\n all_tiles_list.pop(chosen_tile_index)\n\n return tuple(randomized_tiles)", "def swap_up(self, index):\n grid = self.from_grid\n new_grid = []\n for row in grid:\n new_grid.append(list(row))\n \n new_grid[index[0] - 1][index[1]], new_grid[index[0]][index[1]] = \\\n new_grid[index[0]][index[1]], new_grid[index[0] - 1][\n index[1]]\n for i in range(len(grid)):\n new_grid[i] = tuple(new_grid[i])\n\n return MNPuzzle(tuple(new_grid), self.to_grid)", "def shuffle(self):\n x = len(self.org)\n result = self.org[:]\n var = x\n for i in range(x):\n id = random.randrange(0, var)\n result[id], result[var - 1] = result[var - 1], result[id]\n var -= 1\n\n return result", "def shuffle(self):\n self.train_edges = np.random.permutation(self.train_edges)\n self.nodes = np.random.permutation(self.nodes)\n self.batch_num = 0", "def testPreferFewerSwaps(self):\n data = (((1, 'C'), (5, 'B')),\n ((2, 'A'), (6, 'A')),\n ((3, 'C'), (7, 'C')),\n ((4, 'B'), (8, 'D')))\n result = [(1, 5), (2, 7), (3, 6), (4, 8)]\n self.assertEqual(result, self.draw(data))\n return self.draw(data)", "def test_shuffled(self):\n self.setup_flags()\n self.io_args.matches = os.path.join(\n self.io_args.output_root, \"shuffled\", \"matches.json\"\n )\n self._calibration_error_test(\"shuffled\", \"GeometricCalibration\")", "def sample_nets(nets, idxs):\n unique_s = set(idxs)\n\n # leaf routine\n replace_dict = {}\n saw_leaf = False\n we_done = True\n for p in range(len(idxs)):\n s = idxs[p]\n we_done = we_done and (p == s)\n if p not in unique_s:\n saw_leaf = True\n # mutate net\n nets[p].load_state_dict(nets[s].state_dict()) # nets[p] = nets[s]\n\n # change idxs to indicate 
we have updated this net\n idxs[p] = p\n if s not in replace_dict:\n replace_dict[s] = p\n\n if we_done:\n return\n\n if saw_leaf:\n for i in range(len(idxs)):\n if i == idxs[i]:\n continue\n if idxs[i] in replace_dict:\n idxs[i] = replace_dict[idxs[i]]\n sample_nets(nets, idxs)\n else:\n # cycle routine\n for i in range(len(idxs)):\n if (i == idxs[i]):\n continue\n else:\n tmp_net = copy.deepcopy(nets[i]) # tmp_net = nets[i]\n curr_p = i\n prev_p = idxs[i]\n while prev_p != i:\n nets[curr_p].load_state_dict(nets[prev_p].state_dict()) # nets[curr_p] = nets[prev_p]\n\n idxs[curr_p] = curr_p\n curr_p = prev_p\n prev_p = idxs[curr_p]\n nets[curr_p].load_state_dict(tmp_net.state_dict()) # nets[curr_p] = tmp_net\n idxs[curr_p] = curr_p", "def swap(a, b, state, target):\n # a = random.randrange(0, 200, 2)\n # b = random.randrange(0, 200, 2)\n new_state = ''\n for i in range(len(state)):\n if i == a:\n new_state += state[b]\n continue\n if i == b:\n new_state += state[a]\n continue\n new_state += state[i]\n # print(new_state)\n res = solving(int(new_state[0]), int(new_state[2]), new_state[1])\n for i in range(2, len(new_state) - 2, 2):\n res = solving(res, int(new_state[i + 2]), new_state[i + 1])\n # print(\"Distance from target: \", target - res)\n return new_state, abs(target - res)", "def nsga_replacement(random, population, parents, offspring, args):\n survivors = []\n combined = list(population)\n combined.extend(offspring)\n \n # Perform the non-dominated sorting to determine the fronts.\n fronts = []\n pop = set(range(len(combined)))\n while len(pop) > 0:\n front = []\n for p in pop:\n dominated = False\n for q in pop:\n if combined[p] < combined[q]:\n dominated = True\n break\n if not dominated:\n front.append(p)\n fronts.append([dict(individual=combined[f], index=f) for f in front])\n pop = pop - set(front)\n \n # Go through each front and add all the elements until doing so\n # would put you above the population limit. At that point, fall\n # back to the crowding distance to determine who to put into the\n # next population. 
Individuals with higher crowding distances\n # (i.e., more distance between neighbors) are preferred.\n for i, front in enumerate(fronts):\n if len(survivors) + len(front) > len(population):\n # Determine the crowding distance.\n distance = [0 for _ in range(len(combined))]\n individuals = list(front)\n num_individuals = len(individuals)\n num_objectives = len(individuals[0]['individual'].fitness)\n for obj in range(num_objectives):\n individuals.sort(key=lambda x: x['individual'].fitness[obj])\n distance[individuals[0]['index']] = float('inf')\n distance[individuals[-1]['index']] = float('inf')\n for i in range(1, num_individuals-1):\n distance[individuals[i]['index']] = (distance[individuals[i]['index']] + \n (individuals[i+1]['individual'].fitness[obj] - \n individuals[i-1]['individual'].fitness[obj]))\n \n crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]\n crowd.sort(key=lambda x: x['dist'], reverse=True)\n last_rank = [combined[c['index']] for c in crowd]\n r = 0\n num_added = 0\n num_left_to_add = len(population) - len(survivors)\n while r < len(last_rank) and num_added < num_left_to_add:\n if last_rank[r] not in survivors:\n survivors.append(last_rank[r])\n num_added += 1\n r += 1\n # If we've filled out our survivor list, then stop.\n # Otherwise, process the next front in the list.\n if len(survivors) == len(population):\n break\n else:\n for f in front:\n if f['individual'] not in survivors:\n survivors.append(f['individual'])\n return survivors", "def shuffle(self):\n new_nums = self.nums[:]\n n = len(new_nums)\n import random\n for i in range(n):\n rand_num = random.randint(0, n - 1)\n # Swap nums[i] with nums[randint]\n temp = new_nums[i]\n new_nums[i] = new_nums[rand_num]\n new_nums[rand_num] = temp\n return new_nums", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]\n self.target_ids = self.target_ids[perm]", "def randomize_data(arr_a, arr_b):\n for ix in range(0, len(arr_a)-1):\n j = random.randint(ix+1, len(arr_a)-1)\n swap(ix, j, arr_a, arr_b)", "def swap_mutate(\n self, crossover_pop_dict, test=False, mutation_prob={}, random_aas=''\n ):\n\n print('Performing mutations')\n\n # Initialises dictionary of mutated child networks\n mutated_pop_dict = OrderedDict()\n\n # Mutates the amino acid identities of randomly selected nodes\n for network_num in list(crossover_pop_dict.keys()):\n G = copy.deepcopy(crossover_pop_dict[network_num])\n\n for node in list(G.nodes):\n if G.nodes()[node]['type'] == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = mutation_prob[network_num][node]\n if random_number <= self.mutation_prob:\n if test is False:\n orig_aa = G.nodes()[node]['aa_id']\n poss_aas = copy.deepcopy(self.aa_list)\n poss_aas.remove(orig_aa)\n new_aa = poss_aas[random.randint(0, (len(poss_aas)-1))]\n else:\n new_aa = random_aas[0]\n random_aas = random_aas[1:]\n\n nx.set_node_attributes(G, values={node: {'aa_id': new_aa}})\n\n mutated_pop_dict[network_num] = G\n\n return mutated_pop_dict", "def migrate(self, display_on):\n from_0 = random.sample(self.env_list[0].tako_list,\n int(self.migration_rate * len(\n self.env_list[0].tako_list)))\n from_1 = random.sample(self.env_list[1].tako_list,\n int(self.migration_rate*len(\n self.env_list[1].tako_list)))\n for t in from_0:\n self.env_list[0].tako_list.remove(t)\n self.env_list[1].garden_map[t.y][t.x] = 
Dirt(display_on,\n t.x, t.y)\n self.env_list[1].add_creature(t)\n for t in from_1:\n self.env_list[1].tako_list.remove(t)\n self.env_list[0].garden_map[t.y][t.x] = Dirt(display_on,\n t.x, t.y)\n self.env_list[0].add_creature(t)", "def transition_function(grid, neighbourstates, neighbourcounts, decay_grid,\n water_decay_grid):\n\n global water_counter\n global ignition_grid\n neighbourstates = np.array(neighbourstates)\n init_grid = initial_grid.astype(int)\n ig_grid = np.array(ignition_grid)\n windspeed_ignition_modifiers = wind_speed_rvalue(\"NE\", 10)\n new_ig_grid = []\n for i, row in enumerate(grid):\n new_ig_grid.append([\n ignite(cell, neighbourstates[:, i, j],\n windspeed_ignition_modifiers) for j, cell in enumerate(row)\n ])\n new_ig_grid = np.array(new_ig_grid)\n started_to_burn = []\n for i, row in enumerate(grid):\n started_to_burn.append([\n started_burning(cell, ig_grid[i, j], new_ig_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[started_to_burn] = START_BURN\n ig_grid = np.add(new_ig_grid, ig_grid)\n full_burn = []\n for i, row in enumerate(grid):\n full_burn.append([\n fully_burning(cell, ig_grid[i, j], decay_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[full_burn] = BURNING\n end_burning = []\n for i, row in enumerate(grid):\n end_burning.append([\n ending_burn(cell, decay_grid[i, j], decay_values[int(\n initial_grid[i, j])]) for j, cell in enumerate(row)\n ])\n grid[end_burning] = END_BURN\n decay_grid[(grid == BURNING) | (grid == END_BURN)] -= 1\n burnt_out = (decay_grid == 0) # find those which have decayed to 0\n grid[(decay_grid == 0\n )] = BURNT #set all that have decayed to zero to BURNT(7)\n water_counter += 1\n\n if (water_counter == 100):\n grid[120:160, 80:120] = initial_grid[120:160, 80:120]\n water_decay_grid[(grid != LAKE)] -= 1 # take one off their decay value\n grid[(water_decay_grid == 0)] = BURNT # switch their state to 5\n ignition_grid = ig_grid\n return grid", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def temp_swap_dice(self, move, new_faces):\n assert len(move) == len(new_faces)\n output = list(self.dice)\n for face in move:\n output.remove(face)\n for face in new_faces:\n output.append(face)\n return tuple(sorted(output))", "def _swap2(self, cids, iids):\n # The coupling indexes of the two legs to swap\n c1, c2 = cids\n # The index of the two legs to swap within the given coupling\n i1, i2 = iids\n assert c1 != c2\n\n # Get the connecting coupling between the two couplings\n cnx = self.get_couplingnetwork().to_undirected(as_view=True)\n ci = set(nx.common_neighbors(cnx, *[self.coupling[ii] for ii in cids]))\n if len(ci) != 1:\n raise ValueError(f'cids: {cids} have {len(ci)} common neighbors')\n ci = self.coupling.index(ci.pop())\n\n # internal legs\n l1 = cnx.edges[self.coupling[c1], self.coupling[ci], 0]['leg']\n l2 = cnx.edges[self.coupling[c2], self.coupling[ci], 0]['leg']\n\n # index of the internal leg in c1 and c2\n legs = self.get_legs()\n il1, il2 = [[legs[x].index(ll) for x in (y, ci)]\n for y, ll in zip(cids, (l1, l2))]\n\n assert il1[0] != i1 and il2[0] != i2\n assert il1[1] != il2[1]\n # Check that the flow is consistent along the internal bond\n assert self.coupling[c1][il1[0]][1] is not self.coupling[ci][il1[1]][1]\n assert self.coupling[c2][il2[0]][1] is not self.coupling[ci][il2[1]][1]\n\n def permute_key(key):\n copy = list(list(k) for k in key)\n copy[c1][i1], copy[c2][i2] = copy[c2][i2], copy[c1][i1]\n return copy\n f1, f2, fi = ([x[1] for x in self.coupling[c]] for c in (c1, c2, ci))\n 
self._coupling = tuple(tuple(c) for c in permute_key(self.coupling))\n\n # All good interal symmetry sectors in for the swapped 1st coupling\n nkeys = set(tuple(tuple(e) for e in permute_key(k)) for k in self)\n c1set = {}\n r11, r12 = set(range(3)).difference([il1[0]])\n for k in set(key[c1] for key in nkeys):\n kn = (k[r11], k[r12])\n if kn not in c1set:\n c1set[kn] = set(\n sls.allowed_couplings(k, f1, il1[0], self.symmetries))\n c2set = {}\n r21, r22 = set(range(3)).difference([il2[0]])\n for k in set(key[c2] for key in nkeys):\n kn = (k[r21], k[r22])\n if kn not in c2set:\n c2set[kn] = set(\n sls.allowed_couplings(k, f2, il2[0], self.symmetries))\n\n vac = sls.vacuumIrrep(self.symmetries)\n Z1 = set().union(*c1set.values())\n Z2 = set().union(*c2set.values())\n rf = set(range(3)).difference([il1[1], il2[1]]).pop()\n fit = [fi[rf], fi[il1[1]], fi[il2[1]]]\n oks = {(k1, k2): set(sls.allowed_couplings((vac, k1, k2),\n fit, 0, self.symmetries))\n for k1, k2 in itertools.product(Z1, Z2)}\n\n def mappingf(okey):\n nk = permute_key(okey)\n set1 = c1set[(nk[c1][r11], nk[c1][r12])]\n set2 = c2set[(nk[c2][r21], nk[c2][r22])]\n for kk1 in set1:\n for kk2 in set2:\n if nk[ci][rf] not in oks[(kk1, kk2)]:\n continue\n\n # Assign the key of the internal leg\n nk[c1][il1[0]], nk[ci][il1[1]] = kk1, kk1\n nk[c2][il2[0]], nk[ci][il2[1]] = kk2, kk2\n yield tuple(tuple(e) for e in nk)\n\n prefdict = sls._prefswap2(iids, il1, il2, f1, f2, fi)\n\n def prefactorf(okey, nkey):\n flokey = [list(x) for x in\n zip(*[el for j in (c1, c2, ci) for el in okey[j]])]\n flnkey = [list(x) for x in\n zip(*[el for j in (c1, c2, ci) for el in nkey[j]])]\n return np.prod([prefdict.get(ss, lambda x, y: 1.)(o, n) for\n o, n, ss in zip(flokey, flnkey, self.symmetries)])\n\n self._manipulate_coupling(mappingf, prefactorf)\n return self", "def stitch(tiles, dest):\n pass", "def swap_down(self, index):\n grid = self.from_grid\n new_grid = []\n for row in grid:\n new_grid.append(list(row))\n \n new_grid[index[0] + 1][index[1]], new_grid[index[0]][index[1]] = \\\n new_grid[index[0]][index[1]], new_grid[index[0] + 1][index[1]]\n for i in range(len(grid)):\n new_grid[i] = tuple(new_grid[i])\n\n return MNPuzzle(tuple(new_grid), self.to_grid)", "def _swap1(self, cids, iids):\n # The coupling indexes of the two legs to swap\n c1, c2 = cids\n # The index of the two legs to swap within the given coupling\n i1, i2 = iids\n assert c1 != c2\n\n # Get the connecting leg between the two couplings\n legs = self.get_legs()\n intersect = set(legs[c1]).intersection(set(legs[c2]))\n\n assert len(intersect) == 1 # Only one internal leg between couplings\n ileg = intersect.pop()\n # index of the internal leg in c1 and c2\n ii = [legs[cid].index(ileg) for cid in cids]\n\n assert ii[0] != i1 and ii[1] != i2\n # Check that the flow is consistent along the internal bond\n assert self.coupling[c1][ii[0]][1] is not self.coupling[c2][ii[1]][1]\n\n # Order such that first bond is in the one with out\n if self.coupling[c1][ii[0]][1]:\n c1, c2, i1, i2, ii = c2, c1, i2, i1, (ii[1], ii[0])\n assert not self.coupling[c1][ii[0]][1] and self.coupling[c2][ii[1]][1]\n\n def permute_key(key):\n copy = list(list(k) for k in key)\n copy[c1][i1], copy[c2][i2] = copy[c2][i2], copy[c1][i1]\n return copy\n self._coupling = tuple(tuple(c) for c in permute_key(self.coupling))\n f1, f2 = ([x[1] for x in self.coupling[c]] for c in (c1, c2))\n\n def mappingf(okey):\n nk = permute_key(okey)\n # All good interal symmetry sectors in for the swapped 1st coupling\n for k in 
sls.allowed_couplings(nk[c1], f1, ii[0], self.symmetries):\n # Assign the key of the internal leg\n nk[c1][ii[0]], nk[c2][ii[1]] = k, k\n if sls.is_allowed_coupling(nk[c2], f2, self.symmetries):\n yield tuple(tuple(e) for e in nk)\n\n prefdict = sls._prefswap1((i1, i2), ii)\n\n def prefactorf(okey, nkey):\n return np.prod([prefdict.get(ss, lambda x, y: 1.)(\n [el[i] for j in (c1, c2) for el in okey[j]],\n [el[i] for j in (c1, c2) for el in nkey[j]]\n ) for i, ss in enumerate(self.symmetries)])\n\n self._manipulate_coupling(mappingf, prefactorf)\n return self", "def run(self, iterations):\n # print(f'Before:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome before: {self.best_genome.array}, fitness={self.best_genome.fitness} ')\n\n mutator = Rand1MutationOperator(self.population, self.bounds, 0.2)\n mixer = ExponentialCrossoverOperator(self.minfun)\n replacer = ElitistReplacementOperator()\n\n for _ in range(iterations):\n candidate_population = Population(None, None, 0)\n for target in self.population.collection:\n # List with genomes who will be the donors\n mutant = mutator.apply(target)\n # Genome modified by replacing a few random positions\n candidate_genome = mixer.apply(target, mutant)\n\n candidate_population.add(candidate_genome)\n\n # Targets are replaced by candidates from the population if candidate has less fitness than target\n self.population = replacer.apply(self.population, candidate_population)\n\n # print(f'After:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome after: {self.best_genome.array}, fitness={self.best_genome.fitness} ')", "def _apply_mutation(pop, op, pb):\n for i in range(len(pop)):\n if random.random() < pb:\n pop[i], = op(pop[i])\n del pop[i].fitness.values\n return pop", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]" ]
[ "0.68163073", "0.6413612", "0.6349531", "0.61725616", "0.6144668", "0.59786785", "0.59766847", "0.5838693", "0.58274037", "0.58169585", "0.58069676", "0.58006257", "0.5762604", "0.57593423", "0.57574344", "0.571842", "0.56880665", "0.5660161", "0.5643648", "0.563159", "0.5629687", "0.56173265", "0.5615884", "0.558851", "0.55857384", "0.5568997", "0.5559965", "0.5548154", "0.55451417", "0.55230445", "0.55226105", "0.55224985", "0.5457535", "0.5448625", "0.5445214", "0.54445213", "0.5443074", "0.5437033", "0.54363924", "0.54348373", "0.5430084", "0.5421212", "0.54057586", "0.5403594", "0.5399192", "0.5392612", "0.5382001", "0.5360835", "0.5360459", "0.5352578", "0.53428286", "0.53420585", "0.5337131", "0.5320624", "0.53197014", "0.5318278", "0.5317551", "0.53164893", "0.5316284", "0.5307576", "0.52922195", "0.52777225", "0.5277244", "0.5276182", "0.5275097", "0.52719945", "0.5260964", "0.5259794", "0.5257201", "0.5251383", "0.525115", "0.5239382", "0.52351755", "0.5229717", "0.52268285", "0.52162594", "0.5210762", "0.5210125", "0.5208041", "0.52048796", "0.5201999", "0.5200576", "0.518906", "0.5187908", "0.51868767", "0.5180576", "0.51787007", "0.5170023", "0.5169996", "0.516834", "0.516586", "0.51630455", "0.51567346", "0.51548725", "0.5153162", "0.5152705", "0.5149954", "0.5148146", "0.5146909", "0.5142823" ]
0.73018396
0
Tests that the fitness of individuals is less than or equal to the specified error tolerance
def test_for_convergence(self, error_tol): list_of_best_indvs = [] for island in self._islands: best_indv = island.best_individual() list_of_best_indvs.append(best_indv) list_of_best_indvs.sort(key=lambda x: x.fitness) best_indv = list_of_best_indvs[0] converged = best_indv.fitness <= error_tol self._best_indv = best_indv self._converged = converged return converged
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _test_expected_error_bound(bellman_error_margin, optimal_res, test_res, mdp):\n test_eval = test_res.policy.evaluate_on(mdp)\n test_occ = test_eval.state_occupancy #occupancy from start state\n test_start_steps = sum(test_occ.values())\n value_diff = test_res.initial_value - optimal_res.initial_value\n within_bound = 0 <= value_diff <= bellman_error_margin*test_start_steps\n if not within_bound:\n raise OutOfExpectedErrorBound\n return bellman_error_margin*test_start_steps", "def terminate(fitness, tolerance):\n for i in fitness:\n if abs((2**(1.0 / 2)) - i) < tolerance:\n return True\n return False", "def fitness(NN):\n x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n expected_y = np.array([[0], [1], [1], [0]])\n y = NN.feed_forward(x)\n error = expected_y - y\n return 1 / (np.square(np.dot(error.T, error)).squeeze() + 0.01)", "def test_run_simulations_and_get_percentile_allele_length_2():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, allele_length=2, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def test_run_simulations_and_get_percentile_allele_length_1():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def average_fitness_termination(population, num_generations, num_evaluations, args):\r\n tolerance = args.setdefault('tolerance', 0.001)\r\n avg_fit = sum([x.fitness for x in population]) / float(len(population))\r\n best_fit = max([x.fitness for x in population])\r\n return (best_fit - avg_fit) < tolerance", "def compute_errors(gt, pred):\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = (thresh < 1.25 ).mean()\n a2 = (thresh < 1.25 ** 2).mean()\n a3 = (thresh < 1.25 ** 3).mean()\n\n rmse = (gt - pred) ** 2\n rmse = np.sqrt(rmse.mean())\n\n rmse_log = (np.log(gt) - np.log(pred)) ** 2\n rmse_log = np.sqrt(rmse_log.mean())\n\n abs_rel = np.mean(np.abs(gt - pred) / gt)\n\n sq_rel = np.mean(((gt - pred) ** 2) / gt)\n\n return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3", "def is_error(ranking, references):\n return 1 if average_precision(ranking, references) < 1 else 0", "def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = 
reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100", "def expected_error(noise_param, states):\n\n from math import comb\n import preferences\n\n comparison_errors = [\n preferences.comparison_error(\n state / states,\n noise_param\n )\n for state in range(1, states)\n ]\n\n n_choose_2 = comb(states, 2)\n\n expected_error = 0.0\n for i, p in enumerate([(states - x)/n_choose_2 for x in range(1, states)]):\n expected_error += p * comparison_errors[i]\n\n return round(expected_error, 3)", "def compute_errors(gt, pred, selector):\n gt = gt[selector]\n pred = pred[selector]\n\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = (thresh < 1.25 ).mean()\n a2 = (thresh < 1.25 ** 2).mean()\n a3 = (thresh < 1.25 ** 3).mean()\n\n rmse = (gt - pred) ** 2\n rmse = np.sqrt(rmse.mean())\n\n rmse_log = (np.log(gt) - np.log(pred)) ** 2\n rmse_log = np.sqrt(rmse_log.mean())\n\n abs_rel = np.mean(np.abs(gt - pred) / gt)\n\n sq_rel = np.mean(((gt - pred) ** 2) / gt)\n\n return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3", "def c_test_fitness_function(self, function):\r\n return 1", "def c_test_fitness_function(self, function):\r\n return 1", "def derr(min, max):\n return lambda mate: min <= mate['d_err'] <= max", "def continuous_mse(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n error = 0\r\n\r\n for test_item, truth_item in zip(test_data, truth_data):\r\n error += np.sqrt(np.mean(np.square(truth_item - test_item)))\r\n\r\n return error", "def gss(self,LB,UB,tol,itr):\n \n GoldenRatio = (math.sqrt(5) + 1) / 2\n \n iterations=0\n gss=[]\n gss_x=[LB,UB]\n \n c = UB - (UB - LB) / GoldenRatio\n d = LB + (UB - LB) / GoldenRatio\n while abs(UB - LB) > tol and iterations < itr:\n if self.Func(c) < self.Func(d):\n UB = d\n gss_x.append(UB)\n iterations+=1\n else:\n LB = c\n \n gss_x.append(LB)\n iterations+=1\n c = UB - (UB - LB) / GoldenRatio\n d = LB + (UB - LB) / GoldenRatio\n \n \n #print(\" best at %.15f\"% ((UB + LB)/2) , \"itr = \",iterations)\n gss.append(gss_x)\n gss.append((LB+UB)/2)\n gss.append(iterations)\n \n return gss", "def test_get_wrf_fitness():\n if [on_aws, on_cheyenne, on_magma].count(True) is 0:\n print('\\n!!!Not running test_wrf_era5_diff -- switch to Magma, Cheyenne, or AWS!!!')\n return\n fitness, ghi_mean_error, wpd_mean_error, runtime = get_wrf_fitness(param_ids, start_date, end_date, verbose=True)\n assert fitness >= 0\n assert ghi_mean_error >= 0\n assert wpd_mean_error >= 0\n assert type(runtime) is str", "def Catch(X,Tolerance=0):\n if X < (.5-(Tolerance/2)):\n return(0)\n elif X > (.5+(Tolerance/2)):\n return(1)\n else:\n return(.5)", "def test_ge():\n # Test for greater than or equal to special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x >= 3) == False\n assert (x >= 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for greater than or equal to special method with two scalar Rnode object\n a = Rnode(2.0)\n b = Rnode(2.0)\n c = Rnode(1.0)\n d = Rnode(1.0)\n try:\n assert (a >= b) == True\n assert (a >= c) == True\n assert (d >= a) == False\n except AssertionError as e:\n 
print(e)\n raise AssertionError", "def max_over_prediction_error(individual, test_data, truth_data, name=None, tolerance=0):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n over_predicted_inds = get_over_predicted_inds(test_data,\r\n truth_data,\r\n tolerance)\r\n if len(over_predicted_inds) == 0:\r\n return np.nan\r\n test_subset = test_data[over_predicted_inds]\r\n truth_subset = truth_data[over_predicted_inds]\r\n return np.max(test_subset - truth_subset)", "def test_error_curve(self):\r\n\r\n scores = self.test_confidences\r\n sort = numpy.argsort(scores, axis=0)\r\n sorted_scores = scores[sort]\r\n\r\n test_errors = numpy.zeros((scores.shape[0]))\r\n thresholds = numpy.zeros((scores.shape[0]))\r\n\r\n for i in range(sort.shape[0]):\r\n thresholds[i] = sorted_scores[i]\r\n test_errors[i] = numpy.sum(self.test_errors[self.test_confidences >= thresholds[i]]) / float(numpy.sum(self.test_confidences >= thresholds[i]))\r\n\r\n return test_errors, thresholds", "def test_bisection_system(testFunctions,tol, printFlag):\n pass", "def average_precent_error_over(individual, test_data, truth_data, name=None, tolerance=0):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n over_predicted_inds = get_over_predicted_inds(test_data,\r\n truth_data,\r\n tolerance)\r\n test_subset = test_data[over_predicted_inds]\r\n truth_subset = truth_data[over_predicted_inds]\r\n return average_percent_error(individual, test_subset, truth_subset)", "def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))", "def test_rr_se(results):\n truese = np.asarray([2.09826858, 30.60745128, 108.51947421, 0.95693751,\n 0.6564318])\n test_se = results.params_se()\n assert test_se == pytest.approx(truese)", "def simulationTestGaussian2(params):\r\n x = gaussian(params[0], params[1], mu-3.5*sigma, mu+3.5*sigma)\r\n error = np.sum(np.power(optimal - x, 2))/optimal.shape[0]\r\n return 1/error", "def test_ilsr_tolerance():\n vals = [np.array([-0.5, 0.5]), np.array([-0.3, 0.3]),\n np.array([-0.2, 0.2]), np.array([-0.25, 0.25])]\n lsr = Mock(side_effect=vals)\n est = _ilsr(fun=lsr, params=None, max_iter=100, tol=0.15)\n assert np.array_equal(est, vals[2])\n assert lsr.call_count == 3", "def _fp_evaluate(sequence, iteration, tolerance):\n return np.abs(sequence[iteration] - sequence[iteration - 1]) < tolerance", "def test_failed_parameter_verification() -> None:\n name = create_random_alphanumeric(10)\n # The lower bound must be smaller than the upper bound!\n parameters = [5, 1, 3]\n\n with pytest.raises(ValueError):\n UnivDist(\n name=name, distribution=DISTRIBUTION_NAME, parameters=parameters\n )\n\n # The mid-point value must be between lower and upper bounds!\n parameters = [3, 4, 10]\n\n with pytest.raises(ValueError):\n UnivDist(\n name=name, distribution=DISTRIBUTION_NAME, parameters=parameters\n )", "def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon", "def shaking_error_rate(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n total = 0\r\n num_wrong = 0\r\n for test_point, truth_point in zip(test_data, truth_data):\r\n # Nine represents a shaking event\r\n if (truth_point == 9):\r\n if np.isnan(test_point) or test_point <= 8.5 or test_point > 9.5:\r\n num_wrong += 1\r\n total += 1\r\n #if num_wrong == 0:\r\n # # Perfection implies overtraining\r\n # return 1.0\r\n 
#else:\r\n return float(num_wrong)/float(total)", "def test_G_fit(self):\r\n # test with williams correction\r\n data = [array(i) for i in [63, 31, 28, 12, 39, 16, 40, 12]]\r\n exp_G = 69.030858949133162 / 1.00622406639\r\n exp_p = 2.8277381487281706e-12\r\n obs_G, obs_p = G_fit(data, williams=True)\r\n self.assertFloatEqual(obs_G, exp_G)\r\n self.assertFloatEqual(obs_p, exp_p)\r\n # test with hand computed example and williams correction\r\n data = [array([75, 65, 48]), array([200]), array([10, 250, 13, 85])]\r\n exp_G = 85.90859811005285 / 1.0018930430667\r\n exp_p = 2.4012235241479195e-19\r\n obs_G, obs_p = G_fit(data, williams=True)\r\n self.assertFloatEqual(obs_G, exp_G)\r\n self.assertFloatEqual(obs_p, exp_p)\r\n # test without williams correction on another hand computed example\r\n data = [array([10, 12, 15, 7]), array([15, 12, 17, 18]),\r\n array([6, 9, 13])]\r\n exp_G = 1.6610421781232\r\n exp_p = 0.43582212499949591\r\n obs_G, obs_p = G_fit(data, williams=False)\r\n self.assertFloatEqual(obs_G, exp_G)\r\n self.assertFloatEqual(obs_p, exp_p)\r\n # now test that assertions raise AssertionErrors\r\n # neg_data = [array([-10,12,15,7]), array([15,12,17,18]), array([6,9,13])]\r\n # self.assertRaises(AssertionError, G_fit, neg_data)\r\n # emp_data = [array([]), array([15,12,17,18]), array([6,9,13])]\r\n # self.assertRaises(AssertionError, G_fit, emp_data)\r\n # zer_data = [array([0,0,0]), array([15,12,17,18]), array([6,9,13])]\r\n # self.assertRaises(AssertionError, G_fit, zer_data)\r", "def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))", "def test_Sobol_G_raises_error_if_values_gt_one():\n evaluate(np.array([0, 1, .02, 0.23, 1.234, 0.02848848, 0, 0.78]))", "def generalizationError(populationError, empiricalError):\n return abs(populationError - empiricalError)", "def calculate_fitness(pTarget, pCandidate):\n\terr = 0\n\tfor i in range(len(pTarget)):\n\t\terr += math.sqrt((pTarget[i] - pCandidate[i]) * (pTarget[i] - pCandidate[i])) \n\tret = 1000 - err\n\tif ret < 0:\n\t\tret = 0\n\treturn ret\n\t#return 1./err", "def compare(a, b, *, tol=1e-6):\n if abs(a - b) < tol:\n return 0.0\n elif a > b:\n return 1.0\n else:\n return -1.0", "def tolAcc(y,pred,testMat):\n\tcorrect=0\n\ttruecorrect=0\n\terrors=[]\n\tdistY = np.zeros(5)\n\tdistP = np.zeros(5)\n\tfor i in range(0,len(y)):\n\t\terrors.append(np.absolute(y[i]-pred[i]))\n\t\t#\tprint('Pred,True: {0},{1} Data: {2}'.format(pred[i],y[i],testMat[i,:]))\n\t\tdistP[pred[i]] += 1\n\t\tdistY[y[i]] += 1\n\t\tif errors[i]<=1:\n\t\t\tcorrect+=1\n\t\tif errors[i]==0:\n\t\t\ttruecorrect+=1\n\tscore = float(correct)/len(y)\n\ttruescore = float(truecorrect)/len(y)\n\tmeanEr = sum(errors)/len(errors)\n\tprint('Mean error: {0}'.format(meanEr))\n\tprint('Prediction distribution: {0}'.format(distP/float(sum(distP))))\n\tprint('Label distribution: {0}'.format(distY/float(sum(distY))))\n\n\treturn(score*100,truescore*100)", "def test_invalid_calculation_of_quantile(alpha: Any) -> None:\n n = 10\n with pytest.raises(\n ValueError, match=r\".*Number of samples of the score is too low*\"\n ):\n check_alpha_and_n_samples(alpha, n)", "def error_calculation_test(self):\n dataOrg = [[1,1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,8], [7.3, 5], [8, 0], [9,10]]\n dataCalc = [[1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [7.3, 5], [8, 0], [9,9]]\n # abs difference: 2 3 3 1 0 NA 5 0 NA 1\n # local errors: 200 150 200 50 0 NA 125 0 NA 20\n # sum: 745\n\n tsOrg = TimeSeries.from_twodim_list(dataOrg)\n tsCalc = 
TimeSeries.from_twodim_list(dataCalc)\n\n wmape = WeightedMeanAbsolutePercentageError()\n wmape.initialize(tsOrg, tsCalc)\n assert str(wmape.get_error())[:6] == \"93.125\"", "def standard_deviation_over(individual, test_data, truth_data, name=None, tolerance=0):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n over_predicted_inds = get_over_predicted_inds(test_data,\r\n truth_data,\r\n tolerance)\r\n test_subset = test_data[over_predicted_inds]\r\n truth_subset = truth_data[over_predicted_inds]\r\n return overall_standard_deviation(individual, test_subset, truth_subset)", "def tol(self):\n return self._tol", "def tol(self, value):\n self._tol = value", "def test_error_at_95tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.95))", "def tol(self) -> Real:\n return self._tol", "def test():\n X,Xval,Yval = _load_sample_data()\n mu,var = estimate_gaussian_params(X)\n pval = get_probability(Xval,mu,var)\n\n figure()\n plot(X[:,0],X[:,1],'b+',label='data'); xlabel(\"Latency (ms)\"); ylabel(\"Throughput (Mb/s)\")\n epsilon, F1 = determine_threshold(Yval,pval)\n print(\"Optimal epsilon and F1 score for sample dataset {}, {}\".format(epsilon, F1))\n plot_gaussian(mu,var,epsilon=epsilon)\n\n ## Plot Outliers\n predictions = get_probability(X,mu, var)\n outliers = X[predictions < epsilon]\n plot(outliers[:,0],outliers[:,1],'ro',mfc=None,label='outliers');\n legend()\n grid()", "def success(self, x, tol=1.e-5):\n val = self.fun(asarray(x))\n if abs(val - self.fglob) < tol:\n return True\n\n # the solution should still be in bounds, otherwise immediate fail.\n if np.any(x > np.asfarray(self.bounds)[:, 1]):\n return False\n if np.any(x < np.asfarray(self.bounds)[:, 0]):\n return False\n\n # you found a lower global minimum. 
This shouldn't happen.\n if val < self.fglob:\n raise ValueError(\"Found a lower global minimum\",\n x,\n val,\n self.fglob)\n\n return False", "def test_Sobol_G_raises_error_if_values_gt_one():\n with raises(ValueError):\n evaluate(np.array([0, 1, .02, 0.23, 1.234, 0.02848848, 0, 0.78]))", "def check_randomness(self, alpha = 0.05, cutoff='mean'):\n \n stat, p = runstest_1samp(self.x, cutoff=cutoff)\n return self._result(p,alpha)", "def max_under_prediction_error(individual, test_data, truth_data, name=None, tolerance=0):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n under_predicted_inds = get_under_predicted_inds(test_data,\r\n truth_data,\r\n tolerance)\r\n if len(under_predicted_inds) == 0:\r\n return np.nan\r\n test_subset = test_data[under_predicted_inds]\r\n truth_subset = truth_data[under_predicted_inds]\r\n return np.max(truth_subset - test_subset)", "def calc_error_dist(self):\n pass", "def test_threshold_range_a(self):\n code, out, err = self.t.runError(\"--threshold --max 3.1 --min 3.2\")\n self.assertIn(\"The min value must be lower than the max value.\", out)", "def test_const_evaluate():\n pop = test_population\n pop = ops.const_evaluate(pop, value=123456789.0)\n \n for ind in pop:\n assert(pytest.approx(123456789.0) == ind.fitness)", "def testSpeciesRichnessNear(self):\n self.assertAlmostEqual(1.0, self.tree1.get_number_individuals() / self.tree2.get_number_individuals(), 0)", "def is_good_qualtiative_example(iaa_score, ann1_total, ann2_total):\n return iaa_score > .3 and iaa_score < 1 and ann1_total > 3 and ann2_total > 3", "def test_error_at_confidence(self, threshold):\r\n\r\n nominator = numpy.sum(numpy.logical_and(self.test_errors, self.test_confidences >= threshold))\r\n denominator = numpy.sum(self.test_confidences >= threshold)\r\n if denominator > 0:\r\n return nominator / float(denominator)\r\n else:\r\n return 0", "def checkStdDev(df,thr):\n greaterThanThreshold = True\n positions= np.array([])\n for i in range(1,df.shape[0]):\n stdDev = np.std(df.iloc[i,1:].astype(np.longdouble))\n if (stdDev < thr):\n greaterThanThreshold = False\n positions = np.append(positions,i)\n \n return greaterThanThreshold", "def test_gt():\n # Test for greater than special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x > 3) == False\n assert (x > 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for greater than special method with two scalar Rnode object\n a = Rnode(2.0)\n b = Rnode(2.0)\n c = Rnode(1.0)\n d = Rnode(1.0)\n try:\n assert (a > b) == False\n assert (a > c) == True\n assert (a > d) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def average_precent_error_under(individual, test_data, truth_data, name=None, tolerance=0):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n under_predicted_inds = get_under_predicted_inds(test_data,\r\n truth_data,\r\n tolerance)\r\n test_subset = test_data[under_predicted_inds]\r\n truth_subset = truth_data[under_predicted_inds]\r\n return average_percent_error(individual, test_subset, truth_subset)", "def test_bisection(testFunctions, ranges,tol, printFlag):\n \n for i in range(len(testFunctions)):\n scipyValue = sp.optimize.bisect(testFunctions[i],ranges[i,0],ranges[i,1])\n nonlinValue =\n pass", "def test_fitness():\n herb1 = Herbivore(0)\n herb2 = Herbivore(80)\n nt.assert_not_equal(herb1.fitness, herb2.fitness)\n herb3 = Herbivore(20, 0)\n herb4 = Herbivore(20, 80)\n 
nt.assert_not_equal(herb3.fitness, herb4.fitness)", "def assert_compare(x, y, atol=1e-5, method='ALL'):\n mae = 0\n mse = 0\n rmse = 0\n result = 0\n if method == 'MAE':\n mae = np.abs(x-y).mean()\n result = mae\n elif method == 'RMSE':\n rmse = np.sqrt(np.square(x - y).mean())\n result = rmse\n #result=np.sqrt(((x - y) ** 2).mean())\n elif method == 'MSE':\n mse = np.square(x - y).mean()\n result = mse\n #result=((x - y) ** 2).mean()\n else:\n mae = np.abs(x-y).mean()\n rmse = np.sqrt(np.square(x - y).mean())\n mse = np.square(x - y).mean()\n\n if result > atol or (method == 'ALL' and (mae > atol or rmse > atol or mse > atol)):\n f = six.StringIO()\n f.write(\n 'assert_compare failed: \\n' +\n ' atol: {} \\n'.format(atol) +\n ' method: {}\\n'.format(method) +\n ' MAE: {}\\n'.format(mae) +\n ' MSE: {}\\n'.format(mse) +\n ' RMSE: {}\\n'.format(rmse) +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} {}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n logging.warning(f.getvalue())\n return False\n else:\n return True", "def error(self, in_sample=True):\n if in_sample:\n error = 0.0\n for i, point in enumerate(self.X):\n if self.Y[i] != self.rbf_classify(point):\n error += 1\n return error / 100\n else:\n error = 0.0\n for i, point in enumerate(self.test_X):\n if self.test_Y[i] != self.rbf_classify(point):\n error += 1\n return error / 10000", "def error(clf, X, y, ntrials=100, test_size=0.2) :\n\n train_error = 0\n test_error = 0\n ### ========== TODO : START ========== ###\n # compute cross-validation error over ntrials\n # hint: use train_test_split (be careful of the parameters)\n for i in range(0,ntrials, 1):\n #get the value of the error for each division\n #train on the test data for the clf\n #test also on the data\n #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 42)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= test_size, random_state=i)\n #now find the error\n #first train the model\n #then predict\n #check the accuracy\n clf.fit(X_train,y_train)\n y_pred = clf.predict(X_train)\n #now find the error for the train_error\n train_err = 1 - metrics.accuracy_score(y_train, y_pred, normalize=True)\n train_error += train_err\n\n y_pred = clf.predict(X_test)\n test_err = 1 - metrics.accuracy_score(y_test, y_pred, normalize=True)\n test_error += test_err\n\n\n #get the average\n train_error = float(train_error)/((1-test_size)*len(X))\n test_error = float(test_error)/((test_size)*len(X))\n ### ========== TODO : END ========== ###\n\n return train_error, test_error", "def passed(self) -> Sequence[bool]:\n return [abs(error) < self._tolerance for error in self.error]", "def test_gt_1():\n a = FixedPoint(1, 'Q2.8')\n assert a > 0.9", "def stop(self, tol):\n\n self.converged = abs(np.median(self.pop_fitness)) < tol", "def test_le():\n # Test for less than or equal to special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x <= 3) == True\n 
assert (x <= 2) == True\n assert (x <= 1) == False\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for less than or equal to special method with two scalar Rnode object\n a = Rnode(2.0)\n b = Rnode(2.0)\n c = Rnode(1.0)\n d = Rnode(1.0)\n try:\n assert (a <= b) == True\n assert (a <= c) == False\n assert (a <= d) == False\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_N_ge_M(self):\n\t\tdetails = self.watcher.describe()\n\t\tM = details.M.to_numpy()\n\t\tN = details.N.to_numpy()\n\t\tself.assertTrue((N >= M).all)", "def test_N_ge_M(self):\n\t\tdetails = self.watcher.describe()\n\t\tM = details.M.to_numpy()\n\t\tN = details.N.to_numpy()\n\t\tself.assertTrue((N >= M).all)", "def test_N_ge_M(self):\n\t\tdetails = self.watcher.describe()\n\t\tM = details.M.to_numpy()\n\t\tN = details.N.to_numpy()\n\t\tself.assertTrue((N >= M).all)", "def check_error(gluon_output, k_model, input_np, epsilon=1e-4):\n gluon_output = gluon_output.asnumpy()\n keras_output = k_model.predict(input_np)\n\n error = np.max(gluon_output - keras_output)\n print('Error:', error)\n\n assert error < epsilon\n return error", "def test_genomic(self):\n self.c.execute(\"\"\"select expIds,expScores from genomic_test\"\"\")\n rows = self.c.fetchall()\n self.assertEqual(len(rows), 1) # one probe\n self.assertEqual(rows[0][0], '0,1,2,3,4') # ordered by sample id\n values = map(lambda x: float(x), rows[0][1].split(',')) # scores are in correct order\n self.assertTrue(values[0] - 0.479005065149792 < self.tolerance)\n self.assertTrue(values[1] - 25.1 < self.tolerance)\n self.assertTrue(values[2] - 5.3 < self.tolerance)\n self.assertTrue(values[3] - 3.1 < self.tolerance)\n self.assertTrue(values[4] - -1.23 < self.tolerance)", "def error(clf, X, y, ntrials=100, test_size=0.2) :\n\n ### ========== TODO : START ========== ###\n # compute cross-validation error using StratifiedShuffleSplit over ntrials\n # hint: use train_test_split (be careful of the parameters)\n train_error = 0\n test_error = 0\n f1_score = 0\n sss = StratifiedShuffleSplit(n_splits = ntrials, test_size = test_size, random_state = 0)\n for train_index, test_index in sss.split(X, y):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n clf.fit(X_train, y_train)\n y_pred_train = clf.predict(X_train)\n y_pred_test = clf.predict(X_test)\n train_error += float(1 - metrics.accuracy_score(y_train, y_pred_train, normalize=True))\n test_error += float(1 - metrics.accuracy_score(y_test, y_pred_test, normalize=True))\n f1_score += metrics.f1_score(y_test, y_pred_test, average = \"micro\")\n\n train_error = train_error/ntrials\n test_error = test_error/ntrials\n f1_score = f1_score/ntrials\n ### ========== TODO : END ========== ###\n\n return train_error, test_error, f1_score", "def test_failure_and_non_convergence(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n # Choose a bad initial position.\n initial_values = np.zeros(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = newton_root_finder(objective_and_gradient, initial_values)\n _, converged, failed = self.evaluate(roots)\n\n # Reference values - we should not have converged and should have failed.\n converged_bench = np.array([False, False, False])\n failed_bench = np.array([True, True, 
True])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)", "def constant_r_success(r_target, tol):\n def isdone(node_dict):\n \"\"\"return a delta heading value to best turn to heading\"\"\"\n # compute heading difference\n hdiff = heading_diff(r_target, node_dict['pn'].heading)\n # return if we are we close enough\n return abs(hdiff) < abs(tol)\n\n # return the function we just created\n return isdone", "def test__compute_tolerance_distance():\n classifier = classifier_module.Classifier(None)\n L1 = [11.2, 41.43, 1.33]\n L2 = [10.9, 41.45, 1.34]\n L3 = [12.0, 41.4412, 1.001]\n L4 = [11.3, 41.15, 1.12]\n L5 = [11.223, 41.0, 1.31]\n AL = [L1, L2, L3, L4, L5]\n symbol = \"a\"\n classifier._compute_tolerance_distance(AL, symbol)\n tolerance_distance_path = \\\n classifier_module.Classifier._get_file_path( \\\n classifier.files[classifier_module.DISTANCE_TOLERANCE_FILE], symbol)\n file_with_tolerance_distance = \\\n open(tolerance_distance_path, 'r')\n tolerance_distance = float(file_with_tolerance_distance.readline())\n file_with_tolerance_distance.close()\n assert fabs(tolerance_distance - 0.5506099238118276) < epsilon", "def class4AccuracyEvalFunction(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n total = 0\r\n num_wrong = 0\r\n for test_point, truth_point in zip(test_data, truth_data):\r\n if truth_point == 4:\r\n if test_point <= 2 or test_point > 4:\r\n num_wrong += 1\r\n total += 1\r\n if num_wrong == 0:\r\n # We don't want 'perfect' equate it with 100% error\r\n return 1.0\r\n else:\r\n return float(num_wrong)/float(total)", "def test_evaluate_error_score(error_score, return_data, strategy, backend):\n # skip test for dask backend if dask is not installed\n if backend == \"dask\" and not _check_soft_dependencies(\"dask\", severity=\"none\"):\n return None\n\n forecaster = ExponentialSmoothing(sp=12)\n y = load_airline()\n # add NaN to make ExponentialSmoothing fail\n y.iloc[1] = np.nan\n fh = [1, 2, 3]\n cv = ExpandingWindowSplitter(step_length=48, initial_window=12, fh=fh)\n if error_score in [np.nan, 1000]:\n with pytest.warns(FitFailedWarning):\n results = evaluate(\n forecaster=forecaster,\n y=y,\n cv=cv,\n return_data=return_data,\n error_score=error_score,\n strategy=strategy,\n backend=backend,\n )\n if isinstance(error_score, type(np.nan)):\n assert results[\"test_MeanAbsolutePercentageError\"].isna().sum() > 0\n if error_score == 1000:\n assert results[\"test_MeanAbsolutePercentageError\"].max() == 1000\n if error_score == \"raise\":\n with pytest.raises(Exception): # noqa: B017\n evaluate(\n forecaster=forecaster,\n y=y,\n cv=cv,\n return_data=return_data,\n error_score=error_score,\n strategy=strategy,\n )", "def equalWithinTolerance(a, b, tol):\n return abs(a - b) <= tol", "def test_kl_divergence(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n kl = kl_divergence(dist_a, dist_b)\n if i == j:\n assert pytest.approx(kl, 0.0001) == 0.0\n else:\n assert kl > 0", "def fittest(self, population, f_thres=None):\n pass", "def expected_improvement(f_min, mu, sigma):\n # log-scaling might not be the best idea here, especially\n # if people use negative values to maximize output\n # v = (np.log(f_min) - mu) / sigma\n v = (f_min - mu) / sigma\n return (f_min * norm.cdf(v)\n - (np.exp(0.5 * sigma ** 2 + mu)\n 
* norm.cdf(v - sigma)))", "def compare_error_bounds( model_fname, log_fname, delta = 0.1 ):\n gmm = GaussianMixtureModel.from_file( model_fname )\n k, d, M, w = gmm.k, gmm.d, gmm.means, gmm.weights\n\n P, T = exact_moments( M, w )\n\n lg = sc.load( log_fname )\n\n # TODO: Use concentration bounds on aerr_P12\n n_M, sk_M = lg[\"norm_M_2\"], lg[\"s_k_M\"], \n e_P, e_T = lg[\"aerr_P_2\"], lg[\"aerr_T\"], \n n_P, sk_P, n_T = lg[\"norm_Pe_2\"], lg[\"s_k_P\"], lg[\"norm_Te\"]\n w_min = min(w)\n\n # TODO: Ah, not computing sigma2! \n\n # alpha_P and \\beta_P\n a_P = e_P/sk_P\n b_P = a_P/(1-a_P)\n\n e_Wb = 2/sqrt(sk_P) * b_P\n e_W = lg[\"aerr_W_2\"]\n\n e_Twb = 1/sqrt(sk_M * (1-a_P)) * e_T + n_T/sk_M * (1 + 1/sqrt(1-a_P) + 1/(1-a_P)) * e_W\n e_Tw = lg[\"aerr_Tw\"]\n\n e_Lb = e_Tw\n e_L = lg[\"aerr_lambda\"]\n\n D_M = column_sep( M )\n D_Tw = delta/(sqrt(sc.e) * k**2 * (1+sqrt(2 * log(k/delta)))) * D_M\n e_vb = 4 * sqrt(2) * e_Tw / D_Tw\n e_v = lg[\"aerr_v_col\"]\n\n e_Wtb = 2 * sqrt( n_P + e_P ) * b_P\n n_Wtb = sqrt( n_P + e_P )\n\n e_mub = e_Lb + (1+1/sqrt(w_min)) * n_Wtb * e_vb + e_Wtb\n e_mu = lg[\"aerr_M_col\"]\n\n print \"A\\t\\tbound\\t\\tactual\"\n print \"W\\t\\t%f\\t\\t%f\" % (e_Wb, e_W)\n print \"Tw\\t\\t%f\\t\\t%f\" % (e_Twb, e_Tw)\n print \"L\\t\\t%f\\t\\t%f\" % (e_Lb, e_L)\n print \"v\\t\\t%f\\t\\t%f\" % (e_vb, e_v)\n print \"mu\\t\\t%f\\t\\t%f\" % (e_mub, e_mu)\n return [(e_W/e_Wb), (e_Tw/e_Twb), (e_L / e_Lb), (e_v/e_vb), (e_mu / e_mub),]", "def test_evaluation_error():\n folding = Folding(DATASET,\n reset=True)\n\n for fold in folding.fold():\n sjob = SamplingJob(fold,\n OVERSAMPLER,\n OVERSAMPLER_PARAMS)\n\n result = sjob.do_oversampling()\n\n ejob = EvaluationJob(result,\n [('smote_variants.classifiers',\n 'ErrorWarningClassifier',\n {'raise_value_error': True})])\n\n result_eval = ejob.do_evaluation()\n\n assert len(result_eval[0]['error']) > 3\n\n ejob = EvaluationJob(result,\n [('smote_variants.classifiers',\n 'ErrorWarningClassifier',\n {'raise_runtime_error': True})])\n\n result_eval = ejob.do_evaluation()\n\n assert len(result_eval[0]['error']) > 3", "def test_too_low_max_iterations(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n initial_values = np.ones(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = newton_root_finder(\n objective_and_gradient, initial_values, max_iterations=1)\n _, converged, failed = self.evaluate(roots)\n\n # Reference values - we should neither have converged nor failed.\n converged_bench = np.array([False, False, False])\n failed_bench = np.array([False, False, False])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)", "def test_genextreme_fit(self):\n p = generic.fit(self.genextreme, \"genextreme\")\n np.testing.assert_allclose(p, (0.20949, 297.954091, 75.7911863), 1e-5)", "def tolerance(self):\n return self.params['tolerance']", "def test_uniform(self):\n\n for size in range(1, 10):\n uniformvar = uniform(range(size))\n for n in range(size):\n self.assertTrue(\n math.isclose(uniformvar[n], 1 / size, rel_tol=1e-05,\n abs_tol=1.0))", "def test_fail_tailed_option(self):\n\n with self.assertRaises(ValueError):\n _p_value_and_confidence_intervals(2.3, 100, 'greater')", "def 
test_calculate_class_2_individuals_best_response_markov_upper_and_lower_bounds():\n assert (\n calculate_class_2_individuals_best_response(\n lambda_2=2,\n lambda_1_1=1,\n lambda_1_2=1,\n mu_1=2,\n mu_2=2,\n num_of_servers_1=3,\n num_of_servers_2=3,\n threshold_1=3,\n threshold_2=3,\n system_capacity_1=5,\n system_capacity_2=5,\n buffer_capacity_1=4,\n buffer_capacity_2=4,\n lower_bound=0.1,\n upper_bound=0.2,\n )\n == 1\n )\n\n assert (\n calculate_class_2_individuals_best_response(\n lambda_2=2,\n lambda_1_1=1,\n lambda_1_2=1,\n mu_1=2,\n mu_2=2,\n num_of_servers_1=3,\n num_of_servers_2=3,\n threshold_1=3,\n threshold_2=3,\n system_capacity_1=5,\n system_capacity_2=5,\n buffer_capacity_1=4,\n buffer_capacity_2=4,\n lower_bound=0.8,\n upper_bound=0.9,\n )\n == 0\n )", "def class0AccuracyEvalFunction(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n total = 0\r\n num_wrong = 0\r\n for test_point, truth_point in zip(test_data, truth_data):\r\n if truth_point == 0:\r\n if test_point < 0 or test_point > 2:\r\n num_wrong += 1\r\n total += 1\r\n if num_wrong == 0:\r\n # We don't want 'perfect' equate it with 100% error\r\n return 1.0\r\n else:\r\n return float(num_wrong)/float(total)", "def ge_success_func(target, result):\n if result is None:\n return False\n return result >= target", "def evaluate_fit_range(predicted, fit_range):\n test1 = (predicted[0] >= fit_range[0])\n test2 = (predicted[0] <= fit_range[1])\n test3 = (predicted[1] >= fit_range[2])\n test4 = (predicted[1] <= fit_range[3])\n return all([test1, test2, test3, test4])", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_convergence(ODE, alg, expected_rate):\n final_t = 5*ODE.dt_init\n dts = [ODE.dt_init/2**i for i in range(4)]\n steppers = [alg(0, dt, ODE.q_init, ODE.A) for dt in dts]\n\n for s in steppers:\n s.stepUntil(final_t)\n\n errs = [np.linalg.norm(s.q - ODE.exact(s.t), ord=np.inf) for s in steppers]\n\n p, logM = np.polyfit(np.log10(dts), np.log10(errs), 1)\n\n # This does not need to be especially close. Being within a digit or two\n # is enough to demonstrate convergence.\n assert np.isclose(p, expected_rate, rtol=1e-2, atol=0)", "def test_check_distribution1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_distribution(distribution_fail_1)\n assert str(err_info.value) == 'distribution type input not within range of index'", "def Q4_test():\n chemin = [3,2,1,0]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n d = longueur(chemin, tab_dist)\n return (d > 13.34523076) and (d < 13.34523077)" ]
[ "0.67049265", "0.6273725", "0.5888294", "0.5867151", "0.5821513", "0.58181053", "0.5802155", "0.57919264", "0.57553136", "0.5714477", "0.56883866", "0.56820804", "0.56820804", "0.56757665", "0.56673026", "0.5622294", "0.56171614", "0.56007904", "0.5595701", "0.55744684", "0.5573483", "0.5569411", "0.5566425", "0.5564774", "0.5564383", "0.5558858", "0.55489767", "0.55461675", "0.5532688", "0.55259806", "0.5524135", "0.552156", "0.5521518", "0.5514425", "0.5506208", "0.5504658", "0.5497747", "0.5495412", "0.5488193", "0.5485833", "0.54855305", "0.5475599", "0.5464306", "0.5453234", "0.5433458", "0.54257166", "0.5422417", "0.5414165", "0.5412936", "0.5412922", "0.5411806", "0.5407783", "0.5405412", "0.5403647", "0.5401373", "0.5401136", "0.54007167", "0.53975624", "0.5387963", "0.5387552", "0.53808016", "0.53706884", "0.5363846", "0.5357757", "0.5357381", "0.5352371", "0.53427", "0.5338184", "0.53357697", "0.53357697", "0.53357697", "0.5325895", "0.53215116", "0.5320688", "0.5313294", "0.5307738", "0.53050536", "0.5303472", "0.52994937", "0.52978843", "0.5295754", "0.5293013", "0.5287064", "0.5287045", "0.52761185", "0.52745", "0.52725774", "0.5264488", "0.52636856", "0.52633834", "0.52573436", "0.5254657", "0.5253512", "0.5252134", "0.52401066", "0.52401066", "0.52401066", "0.52339095", "0.52330166", "0.52312523" ]
0.7107953
0
Returns the best individual if the islands converged to an acceptable fitness. Returns
def get_best_individual(self): return self._best_indv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _best_individual(self):\n return max(self._population, key=attrgetter(\"fitness\"))", "def best_individual(self):\n return self.population.best_individual()", "def get_best(self) -> Chromosome:\n if not (self._best_chromosome is None): # if the best chromosome is unchanged since the last calculation\n return self._best_chromosome\n\n best = None\n best_fitness = None\n\n for chromosome in self._population:\n chromosome_fitness = chromosome.get_fitness()\n\n if best_fitness is None or self._is_fitter(chromosome_fitness, best_fitness):\n best = chromosome\n best_fitness = chromosome_fitness\n\n return best", "def result(self):\n return min(self.population, key=lambda individual: individual.get_fitness())", "def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]", "def get_best_fitness(self):\n f = max(self.characters, key=operator.attrgetter('fitness'))\n self.best_fitness = round(f.fitness, 3)\n self.best_candidate = f", "def _find_solution(self, population, num_of_best_chromosomes):\n data = self._Individuals()\n for x in population:\n curr_fit = self._fitness(x)\n data.append_object(self._Individual(curr_fit, x))\n return data.sort_objects()[:num_of_best_chromosomes]", "def get_maximum_fitness(self) -> float:\n anticipated_change_cls = [cl for cl in self\n if cl.does_anticipate_change()]\n\n if len(anticipated_change_cls) > 0:\n best_cl = max(anticipated_change_cls, key=lambda cl: cl.fitness)\n return best_cl.fitness\n\n return 0.0", "def _get_best(self, populations, func):\n best = None\n for population in populations:\n for item in population:\n if not best:\n best = item\n elif func.fit(*item) > func.fit(*best):\n best = item\n return best", "def get_best_individual(population: List[IndividualType]) -> IndividualType:\n best_individual = population[0]\n for individual, rating in population:\n if rating < best_individual[1]:\n best_individual = (individual, rating)\n return best_individual", "def get_worst_fitness(self):\n f = min(self.characters, key=operator.attrgetter('fitness'))\n self.worst_fitness = round(f.fitness, 3)", "def test_for_convergence(self, error_tol):\n list_of_best_indvs = []\n for island in self._islands:\n best_indv = island.best_individual()\n list_of_best_indvs.append(best_indv)\n list_of_best_indvs.sort(key=lambda x: x.fitness)\n\n best_indv = list_of_best_indvs[0]\n converged = best_indv.fitness <= error_tol\n\n self._best_indv = best_indv\n self._converged = converged\n return converged", "def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]", "def get_individual_fitness(individual):\r\n fitness = 0\r\n # A COMPLETER\r\n \r\n #Si distance avec le point objectif diminue, alors fitness augmente ?\r\n \r\n return fitness", "def get_personal_best(self):\n return self._personal_best", "def get_best(self, population):\n best = min(population, key=self.cost_function)\n return best, self.cost_function(best)", "def bestIndividual(hof, X, y):\n maxAccurcy = 0.0\n for individual in hof:\n #print(individual.fitness.values)\n #print(maxAccurcy)\n if(individual.fitness.values[0] > maxAccurcy):\n maxAccurcy = individual.fitness.values\n _individual = individual\n\n _individualHeader = [list(X)[i] for i in range(\n len(_individual)) if _individual[i] == 1]\n return 
_individual.fitness.values, _individual, _individualHeader", "def get_fittess_tour(self) -> Tour:\n # max_index = 0\n # for i in range(1, self.population_size):\n # if self.population[i].fitness > self.population[max_index]:\n # max_index = i\n # return self.population[max_index]\n return self.population[0]", "def best_bat(self):\n\n i = 0\n j = 0\n for i in range(self.NP):\n if self.Fitness[i] < self.Fitness[j]:\n j = i\n for i in range(self.D):\n self.best[i] = self.Sol[j][i]\n self.f_min = self.Fitness[j]", "def get_fitness(self):\n if self.fitness == 0:\n self.fitness = 1 / self.get_cost()\n return self.fitness", "def best(self):\n self.population.ascendent_sort()\n self.best_genome = self.population.collection[0]\n return self.best_genome", "def best_value(self):\r\n return self._best_value", "def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]", "def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", "def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", "def calculate_population_fitness(self):\n for individual in tqdm(self.current_generation):\n individual.fitness = self.fitness_function(\n individual.genes, self.seed_data)\n log.info(f'Current best validation accuracy: {max([x.fitness for x in self.current_generation])}')", "def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]", "def acceptance(current, candidate,fitness):\n cost_current = fitness(current)\n cost_candidate = fitness(candidate)\n if cost_current < cost_candidate: # Minimization\n return current\n else:\n return candidate", "def fitness(self):\n # TO BE DECIDED\n return 1", "def evaluate_fitness(self):\r\n fitness = 0.0\r\n # TO-DO: Write your fitness evaluation code here:\r\n \r\n if self.graph is not None:\r\n try:\r\n fitness = 1.0 / algorithms.sdr_widgerson(\r\n self.graph, self.values[0], self.values[1]\r\n )\r\n except RuntimeError:\r\n fitness = 1 / (2 ** 63)\r\n else:\r\n raise RuntimeError(\"Particle graph has not been set!\")\r\n \r\n # END TO-DO\r\n self.current_fitness = fitness\r\n \r\n # Check if we've got a better result\r\n if fitness > self.best_fitness:\r\n # Update the best 
performance accordingly\r\n self.best_fitness = fitness\r\n self.personal_best = self.values[:]\r\n self.best_coloring = copy.deepcopy(self.graph)\r\n \r\n self.sync = True", "def best_genome(self) -> Genome:\n return self._candidate", "def get_best(self):\n if len(self._table) == 0:\n self.log.warning(\"table is empty, cannot extract best value\")\n raise ValueError()\n\n max_prob = -np.inf\n max_assignment = None\n for assignment in self._table.keys():\n prob = self._table[assignment]\n if prob > max_prob:\n max_prob = prob\n max_assignment = assignment\n\n # TODO: check refactor > there is no case of max_assignment is None\n return max_assignment if max_assignment is not None else Assignment.create_default(self._head_vars)", "def best(self):\n if len(self) == 0:\n return None\n return max_elems(self, key=attr.getter(\"value\"), gt=self.solver.sense.is_better)[0]", "def get_best( self ):\n if len(self.listScore) < 1:\n if self.bMinimumIsBest: return 9999,\"Unknown\"\n else: return -1,\"Unknown\"\n return self.listScore[0]", "def get_best_value(self):\n # Todo: implement\n best_value_global = -inf\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n best_value_global = particle.best_value\n return best_value_global # Remove this line", "def find_best_solution_and_score(self):\r\n best_score = MAXSIZE\r\n best_solution = self.simulation.solutions[0]\r\n for solution in self.simulation.solutions:\r\n score = self.simulation.fitting_function.fit_score(solution)\r\n if score < best_score:\r\n best_score = score\r\n best_solution = solution\r\n return best_solution, best_score", "def personal_best(scores):\n return max(scores)", "def best_feas_seq(self):\n is_better = self.solver.sense.is_better\n best = self.solver.sense.worst_value\n for sol in self:\n if sol.is_feasible and is_better(sol.value, best):\n best = sol.value\n yield sol", "def get_fitness(self):\n hard_conflicts = self.get_conflicts()\n soft_conflicts = self.get_soft_conflicts()\n hard_fitness = 1 / hard_conflicts if hard_conflicts != 0 else math.inf\n soft_fitness = 1 / soft_conflicts if soft_conflicts != 0 else math.inf\n return [hard_fitness, soft_fitness]", "def best_observer(population, num_generations, num_evaluations, args):\r\n print(\"Best Individual: {0}\\n\".format(str(max(population))))", "def search_best_goal_node(self):\n\n dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]\n goal_indexes = [\n dist_to_goal_list.index(i)\n for i in dist_to_goal_list\n if i <= self.expand_dis\n ]\n\n safe_goal_indexes = []\n for goal_index in goal_indexes:\n t_node = self.steer(self.node_list[goal_index], self.goal_node)\n if self.check_collision(t_node, self.obstacle_list):\n safe_goal_indexes.append(goal_index)\n\n if not safe_goal_indexes:\n return None\n\n min_cost = min([self.node_list[i].cost for i in safe_goal_indexes])\n for i in safe_goal_indexes:\n if self.node_list[i].cost == min_cost:\n return i\n\n return None", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def compute_best_guess(self) -> str:\n entropy_all = self.compute_entropy_all()\n return entropy_all.idxmax()", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n 
best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def fitness(self,*val):\n if len(val): self._fitness = val[0]\n return self._fitness", "def search_loop(max_generations, individuals, grammar, replacement, selection, fitness_function):\n #Evaluate initial population\n evaluate_fitness(individuals, grammar, fitness_function)\n best_ever = max(individuals)\n individuals.sort(reverse=True)\n print_stats(1, individuals)\n for generation in range(2, (max_generations+1)):\n individuals, best_ever = step(\n individuals, grammar, replacement, selection, fitness_function, best_ever)\n print_stats(generation, individuals)\n return best_ever", "def fitness(individual, n_clusters=3, n_seeds=5):\n\n dataframe = common.scale_dataframe(individual)\n corr = abs(individual.dataframe.corr().iloc[0, 1])\n differences = []\n for seed in range(n_seeds):\n km = KMeans(n_clusters, random_state=seed).fit(dataframe)\n differences.append(silhouette_score(dataframe, km.labels_) - corr)\n\n best = max(differences)\n return best", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def best_node(self):\n nodes = self._all_nodes()\n sorted_nodes, _ = self.scorer.sort(nodes)\n return sorted_nodes[0]", "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]", "def fitness(individual):\n different_pos = 0\n return different_pos", "def choose_best_neighbour_simple(self):\n\t\trejected = set([]) #list of prohibited indexes which are rejected because of tabu and energy\n\t\tnIndex = -1\n\t\twhile(True):\n\t\t\tnIndex = self._find_min_diff(rejected=rejected)\t\t#index of best neighbor\n\n\t\t\tif self.is_tabu(nIndex):\n\t\t\t\toutput(message=\"\\t Neuron is in tabu. 
Need to check the aspiration criteria\",isDebug=True)\n\t\t\t\tif self.aspiration_criteria_satisfied(nIndex):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\trejected.add(nIndex)\n\t\t\telse:\n\t\t\t\tbreak\n\t\t# output(\"Neuron is found\",isDebug=True)\n\t\treturn nIndex", "def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]", "def best_genome(self):\n return self.best_genomes(1)[0]", "def find_leader(self):\r\n # Initialize the leader fitness as an arbitrarly bad value\r\n leaderFitness = -(2**63)\r\n \r\n for number in range(POPULATION_SIZE):\r\n if self.population[number].current_fitness > leaderFitness:\r\n leaderFitness = self.population[number].current_fitness\r\n self.leader = number", "def test_get_best_candidate(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n cand_one = LAss.get_next_candidate(name)\n cand_one.result = 1\n LAss.update(name, cand_one)\n\n cand_two = LAss.get_next_candidate(name)\n cand_two.result = 0\n LAss.update(name, cand_two)\n\n assert_equal(cand_two, LAss.get_best_candidate(name))", "def get_best_particle(self):\n index = self.weights.argmax()\n return self.particles[index, :]", "def getAction(self, gameState):\n bestVal = -INF\n bestAction = None\n searchDepth = self.depth * gameState.getNumAgents()\n for action in gameState.getLegalActions(0):\n state = gameState.generateSuccessor(0, action)\n newVal = self.minimax(state, 1, searchDepth - 1)\n if newVal > bestVal:\n bestVal = newVal\n bestAction = action\n return bestAction", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "def __find_best(self):\n # First look for offensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.opponent_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n if row.get(self.opponent_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.opponent_char) == 2:\n return diag.get('empty')[0]\n\n # Then check again looking for defensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.player_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n if row.get(self.player_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.player_char) == 2:\n return diag.get('empty')[0]\n\n ##### CLEAN THIS METHOD UP LATER #####\n return None", "def selection(self):\n\n # sort the generation according to fitness.\n self.sortByFitness()\n # get the fitness sum.\n fitnessSum = 0\n for outfit in self.currentGeneration:\n fitnessSum += self.applyFitness(outfit)\n # generate a 
random number\n stop = random.uniform(0, 1)\n accumulated = 0\n offset = 0\n for outfit in self.currentGenerationSorted:\n fitness = self.applyFitness(outfit) + offset\n probability = fitness / fitnessSum\n accumulated += probability\n\n if stop <= accumulated:\n return outfit", "def best_fis(self,rng):\n return evolve_fis(self.clusters(rng),self.gen_fis,self.eval_fis)", "def _find_largest_candidate(self, reduced):\n nbr_counts = np.count_nonzero(reduced == 0, axis=0) # = [1, 1, 4, 2,...] where each value is the number of neighbours for the variant at that index.\n count_max = nbr_counts.max()\n if count_max == 0: # Indicates there are no available variants close enough\n return None, [] # to the remaining unassigned. Usually raises an error.\n max_inds = np.nonzero(nbr_counts == count_max)[0] # Array containing the indices of all variants with the max number of neighbours.\n if len(max_inds) == 1: # A single largest cluster\n best_center = max_inds[0]\n best_clstr = np.nonzero(reduced[:,best_center] == 0)[0]\n else: # A tie for largest cluster. Broken by smallest sum of full scores\n # This was tested with the below more accurate and true scoring function. Unfortunately it became hideously slow (clustered_inds and centre_inds were given as args):\n # clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n # covered_inds = list(clustered_inds | set(clstr_inds))\n # centre_inds.append(max_ind)\n # score = np.sum(np.min(self.orig_dists[np.ix_(covered_inds,centre_inds)], axis=1))\n # centre_inds.pop()\n best_center, best_clstr, best_score = None, [], np.inf\n for max_ind in max_inds:\n clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n score = np.sum(self.orig_dists[clstr_inds,max_ind])\n if score < best_score:\n best_center, best_clstr, best_score = max_ind, clstr_inds, score\n return best_center, best_clstr", "def _minimax_decision(gameState):\n # The built in `max()` function can be used as argmax!\n return max(gameState.get_legal_moves(),\n key=lambda m: min_value(gameState.forecast_move(m)))", "def get_best_nest(nest, newnest, fitness):\n\n\tfor i in range(nest.shape[0]):\n\t\tfnew=fobj(newnest[i,:])\n\t\tif fnew < fitness[i]:\n\t\t\tfitness[i]=fnew\n\t\t\tnest[i,:]=newnest[i,:]\n\n\tfmin = min(fitness)\n\tK = np.argmin(fitness)\n\tbest = nest[K,:]\n\treturn (fmin, best, nest, fitness)", "def initial_solution(self):\n cur_node = random.choice(self.nodes) # start from a random node\n solution = [cur_node]\n\n free_nodes = set(self.nodes)\n free_nodes.remove(cur_node)\n while free_nodes:\n next_node = min(free_nodes, key=lambda x: self.dist(cur_node, x)) # nearest neighbour\n free_nodes.remove(next_node)\n solution.append(next_node)\n cur_node = next_node\n\n cur_fit = self.fitness(solution)\n if cur_fit < self.best_fitness: # If best found so far, update best fitness\n self.best_fitness = cur_fit\n self.best_solution = solution\n self.fitness_list.append(cur_fit)\n return solution, cur_fit", "def getAction(self, gameState):\n bestVal = -INF\n bestAction = None\n searchDepth = self.depth * gameState.getNumAgents()\n for action in gameState.getLegalActions(0):\n state = gameState.generateSuccessor(0, action)\n newVal = self.expectimax(state, 1, searchDepth - 1)\n if newVal > bestVal:\n bestVal = newVal\n bestAction = action\n return bestAction", "def getlocalbestcoordinate(self):\n return self.localbest.coordinate", "def get_altruist_fitness(self):\n return self.altruist_fitness", "def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n 
popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)", "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def get_fitness(self) -> float:\n return self.fitness", "def personal_best(scores: list) -> int:\n return max(scores)", "def default_fitness(maximise):\n if maximise:\n return -100000.0\n else:\n return 100000.0", "def calculate_best_way(self) -> int:\n node = self._find_lowest_cost_node()\n while node:\n cost = self.costs[node]\n neighbors = self.graph[node]\n for neighbor in neighbors.keys():\n node_cost = cost + neighbors[neighbor]\n if self.costs[neighbor] > node_cost:\n self.costs[neighbor] = node_cost\n self.parents[neighbor] = node\n self.closed_nodes.append(node)\n node = self._find_lowest_cost_node()\n\n return self.costs[\"fin\"]", "def get_n_best(self):\n pass", "def _calculate_fitness(self):\n pass", "def get_best_lower_bound(self):\n if not self.tours:\n raise Exception('No lower bound has been computed yet')\n best = max(self.lower_bounds,key=self.lower_bounds.get)\n print('The best lower bound is given by {} with score {}'.format(best,self.lower_bounds[best]))\n return self.lower_bounds[best]", "def get_overall_fitness(self):\n total_fitness = 0\n for chromosome_list in self.chromo_list:\n if chromosome_list:\n for chromosomes in chromosome_list:\n total_fitness += chromosomes.fitness\n\n return float(total_fitness/(self.number_chromosomes*\\\n float(self.best_fitness)))", "def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1", "def search(self, time_limit, initial_population, max_unchanged_it):\n population = []\n for e in initial_population:\n w = self.maze.walk(e)[1]\n population.append(w)\n end_time = time.time() + time_limit\n best = self.get_best(population)\n it = 0\n\n while time.time() < end_time:\n parents = self.select_parents(population)\n children = self.reproduce(parents)\n population = children\n current_best = best\n best = self.get_global_best(best, self.get_best(population))\n if current_best == best:\n it += 1\n if it >= max_unchanged_it:\n break\n else:\n it = 0\n\n return best", "def anneal(self):\n # Initialize 
with the greedy solution.\n self.cur_solution, self.cur_fitness = self.initial_solution()\n\n print(\"Starting annealing.\")\n while self.T >= self.stopping_temperature and self.iteration < self.stopping_iter:\n candidate = list(self.cur_solution)\n l = random.randint(2, self.N - 1)\n i = random.randint(0, self.N - l)\n candidate[i : (i + l)] = reversed(candidate[i : (i + l)])\n self.accept(candidate)\n self.T *= self.alpha\n self.iteration += 1\n\n self.fitness_list.append(self.cur_fitness)\n\n print(\"Best fitness obtained: \", self.best_fitness)\n improvement = 100 * (self.fitness_list[0] - self.best_fitness) / (self.fitness_list[0])\n print(f\"Improvement over greedy heuristic: {improvement : .5f}%\")\n return self.best_fitness", "def get_best_moves(self):\n return self.best_moves", "def _get_individual_at_extreme(self,\n generation: int,\n extreme_type: ExtremeType) -> 'Individual':\n top_error_individual = self.get_individual(generation, 0)\n for i in range(len(self.generations[generation])):\n individual = self.get_individual(generation, i)\n if (extreme_type == ExtremeType.LOW and\n individual.fitness < top_error_individual.fitness):\n top_error_individual = individual\n elif (extreme_type == ExtremeType.HIGH and\n individual.fitness > top_error_individual.fitness):\n top_error_individual = individual\n return top_error_individual", "def minimax_decision(gameState):\n value = -sys.maxsize\n best_value = -sys.maxsize\n best_move = None\n legal_moves = gameState.get_legal_moves()\n for move in legal_moves:\n game = gameState.forecast_move(move)\n value = max(value, min_value(game))\n if value > best_value:\n best_value = value\n best_move = move\n return best_move", "def find_solution(self):\r\n for solution in self.solutions:\r\n if self.fitting_function.is_legal_solution(solution):\r\n return solution\r\n return None", "def worst_feas_seq(self):\n is_better = self.solver.sense.is_better\n worst = self.solver.sense.best_value\n for sol in self:\n if sol.is_feasible and is_better(worst, sol.value):\n worst = sol.value\n yield sol", "def selectBestCoordinates(self):\r\n coordinatesQueue = []\r\n # It's highly likely that there are going to be a lot of coordinates with the same \"most\" weight. Rather\r\n # than always choosing the leftmost coordinates, make a random choice by adding a random tie breaker to the \r\n # priority. 
\r\n randomTieBreaker = [i for i in range(self.boardDimensions ** 2 )]\r\n random.shuffle(randomTieBreaker)\r\n for i in range(self.boardDimensions):\r\n for j in range(self.boardDimensions):\r\n if self.enemyBoard[i][j] > BoardState.OPEN:\r\n heapq.heappush(coordinatesQueue, (-self.enemyBoard[i][j], randomTieBreaker.pop(), Coordinates(i, j)))\r\n bestCoordinates = heapq.heappop(coordinatesQueue)[-1]\r\n self.enemyBoard[bestCoordinates.x][bestCoordinates.y] = BoardState.OPEN\r\n while len(coordinatesQueue) > 0:\r\n coordinates = heapq.heappop(coordinatesQueue)[-1]\r\n # Reset the weights on all coordinates under consideration so they'll be ready for another round of \r\n # weighting.\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.OPEN\r\n return bestCoordinates", "def minimum_value(self):\n return self._fitness[self._minidx]", "def search(self, is_max, possible_moves, state, depth, alpha, beta):\n temp_state = state.deepcopy()\n best_move = None\n best_move_val = float('-inf') if is_max else float('inf')\n \n for move in possible_moves:\n for to in move['to']:\n \n if time() > self.thinking_time:\n return best_move, best_move_val\n \n temp_state.board.move_pawn(move['from'], to)\n temp_state.next_turn()\n _, val = self.minimax(temp_state, not(is_max), depth+1, alpha, beta)\n \n temp_state.board.move_pawn(to, move['from'])\n temp_state.undo_turn()\n \n if is_max and val > best_move_val:\n alpha = max(val, alpha)\n best_move_val = val\n best_move = (move['from'], to)\n \n if not(is_max) and val < best_move_val:\n beta = min(val, beta)\n best_move_val = val\n best_move = (move['from'], to)\n \n if beta <= alpha: #pruning\n return best_move, best_move_val\n \n return best_move, best_move_val", "def _get_elite_individuals(self, elites):\n # 适应度在这里被attrgetter调用,会计算适应度并排序\n return sorted(self._population, key=attrgetter(\"fitness\"))[-elites:]", "def get_optimal_move(self):\n # create the root state\n root = State(self.current_board, True, self.__machine_token, self.__human_token)\n # alpha-beta-pruning algorithm\n best_move = max_value_a_b(root, depth(root), -1000, 1000)\n # obtain the direct children.\n direct_children = get_direct_children(root, all_states_generated)\n # obtain the coordinates of the movement.\n for direct_child in direct_children:\n if direct_child.value == best_move:\n return get_coordinates(root, direct_child)", "def choose_mi_best(self, R):\n population = np.empty([self.P.shape[0] + R.shape[0], 2*self.d + 1])\n i = 0\n for individual in np.vstack([self.P, R]):\n population[i, 0] = -self.J(individual[0:self.d], self.nCEC)\n population[i, 1:] = individual\n i = i+1\n\n sorted_population = population[np.argsort(population[:, 0])]\n\n return sorted_population[-self.mi:, 1:]", "def get_fitness(self):\n score = self.score - (abs(self.distance_to_pipe()[0])) * 0.3\n score = self.score + 1.5 * 1e-3 * self.birth_time - (abs(self.distance_to_pipe()[0])) * 0.001\n score = self.birth_time * 3 - \\\n (abs(Bird.lower_pipes[self.get_next_pipe_index()]['y'] - PIPE_GAP_SIZE // 2 - self.pos_y)) * 0.5\n pipe_width = IMAGES['pipe'][0].get_width()\n\n constants.debug_circle = Bird.lower_pipes[self.get_next_pipe_index()]['x'] + pipe_width // 1.2, \\\n Bird.lower_pipes[\n self.get_next_pipe_index()]['y'] - PIPE_GAP_SIZE // 10\n constants.debug_circle = tuple(map(int, constants.debug_circle))\n # score *= 1e-1\n # score = self.score\n # if self.score > 1:\n # print(score)\n\n score = self.birth_time * 3 - abs(constants.debug_circle[1] - self.pos_y) + 5 * self.score\n return 
score", "def find_bestParameter(self,currentEnergy):\n if currentEnergy==5.89:\n currentEnergy=6.4\n print(\"WARNING !!!!!!!! E=5.89 KeV ==>> uso best value trovato a 6.4 KeV !!!!!\")\n \n \n index_summary=1e6\n try:\n index_summary=np.where( np.logical_and ( self.energy<(float(currentEnergy)+0.05), self.energy >(float(currentEnergy)-0.05) ) )[0][0]\n print (\"readSummaryData: energia trovata! index = \",index_summary)\n except:\n print (\"readSummaryData: energia *NON* trovata nello scan ploarizzato\")\n\n \n bestPar=1e6 \n if ( index_summary<1000):\n bestPar=self.best_val[index_summary]\n \n return bestPar", "def find_bestParameter(self,currentEnergy):\n if currentEnergy==5.89:\n currentEnergy=6.4\n print(\"WARNING !!!!!!!! E=5.89 KeV ==>> uso best value trovato a 6.4 KeV !!!!!\")\n \n \n index_summary=1e6\n try:\n index_summary=np.where( np.logical_and ( self.energy<(float(currentEnergy)+0.05), self.energy >(float(currentEnergy)-0.05) ) )[0][0]\n print (\"readSummaryData: energia trovata! index = \",index_summary)\n except:\n print (\"readSummaryData: energia *NON* trovata nello scan ploarizzato\")\n\n \n bestPar=1e6 \n if ( index_summary<1000):\n bestPar=self.best_val[index_summary]\n \n return bestPar", "def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)", "def SeleksiSurvivor(self, fitness):\n elite1, elite2 = 0, 0\n for i in range(1, len(fitness)):\n if fitness[i] > fitness[elite1]:\n elite2 = elite1\n elite1 = i\n return elite1, elite2" ]
[ "0.80649817", "0.75292766", "0.71652573", "0.6961003", "0.6884281", "0.6873114", "0.67495084", "0.66600174", "0.6649065", "0.6631332", "0.6609789", "0.6591238", "0.65880316", "0.6585515", "0.65848273", "0.65382606", "0.65047693", "0.64753777", "0.6447623", "0.6391002", "0.63350487", "0.63271767", "0.63256395", "0.62754226", "0.62754226", "0.626942", "0.6264517", "0.6262883", "0.62416655", "0.62351215", "0.62260985", "0.62075824", "0.62046564", "0.6204433", "0.61888057", "0.61851436", "0.61734027", "0.61509186", "0.61405665", "0.61152375", "0.61026394", "0.60957325", "0.6095505", "0.6081875", "0.6077503", "0.6060548", "0.6056321", "0.60345626", "0.60345626", "0.6033758", "0.60322666", "0.6023855", "0.6000109", "0.59950536", "0.5984142", "0.59676176", "0.5966838", "0.5958448", "0.59528065", "0.5947744", "0.5932163", "0.5930763", "0.5929103", "0.59194446", "0.59169066", "0.5914507", "0.5893419", "0.58892214", "0.58883464", "0.5884956", "0.5861826", "0.58549875", "0.58549875", "0.5837441", "0.5836319", "0.5827586", "0.5816993", "0.58165", "0.58064485", "0.57992", "0.57987595", "0.5796223", "0.5793444", "0.5787929", "0.5787383", "0.5775346", "0.5773505", "0.5771509", "0.5769027", "0.5765374", "0.5755657", "0.575257", "0.57492167", "0.5744966", "0.57419", "0.57330656", "0.57252586", "0.57252586", "0.571849", "0.5716129" ]
0.76329255
1
ctor must be without arguments, it is created with reflection at run time
def __init__(self): self.command_orchestrator = None # type: CommandOrchestrator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self, **kwargs):\n raise NotImplementedError", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs):\n raise NotImplementedError()", "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def initialize(cls):", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self):\n raise NotImplementedError(\"This class cannot be instantiated!\")", "def __init__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __init__(self):\n raise NotImplementedError()", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, constructor_fn=None):", "def __init__(self, constructor_fn=None):", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(self,*args):\n pass", "def __init__(self, *args):\n pass", "def __init__(self):\n raise NoInitiation", "def __init__ (self):\n pass", "def __init__():", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self, **kwargs):\n _declarative_constructor(self, **kwargs)", "def _init(self):\n raise NotImplementedError", "def init(self) -> None:", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def 
__init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass" ]
[ "0.79701906", "0.7918839", "0.78944093", "0.78944093", "0.7850591", "0.7834187", "0.7754874", "0.7754874", "0.7754874", "0.77097005", "0.7696675", "0.7696675", "0.7696675", "0.7696675", "0.7654578", "0.7654578", "0.7654578", "0.7654578", "0.76379013", "0.76379013", "0.76379013", "0.76312256", "0.7592629", "0.7583821", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7576652", "0.7557278", "0.7557278", "0.7548179", "0.7548179", "0.7548179", "0.7548179", "0.7548179", "0.7548179", "0.7548179", "0.7548179", "0.7548179", "0.7548179", "0.7524444", "0.75156814", "0.75146216", "0.74526495", "0.7428775", "0.7397548", "0.73657143", "0.7351235", "0.7303855", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905", "0.7285905" ]
0.0
-1
Powers off the remote vm
def PowerOn(self, context, ports): return self.command_orchestrator.power_on(context, ports)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def power_off_vm_hard(v_sphere, vmw_vm):\n\n task = vmw_vm.PowerOff()\n task_functions.wait_for_task_complete(v_sphere, task, timeout_seconds=60)", "def off(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.PowerOffVM_Task())", "def halt(vm='',env=''):\n local( main_dir + '/vagrant/bin/vm.sh halt ' + str(vm) + ' ' + str(env) )", "def power_off(vmname):\n\n _conn.lookupByName(vmname).destroy() # cirros don't know shutdown command\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n return 'VM %s powered off' % vmname", "def power_off(self):\n for vm in self.vms:\n try:\n vm.name = \"%s_%s\" % (self.resource_pool, vm.name)\n vm.power_off(manager=self.manager)\n except:\n self.logger.error(\"Error with VM '%s'\" % vm.name)\n raise", "def remote_kill():", "def down(**kwargs):\n call([\"vagrant\", \"suspend\"], env=env)\n return", "def power_off_vm_soft(v_sphere, vmw_vm):\n timeout = 240\n task = vmw_vm.ShutdownGuest()\n\n if task: # ShutdownGuest doesn't always return a task, which is rather annoying.\n task_functions.wait_for_task_complete(v_sphere, task, timeout_seconds=timeout)\n\n else:\n # Manually check the VM shut down as requested because VMWare didn't give us a Task.\n time.sleep(20)\n\n wait_count = 0\n refreshes = 0\n while vmw_vm.summary.runtime.powerState == VM_POWER_STATE_ON:\n time.sleep(5)\n v_sphere.logger.info(\"VM to shut down, current status {}\".format(vmw_vm.guest.toolsRunningStatus))\n wait_count += 1\n\n # If we waited 2 minutes, try to get a new Managed Object from the Service Instance\n if wait_count > 24:\n # If we've already done this several times times, give up\n if refreshes >= 5:\n msg = \" VM is still powered on and won't shut off! Help!\"\n v_sphere.logger.error(msg)\n raise VMWareTimeout(msg)\n\n v_sphere.logger.info(\n \"Waited 2 minutes for tools. Refreshing VM Object from Service Instance. Might be bugged.\")\n vmw_vm = v_sphere.get_vmw_obj_by_uuid(vmw_vm.config.uuid)\n refreshes += 1", "def suspend(vm='', env=''):\n local( main_dir + '/vagrant/bin/vm.sh suspend ' + str(vm) + ' ' + str(env) )", "def shutdownVM(self):\n\t\tlog.info(\"\\tStopping the container...\")\n#\t\texecuteCommandSSH(\"lxc-stop\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"lxc-destroy\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"shutdown -h now\")", "def _Uninstall(vm):\n vm.RemoteCommand('cd {0} && sudo make uninstall'.format(MPI_DIR))", "def restart_vm_hard(v_sphere, vmw_vm):\n task = vmw_vm.ResetVM_Task()\n task_functions.wait_for_task_complete(v_sphere, task, timeout_seconds=30)", "def power_off(self, sync=True):\n self.vmomi_object.PowerOff()\n if sync: self._wait_for_power_off()", "def undeploy_vm(context, vm):\n monitor = context.getMonitoringService().getVirtualMachineMonitor()\n print \"Uneploying virtual machine %s... 
This may take some time.\" \\\n % vm.getInternalName()\n vm.undeploy()\n monitor.awaitCompletionUndeploy(vm)\n return refresh_vm(context, vm)", "def do_power_down(self, *arg):\n print_info(\"Shutting down POCS instance, please wait\")\n self.pocs.power_down()\n\n while self.pocs.observatory.mount.is_parked is False:\n print_info('.')\n time.sleep(5)\n\n self.pocs = None", "def stop(self):\n self.log.info(\"Stopping servers\")\n if self.runner.job.yaml_params.is_nvme():\n self.kill()\n self.storage_reset()\n # Make sure the mount directory belongs to non-root user\n self.log.info(\"Changing ownership of mount to non-root user\")\n cmd = \"sudo chown -R {0}:{0} /mnt/daos*\".format(getpass.getuser())\n pcmd(self._hosts, cmd, False)\n else:\n try:\n self.runner.stop()\n except CommandFailure as error:\n raise ServerFailed(\"Failed to stop servers:{}\".format(error))", "def down(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if not force and vmrun.installedTools():\n stopped = vmrun.stop()\n else:\n stopped = vmrun.stop(mode='hard')\n if stopped is None:\n puts_err(colored.red(\"Not stopped\", vmrun))\n else:\n puts_err(colored.green(\"Stopped\", vmrun))", "def shutdownVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/shutdown\" % (node,vmid), post_data)\n return data", "def powerOff(self):\n self._sendCommand(self.SONY_CMD_ExtBackupCommunicator_ForcePowerOff, bufferSize=0)", "def destroy(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh destroy ' + str(vm) + ' ' + str(env) )", "def shutdown_guest(self, vm):\n try:\n self.client.shutdown_guest(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def set_remote(self):\n self.write_to_serial(':SYST:REM')", "def reset(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.ResetVM_Task())", "def stop_proxy(self):\n self.log_output('Stopping MITM proxy server')\n command = ''\n if self.remote is True:\n command = \"echo '{0}' | sudo killall {1}\".format(\n self.ssh_password, os.path.basename(self.python3_path))\n else:\n mitm_pids = self.pids()\n if mitm_pids:\n command = \"kill {0}\".format(' '.join(mitm_pids.split(\"\\n\")))\n self.run_command(command)", "def test_14_migrate_vm_live_restore_on_remote(self):\n global vm2\n self.virtual_machine_live_migration_2.restore(self.apiclient)\n self.virtual_machine_live_migration_2.getState(\n self.apiclient,\n \"Running\"\n )\n # Migrate the VM and its volumes\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n\n self.helper.destroy_vm(self.apiclient, self.virtual_machine_live_migration_2.id)", "def poweroff(*args, **kwargs):\n try:\n master.main_exit()\n except Exception:\n log.error(\"main_exit error\")\n with open('/tmp/reboot', 'w+') as f:\n log.info(\"Poweroff ...\")", "def umount_root_vm(self):\n print \"demontage de la partition root de %s\" % name_vm_dest\n self.exec_cmd(\"umount %s\" % 
self.rep_vhosts_vm)", "def off(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.power_off())\n console.print(f\"[{ip}] Light {id} Off:\\n{json.dumps(resp, indent=2)}\")", "def down(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n images=\"all\",\n volumes=True,\n orphans=False,\n):\n command = f\"down --rmi {images}\"\n\n if volumes:\n command = f\"{command} --volumes\"\n\n if orphans:\n command = f\"{command} --remove-orphans\"\n\n run_command(context, user, remote, instance, stack, command)", "async def async_turn_off(self, **kwargs: Any) -> None:\n await self.coordinator.roku.remote(\"poweroff\")\n await self.coordinator.async_request_refresh()", "def power_off(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.stop()\n self.instance_waiter.wait(instance, self.instance_waiter.STOPPED)\n return True", "def resume(vm='', env=''):\n local( main_dir + '/vagrant/bin/vm.sh resume ' + str(vm) + ' ' + str(env) )", "def turn_off(self, **kwargs):\n self.vacuum.stop()\n self.vacuum.home()", "def suspend_virtual_machine(self, vm):\n try:\n self.client.suspend_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def stopVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/stop\" % (node,vmid), post_data)\n return data", "def reboot_guest(self, vm):\n try:\n self.client.reboot_guest(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def unset():\n rino.remote.unset()", "def teardown(self):\n self.logger.info('Tearing down file server vm')\n self.local_env.execute('uninstall', task_retries=40,\n task_retry_interval=30)", "def test_11_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global data_disk_2\n data_disk_2 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n self.volume_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)", "def do_unpark(self, *arg):\n try:\n self.pocs.observatory.mount.unpark()\n self.pocs.say(\"Unparking mount\")\n except Exception as e:\n print_warning('Problem unparking: {}'.format(e))", "def halt_cmd(ctx):\n pass", "def stop_vm(client, resource_group_name, vm_name, stop_mode):\n return client.stop(resource_group_name, vm_name, stop_mode)", "def stop(self):\n # remove all tap interfaces\n for i in 
range(self._vport_id):\n tapx = 'tap' + str(i)\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tapx, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger, 'Deleting ' + tapx, False)\n self._vport_id = 0\n\n # remove datapath before vswitch shutdown\n dpctl = DPCtl()\n dpctl.del_dp()\n\n super(OvsVanilla, self).stop()\n\n # give vswitch time to terminate before modules are removed\n time.sleep(5)\n self._module_manager.remove_modules()", "def power_off(self, instance, node=None):\n if not node:\n node = _get_baremetal_node_by_instance_uuid(instance['uuid'])\n pm = get_power_manager(node=node, instance=instance)\n pm.deactivate_node()\n if pm.state != baremetal_states.DELETED:\n raise exception.InstancePowerOffFailure(_(\n \"Baremetal power manager failed to stop node \"\n \"for instance %r\") % instance['uuid'])\n pm.stop_console()", "def stop_firewall():\r\n connections.execute_shell_command('sc stop TmPfw')", "def test_stop_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('stop_machine', {}).get(\n 'machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'stopped', 'actions': {'resize': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def reset_virtual_machine(self, vm):\n try:\n self.client.reset_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def test_remove_vm(self, instance_name):\n self.instances.pop(instance_name)", "def shutdown(cls, client_object):\n # TODO: This function has not been tested yet, as even with\n # VMware tools intalled on the guest, it throws an error saying\n # tools not found. VMware tools most probably needs to be\n # upgraded but upgrade was failing, so this function remains to\n # be tested.\n vm_mor = client_object.get_api()\n cls._do_power_action(vm_mor.ShutdownGuest())", "async def power_off(self):\n ...", "def unpause(self, oid):\n data = {\"unpause\": None}\n path = '/servers/%s/action' % oid\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Unpause openstack server: %s' % truncate(res))\n return res[0]", "def power_on_vm_and_wait_for_os(v_sphere, vmw_vm):\n\n v_sphere.logger.info(f\"Trying to power on {vmw_vm}\")\n\n task = vmw_vm.PowerOn()\n task_functions.wait_for_task_complete(v_sphere, task, timeout_seconds=60)\n\n wait_for_vmware_tools_response(v_sphere, vmw_vm)", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def interrupt(v):\n print(\" \" + bcolors.OKBLUE + \"[!] Detected CTRL+C ! 
restoring setting, please wait...\" + bcolors.ENDC)\n bash = \"ip link delete dummy type dummy\"\n os.system(bash)\n if v.spoof:\n restoreSpoof(v)\n if v.ntpStatus:\n ntpToggle(v)\n print(\" \" + bcolors.OKGREEN + \"Done\")\n print(\" --------------------------------------------------------\" + bcolors.ENDC)\n exit()", "def power_off(self):\n LOG.info('Powering off system')\n self._run_shutdown_command('poweroff')", "def power_on(self):\n for vm in self.vms:\n try:\n vm.name = \"%s_%s\" % (self.resource_pool, vm.name)\n vm.power_on(manager=self.manager)\n except:\n self.logger.error(\"Error with VM '%s'\" % vm.name)\n raise", "def stop_virtual_machine(self, vm):\n try:\n self.client.stop_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def unpause(self, instance):\n self.power_on(\n context=None,\n instance=instance,\n network_info=None,\n block_device_info=None)", "def reboot_vm(self, account, vm_id):\n node = Node()\n node.id = vm_id\n self.driver(account).reboot_node(node)", "def resetVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/reset\" % (node,vmid), post_data)\n return data", "def shutdown(self, sync=True):\n self.vmomi_object.ShutdownGuest()\n if sync: self._wait_for_power_off()", "def turn_off_motors():\n MOTOR_HAT.release_motors()", "def AptUninstall(vm):\n _Uninstall(vm)", "def power_off_soft(self, timeout_secs=None, async=False):\n self.cluster().power_off_nodes_soft([self],\n timeout_secs=timeout_secs,\n async=async)", "def tear_down(duthost1, duthost2, ptfhost, localhost, collect):\n yield\n\n mclag_interfaces = collect[duthost1.hostname]['mclag_interfaces']\n cmds_to_del_lags = ['ip link del {}'.format(lag) for lag in mclag_interfaces]\n ptfhost.shell_cmds(cmds=cmds_to_del_lags)\n\n ptfhost.remove_ip_addresses()\n duthost1.shell(\"mv {} {}\".format(CONFIG_DB_BACKUP, CONFIG_DB_TEMP))\n reboot(duthost1, localhost)\n\n duthost2.shell(\"mv {} {}\".format(CONFIG_DB_BACKUP, CONFIG_DB_TEMP))\n reboot(duthost2, localhost)", "def reset(self, sync=True):\n self.vmomi_object.ResetVM_Task()\n if sync: self._wait_for_power_on()", "def halt_cmd(cmd, cnt, args):\n log(\"halt\") # need an interrupt handler to do this\n cpu.halt()", "def on(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.PowerOnVM_Task())", "def test_off_reboot_on(self):\n self.openstack('baremetal node power off {0}'\n .format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power off', show_prop['power_state'])\n\n self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])", "def vm_power(self, vm_name, state):\n states = [\"on\", \"off\"]\n if state not in states:\n raise OpenStackConnectorException(f\"Incorrect action was provided for the vm {vm_name} power state change\")\n \n vm_id = self._get_vm_id_by_name(vm_name)\n\n if not vm_id:\n return False\n \n try:\n if state == \"on\":\n self.connection.compute.start_server(vm_id)\n else:\n self.connection.compute.stop_server(vm_id)\n except ConflictException: # This exception block handles the situation when the VM is already in the required power state\n pass\n \n return True", "def shutdown(pi):\n command = \"ssh {0} 'sudo shutdown -h now'\".format(pi)\n subprocess.Popen(command, shell=True)\n print(\"Shutting down {0}\".format(pi))", "def remote(self):\n 
logging.info(__name__ + ' : Set control to remote & locked')\n self.set_remote_status(1)", "def test_ssh_after_deleting_floating(self, prepare_openstack):\n ip = self.floating_ip[\"floating_ip_address\"]\n pkeys = self.convert_private_key_for_vm(\n [self.instance_keypair.private_key])\n\n float_ssh = self.env.get_ssh_to_vm(ip, private_keys=pkeys, timeout=5,\n **self.cirros_creds)\n assert float_ssh.check_connection() is True\n\n with float_ssh as vm_remote:\n vm_remote.check_call(\"ping -c1 8.8.8.8\")\n\n self.os_conn.disassociate_floating_ip(\n self.server, self.floating_ip, use_neutron=True)\n\n with pytest.raises(Exception) as e:\n wait(lambda: vm_remote.execute('uname') is None,\n timeout_seconds=60,\n waiting_for='ssh connection be stopped')\n assert e.typename != 'TimeoutExpired'\n\n # check that vm became inaccessible with ssh\n assert float_ssh.check_connection() is False", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def guest_reboot(self, userid):\n LOG.info(\"Begin to reboot vm %s\", userid)\n self._smtclient.guest_reboot(userid)\n LOG.info(\"Complete reboot vm %s\", userid)", "def main():\n\n parser = cli.Parser()\n parser.add_required_arguments(cli.Argument.VM_NAME)\n args = parser.get_args()\n si = service_instance.connect(args)\n\n content = si.RetrieveContent()\n\n vm = get_vm(content, args.vm_name)\n vm_moid = vm._moId\n\n vcenter_data = content.setting\n vcenter_settings = vcenter_data.setting\n console_port = '7331'\n\n for item in vcenter_settings:\n key = getattr(item, 'key')\n if key == 'VirtualCenter.FQDN':\n vcenter_fqdn = getattr(item, 'value')\n\n session_manager = content.sessionManager\n session = session_manager.AcquireCloneTicket()\n\n vc_cert = ssl.get_server_certificate((args.host, int(args.port)))\n vc_pem = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,\n vc_cert)\n vc_fingerprint = vc_pem.digest('sha1')\n\n print(\"Open the following URL in your browser to access the \"\n \"Remote Console.\\n\"\n \"You have 60 seconds to open the URL, or the session\"\n \"will be terminated.\\n\")\n print(\"http://\" + args.host + \":\" + console_port + \"/console/?vmId=\"\n + str(vm_moid) + \"&vmName=\" + args.vm_name + \"&host=\" + vcenter_fqdn\n + \"&sessionTicket=\" + session + \"&thumbprint=\" + str(vc_fingerprint))\n print(\"Waiting for 60 seconds, then exit\")\n time.sleep(60)", "def shutdown(miner: Miner, login):\n connection = Ssh(miner.ipaddress, login.username, login.password, port=getportfromminer(miner))\n connection.open_shell()\n connection.send_shell('/sbin/poweroff')\n time.sleep(5)\n print_connection_data(connection)\n connection.close_connection()", "def _set_vm_instance_for_vmi(self, vmi_obj, instance_name):\n vm_refs = vmi_obj.get_virtual_machine_refs()\n delete_vm_list = []\n for vm_ref in vm_refs or []:\n if vm_ref['to'] != [instance_name]:\n delete_vm_list.append(vm_ref)\n\n if instance_name or delete_vm_list:\n vm_handler = VMachineHandler(self._vnc_lib)\n\n if instance_name:\n try:\n instance_obj = vm_handler.ensure_vm_instance(instance_name)\n vmi_obj.set_virtual_machine(instance_obj)\n except vnc_exc.RefsExistError as e:\n self._raise_contrail_exception(\n 'BadRequest', resource='port', msg=str(e))\n except vnc_exc.NoIdError:\n self._raise_contrail_exception(\n 'DeviceIDNotOwnedByTenant', resource='port',\n device_id=instance_name)\n else:\n vmi_obj.set_virtual_machine_list([])\n\n if delete_vm_list:\n 
self._vnc_lib.virtual_machine_interface_update(vmi_obj)\n for vm_ref in delete_vm_list:\n try:\n vm_handler._resource_delete(id=vm_ref['uuid'])\n except vnc_exc.RefsExistError:\n pass", "def unpunch(self):\n self.punching = 0", "def reboot(vmname):\n\n dom = _conn.lookupByName(vmname)\n # cirros also don't know reboot command\n dom.destroy()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n dom.create()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_ON)\n return 'VM %s rebooted' % vmname", "def stop_server(self):\n p = pexpect.spawn('/usr/bin/pkill', ['-f', self.wallet_bin_path])\n p.wait()\n if p.status is not 0:\n raise ValueError('Error pkilling ETH:\\n{}'.format(p.read()))", "def down():\n\n # Stop the program if no init has occurred.\n Vagrant.stop_if_not_init()\n\n # Run vagrant halt from the vagrant folder.\n command = [\"vagrant\", \"halt\"]\n cwd = Settings.devbox_folder\n try:\n result = subprocess.check_call(command, cwd=cwd)\n except subprocess.CalledProcessError:\n Utilities.log(\"Could not run 'vagrant halt'.\")\n exit(1)", "def reboot(pi):\n command = \"ssh {0} 'sudo reboot'\".format(pi)\n subprocess.Popen(command, shell=True)\n print(\"Rebooting {0}\".format(pi))", "def reload(vm='', env=''):\n local( main_dir + '/vagrant/bin/vm.sh reload ' + str(vm) + ' ' + str(env) )", "def YumUninstall(vm):\n _Uninstall(vm)", "async def unlight(self) -> None:\n self.lit = False\n await self.run_command(\"miner fault_light off\")\n print(\"unlight\" + self.ip)", "def vm_stop(self, params: dict) -> Tuple[\"Status\", dict]:", "def remove_vdisk_from_svc(svc, vdisk):\r\n svc_ssh = openSSH(svc, getpass.getuser())\r\n ## First we need to unmap from the host\r\n print \"Removing the mapping between %s on %s...\" % (vdisk[\"name\"],\r\n vdisk[\"hostlist\"][0])\r\n command = \"rmvdiskhostmap -host %s %s\" % (vdisk[\"hostlist\"][0],\r\n vdisk[\"name\"])\r\n print command\r\n output = svc_ssh.exec_command(command)[1].readlines()\r\n for line in output:\r\n print line.strip()\r\n ## Remove the volume\r\n print \"Removing the vdisk %s...\" % vdisk[\"name\"]\r\n command = \"rmvdisk %s\" % vdisk[\"name\"]\r\n print command\r\n output = svc_ssh.exec_command(command)[1].readlines()\r\n for line in output:\r\n print line.strip()\r\n svc_ssh.close()\r\n ## End remove_vdisk_from_svc\r", "def suicide(self):\n es.sexec(self.userid, \"kill\")", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "async def poweroff(ctx):\n await ctx.send(\"Bye\")\n await bot.logout()", "def up(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh up ' + str(vm) + ' ' + str(env) )", "def destroy_vm(self, name_of_vm):\n self.power_off(name_of_vm)\n # import pdb;pdb.name_of_vm()\n vm = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n task = vm.Destroy_Task()\n WaitForTask(task)\n states = [vim.TaskInfo.State.success, vim.TaskInfo.State.error]\n while task.info.state not in states:\n time.sleep(1)\n status = task.info.state\n if status == \"success\":\n return status\n if status == \"error\":\n log.error(task.info.error.msg)\n log.info(task.info.error)\n return status", "def test_13_migrate_vm_live_resize_volume_on_remote(self):\n global vm2\n global data_disk_2\n\n vol = self.helper.resize_volume(apiclient = self.apiclient, volume = data_disk_1, shrinkOk = False, maxiops = 15000)\n\n # Migrate all volumes and VMs\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in 
vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def reset(self):\n\t\treturn Job(SDK.PrlVm_Reset(self.handle)[0])", "def reboot(host=None):\r\n if host:\r\n host.reboot()", "def pause(self, instance):\n self.power_off(instance)", "def test_reboot_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('reboot_machine', {}).get(\n 'machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/reboot'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'reboot_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'stop': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def reboot(self, node):", "def clear_target(remote):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ClearTarget();\n remote.runCommand(cmd)", "def vm_deprovision(self, params: dict) -> Tuple[\"Status\", dict]:" ]
[ "0.691207", "0.68590164", "0.68160033", "0.66162086", "0.65903586", "0.65885574", "0.6554674", "0.64404196", "0.6437955", "0.63067865", "0.6167935", "0.61371875", "0.61085886", "0.60364866", "0.5960529", "0.59569734", "0.5938743", "0.59068495", "0.5905675", "0.58882743", "0.5869915", "0.585963", "0.5845156", "0.5836634", "0.5833376", "0.5828058", "0.5827144", "0.5814575", "0.5810545", "0.57747066", "0.5763693", "0.57612437", "0.5741411", "0.5741075", "0.57357866", "0.57107675", "0.5694223", "0.56851923", "0.5681356", "0.56699157", "0.5660516", "0.56561005", "0.56518507", "0.56516886", "0.56414586", "0.56360155", "0.56336826", "0.5629175", "0.5620808", "0.55968827", "0.5589212", "0.55759084", "0.55756724", "0.55701745", "0.5567182", "0.55622613", "0.5559985", "0.5529601", "0.5529164", "0.5512206", "0.54946923", "0.5486269", "0.54861176", "0.547087", "0.54692817", "0.5462818", "0.5460213", "0.5453193", "0.5451258", "0.5446075", "0.5445309", "0.5438869", "0.54336804", "0.5433328", "0.5414242", "0.5411608", "0.5411365", "0.54079473", "0.5407408", "0.5403585", "0.5399983", "0.5397795", "0.5395411", "0.537802", "0.53720385", "0.5369414", "0.53564733", "0.5332775", "0.5326733", "0.53257096", "0.5323476", "0.53151774", "0.53118235", "0.5311721", "0.53098327", "0.53081256", "0.5304105", "0.52976745", "0.5293157", "0.52836597", "0.52834296" ]
0.0
-1
> Part One Given a CSV file, read the data into a nested list
def clean_file(csv_file): my_list = [] with open(csv_file, newline='') as csvfile: file_reader = csv.reader(csvfile, delimiter=',', quotechar=" ") for row in file_reader: my_list.append(row) """ > Part Two Input: Nested list csv_table and a string file_name Action: Write fields in csv_table into a comma-separated CSV file with the name file_name Mutates output: Yes """ with open(csv_file, 'w', newline='') as csvfile: my_csv_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE) for row in my_list: row2 = [] for item in row: a = item.lstrip('"') b = a.rstrip('"') row2.append(b) my_csv_writer.writerow(row2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def read_file_to_list(input_file):\n with open(input_file) as csvfile:\n csv_rows = csv.reader(csvfile)\n\n data = []\n for row in csv_rows:\n data.append(row)\n\n return data", "def parse_csv_file(file_path):\n\n complete_data_list = []\n\n try:\n import_file = open(file_path, \"rb\")\n\n except IOError:\n print 'An error occured trying to read the file.'\n\n else:\n reader_file = csv.DictReader(import_file)\n complete_data_list = get_file_data(reader_file)\n import_file.close()\n\n return complete_data_list", "def read_csvfile(inputfn):\n with open(inputfn, 'rU') as fd:\n datastruct = gen_csv_data(fd, returntype='list') # Make sure to store as list before closing file.\n return datastruct", "def read_csv():", "def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data", "def load_data_from_csv(csv_file):\n list=[]\n\n with open(csv_file) as csv_1:\n csv_out = csv.reader(csv_1) \n next(csv_out)\n for rows in csv_out: \n if len(rows) != 0:\n list.append([rows[0],int(rows[1]),int(rows[2])])\n \n return (list)", "def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data", "def read_entry_lists(floor_csv, par_df):\n # print(\"Info : my_lib/entry_list/read_entry_lists().\")\n par_id_list = par_df[\"ID\"].values.tolist()\n genre_code_list = par_df[\"GENRE_CODE\"].values.tolist()\n\n \"\"\"\n floor.csv\n ---------\n ID,X,Y,BLOCK\n 27,0,0,C\n 26,1,0,C\n 25,2,0,C\n \"\"\"\n tbl_df = pd.read_csv(floor_csv,\n sep=',', engine='python')\n tbl_id_list = tbl_df[\"ID\"].values.tolist()\n return tbl_id_list, par_id_list, genre_code_list", "def read_csv_to_list(csv_path):\n\n with open(csv_path, newline=\"\") as f:\n reader = csv.reader(f)\n data = list(reader)\n\n return data", "def loadCSV(input_file):", "def parse_csv_input(input_file): # {{{\n parsed_infile = []\n try:\n with open(input_file) as infile:\n for line in csv.reader(infile):\n parsed_infile.append(line)\n\n temp_object_storage = []\n\n for line_index, line in enumerate(parsed_infile[1:]):\n temp_object_storage.append({})\n for category_index, category in enumerate(parsed_infile[0]):\n if category_index == 0:\n category = category[3:]\n temp_object_storage[line_index][category] = line[category_index]\n\n return temp_object_storage\n except FileNotFoundError as excep:\n LOGGER.info(\"error parsing csv file: %s\", excep) # }}}", "def read_file():\r\n #with nos permite manejar el archivo dentro del bloque y despues cerrarlo\r\n with open('Entries.csv') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n return data", "def get_data(self, csv_file):\n pass", "def parse_csv2list_upload(file_name):\n with open(file_name) as f:\n records = 
csv.reader(f)\n csv_list = [[j.strip() for j in record] for record in records]\n return csv_list", "def listparse(csvfilename):\r\n output = []\r\n with open(csvfilename, 'r', newline = '') as csvfile:\r\n csvreader = csv.reader(csvfile, skipinitialspace = True)\r\n for row in csvreader:\r\n output.append(row)\r\n return output", "def load_list(filename):\n # Open the file\n with open(filename, 'r', newline='') as f:\n # Use the CSV library to load the file\n reader = csv.reader(f)\n # Return the full list to the caller of the function. The 'list' in this line converts the 'reader' object to a list type\n # using a process called 'casting'. https://www.w3schools.com/python/python_casting.asp\n return(list(reader))\n #endwith", "def get_data_from_csv(csv_file):\n # create customer list form csv file\n with open(csv_file, encoding='utf-8', errors='ignore') as people:\n customer_reader = csv.reader(people)\n customers = [row for row in customer_reader]\n\n return customers", "def load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n # skipping first row (header)\n next(csvreader)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)", "def csvReader(file):\r\n reader = csv.reader(file)\r\n for element in reader:\r\n arr.append(element)", "def read(self):\r\n\r\n self.data = []\r\n\r\n with open(self.filename + \".csv\", mode='r') as csv_file:\r\n reader = csv.DictReader(csv_file)\r\n for row in reader:\r\n self.data.append(row)", "def read_csv_file(in_file):\n out_list = []\n with open(in_file, 'r', newline='') as p_file:\n file_list = csv.DictReader(p_file, delimiter=',')\n for row in file_list:\n out_list.append(row)\n return out_list\n #thequeue.put(out_list)", "def csvread(file):\r\n thisfile = open(file)\r\n thisreader = csv.reader(thisfile)\r\n filelist = np.array(list(thisreader))\r\n return filelist", "def parse(raw_file):\n parsed_data = []\n with open(raw_file, 'r') as r:\n rows = csv.reader(r)\n fields = rows.next()\n counter = 0\n for r in rows:\n parsed_data.append(dict(zip(fields, r)))\n\n return parsed_data", "def read_csv(file_path, delimiter=\",\", quotechar='\"'):\n # Opening file\n with open(file_path, newline='') as csvfile:\n # Will be used to store content\n lsts = []\n\n # Loading and reading csv\n csv_data = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)\n\n # Adding data to container\n for row in csv_data:\n lsts.append(row)\n\n return lsts", "def open_csv(file):\n\n\ttmp = [] # initialise the list\n\twith open(file, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\ttmp.append(row) # add row to list\n\n\treturn tmp", "def read_data(filename):\n print(\"Reading data from\", filename)\n with open(filename) as f:\n reader = csv.reader(f)\n data = []\n for row in reader:\n data.append(row)\n return data", "def read_csv_file(filename: str) -> any:\r\n with open(filename) as file:\r\n reader = csv.reader(file)\r\n next(reader)\r\n data = [process_row_p(row) for row in reader]\r\n\r\n return data", "def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row", "def csv_reader(self, file_obj):\n reader = csv.reader(file_obj)\n for row in reader:\n row_1 = (' '.join(row))\n self.data.append(row_1.split(';'))\n return self.data", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as 
csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(\n row[\"date_readable\"], \"%d %b %Y\"\n ),\n \"trans_type\": row[\"type\"],\n \"amount\": int(row[\"euro\"]) + int(row[\"cents\"]) / 100,\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def csvFileReader(filename):\n try:\n with open(filename) as csv_file:\n data = csv.reader(csv_file)\n imported = []\n for row in data:\n imported.append(row)\n list = [l[1] for l in imported]\n csv_file.closed\n return imported[1:]\n except:\n return None", "def load_csv(csv_path):\n with open(csv_path, newline='') as csv_file:\n csv_reader = csv.reader(csv_file)\n csv_list = [row for row in csv_reader]\n return csv_list", "def csv_list(file_name):\n listoflist = []\n with open('../test_files/' + file_name, 'r') as infile:\n for x in infile.readlines():\n x = x.replace('\\n','')\n #splitting based on ',' that are encountered in csv files.\n #splitted vale will be a list, that inturn is stored into another main list\n #making it list of lists or 2D array.\n listoflist.append(x.split(','))\n return listoflist", "def read_csv(filename):\n # Implement this function\n file = open(filename)\n wrapper = csv.reader(file)\n result = []\n for rpos in wrapper: \n result = result + [rpos]\n file.close() \n return result", "def _load_csv_data(kingdom_csv_path: str):\n\n file_path = os.getcwd() + \"/\" + RESOURCES_DIR_PATH + \"/\" + kingdom_csv_path\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def csvread(infile):\n out_csv = []\n errors = []\n index = -1\n p = LineParser(csv=True)\n for line in infile:\n index += 1\n try:\n values = p.feed(line)\n except ListQuoteError as e:\n values = []\n e.line = line\n e.index = index\n errors.append(e)\n #\n out_csv.append(values)\n #\n if errors:\n e = CSVError(\"Parsing CSV failed. 
See 'errors' attribute.\")\n e.csv = out_csv\n e.errors = errors\n raise e\n return out_csv", "def parse_csv(csv_path):\n song_list = []\n\n try:\n with open(csv_path, encoding='utf-8') as playlist:\n print(\"Parsing \" + csv_path)\n reader = csv.reader(playlist, delimiter=',')\n next(reader) # skip csv header\n for row in reader:\n song_list.append(row[2] + \" - \" + row[1])\n # todo: parse CSV, then check to see which songs already exist in current dir\n # move non-existent results to new list and return that\n except IndexError as error:\n # consider validating playlists when parsing\n # from API on web server instead\n print(str(error))\n \n return song_list", "def read_file():\r\n fp = open_file() \r\n csv_fp = csv.reader(fp) #Csv reader because splitting cannont be done on commas \r\n L = [] \r\n for line in csv_fp:\r\n data_lst = line\r\n race = data_lst[15]\r\n gender = data_lst[16]\r\n victim_info = data_lst[27]\r\n T = (race, gender, victim_info)\r\n L.append(T) #add the information to our list that began as empty\r\n \r\n return (L[1:]) #this is so that victim info is off by one.\r", "def read(filename: str)-> List [CrimeStatistics]:\n #return [] #stub\n # Template from htDAP\n \n #loc contains all results read so far\n loc = [] #type List[CrimeStatistics]\n \n with open(filename) as csvfile:\n reader = csv.reader(csvfile)\n next(reader)\n \n \n for row in reader:\n university = row[0].replace(\"4\", \"\")\n campus = parse_campus(row[1])\n enrollment = parse_int(row[2].replace(\",\", \"\"))\n violent_crimes = parse_int(row[3])\n property_crimes = parse_int(row[8])\n arson = parse_int(row[12])\n \n if valid(enrollment):\n cs = CrimeStatistics(university,\n campus,\n enrollment,\n violent_crimes,\n property_crimes,\n arson)\n \n loc.append(cs)\n return loc", "def import_csv(filename):\n with open(filename, newline='') as csvfile:\n reader_obj = csv.reader(csvfile, delimiter=',', quotechar='\"')\n data = list(reader_obj)\n return data", "def csv_reader(filepath):\n with open(filepath) as f: \n for row in f: \n row = row.strip()\n r = list()\n part = '' \n is_double_quoted = False\n\n for c in row: \n if c == ',': \n if is_double_quoted is False:\n r.append(part)\n part = ''\n else: \n part += c\n elif c == '\\\"': \n is_double_quoted = not is_double_quoted\n else: \n part += c\n if part != '': \n r.append(part)\n\n yield r", "def csv_parser(s):\r\n data = []\r\n lines = s.splitlines()\r\n lines = lines[1: ]\r\n for line in lines:\r\n l = line.strip().split(\",\")\r\n l[0] = int(l[0])\r\n l[1] = float(l[1])\r\n data.append(l)\r\n return data\r\n\r\n #data.pop[i]\r\n #file2 = s.split()\r\n #lines = []\r\n #lines.append(file2)\r\n #lines.pop[0]\r\n #print(lines)\r\n #for line in lines:\r\n \r\n\r\n # Data is our output. It will be a list of lists.\r\n\r\n # Split csv into lines and store them in a list called 'lines'.\r\n \r\n # Remove the first element from lines, so that you have only the data lines left.\r\n \r\n # At this stage, we loop through the list called lines.\r\n # As you loop\r\n # i. split each line on the commas;\r\n # ii. convert the Subject variable to int.\r\n # iii. convert the Height variable to float.\r\n # iv. 
add to data a list consisting of this line's Subject, Height, and Occupation values \r", "def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data", "def readRecordFromFile():\n\twith open(gbl.sourceFile, newline='') as csvfile:\n\t\trowReader = csv.reader(csvfile, delimiter=gbl.csvDiscriminator, quotechar=gbl.csvQuotechar)\n\t\tfor row in rowReader:\n\t\t\tROWData.append(row)", "def get_data():\n with open(os.path.join('data', 'demo_file.csv'), 'r') as fin:\n reader = csv.reader(fin)\n data = list(reader)\n\n return data", "def read_csv_file(input_filepath):\n\n out_list = []\n\n with open(input_filepath, 'r', encoding = 'utf-8') as f:\n reader = csv.reader(f)\n for i,row in enumerate(reader):\n if i == 0:\n labels = row\n else:\n new_dict = {}\n for j,value in enumerate(row):\n new_dict[labels[j]] = value\n out_list.append(new_dict)\n\n return out_list", "def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map {}'.format(row.get('id', None)))\n yield row", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(row[\"timestamp\"], \"%b %d %Y\"),\n \"trans_type\": row[\"type\"],\n \"amount\": row[\"amount\"],\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(row[\"date\"], \"%d-%m-%Y\"),\n \"trans_type\": row[\"transaction\"],\n \"amount\": row[\"amounts\"],\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def read_csv_file(input_csv_file_path):\n with open(input_csv_file_path, 'r', encoding='utf-8') as file_path:\n csv_reader = csv.reader(file_path)\n for row in itertools.islice(csv_reader, 1, None):\n yield (\n int(row[0]), row[2],\n datetime.datetime.strftime(\n datetime.datetime.strptime(row[-1], '%m/%d/%y'),\n '%Y-%m-%d'))", "def mock_data_loader(csv_path):\n file_path = 
KINGDOM_CSV_PATH\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def read_list(filename):\r\n listoutput = []\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n listoutput.append(item)\r\n return listoutput", "def _read_csv(input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header", "def get_csv_data(file_name: str) -> Iterator[list]:\n with open(file_name) as f:\n # creating a reader instance that can iterate over rows of file.\n reader = DictReader(f)\n\n # iterating over rows:\n for row in reader:\n yield dict(row) # returning the dicts for each row in the dataset.", "def datareader(self, path):\n\n f = open(path, 'r')\n data = f.read()\n data = data.split('\\n')\n data_tmp = []\n for idx in range(len(data)):\n if str(data[idx]).find('@data') >= 0:\n data_tmp = data[idx + 1:]\n break\n res = []\n for record in data_tmp:\n record = record.split(',')\n record = map(float, record)\n res.append(record)\n return res", "def csv_parser(s):\n\n # Data is our output. It will be a list of lists.\n\n # Split csv into lines and store them in a list called 'lines'.\n \n # Remove the first element from lines, so that you have only the data lines left.\n \n # At this stage, we loop through the list called lines.\n # As you loop\n # i. split each line on the commas;\n # ii. convert the Subject variable to int.\n # iii. convert the Height variable to float.\n # iv. add to data a list consisting of this line's Subject, Height, and Occupation values ", "def parse_csvfile(self, csvfile):\n\n logging.info(\"Parseing csvfile: %s\" % basename(csvfile))\n fields = []\n data = {}\n try:\n with open(csvfile) as f:\n for line in f:\n line = line.strip()\n # Skip empty or commented line\n if not line or line[0] == \"#\":\n continue\n if not fields:\n # The first valid line defines fields.\n fields = [x.strip() for x in line.split(\",\")]\n for f in self.REQUIRED_FIELDS:\n if f not in fields:\n logging.error(\"Failed to find %s field. \"\n \"Aborted.\" % f)\n sys.exit(1)\n else:\n # The rest lines are data\n values = [x.strip() for x in line.split(\",\")]\n record = {}\n for k, v in zip(fields, values):\n record[k] = v\n # Convert date time string to epoch seconds\n record[\"time_h\"] = self.parse_timestr(record[\"time_h\"])\n node = record[\"name\"]\n if data.get(node, None):\n data[node].append(record)\n else:\n data[node] = [record]\n except Exception as e:\n logging.exception(\"Failed to parsing the csvfile. 
\"\n \"See stack trace below:\")\n sys.exit(1)\n\n # While it didn't occur often, I observed that data in CSV files\n # generated by cbtool monextrac command were not in time order.\n # So sort them.\n logging.debug(\"Sorting the data\")\n for node in data.keys():\n data[node].sort(lambda x, y: cmp(int(x[\"time\"]), int(y[\"time\"])))\n\n return data, fields", "def get_data_from_robot():\n file = ''\n if file:\n reader = csv.DictReader(open(file))\n data = []\n for row in reader:\n data.append([row])\n return data", "def import_data(address):\n try:\n inputcsv = csv.reader(open(address, \"r\"), delimiter=\";\", lineterminator=\"\\n\")\n except IOError:\n print \"File not exists or is unreadable, please check it.\"\n exit(1)\n\n data = list() # all data\n item = list() # each tabular\n count = 0\n subcount = 0\n try:\n for row in inputcsv:\n if count < 2 : # read Time period and number of product\n data.append(int(row[1]))\n else :\n item.append(row[1:])\n subcount +=1 \n if subcount == data[1]:\n data.append(np.array(item, dtype=float))\n item = list()\n subcount = 0\n count += 1\n if (data[1] > 1):\n data.append(np.array(item, dtype=float)) # manage the last tabular\n except:\n print \"File is not well formated, please correct it.\"\n exit(1)\n return data", "def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table", "def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]", "def read_csv_file(path):\n\n\twith open(path, encoding=\"utf-8\", newline=\"\") as msgfile:\n\t\tdata = reader(msgfile)\n\t\tnewdata = [[val for val in row] for row in data]\n\n\treturn newdata", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def get_url_data(csv_file_path: str):\n with open(csv_file_path, \"r\", encoding=\"latin-1\") as url_records:\n for url_records in csv.reader(url_records):\n yield url_records", "def load_csv(csvpath):\n with open(csvpath, \"r\") as csvfile:\n data = []\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Skip the CSV Header\n next(csvreader)\n\n # Read the CSV data\n for row in csvreader:\n data.append(row)\n return data", "def read_csv_to_list(in_file, headless=True, delim='\\t'):\n ret_list=list()\n with open(in_file,'r') as csv_file:\n my_reader = csv.reader(csv_file, delimiter=delim) \n if headless:\n next(my_reader)\n for row in my_reader:\n ret_list.append(list(row))\n return(ret_list)", "def parse(file, delimiter):\n\n\t# Open CSV\n\topened_file = open(file)\n\n\t# Read CSV\n\tdata = csv.reader(opened_file, delimiter=delimiter)\n\n\t# Build data structure\n\tparsed_data = []\n\tfields = next(data)\n\tfor row in data:\n\t\tparsed_data.append(dict(zip(fields, row)))\n\n\t# Close CSV\n\topened_file.close()\n\n\treturn parsed_data", "def importAll():\n csvFile = openCsv()\n items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\n\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n\n return items", "def read(self):\n with open(self.filename) as f:\n reader=csv.reader(f)\n for row 
in reader:\n self.data.appendleft(row)", "def read_data_from_csv(csv_file, header=None, **kwargs):\n if os.path.isabs(csv_file) == False:\n path_to_csv = os.path.join(csv_file)\n else:\n path_to_csv = csv_file\n row_list = []\n if \"field_sep\" not in kwargs.keys():\n field_sep = ','\n else:\n field_sep = kwargs.get(\"field_sep\")\n with open(path_to_csv, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=field_sep, fieldnames=header)\n for record in csv_reader:\n if list(record.values())[0].startswith(\"#\") is not True:\n # IT'S A COMMENT IF IT STARTS WITH \"#\" \n # IF THIS IS YOUR HEADER ROW, SUPPLY A LIST OF COLUMN NAMES WHEN CALLING THE FUNCTION\n row_list.append(record)\n return row_list", "def parse(raw_file, delimiter):\n\t#open csv file\n\topened_file = open(raw_file)\n\t\n\t#read csv file\n\tcsv_data = csv.reader(opened_file,delimiter=delimiter)\n\t\n\t#build parsed data\n\tparsed_data = []\n\t\n\t#define headers\n\tfields = csv_data.next()\n\t\n\t#Iterate over each row of the csv file, zip together field->value pairs\n\tfor row in csv_data:\n\t\tparsed_data.append(dict(zip(fields, row)))\n\t\n\t#close csv file\n\topened_file.close()\n\t\n\treturn parsed_data", "def get_csv_data(file_path):\n reader = csv.DictReader(open(file_path), delimiter=\"\\t\")\n result = []\n for row in reader:\n result.append(row)\n\n return result", "def csv_file_to_list(filename, config=None):\n output_list = []\n with open(filename) as f:\n reader= csv.DictReader(f)\n for line in reader:\n output_list.append(line)\n return output_list", "def load_from_file_csv(cls):\n fields = []\n rows = []\n new_dict = {}\n new_list = []\n key = \"\"\n filename = cls.__name__ + \".csv\"\n with open(filename) as fp:\n reader = csv.reader(fp)\n fields = next(reader)\n for row in reader:\n rows.append(row)\n for row in rows:\n i = 0\n new_dict = new_dict.fromkeys(fields)\n for attr in fields:\n key = fields[i]\n value = row[i]\n new_dict[key] = value\n i += 1\n new_list.append(cls.create(**new_dict))\n return new_list", "def _enumerate_csv(self, csv_input):\n csv_file = open(csv_input, 'rb') \n csv_reader = csv.reader(csv_file)\n next(csv_reader, None)\n for row in reader:\n yield row", "def loadCSVFile (file, sep=\";\"):\n lst = lt.newList(\"ARRAY_LIST\") #Usando implementacion arraylist\n #lst = lt.newList() #Usando implementacion linkedlist\n print(\"Cargando archivo ....\")\n t1_start = process_time() #tiempo inicial\n dialect = csv.excel()\n dialect.delimiter=sep\n try:\n with open(file, encoding=\"utf-8\") as csvfile:\n spamreader = csv.DictReader(csvfile, dialect=dialect)\n for row in spamreader: \n lt.addLast(lst,row)\n except:\n print(\"Hubo un error con la carga del archivo\")\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return lst", "def load_csv(path: Path) -> Any:\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n items = list(reader)\n return items", "def walk_csv_data(**kwargs):\n for path, name in walk(**kwargs):\n if path.endswith('.csv'):\n with open(path, newline='') as f:\n text = f.read()\n reader = csv.DictReader(StringIO(text))\n try:\n fieldnames = reader.fieldnames\n rows = list(reader)\n yield (path, name, text, fieldnames, rows)\n except csv.Error:\n continue", "def read_file(filename):\n # iterate over the CSV\n data = []\n with open(filename, 'r') as data_file:\n i = 0\n reader = csv.reader(data_file, delimiter=\",\", quotechar=\"\\\"\")\n for row in reader:\n # skip header\n if 
i == 0:\n i += 1\n continue\n i += 1\n\n # grab just the name and adress\n name = row[5]\n address = row[9]\n # zip_code = row[8]\n # town = row[7]\n # apt = row[10]\n data.append([i, name, address]) # ,town,zip_code,apt])\n\n return data", "def read_file(filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n return list(reader)", "def read_partslist_csv(csv: str)->pd.DataFrame:\n try:\n p_df = pd.read_csv(csv, sep='\\t', header=0, engine='python', na_values='', skipfooter=3,\n dtype={'BLItemNo': str, 'BLColorId': int, 'LDrawColorId': int, 'Qty': int})\n p_df = p_df.fillna({'BLColorId': '', 'Qty': 0})\n p_df = p_df.rename(mapper={'BLItemNo': 'ItemId', 'BLColorId': 'Color'}, axis=1)\n p_df = p_df.drop(columns=['ElementId', 'LdrawId', 'LDrawColorId'])\n return p_df\n except FileNotFoundError as e:\n print(e)\n return pd.DataFrame()", "def listed_data(self, reader):\n result = []\n for line in reader:\n result.append(line)\n return result", "def extract_data_from_file(self, path):\n try:\n with open(path, 'r') as file:\n for row in file:\n row = row.replace('\\n','')\n row = row.split(',')\n yield row\n except EOFError as err:\n exit(err)", "def read_csv(self, filename: str):\n author_field_index = 0\n authors = []\n author_data = []\n with open(filename, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n fields = next(csvreader)\n for index, field in enumerate(fields):\n if field.lower() == \"author\":\n author_field_index = index\n break\n for row in csvreader:\n authors.append(row[author_field_index])\n for index, author in enumerate(authors):\n if author:\n data = self.get_author_info(author)\n author_data.append((author, data))\n if index % 10 == 0: # every 10 entries we write the data to json in case we get a crash\n self._reference.update_maps_jsons()\n self._reference.update_maps_jsons()\n return author_data", "def load_from_file_csv(cls):\n new_list = []\n try:\n with open(\"%s.csv\" % cls.__name__, mode='r') as f:\n file = cls.from_json_string(f.read())\n for i in file:\n new_list.append(cls.create(**i))\n except Exception:\n pass\n return new_list", "def gen_csv_data(inputfilehandle, returntype='generator'):\n # First do some sniffing (I expect input smmc file to have headers!)\n snif = csv.Sniffer()\n csvdialect = snif.sniff(inputfilehandle.read(4048)) # The read _must_ encompass a full first line.\n csvdialect.lineterminator = '\\n' # Ensure correct line terminator (\\r\\n is just silly...)\n inputfilehandle.seek(0) # Reset file\n # Then, extract dataset:\n setreader = csv.DictReader(inputfilehandle, dialect=csvdialect)\n # Import data\n # Note: Dataset is a list of dicts.\n if returntype == 'list':\n return [row for row in setreader if len(row)>0]\n elif returntype == 'csvreader':\n return setreader\n else:\n return (row for row in setreader if len(row)>0)", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def read_csv_file(self):\n pass", "def test_read_in_file(self):\r\n filename = \"CrimeDataSmall.csv\"\r\n\r\n lst = cds.read_in_file(filename)\r\n\r\n self.assertIsInstance(lst, list, \"Returned datatype should be a list\")\r\n self.assertEqual(len(lst), 4, \"There should be 4 rows returned from CrimeDataSmall 1 header and 3 data rows\")\r\n 
self.assertEqual(len(lst[0]), 23, \"Each row should have 23 columns\")\r\n self.assertEqual(lst[0][1], \"Reported_Date\", \"Column 1 was incorrect header\")\r\n self.assertEqual(lst[0][7], \"Offense\", \"Column 7 was incorrect header\")\r\n self.assertEqual(lst[0][13], \"Zip Code\", \"Column 13 header was incorrect\")\r\n self.assertEqual(lst[1][1], \"03/19/2019\", \"Column 1 was incorrect in first data row\")\r\n self.assertEqual(lst[1][7], \"Vehicular – Non-Injury\", \"Column 7 was incorrect in first data row\")\r\n self.assertEqual(lst[1][13], \"64161\", \"Column 13 in first data row was incorrect\")\r\n self.assertEqual(lst[3][1], \"03/27/2019\", \"Column 1 was incorrect in 3rd data row\")\r\n self.assertEqual(lst[3][7], \"Embezzlement\", \"Column 7 was incorrect 3rd data row\")\r\n self.assertEqual(lst[3][13], \"64112\", \"Column 13 3rd data row was incorrect\")\r\n self.assertEqual(lst[3][11], \"4600, S WORNALL RD\", \"Column 11 3rd data row was incorrect. Use csv module to read \")", "def read_csv(path):\r\n output = []\r\n for row in csv.DictReader(open(path)):\r\n output.append(row)\r\n return output", "def read_csv(path):\n csv_data =[]\n \n with open(path, 'r') as csv_file:\n csv_read = csv.reader(csv_file, dialect='excel')\n for row in csv_read:\n csv_data.append(row)\n\n return(csv_data)", "def parse(raw_file, delimiter):\n\n opened_file = open(raw_file, 'rU')\n csv_data = csv.reader(opened_file, delimiter=delimiter)\n\n parsed_data = []\n\n fields = csv_data.next()\n\n for row in csv_data:\n parsed_data.append(dict(zip(fields,row)))\n\n opened_file.close()\n\n return parsed_data", "def read_csv(file_name):\n final_list = []\n reader = csv.reader(open(file_name, 'rb'), delimiter=',')\n for x in reader:\n final_list.append(x)\n return final_list", "def parse_csv2list_hdfs(file_path):\n file_df = h2o.import_file(path=file_path, header=1, sep=',')\n file_df = file_df[:, :2].ascharacter()\n csv_list = h2o.as_list(file_df, use_pandas=False, header=True)\n csv_list = [[j.strip() for j in i] for i in csv_list]\n return csv_list", "def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r", "def read_csv(filename):\n with open(filename) as csv:\n return [csv_line.strip().split(',') for csv_line in csv]" ]
[ "0.7167241", "0.7161207", "0.70977736", "0.7063041", "0.7038156", "0.7010745", "0.70001847", "0.6948276", "0.69236016", "0.6910194", "0.68603677", "0.68457425", "0.68209416", "0.6799708", "0.67926854", "0.6789507", "0.67835903", "0.6757448", "0.67329955", "0.67210245", "0.67145526", "0.6664426", "0.66477334", "0.66401535", "0.66359967", "0.6619827", "0.66177106", "0.6599284", "0.6591604", "0.65901434", "0.65884995", "0.65769744", "0.65761745", "0.6573769", "0.65731376", "0.6561448", "0.6540017", "0.6533228", "0.65280336", "0.65174776", "0.65170383", "0.6516645", "0.65012354", "0.65008193", "0.6500661", "0.64925", "0.64923465", "0.6492049", "0.6489874", "0.64889175", "0.6488122", "0.64757", "0.64735734", "0.64724016", "0.6468706", "0.64664084", "0.6452546", "0.6434447", "0.6432265", "0.6430378", "0.6420446", "0.6420378", "0.64061326", "0.6405298", "0.64000833", "0.6399326", "0.6395048", "0.6390875", "0.63897985", "0.63860154", "0.6377083", "0.63770664", "0.63732624", "0.63655996", "0.63633233", "0.63621026", "0.63525176", "0.6346238", "0.6341323", "0.6340092", "0.6332201", "0.63266945", "0.63126254", "0.63071036", "0.63061184", "0.63026506", "0.6290297", "0.6289048", "0.6287053", "0.6286166", "0.6285852", "0.6285852", "0.6283849", "0.6282369", "0.62794346", "0.6272781", "0.62651604", "0.62620467", "0.6259587", "0.62563586", "0.6256342" ]
0.0
-1
THIS FUNCTION DOESN'T WORK PROPERLY DOESN'T WORK ABOVE A CERTAIN LIST SIZE HAVEN'T DEBUGGED TO FIND OUT WHY SINCE AN ANSWER IS GIVEN
def median_approx(values_list, numbins):
    # call median_bins function: really nice technique here
    mu, sigma, ignore_vals, bins = median_bins(values_list, numbins)
    bin_width = 2 * sigma / numbins
    # now get to midpoint of ... Adjust to include ignore_vals
    N = ignore_vals + np.sum(bins)
    midpoint = (N + 1)/2
    count = ignore_vals
    for b, bincount in enumerate(bins):
        while count < midpoint:
            count += bincount
    # find midpoint of bin in countB
    median = (mu - sigma) + (b - 0.5) * bin_width
    return median
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_best_guess(self, lst):\n maxlen = 0\n pass\n #for elem in lst:", "def main(size, item, answer):\n item = list(map(int, item))\n item.sort()\n for number in item:\n if size - number >= 0:\n size -= number\n answer += 1\n print(answer)", "def solveProblem(list):\n return len(findSubArray(list))", "def test_dque_size(iterable, result, new_dque):\n for idx in range(len(iterable)):\n new_dque.append(iterable[idx])\n assert new_dque.size() == result", "def n50_counter(input_list):\n input_list.sort()\n half_tot = sum(input_list) / 2\n\n current_count = 0\n for num in input_list:\n current_count += num\n if current_count >= half_tot:\n return num", "def prioritize_candidates(lst_cand):\n print(f\"\\nprioritize_candidates(); len = {len(lst_cand)}\")\n if len(lst_cand) > 1:\n for n in range(len(lst_cand)):\n nc = list(lst_cand[n])\n nc.insert(0,0)\n lst_cand[n] = nc\n for cand in lst_cand:\n # some text adds p\n if cand[1].find(\"Okay\") > -1:\n cand[0] += 10\n if cand[1].lower().find(\"serie\") > -1:\n cand[0] += 10\n if cand[1].find(\"__NAM\") > -1:\n cand[0] += 10\n if cand[1].find(\"BIX_\") > -1:\n cand[0] += 10\n if cand[1].find(\"REF_\") > -1:\n cand[0] += 10\n if cand[1].find(\"veracrypt1\") > -1:\n cand[0] += 100\n if cand[1].find(\"veracrypt2\") > -1:\n cand[0] += -10\n # some text cost p\n if any([cand[1].find(f\"-{n}\") > -1 for n in range(9)]):\n cand[0] -= 5\n if cand[1].find(\"DEL\") > -1:\n cand[0] -= 100\n if cand[1].find(\"copy\") > -1:\n cand[0] -= 50\n if cand[1].find(\"output\") > -1:\n cand[0] -= 6\n if cand[1].find(\".part\") > -1:\n cand[0] -= 9\n # deeper path adds p\n cand[0] += cand[1].count(os.sep)\n # If still even, older is better\n lst_top = [cand for cand in sorted(lst_cand, reverse=True)]\n if lst_top[0][0] == lst_top[1][0]: # No winner\n if lst_top[0][2] < lst_top[1][2]: # head is oldest\n lst_top[0][0] += 1\n else:\n lst_top[1][0] += 1\n return lst_top\n else: # Too few to prioritize\n return lst_cand # return unchanged", "def brute_force_search_solution():\n return len(coin_search(TOTAL, COINS))", "def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers", "def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers", "def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers", "def NewFeedbackSystem(guess, correct, semicorrect, lijst):\n\n global allcombos\n global usedcombos\n global all_right\n\n\n feedback = correct + semicorrect\n\n usedcombos.append(guess)\n\n if not allright: #needs an extra way to AT LEAST get the same feedback as previous one\n\n if feedback == 4: #takes all letters in the code and checks for possible new combinations, adds them to the list\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n results = permutations(f\"{A}{B}{C}{D}\", 4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n all_right = True\n return AIguessing(newcombos)\n\n elif feedback == 3: #takes all letters in the code and checks for possible new combinations with >= 3 from previous code, adds them to the list\n results = permutations(guess, 3)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n 
elif feedback == 2:\n #takes all letters in the code and checks for possible new combinations with >= 2 from previous code, adds them to the list\n results = permutations(guess, 2)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif feedback == 1:\n #takes all letters in the code and checks for possible new combinations with >= 1 from previous code, adds them to the list\n results = combinations(guess, 1)\n newresult = list(dict.fromkeys(resulttolist(results)))\n\n\n return compareWithAll(newresult, lijst)\n\n else:\n #takes all letters in the code and checks for possible new combinations WITHOUT these letters, adds them to the list\n newletterlist = [item for item in letters if item not in guess] #creates a new list with letters that weren't used\n newletters = \"\".join(newletterlist)\n\n results = product(newletters, repeat=4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)\n\n else: #if all letters were guessed correctly\n\n if correct == 4:\n return (\"Well played Human, but I win this time\")\n\n elif correct == 2: #in a 2,2 case, checks which combinations are possible while keeping 2 on the same spot each time\n\n results = permutations(guess, 2)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif correct == 1: #in a 1,3 case, creates a list with still possible combinations (since there'll be only 8, it's hardcoded in here)\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n newcombos = [f\"{A}{C}{D}{B}\", f\"{A}{D}{B}{C}\", f\"{C}{B}{D}{A}\", f\"{D}{B}{A}{C}\", f\"{B}{D}{C}{A}\", f\"{D}{A}{C}{B}\", f\"{B}{C}{A}{D}\", f\"{C}{A}{B}{D}\"]\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)\n\n else:\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n results = permutations(f\"{A}{B}{C}{D}\", 4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)", "def test_list_size_one_even(self):\n argument = [2]\n expect = 2\n actual = find_an_even(argument)\n self.assertEqual(expect, actual)", "def finding_helper(list_s, index, len_s):\n # base case\n if index == len_s:\n ans_maybe = ''.join(list_s)\n if ans_maybe in dict_list:\n print('Found:', ans_maybe)\n print('Searching...')\n ans.append(ans_maybe)\n return\n\n # recursion\n for i in range(index, len_s):\n if index != i:\n if not duplicate(list_s, index, i):\n # if length of word more than ten chars, add with the early stopper\n if len_s > 10:\n word = ''.join(list_s)\n if has_prefix(word[:2]):\n # choose(swap)\n (list_s[index], list_s[i]) = (list_s[i], list_s[index])\n # explore\n finding_helper(list_s, index+1, len_s)\n # un-choose\n (list_s[index], list_s[i]) = (list_s[i], list_s[index])\n else:\n # choose(swap)\n (list_s[index], list_s[i]) = (list_s[i], list_s[index])\n # explore\n finding_helper(list_s, index + 1, len_s)\n # un-choose\n (list_s[index], list_s[i]) = (list_s[i], list_s[index])\n else:\n finding_helper(list_s, index + 1, len_s)", "def test_candidates_list(self):\n pass", "def __size_restriction_correct_list_number(self):\n\n strTestName = 'List size higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n 
RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizH('parameter1', 3)\n\n RxCSObject.parameter1 = [1, 2, 3, 4, 5, 6]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def lessthan_5(num_list):", "def compareWithAll(lijst, previouslist, feedback = 0):\n\n global usedcombos\n\n results = []\n\n\n if feedback == 2: #to make sure there's a 2 letter combination with gaps\n for i in previouslist:\n for letter1, letter2 in lijst:\n if letter1 in i and letter2 in i:\n results.append(i)\n\n elif feedback == 3: #to make sure there's a 3 letter combination with gaps\n for i in previouslist:\n for letter1, letter2, letter3 in lijst:\n if letter1 in i and letter2 in i and letter3 in i:\n results.append(i)\n else:\n for i in previouslist:\n\n for j in range(len(lijst)):\n\n if lijst[j] in i:\n\n results.append(i)\n\n results = [item for item in results if item not in usedcombos]\n results = list(dict.fromkeys(results))\n\n print(f\"It seems I only {len(results)} options left!\")\n\n return AIguessing(results)", "def test_length_of_list_increases_after_few_single_val_insertion(small_list):\n assert len(small_list) == 4\n assert small_list.head.val == 4", "def helper(self,nums,start_idx) :\n if start_idx == None :\n return None, 0\n \n if self.solutions.get(start_idx) :\n return self.solutions[start_idx]\n \n \n if len(nums) - start_idx == 0 :\n return None, 0\n\n return_idx = None\n heist_total = None\n\n if len(nums) - start_idx == 1 :\n self.solutions[start_idx] = (start_idx,nums[start_idx])\n return_idx,heist_total = start_idx, nums[start_idx]\n elif len(nums) - start_idx == 2 :\n if nums[start_idx] > nums[start_idx + 1] :\n return_idx,heist_total = start_idx,nums[start_idx]\n else :\n return_idx,heist_total = start_idx+1,nums[start_idx+1] \n elif len(nums) - start_idx == 3 :\n if (nums[start_idx] + nums[start_idx+2]) > nums[start_idx + 1] :\n return_idx,heist_total = start_idx,(nums[start_idx]+nums[start_idx+2])\n else :\n return_idx,heist_total = (start_idx+1),nums[start_idx+1]\n else : # array is greater than size 3 \n r1 = self.helper(nums, start_idx +1)\n r2 = self.helper(nums, start_idx +2)\n r3 = self.helper(nums, start_idx +3)\n \n valid_cases = []\n if (r1[0] != None) and (r1[0] == start_idx +1) :\n valid_cases.append(r1)\n \n if (r2[0] != None) and (r2[0] == start_idx +2) :\n valid_cases.append((start_idx, nums[start_idx] + r2[1]))\n\n if (r3[0] != None) and (r3[0] == start_idx +3) :\n valid_cases.append((start_idx, nums[start_idx] + r3[1]))\n \n valid_cases.sort(key = lambda x : x[1],reverse = True)\n return_idx, heist_total = valid_cases[0][0], valid_cases[0][1]\n\n \n self.solutions[start_idx] = (return_idx,heist_total)\n return (return_idx, heist_total)", "def lab10_q5():\n return \"\"\"\n 'assert type(c) is int' to make sure c is a number/integer\n\tmake a helper function to solve this with the same parameters\n\tfirst base case is if the count is 0 which returns Link.empty if true\n\tThen is when lst is Link.empty, which should just Link.empty as well\n\trecursion for these base cases where you just link the first with the helper(rest, c) :Link(lst.first, helper(lst.rest, count))\n \"\"\"", "def ansJudge(ansList, ml_string, blanksList, i):\n\n ansInput = raw_input('Please fill in the first blank: ')\n i = 0\n\n while i < len(ansList) - 1:\n if ansInput == ansList[i]:\n print \"Great! 
\" + play_game(ml_string, blanksList, ansList, i)\n i += 1\n ansInput = raw_input('Please enter your answer for the next blank: ')\n else:\n print \"Nice try, but please try it again.\"\n ansInput = raw_input('Please enter your answer again: ')\n\n while i == len(ansList) - 1:\n if ansInput == ansList[i]:\n print \"Great! \" + play_game(ml_string, blanksList, ansList, i) + \"\\nYou have past this test!\"\n break\n else:\n print \"Nice try, but please try again.\"\n ansInput = raw_input('Please enter your answer again: ')", "def check_answer(chaine):\n l_chaine = list(chaine)\n if len(l_chaine) ==1:\n l_chaine.append(1)\n if l_chaine[0] in ['N', 'S', 'E', 'W', 'Q'] and len(l_chaine)<3:\n return l_chaine\n else:\n return None", "def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 49:\n break\n\n return answers", "def ansJudgeRunes(ml_string, qusList, ansList, i):\n\n ansInput = raw_input('Please fill in the first blank: ')\n ml_stringList = ml_string.split(',')\n i = 0\n\n while i < len(ml_stringList) - 1:\n if ansInput == ml_stringList[i]:\n print\"Great! The meaning of \" + qusList[i] + \" is: \" + ansList[i]\n i += 1\n ansInput = raw_input('Please enter your answer for the next blank: ')\n else:\n print \"Nice try, but please try it again.\"\n ansInput = raw_input('Please enter your answer again: ')\n\n while i == len(ml_stringList) - 1:\n if ansInput == ml_stringList[i]:\n print\"Great! The meaning of \" + qusList[i] + \" is: \" + ansList[i] + \"\\nYou have past this test!\"\n break\n else:\n print \"Nice try, but please try again.\"\n ansInput = raw_input('Please enter your answer again: ')", "def findAlternatives(sortedList):\n #zeroing the data below treshold\n global TRESHOLD\n # if THRESHOLD == 0:\n TRESHOLD = readsHistogram(sortedList)\n afterTresholdData = []\n print(len(sortedList))\n for i in range(len(sortedList)):\n if np.mean(sortedList[i].getSamples()) >= TRESHOLD:\n afterTresholdData.append(sortedList[i]) #leaves only the reads only if the mean of the reads above TRESHOLD\n index = 0\n while index < (len(afterTresholdData) - 1):\n counter = 1\n while afterTresholdData[index].getName() == afterTresholdData[index + counter].getName():\n afterTresholdData[index].appendSamples(afterTresholdData[index + counter].getSamples())\n afterTresholdData[index].appendCoordinates(afterTresholdData[index + counter].getCoordinates())\n counter += 1\n index += counter\n alternatives = []\n for item in afterTresholdData:\n if len(item.getSamples().shape) > 1:\n alternatives.append(item)\n print(len(afterTresholdData), len(alternatives))\n return alternatives", "def __size_restriction_correct_list_list(self):\n\n strTestName = 'List size higher than the size of other list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def gene_finder(dna, threshold):\n finder = []\n twoStrands = find_all_ORFs_both_strands(dna) #this calls the function that finds the compliment of dna and finds all ORFs \n print twoStrands \n for k in range(len(twoStrands)): #go through the list \"twoStrands\"\n if twoStrands[k]>threshold: #if the length of \n print twoStrands[k]\n print len(twoStrands[k])\n finder.append(twoStrands[k])\n return finder", "def test_kth_from_end_exception_on_list_too_short(small_list):\n expected = 'exception'\n actual = small_list.kth_from_end(101)\n assert expected == actual", "def large_straight_points(dice_list):\n if straight_size(dice_list) >= 5 or check_yahtzee(dice_list):\n return 40\n else:\n return 0", "def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n #print(list(cows.items()))\n cows_list=list(cows.items())\n curr_list=[[[0]]]\n for i in range(1,len(cows_list)):\n smaller_fun(curr_list,i,limit,cows_list)\n\n ans =sorted(curr_list,key=lambda x:len(x))\n print(ans)\n ansfinal=[]\n for item in ans:\n trip=[]\n for i in range(len(item)):\n trip.append(cows_list[item[i]][0])\n ansfinal.append(trip)\n return ansfinal", "def small_straight_points(dice_list):\n if straight_size(dice_list) >= 4 or check_yahtzee(dice_list):\n return 30\n else:\n return 0", "def test_get_top_n_words_incorrect_numbers(self):\n expected = []\n actual = get_top_n_words({}, -1)\n self.assertEqual(expected, actual)\n actual = get_top_n_words({'happy': 2}, 0)\n self.assertEqual(expected, actual)", "def num_elements_eval_function_capped(individual, test_data, truth_data, name=None):\r\n return max(len(individual), 1707)", "def userSuggestions(database):\n firstname=str(input(\"who do you want to have follow suggestions for :\"))\n usr,find=getByName(database,firstname)\n if not find:\n print(\"the User could not be found\")\n return\n else:\n following=[]\n followers=[]\n for folower in usr.folowed:\n followers.append(folower)\n for folowed in usr.folow:\n following.append(folowed)\n results=[]\n print(\"On what do you want your suggestions to be based on?\\n1. Mutual Interests\\n2. Mutual Connections\\n3. 
Both\")\n choice=int(input(\"Your choice :\"))\n for key ,usrs in database.items():\n if key not in following: \n correspondant=0\n if choice == 1 or choice == 3:\n for interest in usr.interest:\n if interest in usrs.interest:\n correspondant+=1\n if choice == 2 or choice == 3:\n for folower in followers:\n for folows in usrs.folowed:\n if key == folows:\n correspondant+=1\n results.append([key,correspondant])\n for i in range(len(results)):\n for j in range(0, len(results)-i-1):\n if results[j][1] > results[j+1][1] :\n results[j], results[j+1] = results[j+1], results[j]\n for k in range(5):\n print(results[k][0])", "def truncate_ocr_sim_list(token, ocr_sims_list, limit=10):\n if len(ocr_sims_list) <= limit:\n return ocr_sims_list\n\n ocr_scores = set([sc for sim, sc in ocr_sims_list.items()])\n\n # Limit of 10 different scores allowed\n sorted_ocr_scores = sorted(ocr_scores, reverse=True)[:limit]\n ocr_list = []\n for score in sorted_ocr_scores:\n tmp_ocr_list = [ocr_sims for ocr_sims, ocr_score in ocr_sims_list.items() if ocr_score == score]\n\n if len(ocr_list) + len(tmp_ocr_list) > limit:\n list_len = limit - len(ocr_list)\n tmp_list = []\n\n while len(tmp_list) < list_len:\n tmp_list += select_lower_edit_distance(token, tmp_ocr_list)\n\n if len(ocr_list) + len(tmp_list) == limit: # Final list has exactly 10 elements\n ocr_list += tmp_list\n break\n else: # List has more than 10 arguments (need to chose only the n elements needed)\n alpha_tmp_list = []\n\n while len(alpha_tmp_list) != list_len:\n alpha_word = select_best_alphabetical_word(token, tmp_list)\n\n alpha_tmp_list.append(alpha_word)\n tmp_list = [tkn for tkn in tmp_list if tkn != alpha_word]\n\n ocr_list += alpha_tmp_list\n break\n elif len(ocr_list) + len(tmp_ocr_list) == limit:\n ocr_list += tmp_ocr_list\n break\n else: # len(ocr_list) + len(tmp_ocr_list) < limit\n ocr_list += tmp_ocr_list\n\n if len(ocr_list) != limit:\n raise IndexError(\"OCR list is still too big (\"+str(len(ocr_list))+\"/\"+str(limit)+\")\")\n\n return {tkn: ocr_sims_list[tkn] for tkn in ocr_list}", "def __size_restriction_correct_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a tuple\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def ansJudgeDada(ml_string, qusList, i):\n\n ansInput = raw_input('Please fill in the first blank: ')\n ml_stringList = ml_string.split(',')\n i = 0\n\n while i < len(ml_stringList) - 1:\n if ansInput == ml_stringList[i]:\n print\"Great! The charm \" + qusList[i] + \" is \" + ml_stringList[i] + '.'\n i += 1\n ansInput = raw_input('Please enter your answer for the next blank: ')\n else:\n print \"Nice try, but please try it again.\"\n ansInput = raw_input('Please enter your answer again: ')\n\n while i == len(ml_stringList) - 1:\n if ansInput == ml_stringList[i]:\n print\"Great! The charm \" + qusList[i] + \" is \" + ml_stringList[i] + '.' 
+\"\\nYou have past this test!\"\n break\n else:\n print \"Nice try, but please try again.\"\n ansInput = raw_input('Please enter your answer again: ')", "def test_case_7():\n N = 50\n\n x = 0\n y = 0\n strength = 10000\n population = [[0 for i in range(N)] for j in range(N)]\n final_population = simulate_infection(population, x, y, strength)\n\n assert sum([len([i for i in row if i == -1]) for row in final_population]) == N * N", "def approval_loan_algorithm(applications, N, K):\n approved_loan = []\n loan_disbursement = []\n loan_repayment = []\n while len(applications):\n print(len(applications), 'left!')\n print('*'*50) \n application = applications[0]\n intersected = list(intersected_applications(application, applications))\n intersected.sort(key=lambda application: (\n (application['repaid_amount'] - application['principal']) / application['principal']) * 100, reverse=True)\n applications = filter(lambda item: item['application_id'] not in list(\n map(lambda item: item['application_id'], intersected)), applications)\n for i_application in intersected:\n total_disburse = reduce(lambda a, b: (a[1]+b[1], a[1]+b[1]), filter(lambda item: datetime.strptime(\n i_application['repayments'][-1]['date'], DATE_FORMAT) >= datetime.strptime(item[0], DATE_FORMAT), loan_disbursement), (0, 0))\n total_repayments = reduce(lambda a, b: (a[1]+b[1], a[1]+b[1]), filter(lambda item: datetime.strptime(\n i_application['disbursement_date'], DATE_FORMAT) >= datetime.strptime(item[0], DATE_FORMAT), loan_repayment), (0, 0))\n if (N + total_repayments[0] - total_disburse[0]) > i_application['principal'] and not has_active_loan(i_application, approved_loan) and slot_avaialble(i_application, approved_loan, K):\n loan_disbursement.append(\n (i_application['disbursement_date'], i_application['principal']))\n for repayments in i_application['repayments']:\n loan_repayment.append(\n (repayments['date'], repayments['amount']))\n approved_loan.append(i_application)\n return approved_loan", "def big_selections(lst: List[int], n: int) -> List[List[int]]:\n if not lst:\n return [[]]\n else:\n holder = [lst.copy()]\n for i in range(len(lst)):\n l2 = lst.copy()\n l2.pop(i)\n for item in selections(l2):\n if item not in holder and sum(item) >= n:\n holder.append(item)\n return holder", "def length(somelist):\n if somelist == []:\n return 0\n else:\n return 1 + length(somelist[1:])", "def FilterScafDict(ScafDict):\n\n def CheckScafOrder(NestedListBoi, StrandInfo):\n \"\"\"The purpose of this nested function is to check if the size of the\n previous scaffold is less than the current. 
Returns True if this is the\n case, and false if this fails\n\n :arg1: [[0, 82558], [82568, 14200], [96783, 4436], [101349, 11648],\n [113468, 12600], [126901, 6375], [136697, 30162]]\n :returns: Boolean value TRUE of FALSE\n \"\"\"\n NoOverlap = True\n \n \n \n CurrentLen = 0\n if StrandInfo == '+':\n for item in NestedListBoi:\n AddItems = item[0] + item[1] \n if AddItems > CurrentLen:\n CurrentLen = AddItems\n else:\n print(\"WE ARE FUCKEDDDDDD\")\n NoOverlap = False\n\n elif StrandInfo == '-':\n #Flip list for negative\n NestedListBoi = NestedListBoi[::-1]\n for item in NestedListBoi:\n AddItems = item[0] + item[1] \n if AddItems > CurrentLen:\n CurrentLen = AddItems\n else:\n print(\"WE ARE FUCKEDDDDDD\")\n break\n sys.exit(2)\n NoOverlap = False\n return NoOverlap\n\n\n for key, value in ScafDict.items():\n StartPGASeq = int(value[0][0][2])\n EndPGaSeq = int(value[-1][0][2])\n \n TotalScaflen = int(value[0][1][5])\n LastLastScafLentoadd = int(value[-1][1][3])\n NegLastScafToAdd = int(value[0][1][3])\n\n\n TakeAllScafStartsAndLens = []\n\n for thing in value:\n StartAndLen = [int(thing[1][2]), int(thing[1][3])]\n TakeAllScafStartsAndLens.append(StartAndLen)\n \n #Check if there is any overlap with scaf hitting different PGA scaf\n TakeStrand = value[0][1][4]\n Overlap = CheckScafOrder(TakeAllScafStartsAndLens, TakeStrand)\n \n\n #Print List out with correct orientation\n if TakeStrand == '-':\n FinalPGSLoc = (EndPGaSeq)\n NegScafEnd = StartPGASeq + NegLastScafToAdd\n FinalListToPrint = [key,str(EndPGaSeq), str(NegScafEnd), str(TakeStrand)]\n print('\\t'.join(FinalListToPrint))\n\n elif TakeStrand == '+':\n FinalPGSLoc = (EndPGaSeq + LastLastScafLentoadd)\n FinalListToPrint = [key,str(StartPGASeq), str(FinalPGSLoc), str(TakeStrand)]\n print('\\t'.join(FinalListToPrint))\n\n #print(\"FINAL\")\n #print(key)\n #print(CurrentVal)\n #print(FinalItem[2][0:5])\n #input()", "def Tests(): \n\t# Test 1 \n\tS = [2,1,5,7]\n\tt = 4\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 2\n\tS = [2,1,5,7]\n\tt = 6\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\n\t# Test 3\n\tS = [2,1,5,7]\n\tt = 6\n\tk = 3\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 4\n\tS = [3,2,7,1]\n\tt = 7\n\tk = 1\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 5\n\tS = [3,2,7,1]\n\tt = 4\n\tk = 3\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 6\n\tS = [3,2,7,1]\n\tt = 4\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 7\n\tS = [2,4,7,8,9]\n\tt = 11\n\tk = 3\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 8\n\tS = [2,4,7,8,9]\n\tt = 11\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\n\t# Test 9\n\tS = [3,6,2,1]\n\tt = 3\n\tk = 
2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 10\n\tS = [3,6,2,1]\n\tt = 3\n\tk = 1\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")", "def relevance_feedback_exp(vec_docs, vec_queries, sim,gt,n=10):\n\n \n top_n_relevant_doc = []\n for i in range(sim.shape[1]):\n ranked_documents = np.argsort(-sim[:, i])\n top_n_relevant_doc.append(ranked_documents[:n])\n #print ('Query:', i+1, 'Top relevant n documents:', ranked_documents[:n] + 1)\n real_rel = []\n non_rel = []\n \n for i in range(len(top_n_relevant_doc)):\n \n query = i\n curr = top_n_relevant_doc[query]\n a = []\n b = []\n \n for j in range(len(gt)):\n #print('gt[j][0]',gt[j][0])\n #print('query number', query)\n if (gt[j][0] == query+1):\n \n \n if ( gt[j][1] not in list(curr)):\n a.append(gt[j][1])\n else:\n b.append(gt[j][1])\n \n real_rel.append(b)\n non_rel.append(a)\n\n #print(real_rel)\n #print(non_rel)\n \n alpha = 0.1\n beta = 1\n\n new_vec_queries = np.zeros([30,10625])\n \n for i in range(30):\n query = vec_queries.toarray()[i]\n rel_doc = real_rel[i]\n non_doc = non_rel[i]\n\n ##\n weight_up_rel = np.zeros([10625,])\n for j in rel_doc:\n weight_doc = vec_docs.toarray()[j-1]\n weight_up_rel += weight_doc\n \n weight_up_rel = alpha * weight_up_rel\n\n ##\n\n ##\n weight_up_non = np.zeros([10625,])\n for k in non_doc:\n doc_w = vec_docs.toarray()[k-1]\n weight_up_non += doc_w\n\n weight_up_non = beta * weight_up_non\n ##\n\n new_vec_queries[i] = query + weight_up_rel + weight_up_non\n\n new_vec_queries = sparse.csr_matrix(new_vec_queries)\n\n\n######## After Updating #########\n update_rank_doc = []\n for i in range(sim.shape[1]):\n ranked_documents = np.argsort(-sim[:, i])\n update_rank_doc.append(ranked_documents[:10])\n \n #print(update_rank_doc)\n up_rel = []\n up_non = []\n \n for i in range(len(update_rank_doc)):\n \n query = i\n curr = update_rank_doc[query]\n a = []\n b = []\n \n for j in range(len(gt)):\n #print('gt[j][0]',gt[j][0])\n #print('query number', query)\n if (gt[j][0] == query+1):\n \n \n if ( gt[j][1] not in list(curr)):\n a.append(gt[j][1])\n else:\n b.append(gt[j][1])\n \n up_rel.append(b)\n up_non.append(a)\n\n\n \n all_rel_doc_tfidf = []\n \n all_rel_doc_index = []\n \n \n for i in up_rel:\n \n doc_tfidf = []\n index = []\n \n for doc_num in i:\n \n ini_v_d = vec_docs.toarray()[doc_num-1]\n v_d = np.sort(ini_v_d)[::-1]\n \n for u in range(10):\n tf = v_d[u]\n ind = list(ini_v_d).index(tf)\n index.append(ind)\n doc_tfidf.append(v_d[u])\n\n all_rel_doc_tfidf.append(doc_tfidf)\n all_rel_doc_index.append(index)\n \n\n final_vec_queries = np.zeros([30,10625])\n \n for i in range(30):\n \n query = new_vec_queries.toarray()[i]\n tfidf = all_rel_doc_tfidf[i]\n index = all_rel_doc_index[i]\n\n \n for j in range(len(index)):\n query[index[j]] += tfidf[j]\n \n \n final_vec_queries[i] = query\n\n final_vec_queries = sparse.csr_matrix(final_vec_queries)\n \n \n \n sim = cosine_similarity(vec_docs, final_vec_queries) \n\n rf_sim = sim # change\n return rf_sim", "def yesQuestionHelper(letter, lst):\n i = 0\n while i < len(lst):\n if letter in lst[i]:\n i = i + 1\n else:\n return 0\n return 1", "def fn(arr, k):\n ans = []\n for i, x in enumerate(arr): \n while ans and ans[-1] < x and len(ans) + len(arr) - i > k: ans.pop()\n if len(ans) < k: ans.append(x)\n return ans", "def f(i):\n l = checkout_values[i]\n 
two_or_less_checkouts = 0 #No crashes possible for checkouts of length 1 or 2.\n three_throws = []\n for e in l:\n if len(e) < 3:\n two_or_less_checkouts += 1\n elif [e[1], e[0], e[2]] not in three_throws: #The only possible crash.\n three_throws.append(e)\n return len(three_throws) + two_or_less_checkouts", "def test_get_length(t_list):\n if not get_length(t_list) == 10:\n raise ValueError(\"Wrong number of transactions\")", "def test_known_common_stable_isotopes_len():\n\n assert len(common_isotopes()) == 288, (\n \"The length of the list returned by common_isotopes() is \"\n f\"{len(common_isotopes())}, which is not the expected value.\"\n )\n\n assert len(stable_isotopes()) == 254, (\n \"The length of the list returned by stable_isotopes() is \"\n f\"{len(stable_isotopes())}, which is not the expected value.\"\n )\n\n assert 3352 <= len(known_isotopes()) <= 3400, (\n \"The length of the list returned by known_isotopes() is \"\n f\"{len(known_isotopes())}, which is not within the expected range.\"\n )", "def fn(stack, x, k=0):\n if x == 0: return ans.append(stack.copy()) #store a copy \n for i in range(k, len(candidates)): \n if candidates[i] > x: break \n stack.append(candidates[i])\n fn(stack, x-candidates[i], i)\n stack.pop()", "def brute_force_cow_transport(cows,limit=10):\r\n # TODO: Your code here\r\n #print(cows)\r\n #trip=[]\r\n #import copy\r\n cowsNames=cows.keys()\r\n #print(cowsNames)\r\n cowNamesList=[]\r\n \r\n #for cowName in cowsNames:\r\n # if cows[cowName] <=limit:\r\n # cowNamesList.append(cowName)\r\n # print(cowNamesList)\r\n\r\n herd = sorted(cows.items(), key=lambda cows:cows[1], reverse=True) \r\n #print(herd)\r\n #limit = 10\r\n #weight = [v for x, v in cows.items()] \r\n #name = [x for x, v in cows.items()]\r\n #print('weight', weight)\r\n #print('name', name)\r\n #for i in weight:\r\n #print (i)\r\n # if sum(trip) <= limit: \r\n # trip.append(i)\r\n # print(trip)\r\n #trips=[]\r\n number_of_trips=len(cows)\r\n results=None\r\n limit=10\r\n #best_trips=len(cows) + 1\r\n for trips in get_partitions(herd): \r\n #print(trips) \r\n #flag = False\r\n #numberOfTrips = 0\r\n weights=[]\r\n for trip in trips:\r\n print(trip)\r\n weight=(sum([v for x, v in cows.items() if x in trip]))\r\n #print('weight',weight) \r\n weights.append(weight)\r\n #print('weights',weights)\r\n #print('max weight',max(weights))\r\n for w in weights:\r\n #print (w)\r\n if w <= limit: #and len(trips) <= number_of_trips:\r\n #print(limit) \r\n #print(len(trips))\r\n #number_of_trips=len(trips)\r\n #print(number_of_trips)\r\n results = trips\r\n #print(trips)\r\n return results \r\n #for cow in one_trip:\r\n #print('cow',cow)\r\n #trip_weight+=cow[1]\r\n #print('trip weight', trip_weight)\r\n #temp_results=[] \r\n #if trip_weight > limit: \r\n #print('name',cow[0])\r\n #flag = False \r\n #break\r\n #if flag and (len(trips) < best_trips):\r\n #best_trips = len(trips)\r\n # print(best_trips)\r\n #for trip in trips:\r\n #temp_results=[]\r\n #print(l)\r\n #for cow in trip:\r\n #temp_results = trips.append(cow[0]) \r\n #print(trips)\r\n #print(temp_results)\r\n #results.append(temp_results)\r\n #return results \r\n #print('trips',trips)\r\n #if len(i) < fewest_trips:\r\n\r\n #trips.append(i[0])\r\n\r\n\r\n # trips = len(i)\r\n # for j in i:\r\n # temp = []\r\n # for cow in i:\r\n # temp.append(i[0])\r\n # print(temp)\r\n #for k in j:\r\n # print(k)\r\n #result=[sum(z) for z in trip[1]]\r\n #print(result)\r\n #print('limit',limit)\r\n #for i in result:\r\n # if i <= limit:\r\n # trip.append(name)\r\n 
# print(trip)\r\n \r\n #print(alist)\r\n #for p in partition:\r\n # print(p) \r\n #if weight <= limit:\r\n #result = (brute_force_cow_transport(weight, limit))\r\n #print(True)\r\n \r\n \r\n #if j==[] or limit==0:\r\n # result = (0,())\r\n \r\n #elif j[1] > limit:\r\n #explore right branch only\r\n # result = brute_force_cow_transport(cows[1], limit) \r\n # else:\r\n #nextItem = cows\r\n #print(nextItem)\r\n #explore left branch\r", "def DealerLogic(hand):\r\n inithand = [0,0,0,0,0]\r\n temphand = [0,0,0,0,0]\r\n for j in range(5):\r\n inithand[j] = hand[j] #just numericalvalues of hand\r\n temphand[j] = hand[j]\r\n possiblecards = []\r\n basesuits = CountSuits(inithand)\r\n\r\n for i in range(5):\r\n for j in range(5):\r\n temphand[j] = inithand[j] #resetting for correct value\r\n temphand[i] = 0 #generic trump value for hand\r\n temphand = sorted(temphand) #putting in ascending order again\r\n temp = CountSuits(temphand)\r\n if temp[4] < basesuits[4]: #if by replacing that card, number of suits decreases \r\n possiblecards.append(i) #save index of card \r\n\r\n if len(possiblecards) == 0: #if can't decrease number of suits, tries to make as close to less suited\r\n if basesuits[4] == 1: #can't make less suited as all one suit already\r\n return max(inithand) #smallest card possible discarded\r\n elif basesuits[4] == 2: #two suited already (2 of 1 suit, 3 of other), can't make less suited\r\n discardsuit = basesuits.index(2) #finds suit that has 2\r\n else: #three suited, can't make less (1 trump, 2 of one, 2 of other)\r\n for i in range(len(OFFSUITS)):\r\n for j in range(len(OFFSUITS[i])):\r\n if OFFSUITS[i][j] in inithand:\r\n return OFFSUITS[i][j] #returning minimum offsuit card\r\n if discardsuit == 1: #discard ss\r\n return inithand[1] \r\n elif discardsuit == 2: #discard os1\r\n if basesuits[1] != 0: #other option is ss\r\n return inithand[4]\r\n else: #other option is os2\r\n return inithand[1]\r\n else: #discard os2\r\n return inithand[4]\r\n elif len(possiblecards) == 1: #if only one card makes less suited\r\n return inithand[possiblecards[0]]\r\n else: #multiple choices on proper discard, discard lowest card\r\n for i in range(len(OFFSUITS)):\r\n for j in range(len(OFFSUITS[i])):\r\n if OFFSUITS[i][j] in inithand:\r\n return OFFSUITS[i][j] #returning minimum offsuit card\r", "def get_QSL_candidates():\n print(\"❔Requesting target to find qsl candidates.\")\n for x in range(1500,1950):\n res = requests.get(target+ \"/PHP%0ABonusChallengeExploits.php?\"+(repeat_to_length(postfix, x-2) ), headers=http_header)\n if res.status_code == 502:\n print(\"\\t[Response] Status sode is {} for QSL {}. Meets criteria and added as candidate\".format(res.status_code, x))\n query_string_length.append(x)\n if len(query_string_length) == 0:\n print(\"❌ There has been no candidate found. 
Server not vulnerable or query string length did not meet requirements.\")\n sys.exit()\n # Post process qsl candidates to get all candidates\n qsl_candidate_extend = []\n for qsl in query_string_length:\n qsl_extend = [qsl-10,qsl-5,qsl]\n qsl_candidate_extend.extend(qsl_extend)\n qsl_list = list(set(qsl_candidate_extend))\n qsl_list.sort()\n print(\"\\tPost processed QSL candidate(s): {}\".format(\", \".join(str(x) for x in qsl_list)))\n return qsl_list", "def test(x_test: int, out_counter_test: int):\n for combo in permutations(input_list, x_test + 1):\n if len(set(map(str.lower, combo))) == len(combo):\n line = ''.join(combo)\n if int(args.min) <= len(line) <= int(args.max):\n print(line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.append is not None:\n print(line + args.append)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.prepend is not None:\n print(args.prepend + line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.leet is True:\n for old, new in leet_replacements:\n line = line.replace(old, new)\n print(line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.append is not None:\n print(line + args.append)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.prepend is not None:\n print(args.prepend + line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n return out_counter_test", "def count(pred, l):\n temp_number = 0\n if l == []:\n return 0\n \n while len(l) > 0:\n if pred(l[0]) == True:\n temp_number = temp_number + 1\n #print(l[0], 'it is true')\n l = l[1:]\n \n \n else:\n #print(l[0], 'it is not true')\n l = l[1:]\n \n output = temp_number\n output = output + count(pred, l)\n return output", "def number_of_valid_pass_puzzle_1(input_list: list):\n num_of_valid = 0\n for item in input_list:\n data = split_data(item)\n if check_for_validity_puzzle_1(*data):\n num_of_valid += 1\n return num_of_valid", "def __size_restriction_incorrect_list_list(self):\n\n strTestName = 'List size higher or equal to the size of other list (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List 1D parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizHE('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def smart_answer(content, qwords):\n\n zipped = zip(qwords, qwords[1:])\n points = 0\n for element in zipped:\n if content.count(element[0] + \" \" + element[1]) != 0:\n points += 1000\n print(points)\n return points", "def __size_restriction_incorrect_list_number(self):\n\n strTestName = 'List size lower or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizLE('parameter1', 3)\n\n RxCSObject.parameter1 = [1, 2, 3, 4, 5, 6]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def make_list():\n user_input = [0, 0, 0] # initialized a list\n\n for x in range(0, 3):\n user_input[x] = get_input()\n\n index = search_list(user_input) - 1 # subtracting one for index sake\n if index != -1:\n print(\"You've chosen index #\", (index + 1), \", which is \", user_input[index], \"on the list\")\n\n return sort_list(user_input)", "def buyEssentials(self, amount=5): \n ls = self.location.getEssentialList()\n print(f\"{bcolors.WHITE}\\nGreat job so far! Now, it's time to arm yourself with some essentials you would need to survive in the {self.location.worldType}.{bcolors.ENDC}\")\n print(f\"\\n{bcolors.CYAN}Following are the essential items for {self.location.worldName}. Please choose a minimum of 3 items to proceed.{bcolors.ENDC}\")\n outputList = [str(i+1) + \". \" + ls[i] + \"\\n\" for i in range(len(ls))]\n print(f\"\\n{bcolors.CYAN}{''.join(outputList)}{bcolors.ENDC}\")\n sizeEssentialList = len(ls)\n essentialsList = []\n\n \n choiceInput = False\n while choiceInput is False:\n choices = input(f\"{bcolors.CYAN}Input your selection as numbers 1, 2, 3, 4, or 5 separated by comma: {bcolors.ENDC}\")\n choiceInput = True\n choices = choices.split(',')\n for choice in choices:\n if choice not in ('1', '2', '3', '4', '5', 'quit', 'QUIT', 'Quit'):\n print(f\"\\n{bcolors.PINK}Please enter a valid Input{bcolors.ENDC}\\n\")\n choiceInput = False\n break\n \n for choice in choices:\n if choice.capitalize() == \"Quit\":\n # User input \"Quit\" at this stage. So, just quit the game.\n return choices\n \n\n try:\n # Convert input to integer for index in essentialList item\n choices = [int(i) for i in choices]\n except ValueError:\n # If input is not a number, Quit gracefully!\n print(\"Input is not a number. Quit\")\n return essentialsList\n\n if max(choices) > sizeEssentialList:\n print(f\"Invalid input! Input is not in essentialList\")\n return essentialsList\n\n for j in choices:\n if self.spendCoin(amount):\n essentialsList.append(ls[j-1])\n else:\n print(f\"You don't have enough money to buy {j}. You only have {self.coins} coins left.\")\n break\n self.assets = essentialsList\n print(f\"\\n{bcolors.WHITE}Thank you for buying the essentials. 
Now you are officially ready to enter into the {self.location.worldType}.\\nHere is your current asset bag with essential items and the available coins.{bcolors.ENDC}\")\n print(f\"\\n{bcolors.YELLOW}Asset Bag Contents: {self.assets}\\nCoins: {self.coins}{bcolors.ENDC}\")\n\n return self.assets", "def test_get_top_n_words_bad_inputs(self):\n bad_inputs = ['string', (), None, 9, 9.34, True, [None], []]\n expected = []\n for bad_input in bad_inputs:\n actual = get_top_n_words(bad_input, 2)\n self.assertEqual(expected, actual)", "def get_words(wordlist, minimum, count):\n # Create the empty list object for the sublist\n selection = []\n # Launch a loop. This loop will continue all the time the length of the sublist (selection) is less that the number of items we want in that list (count)\n while len(selection) < count:\n # Get a random item from the full list\n item = random.choice(wordlist)\n # Check whether the item selected is greater than or equal to the minimum size stated when calling the function.\n # If it is go on to the next line, if not then go round the while loop again\n if len(item[1]) >= minimum:\n # Check that the item randomly chosen was not already randomly chosen. If it is go on to the next line, if not then go round the while loop again\n if item not in selection:\n # Add the selected item to the sublist\n selection.append(item)\n #endif\n #endif\n # Note that the above two 'if' statements could be combined using 'and'. In that case both tests would need to be true before continuing\n # if len(item[1]) >= minimum and item not in selection:\n #endwhile\n # Return the sublist to the caller of the function\n return(selection)", "def initalize():\n global itemlist\n global limit\n global weightlimit\n global indivcount\n\n \n itemlist = []\n \n limit = 50000\n print(\"Initializing with \",limit,\" Items to choose from\")\n sack = generatesack(limit)\n \n weightlimit = 0\n for i in sack:\n weightlimit += i.weight\n weightlimit = weightlimit//2\n print(\"The napsack weightlimit is: \", weightlimit)\n \n indivcount = 200\n print(\"Initializing with \",indivcount,\" Individuals per generation\")\n takelist = geninitialpopulation(indivcount, sack)\n final_output = evolution(takelist)\n \n generations = final_output[1]\n final_output = final_output[0]\n \n print(\"\\n\")\n print(\"Best individual:\")\n bestindiv = prettytable.PrettyTable()\n bestindiv.field_names = [\"weight\",\"total value\",\"fitness\"]\n bestindiv.add_row([final_output.mass,final_output.value,final_output.fitness])\n print(bestindiv)\n print(\"Best solution acheived after \",generations, \" generations!\")\n print(\"Distance to weightlimit: \",weightlimit - final_output.mass)", "def test_get_list8(self):\n pass", "def apriori_v3(q, insig, sex_file_dict, countries_list, age):\r\n\r\n q = [[int(num)] for num in q] # queue is formatted as a nested list\r\n insignificant = [[int(num)] for num in insig]\r\n significant = []\r\n\r\n #print(\"\\nInsig\", insignificant)\r\n #print(\"Sig\", significant)\r\n #print(\"Queue\\n\", q)\r\n\r\n while len(q) > 0:\r\n element = q[0]\r\n obs_freqs = []\r\n\r\n for country in countries_list:\r\n icd_freq = 0\r\n for freq in element:\r\n icd_freq += round(float(sex_file_dict[country][age][str(freq)]) * 1000000)\r\n obs_freqs.append(icd_freq)\r\n\r\n chisq, pvalue = chisquare(obs_freqs)\r\n\r\n if pvalue >= 0.01:\r\n significant.append(element)\r\n\r\n for i in range(int(element[-1])+1,36):\r\n if [i] not in insignificant:\r\n tentativeCandidate = sorted(list(element)+[i]) # add the 
two lists together (element is a list)\r\n if tentativeCandidate not in q and tentativeCandidate not in significant: # then add it to the queue\r\n\r\n q.append(tentativeCandidate)\r\n q.pop(0) #remove it from the queue after we have created/tried all the tentativeCandidates\r\n\r\n else: # when the p-value not significant\r\n q.pop(0)\r\n insignificant.append(element)\r\n return significant # grab the last values before breaking out of the while loop\r", "def getLists():\r\n\r\n # The following code will get a user submitted length for the lists, being atleast 100 elements long.\r\n listLength = 0\r\n while listLength < 100:\r\n listLength = int(input(\"Please indicate the length of the list, being atleast 100 elements long: \"))\r\n if listLength < 100:\r\n print('Enter a value of atleast 100 elements.')\r\n\r\n # This code will get two lists, one sorted and one unsorted, of the length indicated by the user\r\n minValue = -10 * listLength # This code will get an appropriate min range value based on what the user had entered\r\n maxValue = 10 * listLength # This code will get an appropriate max range value based on what the user had entered\r\n unsortedList = ListMaker.unorderdIntegers(minValue, maxValue, listLength)\r\n sortedList = ListMaker.orderdIntegersAscending(minValue, maxValue, listLength)\r\n\r\n # run searchData to get data from search algorithms\r\n searchData(unsortedList, sortedList)\r\n\r\n # run sortData to get data from the sorting algorithms\r\n sortData(unsortedList)", "def main():\n\n n = int(input().strip())\n inputList = []\n for counter in range(n):\n List = input().strip().split(' ')\n inputList.append([int(i) for i in List])\n\n # Printing maximum count.\n print(maxAlignPoints(inputList))", "def dawkins_algorithm(strlen=28):\n copies = copy_string(generate_string(strlen))\n new_score = 0\n\n while new_score < 28:\n for index, s in enumerate(copies):\n current_score = compare(s)\n mutated_string = mutate_string(s)\n new_score = compare(mutated_string)\n if new_score > current_score:\n copies[index] = mutated_string\n print(mutated_string)\n\n if new_score == strlen:\n break\n\n copies = copy_string(find_best_candidate(copies))\n\n return print(\"Done\")", "def generate_helper(result_list, symptoms, start):\n if start >= len(symptoms):\n # [TODO] it might be better to the calculation right here\n # if test cases are big, memory might be a probleb ^^\n result_list.append(symptoms)\n\n for i in range(start, len(symptoms)):\n if symptoms[i] == SYMPTOM_UNKNOWN:\n new_list = copy.deepcopy(symptoms)\n new_list2 = copy.deepcopy(symptoms)\n new_list[i] = SYMPTOM_PRESENT\n new_list2[i] = SYMPTOM_NOT_PRESENT\n\n generate_helper(result_list, new_list, i+1)\n generate_helper(result_list, new_list2, i+1)\n return\n\n result_list.append(symptoms)", "def check_for_list(check):", "def resolve(iteration_count, qid, original_question, qname, sname, slist, sanswers, sauthorities, sadditional):\n ##an iteration count is kept to ensure we're not stuck in an infinite loop, and are actually getting closer to the answer \n iteration_count += 1\n #requirement #2\n if iteration_count > 200:\n raise OutOfTimeException(\"Called resolve too many times, might be stuck in a loop\")\n #if we already know about this domain name, and know its an alias\n # compliant with requirement 4 & 7\n if sname in cnamecache:\n now = int(time())\n new_sname = cnamecache[sname]._cname\n cname_record = RR_CNAME(sname, cnamecache[sname]._expiration - now, new_sname)\n sanswers.append(cname_record)\n return 
resolve(iteration_count, qid, original_question, qname, new_sname, get_best_ns(nscache, new_sname), sanswers, sauthorities, sadditional)\n #if we know about the a record for this name, requirement 7\n if sname in acache:\n now = int(time())\n ip = acache[sname]._dict.keys()[0].toNetwork()\n exp = acache[sname]._dict.values()[0]._expiration - now\n answer_a_record = RR_A (sname, exp, ip)\n sanswers.append(answer_a_record)\n #adds records we need to keep track of to comply with requirement 6\n sauthorities, sadditional = construct_authorities_for_answers(sanswers)\n our_response = (qid, original_question, sanswers, sauthorities, sadditional)\n return our_response\n\n ns_to_query = pick_from_slist(slist)\n if not ns_to_query:\n logger.log(DEBUG1, \"exhausted list, and couldnt find anything about these servers in our cache, will have to query from root\")\n new_qname = next(slist.iterkeys())\n #we now have to resolve one of these servers as if it were a normal domain query,\n #save the answer, and use it to continue our original query, we should iterate through each server\n # check the return value for a succesful resolution, and carry on.\n #shouldnt need qid nor original question (still refers to old question)\n #essentially calling resolve in this case will cause side-effects that update the cache with\n #the entries we need\n resolve(iteration_count, qid , original_question, new_qname, new_qname, get_best_ns(nscache, new_qname), [], [], [])\n #continue search as before\n return resolve(iteration_count, qid, original_question, qname, sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional)\n ##\n (name_server, ipv4) = ns_to_query\n #logger.log(DEBUG2, \"CCache is:\\n{0}\\n\".format(pp.pformat(cnamecache)))\n address = (ipv4,53)\n payload, question = construct_A_query(sname)\n \n logger.log(DEBUG1, \"sending question for A record for {0} to {1} @{2}:\\n{3}\\n\".format(question._dn, name_server, address, hexdump(payload)))\n\n #requirement #8\n cs.sendto(payload, address)\n try:\n (cs_data, cs_address,) = cs.recvfrom(512)\n except timeout:\n #try a different server, requirement 8\n logger.info(\"Timed out, trying someone else\")\n return resolve(iteration_count, qid, original_question, qname, sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional) \n \n response = parse_response_payload(cs_data)\n #if is authority, set its records in cache as authoritative\n #also adds records we need to keep track of to comply with requirement 5\n if response[\"header\"]._aa is 1:\n #print \"response {0} from {1} \".format(response, name_server)\n logger.log(DEBUG1, \"{0}\".format( name_server))\n ns_ns_rr = set_authoritative(sname, name_server)\n ns_a_rr = construct_a_rr_from_cache(name_server)\n if ns_ns_rr not in sauthorities:\n sauthorities.append(ns_ns_rr)\n if ns_a_rr not in sadditional:\n sadditional.append(ns_a_rr)\n \n load_response_into_cache(response)\n logger.log(DEBUG2, \"Answer received from {0} server is:\\n {1}\".format(name_server, pp.pformat(response)))\n logger.log(DEBUG1, \"*\"*50)\n answer_sec = response[\"answer\"]\n ##if there is an answer in the response, either its a cname or an a record\n #if its an A, we're done, if its a CNAME we're not\n if len(answer_sec) > 0:\n sanswers.append(response[\"answer\"][0])\n logger.log(DEBUG2, \"Sanswers is {0}\".format(pp.pformat(sanswers)))\n ##part of fulfilling requirement 4\n if answer_sec[0]._type is RR.TYPE_CNAME:\n sname = answer_sec[0]._cname\n return resolve(iteration_count, qid, original_question, qname, 
sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional)\n our_response = (qid, original_question, sanswers, sauthorities, sadditional)\n logger.log(DEBUG1, \"&#\"*50+\"\\n\"*3+\"Response:\\n{0}\".format(our_response))\n return our_response \n return resolve(iteration_count, qid, original_question, qname, sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional)", "def gridSearch(list_of_alpha):\n # Initialize best result to be 0\n best_result = 0.0\n mean_results_LinUCB = np.zeros(len(list_of_alpha))\n # Perform grid search over a list of alpha values\n for j in range(0, len(list_of_alpha)):\n mab = LinUCB(10, 10, list_of_alpha[j])\n mean_results_LinUCB[j] = np.mean(offlineEvaluate(mab, arms, rewards, \n contexts, 800)) \n if mean_results_LinUCB[j] > best_result:\n best_result = mean_results_LinUCB[j]\n best_alpha = list_of_alpha[j]\n\n print('Alpha', '\\t Mean Rewards for 800 Matching Rounds') \n for i in range(0, len(list_of_alpha)):\n print(list_of_alpha[i],'\\t', mean_results_LinUCB[i])\n \n return(best_alpha, best_result)", "def test_pick_best_sentences_with_request_more_sentences(self): \n input_sentences = (\n \"first sentence\",\n \"second sentence\",\n \"third sentence\",\n \"fourth sentence\"\n )\n\n input_ratings = [0.01, 0.015, 0.02, 0.005]\n\n input_requested_numSentences = len(input_sentences) + 1\n\n self.assertRaises(\n ValueError,\n lambda: self.summarizer.pick_best_sentences(input_sentences, input_ratings, input_requested_numSentences)\n )", "def spellingBeeSolutions(wordlist, puzzles):\n counter = []\n word_counter = 0\n\n for puzzle in puzzles:\n\n for word in wordlist:\n\n if len(word) < 5 or puzzle[0][0] not in word:\n word_counter += 0\n elif not set(word).issubset(set(puzzle)):\n word_counter += 0\n\n else:\n word_counter += 1\n\n counter.append(word_counter)\n\n word_counter = 0\n return counter", "def lsize( lst ):\n return sum( [ x[1] for x in lst ] )", "def best_case(my_list=list(range(100))):\n if len(my_list) <= 1:\n return my_list\n else:\n mid = len(my_list) // 2\n return (\n best_case(my_list[:mid - 1]) +\n best_case(my_list[mid + 1:]) +\n [my_list[mid]]\n )", "def test_kyc_get_legal_list(self):\n pass", "def twentyone():\r\n \r\n notamicable = []\r\n isamicable = []\r\n \r\n for i in range(10000):\r\n if i not in notamicable and i not in isamicable:\r\n a = i\r\n b = amicable(findDivisors(a))\r\n c = amicable(findDivisors(b))\r\n if a == c and not a == b:\r\n isamicable.append(a)\r\n isamicable.append(b)\r\n else:\r\n notamicable.append(a)\r\n notamicable.append(b)\r\n \r\n print isamicable\r\n t = 0\r\n for v in isamicable:\r\n t += v\r\n return t", "def __size_restriction_incorrect_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 14\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def yahtzee_points(dice_list):\n if of_a_kind_size(dice_list) >= 5:\n return 50\n else:\n return 0", "def nqueens(size):\n if type(size) is not int:\n print(\"N must be a number\")\n return\n if size < 4:\n print(\"N must be at least 4\")\n return\n queens = [0] * size\n\n def printsolution(queens):\n print(\"[[0, \", queens[0], \"]\", sep=\"\", end=\"\")\n for y, x in enumerate(queens[1:], 1):\n print(\", [\", y, \", \", x, \"]\", sep=\"\", end\"\")\n print(\"]\")\n\n def queencalc(queen):\n \"\"\"Recursive call queen position validator\"\"\"\n for x in range(size):\n \"\"\"horizontal board positions per queen\"\"\"\n nextx = 0\n for y in range(queen):\n qx = queens[y]\n if x == qx or x + queen == qx + y or x - queen == qx - y:\n nextx = 1\n break\n if nextx == 1:\n nextx == 0\n continue\n if queen != size - 1:\n queens[queen + 1] = 0\n queens[queen] = x\n queencalc(queen + 1)\n else:\n queens[queen] = x\n printsolution(queens)\n queencalc(0)", "def part_2(data: Iterator[str]) -> int:\n return solve(data, 5)", "def check_list(list_obj, limit):\r\n if len(list_obj) > limit:\r\n num_of_lists = int(len(list_obj) / limit) + 1\r\n sublist = []\r\n k = 0\r\n while k < num_of_lists:\r\n x = list_obj[limit*k:limit*(k+1)]\r\n sublist.append(x)\r\n k += 1\r\n\r\n return sublist\r\n\r\n return list_obj", "def givecandies(arr):\n n = len(arr)\n candies = [1] * n\n print(arr, 'Student scores')\n print(candies, 'Initital/minimum candies')\n for i in range(1, n):\n if arr[i] > arr[i - 1]:\n candies[i] = 1 + candies[i - 1]\n for i in range(1, n):\n if (\n arr[n - i - 1] > arr[n - i]\n and candies[n - i - 1] <= candies[n - i]\n ):\n candies[n - i - 1] = candies[n - i] + 1\n print(candies, 'Minimum candies rewarding for higher score')\n print(sum(candies), 'Total candies')\n return sum(candies)", "def len_list(self) -> int:\n return 1", "def SelectWorkingSet(L, q):\n i = 0\n index = 0\n while i < int(q/2):\n if L[index, 1] > 0 and L[index, 1] < C:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == -1 and L[index, 1] <= 0:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == 1 and L[index, 1] == 100:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n index = index + 1\n # print(WorkingSet)\n # print(index)\n\n index = len(y_train) - 1\n while i < int(q):\n j = 0\n while j < (int(q/2)):\n if index == int(WorkingSet[j, 2]):\n # print(\"Hello cunt\")\n # print(index)\n index = index - 1\n # print(index)\n if j > 0:\n j = 0\n else:\n j = j + 1\n if L[index, 1] > 0 + error and L[index, 1] < C:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == 1 and L[index, 1] <= 0:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == -1 and L[index, 1] == 100:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n 
WorkingSet[i, 2] = index\n i = i + 1\n index = index - 1\n # print(WorkingSet)\n return WorkingSet", "def elim_reps(lst):\n ans = []\n for x in lst:\n if x not in ans:\n ans.append(x)\n return ans", "def number_of_valid_pass_puzzle_2(input_list: list):\n num_of_valid = 0\n for item in input_list:\n data = split_data(item)\n if check_for_validity_puzzle_2(*data):\n num_of_valid += 1\n return num_of_valid", "def corrected_clump_tester(clump):\n tester = True\n for block in clump:\n if len(block) >= 3: # Fixed block!\n tester = False\n break\n return tester", "def dg(lst_0,lst,ans,anses,htans,htanses):\n \n if 0 not in lst:\n anses.append(ans.copy())\n htanses.append(htans.copy())#!!!!!!!!!!!!!!!!!!!!\n\n for i in range(m*n):\n zhuan=[]\n if lst[i]==1:\n pass\n else:\n zhuanhp=[]\n zhuansp=[]\n #hp \n if (i%m+a<=m) and (i//m+b<=n):\n for ai in range(a):\n for bi in range(b):\n zhuanhp.append(lst[i+ai+bi*m])\n if 1 not in zhuanhp:\n for ai in range(a):\n for bi in range(b):\n lst[i+ai+bi*m]=1\n zhuan.append(lst_0[i+ai+bi*m])\n ans.append(tuple(zhuan))\n htans.append((i,i+a-1,i+(b-1)*m,i+a-1+(b-1)*m))#!!!!!!!!!!!!!!!!\n zhuan=[]\n dg(lst_0,lst,ans,anses,htans,htanses)\n zhuanlhp=[]\n for ai in range(a):\n for bi in range(b):\n lst[i+ai+bi*m]=0\n ans.pop()\n htans.pop()#!!!!!!!!!!!!!!!!!!!!!!!!\n else:\n pass\n #sp \n if (i%m+b<=m) and (i//m+a<=n):\n for bi in range(b):\n for ai in range(a):\n zhuansp.append(lst[i+bi+ai*m])\n if 1 not in zhuansp: \n for bi in range(b):\n for ai in range(a):\n lst[i+bi+ai*m]=1\n zhuan.append(lst_0[i+bi+ai*m])\n ans.append(tuple(zhuan))\n htans.append((i,i+b-1,i+(a-1)*m,i+b-1+(a-1)*m))#!!!!!!!!!!!!!!!!\n zhuan=[]\n dg(lst_0,lst,ans,anses,htans,htanses)\n zhuansp=[]\n for bi in range(b):\n for ai in range(a):\n lst[i+bi+ai*m]=0\n zhuan=[]\n ans.pop()\n htans.pop()#!!!!!!!!!!!!!!!!!!!!!!!!\n else:\n pass\n return", "def question_new_search():", "def test_void_list(self):\n lst = []\n self.assertIsNone(max_integer(lst))", "def brute_force_cow_transport(cows,limit=10):\n cows_list=list(cows.items())\n ans=sorted(test_comb(cows_list,limit,0,limit),key=lambda x:len(x))\n return ans", "def fastClumpFinder(sequence, k, L, t):\n\n # to be implemented ;)\n pass", "def test_pick_best_sentences(self): \n input_sentences = (\n \"first sentence\",\n \"second sentence\",\n \"third sentence\",\n \"fourth sentence\"\n )\n\n input_ratings = [0.01, 0.015, 0.02, 0.005]\n\n input_length = 2\n\n expected = [\"second sentence\", \"third sentence\"]\n\n result = self.summarizer.pick_best_sentences(input_sentences, input_ratings, input_length)\n self.assertListEqual(expected, result)", "def uniqueYes(lst):\n yesQuestions = []\n count = 0\n i = 0\n while i < len(lst):\n for letter in lst[i]:\n if (letter not in yesQuestions) and (letter != ''): #in case we run into some cases where the '' is picked up\n yesQuestions.append(letter)\n count = count + 1\n else: #if the question has already been answered yes to, this is not a unique answer\n pass\n i = i + 1\n return count", "def main(num_q: int, point_list: List[int])-> int:\n dp_table", "def calcRestaurantList2(latlngs, cuisines, distance):\n restlist = []\n used = []\n cuisine = str(cuisines[0])\n if len(cuisines) > 1:\n cuisine = \",\".join(cuisines)\n minrating = 5.0\n worst = ''\n ratings = []\n for point in latlngs:\n yelpresults = search2(cuisine,point,distance)['businesses']\n processedyelpresults = processResults(yelpresults)\n for result in processedyelpresults:\n if (result not in used):\n if len(restlist) < 40:\n 
restlist.append(processedyelpresults[result])\n used.append(result)\n ratings.append(float(processedyelpresults[result]['rating']))\n if float(processedyelpresults[result]['rating']) < minrating:\n minrating = float(processedyelpresults[result]['rating'])\n worst = result\n # print (\"The worst restaurant is {0}\".format(worst))\n elif len(restlist) >= 40:\n ratings.sort()\n minrating = ratings[0]\n if float(processedyelpresults[result]['rating']) > ratings[0]:\n if worst in restlist:\n ratings.remove(minrating)\n restlist.remove(restlist.index(worst))\n # print (\"Removed {0}, which had a rating of {1}. It was in restlist\".format(worst, minrating))\n if len(restlist) <= 45:\n restlist.append(processedyelpresults[result])\n # print (\"Added {0}, which had a rating of {1}\".format(result, processedyelpresults[result]['rating']))\n else:\n minrating = float(ratings[0])\n # print (\"The minimum rating for a restaurant is {0}\".format(minrating))\n for r in restlist:\n # print (r)\n if float(r['rating']) == minrating:\n restlist.remove(r)\n # print (\"Removed {0}, which had a rating of {1}. Matched on minrating\".format(r, minrating))\n if minrating in ratings:\n ratings.remove(minrating)\n if len(restlist) <= 45:\n restlist.append(processedyelpresults[result])\n # print (\"Added {0}, which had a rating of {1}\".format(result, processedyelpresults[result]['rating']))\n\n # pprint.pprint(restlist)\n # print(used)\n\n return restlist", "def fn(x):\n if len(x) == len(s): ans.append(x)\n for k, v in freq.items(): \n if v >= 2: \n freq[k] -= 2\n fn(k + x + k)\n freq[k] += 2", "def exercise_b2_53():\r\n pass" ]
[ "0.67095226", "0.59630436", "0.59258145", "0.56170946", "0.5612554", "0.55911374", "0.55631673", "0.5525219", "0.5525219", "0.5525219", "0.55023384", "0.54870147", "0.54636025", "0.5461479", "0.545865", "0.54275244", "0.5421886", "0.5410912", "0.54019207", "0.5401082", "0.53983337", "0.53949004", "0.5384531", "0.537642", "0.53455096", "0.5337025", "0.5335886", "0.53141457", "0.5304511", "0.5297973", "0.529737", "0.52931905", "0.52827394", "0.5272664", "0.52719676", "0.5252734", "0.5245941", "0.5241301", "0.523304", "0.52073437", "0.5201923", "0.5187476", "0.51855844", "0.51806813", "0.5173215", "0.5167975", "0.51666874", "0.5166657", "0.51666456", "0.51652277", "0.5162987", "0.5158274", "0.51535875", "0.5133918", "0.5132817", "0.51321715", "0.51271933", "0.5124773", "0.5114195", "0.5113079", "0.51061517", "0.5105399", "0.5102734", "0.5099746", "0.5098618", "0.50983", "0.5096928", "0.5095623", "0.50935453", "0.50922716", "0.5090215", "0.5083062", "0.50818366", "0.5081414", "0.5078924", "0.5078905", "0.5066315", "0.5064624", "0.5064596", "0.50547373", "0.50545305", "0.50517285", "0.50488037", "0.50464255", "0.5043527", "0.50409395", "0.5038941", "0.50343573", "0.50289816", "0.50170827", "0.50109214", "0.50090283", "0.5006965", "0.5005391", "0.50044566", "0.5003549", "0.50025123", "0.49965936", "0.4996515", "0.49956638", "0.4992613" ]
0.0
-1
Largely used model answer idea of 'enumerate', and (b+0.5) not (b0.5)
def median_approx2(values, B):
    # Call median_bins to calculate the mean, std,
    # and bins for the input values
    mean, std, left_bin, bins = median_bins(values, B)

    # Position of the middle element
    N = len(values)
    mid = (N + 1) / 2

    count = left_bin
    for b, bincount in enumerate(bins):
        count += bincount
        if count >= mid:
            # Stop when the cumulative count exceeds the midpoint
            break

    width = 2 * std / B
    median = mean - std + width * (b + 0.5)
    return median
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def I (self, n):", "def extractstatesusingintegral(self, bias=1.0):\n numtoadd = int(round(float(bias) * simplesum(comp.weight for comp in self.gmm)))\n print(\"bias is %g, numtoadd is %i\" % (bias, numtoadd))\n items = []\n # A temporary list of peaks which will gradually be decimated as we steal from its highest peaks\n peaks = [{'loc': comp.loc, 'weight': comp.weight, 'id': comp.id} for comp in self.gmm]\n while numtoadd > 0:\n windex = 0\n wsize = 0\n for which, peak in enumerate(peaks):\n if peak['weight'] > wsize:\n windex = which\n wsize = peak['weight']\n # add the winner\n items.append([deepcopy(peaks[windex]['loc']), 0, peaks[windex]['id']])\n #peaks[windex]['weight'] -= 100.0\n peaks.pop(windex)\n numtoadd -= 1\n\n lp, lc = len(self.pre_state), len(items) # pre_state and items is current state\n cost = numpy.ones([lp, lc]) * 100000000\n for i in range(0, lp):\n for j in range(0, lc):\n if (self.pre_state[i][2] == items[j][2]):\n xp, yp, _, _ = self.pre_state[i][0]\n xc, yc, _, _ = items[j][0]\n cost[i, j] = sqrt((xp - xc) ** 2 + (yp - yc) ** 2)\n row_ind, col_ind = linear_sum_assignment(cost, maximize=False)\n for i, idx in enumerate(col_ind):\n items[idx][1] = self.pre_state[row_ind[i]][1]\n for i in range(0, lc):\n if i not in col_ind:\n self.track_id += 1\n items[i][1] = self.track_id\n\n self.pre_state = deepcopy(items)\n\n return items", "def mystery3(input_val):\n global counter\n for index in range(input_val):\n for dummy_index in range(int(1.1 ** index)):\n counter += 1", "def fib(index):\n return round((GR**index)/R5)", "def sigb(o) :\n return o * (1 - o)", "def b(self, i):\n if i == 1:\n return 1;\n numer = factorial( i-1 ) * factorial(i)\n denom = factorial(2*i-1)\n if (i-1)&1 == 1:\n numer = -numer\n return frac(numer, denom)", "def enumerate(self):\r\n return enumerate(self, 1)", "def elementary_summand(fixed, i):\n if i < fixed:\n return 0\n elif i == fixed:\n return 2\n else:\n return 1", "def get_next(current):\n return 0.5 * (current + n / current)", "def loc_eval(x, b):\r\n loc_est = 0\r\n for i in enumerate(b): loc_est+=i[1]*(x**i[0])\r\n return(loc_est)", "def E_inc(self):\n\n\t\tmaxit = self.num_data + 100\n\n\t\tfor i in range(maxit):\n\t\t\tsqB", "def index_(iterable: Iterable[_Value], x: _Value) -> int:\n for i, value in enumerate(iterable):\n if x == value:\n return i\n elif isinstance(value, float) and isinstance(x, float):\n if abs(x - value) < FLOAT_EQUALITY_EPSILON:\n return i\n raise ValueError(\"{} is not in iterable\".format(str(x)))", "def quantize(value, quant, with_index=False):\n mids = [(quant[i] + quant[i + 1]) / 2.0\n for i in range(len(quant) - 1)]\n ind = bisect.bisect_right(mids, value)\n if with_index:\n return ind, quant[ind]\n else:\n return quant[ind]", "def I_bisection(self,a,b,tol,itr):\n \n ibs=[]\n iterations=0\n ibs_x=[]\n while iterations < itr:\n \n if (a+b)/2 > tol:\n a=a\n b=(a+b)/2\n ibs_x.append(b)\n #print(\"Upper Bound = \",b)\n \n \n elif (a+b)/2 < tol :\n a=(a+b)/2\n b=b\n #print(\"Lowe Bound =\",a)\n ibs_x.append(a)\n \n \n elif (a+b)/2 == tol:\n ibs_x.append((a+b)/2)\n #print(\" Sol =\" , (a+b)/2)\n iterations=iterations+1\n \n \n #print(\"IBS =\",self.Func((a+b)/2),\" iter=\",iterations) \n ibs.append(ibs_x)\n ibs.append((a+b)/2)\n ibs.append(iterations)\n \n return ibs", "def acc_b_v(self):\r\n return self._acc_b_v", "def e(i):\n if i==0:\n return 0\n else:\n return gc(2*int(math.floor((i-1)//2)))", "def _advance(self):\t\t# override inherited version\n self._current *= self._base", "def ap(self, result, 
next_item):\n if next_item in result.index:\n rank = result.index.get_loc(next_item) + 1\n return 1.0 / rank\n else:\n return 0", "def counter(l,b):\n if l == [b-1]*len(l): return\n i = 1\n while(True):\n if l[-i] < b-1:\n l[-i] += 1\n break\n else:\n l[-i] = 0\n i += 1", "def go_c_enumerate_step():\n for i,k in enumerate(range(1,7,2)):\n print(i, k)", "def convert_target_enumerate(L):\n for a, b in enumerate(L):\n print(a,b)", "def get_b(self):\n return ((self.b_plus_bstar / self.n_pos) + (self.b_minus_bstar / self.n_neg)) / 2", "def mystery2(input_val):\n global counter\n for index in range(input_val):\n for dummy_index in range(index / 2, index):\n counter += 1", "def go_py_enumerate():\n for i,k in enumerate(range(1,5)):\n print(i, k)", "def b_plus_bstar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += (self.alphas[j] + self.etas[j]) * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j],\n self.prob.X[i])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n abcxx *= (1 / self.prob.gamma)\n running_total += 1 - abcxx - ayxx\n return running_total", "def quanty_index(i,ang=2):\n norb = 2*ang + 1\n k = (i//(2*norb))*(2*norb)\n if (i-k) < norb:\n j = k + 2*(i-k)\n else:\n j = k + 2*((i-k)-norb) + 1\n return j", "def expFromAdd(a,b):\n\tif (b == 0):\n\t\treturn 1\n\tresult = 1\n\tfor c1 in range(0, b):\n\t\ttemp = 0\n\t\tfor c2 in range(0, a):\n\t\t\ttemp += result\n\t\tresult = temp\n\treturn result", "def convert_target_enumerate_start(L, n):\n for a, b in enumerate(L, n):\n print(a,b)", "def add(iA, iB):\n for i in range(iB):\n iA = iA + 1", "def go_py_enumerate_start():\n for i,k in enumerate(list(range(1,5)), 5):\n print(i, k)", "def _subdiff_b(self, i, compensate_class_balance=False):\n if 1 - self._data.train_y[i] * self._f(self._data.train_X[:, i]) > 0:\n if compensate_class_balance:\n return - self._data.train_y[i] * self._data.importance(self._data.train_y[i])\n else:\n return - self._data.train_y[i]\n else:\n return 0", "def calculate(self, b):\n self.n_steps = self.n_steps + 1\n self.length = b.length\n self.natoms = b.natoms\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (b.atoms[i].xyz - b.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n bin_no = int(round(mag_rij/self.dr))\n if bin_no <= self.n_max:\n self.gr[bin_no] = self.gr[bin_no] + 1", "def go_c_enumerate():\n for i,k in enumerate(range(1,5)):\n print(i, k)", "def __bsa(self, a, b):\n try:\n if a + 1 == b:\n if a == 0:\n p_ab = q_ab = mpz(1)\n else:\n p_ab = mpz((6 * a -5) * (2 * a - 1) * (6 * a - 1))\n q_ab = mpz(a * a * a * self.C3_24)\n t_ab = p_ab * (self.A + self.B * a)\n if a & 1:\n t_ab *= -1\n else:\n m = (a + b) // 2\n p_am, q_am, t_am = self.__bsa(a, m)\n p_mb, q_mb, t_mb = self.__bsa(m, b)\n p_ab = p_am * p_mb\n q_ab = q_am * q_mb\n t_ab = q_mb * t_am + p_am * t_mb\n return [p_ab, q_ab, t_ab]\n except Exception as e:\n raise", "def enumerate(x) -> List[Tuple[int, any]]:\n pass", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def numer(self, a):\n raise NotImplementedError", "def test_adjacent_bomb_count_3(self):\n index 
= 17\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def float_up(self, index=1):\n\n i = index\n p = index // 2\n\n while True:\n\n if self.h[i] <= self.h[p] or i == 1:\n break\n\n self.h[p], self.h[i] = self.h[i], self.h[p]\n self.d[self.h[i]] = i\n self.d[self.h[p]] = p\n\n i = p\n p = i // 2\n\n return i", "def multi_c_enumerate():\n for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)))):\n print(a,b,c,d)", "def numeric_sequence_iteration(self) -> global___Statement.Iteration.NumericSequenceIteration:", "def bCheck(c, v, p, b):\n val = (v+1).floor()\n deg = c.degree()\n coeffs = c.coefficients(sparse=False)\n lcoeff = coeffs[deg]; coeffs.remove(lcoeff)\n check1 = [(coeffs[i].valuation(p) - lcoeff.valuation(p))/(deg - i) for i in range(0,len(coeffs)) if coeffs[i] != 0]\n check2 = (val - lcoeff.valuation(p))/deg\n check1.append(check2)\n bval = min(check1)\n return (bval).ceil()", "def item_um(n):\n if n <= 0.250:\n return 0\n elif n > 0.250 and n <= 0.500:\n return 1\n elif n > 0.500 and n <= 0.750:\n return 2\n elif n > 0.750 and n <= 1.000:\n return 3", "def float_up(self, index=1):\n\n i = index\n p = index // 2\n\n while True:\n\n if self.h[i] >= self.h[p] or i == 1:\n break\n\n self.h[p], self.h[i] = self.h[i], self.h[p]\n self.d[self.h[i]] = i\n self.d[self.h[p]] = p\n\n i = p\n p = i // 2\n\n return i", "def calculate(self):\n\n result = \"FINITE\"\n pos = 0\n vis = set([])\n while 0 <= pos < self.n:\n vis.add(pos)\n if self.numa[pos]:\n pos += self.numb[pos]\n else:\n pos -= self.numb[pos]\n if pos in vis:\n result = \"IN\" + result\n break\n\n return str(result)", "def _ith_point(self, i):\n if self.start is S.NegativeInfinity:\n initial = self.stop\n else:\n initial = self.start\n\n if self.start is S.NegativeInfinity:\n step = -1\n else:\n step = 1\n\n return initial + i*step", "def acc_b(self):\n return self._acc_b", "def fsum(iterable):\n return 0.0", "def _set_bounds(b, x, n):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, 0, n)] = -x[index_of(i, j, 1, n)] if b == 3 else x[index_of(i, j, 1, n)]\n x[index_of(i, j, 0, n - 1)] = -x[index_of(i, j, 1, n - 2)] if b == 3 else x[index_of(i, j, 1, n - 2)]\n for k in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, 0, k, n)] = -x[index_of(i, 1, k, n)] if b == 2 else x[index_of(i, 1, k, n)]\n x[index_of(i, n - 1, 0, n - 1)] = -x[index_of(i, n - 2, k, n - 2)] if b == 2 else x[\n index_of(i, n - 2, k, n - 2)]\n for k in range(1, n - 1):\n for j in range(1, n - 1):\n x[index_of(0, j, k, n)] = -x[index_of(1, j, k, n)] if b == 1 else x[index_of(1, j, k, n)]\n x[index_of(n - 1, j, k, n - 1)] = -x[index_of(n - 2, j, k, n)] if b == 1 else x[\n index_of(n - 2, j, k, n)]\n\n x[index_of(0, 0, 0, n)] = 1 / 3 * (x[index_of(1, 0, 0, n)] + x[index_of(0, 1, 0, n)] + x[index_of(0, 0, 1, n)])\n x[index_of(0, n - 1, 0, n)] = 1 / 3 * (\n x[index_of(1, n - 1, 0, n)] + x[index_of(0, n - 2, 0, n)] + x[index_of(0, n - 1, 1, n)])\n x[index_of(0, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(1, 0, n - 1, n)] + x[index_of(0, 1, n - 1, n)] + x[index_of(0, 0, n - 2, n)])\n x[index_of(0, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(1, n - 1, n - 1, n)] + x[index_of(0, n - 2, n - 1, n)] + x[index_of(0, n - 1, n - 2, n)])\n x[index_of(n - 1, 0, 0, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, 0, n)] + x[index_of(n - 1, 1, 0, n)] + x[index_of(n - 1, 0, 1, n)])\n x[index_of(n - 1, n - 1, 0, 
n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, 0, n)] + x[index_of(n - 1, n - 2, 0, n)] + x[index_of(n - 1, n - 1, 1, n)])\n x[index_of(n - 1, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, n - 1, n)] + x[index_of(n - 1, 1, n - 1, n)] + x[index_of(n - 1, 0, n - 2, n)])\n x[index_of(n - 1, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, n - 1, n)] + x[index_of(n - 1, n - 2, n - 1, n)] + x[\n index_of(n - 1, n - 1, n - 2, n)])", "def test02(self):\n N, blen = self.N, 100\n a = bcolz.fromiter(xrange(N), dtype=np.float64, count=N,\n rootdir=self.rootdir)\n l, s = 0, 0\n for block in bcolz.iterblocks(a, blen, blen-1):\n l += len(block)\n s += block.sum()\n self.assertEqual(l, (N - (blen - 1)))\n self.assertEqual(s, np.arange(blen-1, N).sum())", "def bin_bil_to_unil(a):\n return (a + 1)/2", "def get_b(self):\n return ((self.s_pos / self.n_pos) + (self.s_neg / self.n_neg)) / 2", "def find_med(x):\n\n total = 0\n for ent in x:\n total += ent\n\n half = int(0.5 * total)\n sum = 0\n for i in range(0, len(x)):\n sum += x[i]\n if sum > half:\n pos = i\n break\n \n return i - 1", "def degree_on_basis(self, b):\n return sum(b)", "def up_index(index):\n return 2 * index", "def B(n, k):\n assert 0 < k <= n\n global lookup\n for index_y in range(len(lookup), n + 1):\n lookup.append([1])\n min_value = min(index_y, k)\n for index_x in range(min_value):\n if index_x < len(lookup[index_y - 1]) - 1:\n lookup[index_y].append(lookup[index_y - 1][index_x] + lookup[index_y - 1][index_x + 1])\n else:\n lookup[index_y].append(lookup[index_y - 1][index_x])\n return lookup[n][k]", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def to_negative_base(i, b):\n if not i:\n return [0]\n else:\n l = []\n while i != 0:\n i,r = divmod(i,b)\n if r < 0:\n i += 1\n r += abs(b)\n l.append(r)\n return l", "def __getitem__(self, i):\n raise NotImplementedError", "def J (self, n):", "def test_adjacent_bomb_count_2(self):\n index = 9\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.RIGHT_ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def powAlpha( n ):\n return (1-betaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * betaval", "def test_adjacent_bomb_count(self):\n index = 0\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.LEFT_ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def __getitem__(self, i):\n return self.get(i, i + 1)", "def step(self, points, b, m):\n b_gradient = 0\n m_gradient = 0\n N = float(len(points))\n\n for x,y in points:\n b_gradient += -(2/N) * (y - ((m * x) + b))\n m_gradient += -(2/N) * x * (y - ((m * x) + b))\n #print('m_grad:{}'.format(m_gradient))\n \n b -= (b_gradient * self.learning_rate)\n m -= (m_gradient * self.learning_rate)\n return b, m", "def abscissae(self) -> List[float]:", "def at_value(self, *value):\n result = 0\n for x in value:\n i = 0\n for coef in self.arg:\n result = result + coef * (x ** i)\n i = i + 1\n result = -result\n return -result", "def ibi_cv(bstart, bstop):\n ibis = []\n for b in range(len(bstart)-1):\n if bstart[b+1] > bstop[b]: # ortho, correct\n ibis.append(bstart[b+1] - bstop[b])\n else:\n print(' In %s, %.2f starts before burst ends at %.2f' \n %(cell_id, bstart[b+1], bstop[b]))\n return np.mean(ibis), np.std(ibis)/np.mean(ibis)", "def burg(sample_list, 
coefficient_number):\n\n p = sum(sample ** 2 for sample in sample_list)\n a0 = p / len(sample_list)\n\n b1 = sample_list[:len(sample_list) - 1]\n b2 = sample_list[1:]\n\n aa = [0.0 for i in range(coefficient_number)]\n coefficient_list = [0.0 for i in range(coefficient_number)]\n\n for i in range(coefficient_number):\n\n numerator = 0.0\n denominator = 0.0\n\n for j in range(len(sample_list) - i - 1):\n numerator += b1[j] * b2[j]\n denominator += b1[j] ** 2 + b2[j] **2\n\n coefficient_list[i] = 2.0 * numerator / denominator\n a0 *= 1.0 - coefficient_list[i] ** 2\n\n for j in range(i - 1):\n coefficient_list[j] = aa[j] - coefficient_list[i] * aa[i - j - 1]\n\n if i < coefficient_number + 1:\n\n for j in range(i + 1):\n aa[j] = coefficient_list[j]\n\n for j in range(len(sample_list) - i - 2):\n b1[j] -= aa[i] * b2[j]\n b2[j] = b2[j + 1] - aa[i] * b1[j + 1];\n\n return a0, coefficient_list", "def find_imbalance(weights, leafs, base):\n while True:\n #print('searching: ', base)\n w = [calc_weight(leaf) for leaf in leafs[base]]\n if len(set(w)) == 1:\n #print('found!', base, delta)\n break\n delta = max(w) - min(w)\n # find the sub-tree with the odd weight:\n if delta > 0:\n i = w.index(max(w))\n else:\n i = w.index(min(w))\n base = leafs[base][i]\n \n return weights[base]-delta", "def boundingIndices(start, stop, step, value):\n if value < start:\n return 0, 0\n elif value > stop:\n stopIndex = int((stop - start)/step)\n return stopIndex, stopIndex\n lowerIndex = int((value - start)/step)\n return lowerIndex, lowerIndex+1", "def __biweight(data):\n mask = np.isfinite(data)\n if np.all(~mask):\n return (np.nan, 0.)\n\n xx = data[mask]\n med_xx = np.median(xx)\n deltas = xx - med_xx\n med_dd = np.median(np.abs(deltas))\n if med_dd == 0:\n return (med_xx, 0.)\n\n wmx = np.maximum(0, 1 - (deltas / (6 * med_dd)) ** 2) ** 2\n xbi = med_xx + np.sum(wmx * deltas) / np.sum(wmx)\n\n umn = np.minimum(1, (deltas / (9 * med_dd)) ** 2)\n sbi = np.sum(deltas ** 2 * (1 - umn) ** 4)\n sbi /= np.sum((1 - umn) * (1 - 5 * umn)) ** 2\n sbi = np.sqrt(len(xx) * sbi)\n\n return (xbi, sbi)", "def ofi(b, b_v, a, a_v):\n b_, b_v_, a_, a_v_ = b.shift(1), b_v.shift(1), a.shift(1), a_v.shift(1)\n ofi = i(b >= b_) * b_v - i(b <= b_) * b_v_ - i(a <= a_) * a_v + i(a >= a_) * a_v_\n return ofi.fillna(0)", "def zero_mul_simp(l, index):\n while index >=0 and index < len(l) - 1 and l[index][0] == l[index + 1][0]:\n exp = l[index][1] + l[index + 1][1]\n base = l[index][0]\n l[index] = (base, exp)\n del l[index + 1]\n if l[index][1] == 0:\n del l[index]\n index -= 1", "def index(self, x) -> int:\n pass", "def iterate4(x, omega=1, N=Mynum):\n omega = 1\n n = len(x)\n h = 1.0 / (N - 1.)\n A = redblackA(N)\n b = redblackb(N)\n \n m = (n-1)/2\n l = (n-1)\n \n for i in range(0,n):\n xsum=0\n for j in range(0,n):\n xsum = xsum + A[i,j]*x[j] \n xsum = xsum - A[i,i]*x[i] \n x[i] = omega * (b[i] - xsum) / A[i,i] + (1-omega)*x[i]\n \n return x", "def advance(self):\n u, f, k, t = self.u, self.f, self.k, self.t\n dt = t[k + 1] - t[k]\n u_new = u[k] + dt * f(u[k], t[k], k)\n u_new = [(i > 0) * i for i in u_new]\n\n return u_new", "def prodi(items: Iterable[float]) -> float:\n p: float = 1\n for n in items:\n p *= n\n return p", "def evaluate(f, a):\n if f == []:\n return 0\n result = f[-1] # begin with leading coefficient\n i = len(f) - 1 # number of times to iterate \n while i >= 0:\n result = f[i] + a*result\n i -= 1\n return result", "def _reward(self, i, rewards, reward=1):\n for j,a in enumerate(self.agents):\n if a.index==i or 
a.index==0:\n rewards[j]+=reward\n if self.zero_sum:\n if a.index!=i or a.index==0:\n rewards[j] -= reward", "def floating_point_generator():\n i = 0\n while True:\n yield str((i % 5) * 1.1)\n i += 1", "def _B2I(B, reverse=False):\n if reverse:\n return reduce(lambda x, y: (x << 1) + y, list(reversed(B)))\n else:\n return reduce(lambda x, y: (x << 1) + y, B)", "def bisection(df, a, b, niter=10):\n for i in xrange(niter):\n mid = (a+b)/2.\n if df(mid) > 0:\n b = mid\n else:\n a = mid\n\n print \"Bisection method converges faster\"\n return (a+b)/2.", "def viterbi(self, observation):\n N=len(observation)\n tab=[[0]*self.nStates for i in range(N)]\n backtrack=[[-1]*self.nStates for i in range(N)]\n if not self.logdomain:\n self.__convert_to_log()\n\n for i in range(self.nStates):\n tab[0][i]=self.e[i][observation[0]]+self.pi[i]\n \n for i in range(1,N):\n for j in range(self.nStates):\n smax=-1\n maxval=float('-inf')\n for s in range(self.nStates):\n cs=tab[i-1][s]+self.t[s][j]\n if cs>maxval:\n smax=s\n maxval=cs\n assert(smax>-1 and smax<self.nStates)\n tab[i][j]=self.e[j][observation[i]]+maxval\n backtrack[i][j]=smax\n\n smax=-1\n llike=float('-inf')\n for s in range(self.nStates):\n if llike<tab[N-1][s]:\n llike=tab[N-1][s]\n smax=s\n\n best=[-1]*N\n best[-1]=smax\n for i in range(N-2, -1, -1):\n best[i]=backtrack[i+1][best[i+1]]\n\n return best, llike", "def test_simple_ib_1():\n dist = Distribution(['00', '02', '12', '21', '22'], [1 / 5] * 5)\n ib = IBCurve(dist, rvs=[[0], [1]], beta_max=10, beta_num=21)\n assert ib.complexities[2] == pytest.approx(0.0, abs=1e-4)\n assert ib.complexities[5] == pytest.approx(0.8, abs=1e-4)\n assert ib.complexities[20] == pytest.approx(1.5129028136502387, abs=1e-4)\n assert ib.relevances[2] == pytest.approx(0.0, abs=1e-4)\n assert ib.relevances[5] == pytest.approx(0.4, abs=1e-4)\n assert ib.relevances[20] == pytest.approx(0.5701613885745838, abs=1e-4)\n assert 3.0 in ib.find_kinks()", "def _adjustBlock(self, b):\n raise NotImplementedError", "def nits(self):", "def part1b_0():\n xs = exampleInput\n _, forward = submission.computeForward(simpleCRF, xs)\n for i in xrange(len(xs)):\n grader.requireIsEqual( 1.0, sum( forward[i].values() ) )", "def binary_search(f: Callable, eps: float, a: float, b: float = None,\n display: bool = False, max_iterations: int = 100) -> float:\n x = np.nan\n find_b = False\n if b is None:\n find_b = True\n b = a + 1\n for _ in range(max_iterations):\n x = (a + b) / 2\n f_x = f(x)\n\n if display:\n import matplotlib.pyplot as plt\n xx0 = a\n xx1 = b\n xx = np.linspace(xx0, xx1, 100)\n yy = np.array(list(map(f, xx)))\n plt.plot(xx, yy)\n plt.axvline(x=x)\n plt.show()\n\n if f_x > 0:\n a = x\n if find_b:\n b = 2*max(b, 1)\n else:\n b = x\n find_b = False\n\n if abs(f_x) <= eps:\n break\n else:\n # print(\"Error: Reached maximum iteration\", b)\n pass\n return x", "def iterate_continutations(model, radix, whitelist, topk):\n for size in sorted(model.keys(), reverse=True):\n probas = dict()\n left = (\"^\" + radix)[-size:]\n count = 0\n for right in model[size].get(left, dict()):\n if whitelist.includes(LetterBag(right)):\n probas.setdefault(right, 0)\n probas[right] += math.exp(model[size][left][right])\n count += 1\n if count > 0:\n k = 0\n for selection, proba in sorted(probas.items(), key=lambda x: -x[1]):\n k += 1\n if k > topk:\n break\n yield selection, math.log(proba / count)\n break\n yield None, 0", "def getx(v, lb, ub, i, B):\r\n x = lb + np.multiply((ub - lb), v)\r\n x[i] = B - (x.sum() - x[i])\r\n # Test if variable x[i] 
is within the bounds\r\n if x[i] <= ub[i] and x[i] >= lb[i]:\r\n return x\r\n else:\r\n return np.array([])", "def get_bias(self):", "def increment_bv(bv, increment: int, graycode=False, saturate=False) -> BitVector:\n assert increment == 1 or increment == -1\n nbits = len(bv)\n if graycode:\n index = graytobin(bv2int(bv))\n index = (index+increment) % 2**nbits\n return int2bv(bintogray(index), nbits)\n else:\n if bv == tuple(True for i in range(nbits)) and increment > 0:\n if saturate:\n return bv\n raise ValueError(\"Bitvector overflow for nonperiodic domain.\")\n if bv == tuple(False for i in range(nbits)) and increment < 0:\n if saturate:\n return bv\n raise ValueError(\"Bitvector overflow for nonperiodic domain.\")\n return int2bv(bv2int(bv) + increment, nbits)", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test08b(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, rootdir=self.rootdir)\n u = b.iter(3, 30, 3)\n w = b.iter(2, 20, 2)\n self.assertEqual(a.tolist(), list(b))\n self.assertEqual(sum(a[3:30:3]), sum(u))\n self.assertEqual(sum(a[2:20:2]), sum(w))", "def optimized_fibonacci(f):\n a = 0\n b = 1\n if f < 2:\n return f\n else:\n for i in range(1, f):\n c = a + b\n a = b\n b = c\n return b", "def counit(self, element):\n return element.coefficient([])", "def next_fib(f):\n for f in fib:\n i = fib.index(f)\n return f+fib[i-1]", "def steff(f, x: float):\n print(x)\n if g(f, x)(x) != 0:\n yield x - f(x) / g(f, x)(x) # First give x_n + 1\n yield from steff(f, x - f(x) / g(f, x)(x)) # Then give new iterator", "def __getitem__(self, i):\n raise NotImplementedError(\"Not implmented!\")", "def _gibbs_sampling_iteration(self):\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n k = self.z_mn[m, n]\n self.n_mk[m, k] -= 1\n self.n_m[m] -= 1\n self.n_kt[k, w_mn] -= 1\n self.n_k[k] -= 1\n k = self._conditional_z(\n self.n_components, self.alpha, self.beta,\n self.n_mk, self.n_kt, m, w_mn, self.beta_sum, self.n_k)\n self.z_mn[m, n] = k\n self.n_mk[m, k] += 1\n self.n_m[m] += 1\n self.n_kt[k, w_mn] += 1\n self.n_k[k] += 1" ]
[ "0.57004464", "0.5678648", "0.5644198", "0.55693084", "0.5482084", "0.5432887", "0.54156345", "0.5355259", "0.53485566", "0.53301316", "0.5308281", "0.5281137", "0.5262242", "0.5249782", "0.5240983", "0.523122", "0.52278364", "0.52272636", "0.5227069", "0.52154845", "0.5185357", "0.5183869", "0.517472", "0.5169048", "0.5158779", "0.5152627", "0.51505023", "0.5141597", "0.51339334", "0.5118063", "0.5112578", "0.5096454", "0.50888497", "0.50725", "0.5068372", "0.5060508", "0.5051538", "0.50512594", "0.50487494", "0.50401217", "0.5027402", "0.5022313", "0.50210434", "0.50111437", "0.5006318", "0.5003422", "0.5003374", "0.4997149", "0.4993741", "0.49907714", "0.4990553", "0.49880472", "0.49865645", "0.4979513", "0.49670577", "0.49591693", "0.49533445", "0.49530745", "0.49483284", "0.49425438", "0.4942447", "0.4928901", "0.49283975", "0.4927983", "0.49273047", "0.49153814", "0.49126866", "0.49126098", "0.49090114", "0.4906922", "0.4904849", "0.4903965", "0.4902092", "0.49015832", "0.4898419", "0.48951116", "0.48920587", "0.4891869", "0.48905256", "0.48877457", "0.48867074", "0.48812363", "0.4877372", "0.48700547", "0.48689806", "0.4866053", "0.4864778", "0.4862897", "0.48625523", "0.4862547", "0.48624036", "0.486092", "0.48586628", "0.48578057", "0.48569077", "0.48563612", "0.48555142", "0.48521405", "0.4851554", "0.48490363", "0.48482314" ]
0.0
-1
This function renames parts of the model to meet the specific requirements of this writer. This behaviour replaces the previous approach of subclassing the parser to produce different results depending on which writer was intended to be used.
def rename(self):
    # Remove any zero-padding from single-digit parameter names
    # This reverses any change applied by one of the CUDA writers
    for i in range(self.parser.comp-1, len(self.parser.parsedModel.parameterId)):
        old_name = self.parser.parsedModel.parameterId[i]
        num = old_name[len('parameter'):]
        if len(num) > 1 and num[0] == '0':
            new_name = 'parameter' + str(num[1:])
            self.parser.parsedModel.parameterId[i] = new_name
            self.parser.rename_everywhere(old_name, new_name)

    # Remove any zero-padding from single-digit species names
    # This reverses any change applied by one of the CUDA writers
    for i in range(len(self.parser.parsedModel.speciesId)):
        old_name = self.parser.parsedModel.speciesId[i]
        num = old_name[len('species'):]
        if len(num) > 1 and num[0] == '0':
            new_name = 'species' + str(num[1:])
            self.parser.parsedModel.speciesId[i] = new_name
            self.parser.rename_everywhere(old_name, new_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_model_name(self, name):\n self._name = name\n if self._zon is not None:\n self._zon.filename = f\"{name}.{self._zon.filename.split('.')[-1]}\"", "def rename_value(model: onnx.ModelProto, old_name: str, new_name: str):\n if old_name == new_name:\n return\n logger = get_root_logger()\n logger.info(f'rename {old_name} -> {new_name}')\n for n in model.graph.node:\n for i, output in enumerate(n.output):\n if output == old_name:\n n.output[i] = new_name\n for i, input in enumerate(n.input):\n if input == old_name:\n n.input[i] = new_name\n for v in model.graph.value_info:\n if v.name == old_name:\n v.name = new_name\n for i, input in enumerate(model.graph.input):\n if input.name == old_name:\n input.name = new_name\n for i, output in enumerate(model.graph.output):\n if output.name == old_name:\n output.name = new_name", "def normalize_names(self):\n for node in self.asset.findall(\".//*[@name]\"):\n name = node.get(\"name\")\n if not name.startswith(self.name + \".\"):\n node.set(\"name\", self.name + \".\" + name)\n\n for attr in ['texture', 'material', 'mesh']:\n for node in self.root.findall(\".//*[@{}]\".format(attr)):\n name = node.get(attr)\n if not name.startswith(self.name + \".\"):\n node.set(attr, self.name + \".\" + name)\n\n for node in self.worldbody.findall(\".//*[@name]\"):\n name = node.get(\"name\")\n if not name.startswith(self.name + \".\"):\n node.set(\"name\", self.name + \".\" + name)\n\n for node in self.worldbody.findall(\".//*[@joint]\"):\n joint = node.get(\"joint\")\n if not joint.startswith(self.name + \".\"):\n node.set(\"joint\", self.name + \".\" + name)", "def fix_name(self):\n self._name_fixed = True", "def reformat(self):\n\t\told_path = os.path.join( self.path, self.init_str )\n\t\tnew_path = os.path.join( self.path, self.reorder() )\n\t\tos.rename(old_path,new_path)", "def rename_meta(meta, mapper, ignore_batch_props):\n rename_properties(mapper)\n rename_lib_values(meta['lib']['values'], mapper)\n rename_masks(meta['masks'], mapper, keep_original)\n rename_columns(meta['columns'], mapper, keep_original)\n rename_sets(meta['sets'], mapper, keep_original)\n if 'batches' in meta['sets'] and not ignore_batch_props:\n rename_batch_properties(meta['sets']['batches'], mapper)\n if not keep_original:\n rename_set_items(meta['sets'], mapper)", "def __sanitizeMetaModelName( self, name ):\r\n if( name[-5:] == '_META' ): \r\n return name[:-5]\r\n else: return name", "def _name_changed ( self, name ):\n self.name_last = parse_name( name )[-1]\n self.inputs_changed()", "def get_model_name(ind: int) -> str:\n nonlocal model_index\n model_index += 1\n return f'{fizz_name}-{fizz_type.model_name}{model_index:02}'", "def restore_names(input_file, output_file):\n\n if not dataModel.loadModel(input_file):\n print(\"Couldn't open input file\")\n return 1\n\n model = dataModel.getModel()\n\n restore_names_in(model.getCompartments())\n restore_names_in(model.getMetabolitesX())\n restore_names_in(model.getModelValues())\n restore_names_in(model.getReactions())\n restore_names_in(model.getEvents())\n\n dataModel.saveModel(output_file, True)\n\n return 0", "def rename(old, new):", "def rename(old, new):", "def renameFields(self, nameDict):\n for format in self.values():\n if format.genericType in nameDict:\n nameDict[format.name] = nameDict[format.genericType]\n for item in globalref.docRef.root.descendantGen():\n for oldName, newName in nameDict.get(item.formatName, []):\n if oldName in item.data:\n item.data[newName] = item.data[oldName]\n del item.data[oldName]", "def 
set_post_score_renames(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def _MaybeNewName(self, name):\n if not name:\n return name\n if name == self._old[:-1]:\n return self._module_name\n before, match, after = name.partition(self._old)\n if match and not before and \".\" not in after:\n return self._new + after\n else:\n return name", "def change_names (fixed_structure, moving_structure, index):\n for chain in moving_structure[0]:\n chain.id = utilities.merged_list[index]+\"-\"+chain.id\n index +=1\n return (fixed_structure, moving_structure, index)", "def fix(self):\n for namespace in pm.listNamespaces():\n for elem in namespace.ls():\n elem.rename(elem.split(\":\")[-1])\n namespace.remove()\n\n self.run()", "def update_model_prefix(model, db=DEFAULT_DB_ALIAS, verbosity=2):\n prefix = \"content:\"\n\n ct = ContentType.objects.get_for_model(model)\n new_name = u\"{0} {1}\".format(prefix, model._meta.verbose_name_raw).strip()\n\n if ct.name != new_name:\n # Django 1.4/1.5 compatible .save(update_fields=('name',)) look-a-like\n ContentType.objects.using(db).filter(pk=ct.id).update(name=new_name)\n\n if verbosity >= 1:\n print(\" - Updated ContentType title for {0}.{1}\".format(model._meta.app_label, model._meta.object_name))\n return True\n return False", "def get_model_name(ind: int) -> str:\n return f'{fizz_name}-{fizz_type.model_name}{ind:02}'", "def TransformNames(self) -> _n_2_t_0[str]:", "def fix_name(row, index, name_map):\n # print(\"Input row: {}\".format(row))\n name = row[index].strip()\n # print(\"Name entry is {}\".format(name))\n if name.endswith(\" (yourself)\"):\n name = name[:-len(\" (yourself)\")]\n # print(\"Shortening to |{}|\".format(name))\n if name not in name_map:\n name_map[name] = name # Initially the identity transform\n row[index] = name_map[name]", "def normalize_col_name(col_name, used_column_names, is_relation):\n field_params = {}\n field_notes = []\n\n new_name = col_name.lower()\n if new_name != col_name:\n field_notes.append('Field name made lowercase.')\n\n if is_relation:\n if new_name.endswith('_id'):\n new_name = new_name[:-3]\n else:\n field_params['db_column'] = col_name\n\n new_name, num_repl = re.subn(r'\\W', '_', new_name)\n if num_repl > 0:\n field_notes.append('Field renamed to remove unsuitable characters.')\n\n if new_name.find(LOOKUP_SEP) >= 0:\n while new_name.find(LOOKUP_SEP) >= 0:\n new_name = new_name.replace(LOOKUP_SEP, '_')\n if col_name.lower().find(LOOKUP_SEP) >= 0:\n # Only add the comment if the double underscore was in the original\n # name\n field_notes.append(\n \"Field renamed because it contained more than one '_' in a row.\"\n )\n\n if new_name.startswith('_'):\n new_name = 'field%s' % new_name\n field_notes.append(\"Field renamed because it started with '_'.\")\n\n if new_name.endswith('_'):\n new_name = '%sfield' % new_name\n field_notes.append(\"Field renamed because it ended with '_'.\")\n\n if keyword.iskeyword(new_name):\n new_name += '_field'\n field_notes.append(\n 'Field renamed because it was a Python reserved word.')\n\n if new_name[0].isdigit():\n new_name = 'number_%s' % new_name\n field_notes.append(\n \"Field renamed because it wasn't a valid Python identifier.\")\n\n if new_name in used_column_names:\n num = 0\n while '%s_%d' % (new_name, num) in used_column_names:\n num += 1\n new_name = '%s_%d' % (new_name, num)\n field_notes.append('Field renamed because of name conflict.')\n\n if col_name != new_name and field_notes:\n field_params['db_column'] = col_name\n\n return new_name, 
field_params, field_notes", "def update_network_name(info_file, new_example_file, default_name, model_name):\n # load file\n with info_file.open() as fr:\n lines = fr.read()\n\n if default_name != model_name:\n old_name_list = [default_name, default_name.upper()]\n new_name_list = [model_name, model_name.upper()]\n\n # replace file\n for i in range(len(old_name_list)):\n lines = re.sub(old_name_list[i], new_name_list[i], lines)\n\n # save new example file\n with new_example_file.open(\"w\") as fw:\n fw.write(lines)\n\n return new_example_file", "def __normalize_name(self):\n self.normalized_name = normalizeSimplified(self.name)", "def renameFormats(self, nameDict):\n for item in globalref.docRef.root.descendantGen():\n item.formatName = nameDict.get(item.formatName, item.formatName)", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def normalizeNames(self, startIndex=0):\n ind = startIndex\n for a in self:\n oldName = a.getName()\n newName = a.makeNameFromAssemNum(ind)\n if oldName == newName:\n ind += 1\n continue\n\n a.p.assemNum = ind\n a.setName(newName)\n\n for b in a:\n axialIndex = int(b.name.split(\"-\")[-1])\n b.name = b.makeName(ind, axialIndex)\n\n ind += 1\n\n self.normalizeInternalBookeeping()\n\n return ind", "def new_name(self,new_name):\n self.name = new_name", "def _name_changed(self):\n self._named = True", "def _convert_name(self, name):\n if not self.re_name.match(name):\n org_name = name\n name = self.re_white.sub('_', name)\n name = self.re_alpha.sub('_', name)\n if not self.re_name.match(name):\n name = 'x_' + name2\n self.warn('Converting name <' + org_name + '> to <' + name + '>.')\n return name", "def setName(self, *args):\n return _libsbml.Submodel_setName(self, *args)", "def update_name(self, new_name):\r\n self.__name = new_name", "def update_name(self, new_name):\r\n self.__name = new_name", "def setName(self, *args):\n return _libsbml.Model_setName(self, *args)", "def parse_yolo_name(backbone_name, num_anchors, num_classes):\n model_name = 'yolov3'\n\n if 'tiny' in backbone_name:\n model_name += '-tiny'\n elif 'spp' in backbone_name:\n model_name += '-spp'\n model_name += '_a' + str(num_anchors)\n model_name += '_c' + str(num_classes)\n\n return model_name", "def get_model_name(ind: int) -> str:\n return f'{fizz_name}-{fizz_type.model_name}'", "def standard_name_remapper(orig_name):\n # Remove any trailing parentheses.\n # TODO(tjann): to check if this is safe.\n paren_start = orig_name.find(\"(\")\n if paren_start != -1:\n orig_name = orig_name[:paren_start]\n\n # Removes separating words.\n orig_name = orig_name.replace(\",\", \" \")\n orig_name = orig_name.replace(\"-\", \" \")\n orig_name = orig_name.replace(\"and \", \"\")\n return \"\".join([word.capitalize() for word in orig_name.split()])", "def changeName(name):\n\tif name in [\"<OPEN>\", \"<HIGH>\", \"<LOW>\", \"<CLOSE>\"]:\n\t\t# Frist charector is upper case\n\t\tname = name.replace('<', '').replace('>', '')\n\t\t#name = name[0] + name[1:].lower()\t\t\n\telif name in [\"<VOL>\"]:\n\t\t#name = name.replace(\"<VOL>\", \"Volume\")\n\t\tname = name.replace(\"<VOL>\", \"VOLUME\")\n\telif name in [\"<DTYYYYMMDD>\"]:\n\t\t#name = name.replace(\"<DTYYYYMMDD>\", \"Date\")\n\t\tname = name.replace(\"<DTYYYYMMDD>\", \"DATE\")\n\treturn name", "def simplifyOutName(name):\n return \"HLTNav_\" + name.replace(\"HLTNav_\", \"\").replace(\"Trig\", \"\").replace(\"Alg\", \"\")", "def rename(oldname, newname):", "def rewrite(self, str_new_definition=None):\n if 
str_new_definition == None:\n str_new_definition = self['meta']['raw_definition']\n config.item_rewrite(self._original_attributes, str_new_definition)\n self['meta']['raw_definition'] = str_new_definition\n self._event(level='write', message=\"Object definition rewritten\")\n return True", "def _adjustNames(self, antimonyModel:str, observedTS:NamedTimeseries) \\\r\n ->typing.Tuple[NamedTimeseries, list]:\r\n rr = te.loada(antimonyModel)\r\n dataNames = rr.simulate().colnames\r\n names = [\"[%s]\" % n for n in observedTS.colnames]\r\n missingNames = [n[1:-1] for n in set(names).difference(dataNames)]\r\n newSelectedColumns = list(self.selectedColumns)\r\n if len(missingNames) > 0:\r\n newObservedTS = observedTS.copy()\r\n self.logger.exception(\"Missing names in antimony export: %s\"\r\n % str(missingNames))\r\n for name in observedTS.colnames:\r\n missingName = \"%s_\" % name\r\n if name in missingNames:\r\n newObservedTS = newObservedTS.rename(name, missingName)\r\n newSelectedColumns.remove(name)\r\n newSelectedColumns.append(missingName)\r\n else:\r\n newObservedTS = observedTS\r\n return newObservedTS, newSelectedColumns", "def process_name(self, name, inverse=False):\n if inverse:\n return name.replace('_', ' ').title()\n return name.lower().replace(' ', '_').replace('.', '')", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def rename(self, oldname, newname):\n self._check_rename(oldname, newname)\n conns = self.find_referring_connections(oldname)\n wflows = self.find_in_workflows(oldname)\n old_autos = self._cleanup_autopassthroughs(oldname)\n\n obj = self.remove(oldname)\n self.add(newname, obj)\n\n # oldname has now been removed from workflows, but newname may be in the wrong\n # location, so force it to be at the same index as before removal\n for wflow, idx in wflows:\n wflow.remove(newname)\n wflow.add(newname, idx)\n\n old_rgx = re.compile(r'(\\W?)%s.' % oldname)\n par_rgx = re.compile(r'(\\W?)parent.')\n\n # recreate all of the broken connections after translating oldname to newname\n for u, v in conns:\n self.connect(re.sub(old_rgx, r'\\g<1>%s.' % newname, u),\n re.sub(old_rgx, r'\\g<1>%s.' % newname, v))\n\n # recreate autopassthroughs\n if self.parent:\n for u, v in old_autos:\n u = re.sub(old_rgx, r'\\g<1>%s.' % '.'.join([self.name, newname]), u)\n v = re.sub(old_rgx, r'\\g<1>%s.' % '.'.join([self.name, newname]), v)\n u = re.sub(par_rgx, r'\\g<1>', u)\n v = re.sub(par_rgx, r'\\g<1>', v)\n self.parent.connect(u, v)", "def parse_modelname(string,labellist,ensemblesfolder):\n ## We need to account for two different prefixes now. 
\n split_ens_temp = ensemble_template.split(\"{f}\")\n template_prefix = split_ens_temp[0]\n\n template_seedind = split_ens_temp[1].split(\"{s}\")[0]\n if string.startswith(template_prefix): ## TODO or other prefix\n frames,seedext = string.split(template_prefix)[-1].split(template_seedind)\n seed=seedext.split(\"results.json\")[0]\n return {\"name\":string,\n \"frames\":int(frames),\n \"seed\":int(seed),\n \"template\":ensemble_template,\n \"outliers\":determine_outliers(labellist,int(seed),int(frames)),\n }", "def set_name(self, newname=\"\"):\n self.name = newname", "def renameIDs(self, *args):\n return _libsbml.Model_renameIDs(self, *args)", "def _auto_name(name, parent):\n if not is_ready(parent):\n parent._pywarm_auto_name_dict = {}\n def _hook(model, x):\n model._pywarm_auto_name_dict = {}\n parent._pywarm_forward_pre_hook = parent.register_forward_pre_hook(_hook)\n track = parent._pywarm_auto_name_dict\n if name not in track:\n track[name] = 0\n track[name] += 1\n return f'{name}_{track[name]}'", "def rename(self, renames):\n def relation(*args):\n args = [renames[i] for i in args]\n result = self(*args)\n return result\n\n return FO_Relation(relation, d_universe=list(range(len(renames))), arity=self.arity())", "def renameAssetObjects(self):\n\t\tfor i,o in enumerate( self.objects ):\n\t\t\tmn.Node( o ).name = self.name + '%i'%i", "def renameAttr(*args, **kwargs)->AnyStr:\n pass", "def update_control_names(self, prefix):\n for name in self.inputs.controls:\n self.inputs[name].name = (\n \"{0}->\".format(prefix) + self.inputs[name].name)\n for name in self.outputs.controls:\n if self.outputs[name].type != \"reference\":\n self.outputs[name].name = (\n \"{0}->\".format(prefix) + self.outputs[name].name)", "def replace(name, newobject):", "def rename_tab(self):\n if not (hasattr(self, 'name_edit')\n and self.name_edit.isVisible()):\n return\n\n self.name_edit.hide()\n\n label = self.name_edit.text().strip()\n if not bool(label):\n label = self.name_edit.tab_text\n\n index = self.name_edit.tab_index\n\n if self.renaming_label == label:\n return\n\n # FIXME: if the tab is not\n # positioned to the right,\n # this can cause a jump.\n self.setTabText(index, label)\n\n data = self.tabData(index)\n data['name'] = label\n self.tab_renamed_signal.emit(\n data['uuid'],\n data['name'],\n data['text'],\n str(index),\n data.get('path')\n )\n self.setTabData(index, data)", "def get_name(self, old_name):\n if old_name not in self.record:\n self.record[old_name] = [self.PLACEHOLDER]\n suffix = \"\"\n else:\n self.record[old_name].append(self.PLACEHOLDER)\n suffix = f\"{len(self.record[old_name]) - 1}\"\n\n new_name = f\"{old_name}{suffix}\"\n self.topo_order.append(new_name)\n\n return new_name", "def cmip6_renaming_dict():\n # I could probably simplify this with a generalized single dict, \n # which has every single possible `wrong` name and then for each model\n # the renaming function just goes through them...\n dim_name_dict = {\n \"AWI-CM-1-1-MR\":{},\n \"BCC-CSM2-MR\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"bnds\":\"bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"vertex\": None,\n 'time_bounds': \"time_bnds\",\n },\n \"BCC-ESM1\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"bnds\":\"bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 
\"vertex\": \"vertex\",\n 'time_bounds': \"time_bnds\",\n },\n \"CAMS-CSM1-0\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n \"vertex\": 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n \"CanESM5\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n \"time_bounds\": \"time_bnds\",\n \"vertex\": \"vertices\",\n },\n \"CanESM5-CanOE\": {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": 'vertices_longitude',\n \"lat_bounds\": 'vertices_latitude',\n \"vertex\": \"vertices\",\n },\n \"CNRM-CM6-1\": {\n \"x\": [\"x\", 'lon'],\n \"y\": [\"y\", 'lat'],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"bnds\": \"axis_nbounds\",\n \"lev_bounds\": \"lev_bounds\",\n \"lon_bounds\": \"bounds_lon\",\n \"lat_bounds\": \"bounds_lat\",\n 'vertex': \"nvertex\",\n 'time_bounds': \"time_bnds\",\n },\n \"CNRM-ESM2-1\": {\n \"x\": [\"x\", \"lon\"],\n \"y\": [\"y\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bounds\",\n \"lon_bounds\": \"bounds_lon\",\n \"lat_bounds\": \"bounds_lat\",\n \"bnds\":\"axis_nbounds\",\n 'vertex': None,\n 'time_bounds': \"time_bnds\",\n },\n \"E3SM-1-0\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"bnds\":\"bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"time_bounds\":\"time_bounds\",\n 'vertex': None,\n },\n \"E3SM-1-1\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"bnds\":\"bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"time_bounds\":\"time_bounds\",\n 'vertex': None,\n },\n \"E3SM-1-1-ECA\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"bnds\":\"bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"time_bounds\":\"time_bounds\",\n 'vertex': None,\n },\n \"EC-Earth3-LR\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'time_bounds': \"time_bnds\",\n # 'vertex': 'vertices',\n # 'dzt': 'thkcello',\n },\n \"EC-Earth3-Veg\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n # 'dzt': 'thkcello',\n },\n \"EC-Earth3\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'time_bounds': \"time_bnds\",\n # 'vertex': 'vertices',\n # 'dzt': 'thkcello',\n },\n \"FGOALS-f3-L\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 
'time_bounds': \"time_bnds\",\n # 'vertex': 'vertices',\n # 'dzt': 'thkcello',\n },\n \"NICAM16-7S\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"time_bounds\": \"time_bnds\",\n 'vertex': 'vertices',\n },\n \"MIROC-ES2L\": {\n \"x\": [\"x\", 'lon'],\n \"y\": [\"y\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": [\"lev\", \"zlev\"],\n \"lev_bounds\": [\"lev_bnds\", \"zlev_bnds\"],\n \"lon_bounds\": \"x_bnds\",\n \"lat_bounds\": \"y_bnds\",\n \"time_bounds\": \"time_bnds\",\n 'vertex': 'vertices',\n },\n \"MIROC6\": {\n \"x\": [\"x\", 'lon'],\n \"y\": [\"y\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"x_bnds\",\n \"lat_bounds\": \"y_bnds\",\n 'time_bounds': \"time_bnds\",\n },\n \"HadGEM3-GC31-LL\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bounds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'time_bounds': \"time_bnds\",\n },\n \"HadGEM3-GC31-MM\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bounds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'time_bounds': \"time_bnds\",\n },\n \"UKESM1-0-LL\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n \"time_bounds\":\"time_bnds\",\n # 'vertex': 'vertices',\n # 'dzt': 'thkcello',\n },\n 'GISS-E2-2-G': { \n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': None,\n 'time_bounds': \"time_bnds\",\n },\n \"GISS-E2-1-G-CC\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': None,\n 'time_bounds': \"time_bnds\",\n },\n \"GISS-E2-1-G\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': None,\n 'time_bounds': \"time_bnds\",\n },\n \"GISS-E2-1-H\": {\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': None,\n 'time_bounds': \"time_bnds\",\n },\n \"CESM1-1-CAM5-CMIP5\": {\n \"x\": [\"nlon\", \"lon\"],\n \"y\": [\"nlat\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"bnds\":\"d2\",\n \"time_bounds\":\"time_bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': 'vertices',\n },\n \"CESM2-WACCM\": {\n \"x\": [\"nlon\", \"lon\"],\n \"y\": [\"nlat\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"bnds\":\"d2\",\n \"time_bounds\":\"time_bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': 'vertices',\n },\n \"CESM2-WACCM-FV2\": {\n \"x\": [\"nlon\", \"lon\"],\n \"y\": [\"nlat\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n 
\"lev\": \"lev\",\n \"bnds\":\"d2\",\n \"time_bounds\":\"time_bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': 'vertices',\n },\n \"CESM2\": {\n \"x\": [\"nlon\", \"lon\"],\n \"y\": [\"nlat\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"bnds\":'d2',\n \"time_bounds\":\"time_bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': 'vertices',\n },\n \"CESM2-FV2\": {\n \"x\": [\"nlon\", \"lon\"],\n \"y\": [\"nlat\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"bnds\":'d2',\n \"time_bounds\":\"time_bnds\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': 'vertices',\n },\n \"GFDL-CM4\": {\n \"x\": [\"x\",\"lon\"],\n \"y\": [\"y\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"time_bounds\": \"time_bnds\",\n # 'vertex': 'vertex',\n # 'dzt': 'thkcello',\n },\n \"GFDL-OM4p5B\": {\n \"x\": [\"x\",\"lon\"],\n \"y\": [\"y\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"time_bounds\": \"time_bnds\",\n # 'vertex': 'vertex',\n # 'dzt': 'thkcello',\n },\n \"GFDL-ESM4\": {\n \"x\": [\"x\",\"lon\"],\n \"y\": [\"y\", \"lat\"],\n \"lon\": \"lon\",\n \"lat\": \"lat\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n \"time_bounds\": \"time_bnds\",\n # 'vertex': 'vertex',\n # 'dzt': 'thkcello',\n },\n \"NESM3\": {\n \"x\": ['i', \"lon\"],\n \"y\": ['j', \"lat\"],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n # 'dzt': 'thkcello',\n },\n \"MRI-ESM2-0\": {\n \"x\": ['x', \"lon\"],\n \"y\": ['y', \"lat\"],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"bnds\":'bnds',\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": [\"x_bnds\", 'lon_bnds'],\n \"lat_bounds\": [\"y_bnds\", 'lat_bnds'],\n \"time_bounds\": \"time_bnds\",\n 'vertex': 'vertices',\n },\n \"SAM0-UNICON\": {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": \"longitude\",\n \"lat\": \"latitude\",\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n # 'dzt': 'thkcello',\n },\n \"MCM-UA-1-0\": {\n \"x\": \"longitude\",\n \"y\": \"latitude\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'time_bounds': \"time_bnds\",\n # 'vertex': 'vertices',\n # 'dzt': 'thkcello',\n }, \n 'IPSL-CM6A-LR': {\n \"x\": ['x', \"lon\"],\n \"y\": ['y', \"lat\"],\n \"lon\": 'nav_lon',\n \"lat\": 'nav_lat',\n \"lev\": [\"lev\",\"deptht\", \"olevel\"],\n \"lev_bounds\": [\"lev_bounds\", \"deptht_bounds\",'olevel_bounds'],\n \"lon_bounds\": \"bounds_nav_lon\",\n \"lat_bounds\": \"bounds_nav_lat\",\n 'vertex': 'nvertex',\n \"bnds\":\"axis_nbounds\",\n 'time_bounds': \"time_bnds\",\n # 'dzt': 'thkcello',\n },\n 'NorCPM1': {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n 
\"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": None,\n \"lat_bounds\": None,\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'NorESM1-F': {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": 'vertices_longitude',\n \"lat_bounds\": 'vertices_latitude',\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'NorESM2-LM': {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": 'vertices_longitude',\n \"lat_bounds\": 'vertices_latitude',\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'NorESM2-MM': {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\", # i leave this here because the names are the same as for the other Nor models.\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": 'vertices_longitude',\n \"lat_bounds\": 'vertices_latitude',\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n \n 'MPI-ESM1-2-HR': {\n \"x\": [\"i\", 'lon'],\n \"y\": [\"j\", 'lat'],\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": 'vertices_longitude',\n \"lat_bounds\": 'vertices_latitude',\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'MPI-ESM1-2-LR': {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": 'vertices_longitude',\n \"lat_bounds\": 'vertices_latitude',\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'MPI-ESM-1-2-HAM': {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": 'vertices_longitude',\n \"lat_bounds\": 'vertices_latitude',\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'CNRM-CM6-1-HR': {\n \"x\": \"x\",\n \"y\": \"y\",\n \"lon\": 'lon',\n \"lat\": 'lat',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bounds\",\n \"lon_bounds\": \"bounds_lon\",\n \"lat_bounds\": \"bounds_lat\",\n 'vertex': None,\n 'time_bounds': \"time_bounds\",\n },\n 'FIO-ESM-2-0': {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"vertices_longitude\",\n \"lat_bounds\": \"vertices_latitude\",\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'ACCESS-ESM1-5': {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"vertices_longitude\",\n \"lat_bounds\": \"vertices_latitude\",\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'ACCESS-CM2': {\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"vertices_longitude\",\n \"lat_bounds\": \"vertices_latitude\",\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'INM-CM4-8': { # this is a guess.\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": None,\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': None,\n 'time_bounds': \"time_bnds\",\n },\n 'INM-CM5-0': { # this is a guess.\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": None,\n \"lat\": 
None,\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"lon_bnds\",\n \"lat_bounds\": \"lat_bnds\",\n 'vertex': None,\n 'time_bounds': \"time_bnds\",\n },\n 'MRI-ESM2-0':{\n \"x\": \"x\",\n \"y\": \"y\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n \"lev\": \"lev\",\n \"lev_bounds\": \"lev_bnds\",\n# \"lon_bounds\": 'x_bnds',\n# \"lat_bounds\": 'y_bnds',\n# 'vertex': None, # this is a mess. there is yet another convention. Will have to deal with this once I wrap xgcm into here.\n 'time_bounds': \"time_bnds\",\n },\n 'CIESM': { # this is a guess.\n \"x\": \"i\",\n \"y\": \"j\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n# \"lev\": \"lev\", # no 3d data available as of now\n# \"lev_bounds\": \"lev_bnds\",\n \"lon_bounds\": \"vertices_longitude\",\n \"lat_bounds\": \"vertices_latitude\",\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n 'KACE-1-0-G': { # this is a guess.\n \"x\": \"lon\",\n \"y\": \"lat\",\n \"lon\": 'longitude',\n \"lat\": 'latitude',\n# \"lev\": \"lev\", # no 3d data available as of now\n# \"lev_bounds\": \"lev_bnds\",\n# \"lon_bounds\": \"vertices_longitude\",\n# \"lat_bounds\": \"vertices_latitude\",\n# \"lon_bounds\": \"vertices_longitude\",\n# \"lat_bounds\": \"vertices_latitude\",\n 'vertex': 'vertices',\n 'time_bounds': \"time_bnds\",\n },\n \n }\n # cast all str into lists\n for model in dim_name_dict.keys():\n for field in dim_name_dict[model].keys():\n if isinstance(dim_name_dict[model][field], str) or dim_name_dict[model][field] is None :\n dim_name_dict[model][field] = [dim_name_dict[model][field]]\n# add 'lon' and 'lat' as possible logical indicies for all models. This should take care of all regridded ocean output and all atmosphere models.\n if 'x' in dim_name_dict[model].keys():\n if not 'lon' in dim_name_dict[model]['x']:\n dim_name_dict[model]['x'].append('lon')\n \n if 'y' in dim_name_dict[model].keys():\n if not 'lat' in dim_name_dict[model]['y']:\n dim_name_dict[model]['y'].append('lat') \n return dim_name_dict", "def adjust_name_for_printing(name):\n if name is not None:\n name2 = name\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_m_\")\n name = name.replace(\"+\", \"_p_\").replace(\"!\", \"_I_\")\n name = name.replace(\"**\", \"_xx_\").replace(\"*\", \"_x_\")\n name = name.replace(\"/\", \"_l_\").replace(\"@\", '_at_')\n name = name.replace(\"(\", \"_of_\").replace(\")\", \"\")\n if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:\n raise NameError(\"name {} converted to {} cannot be further converted to valid python variable name!\".format(name2, name))\n return name\n return ''", "def simplify_model_names(models):\n modified_models = []\n for m in models:\n regex = re.findall(re.compile('\\w+(?:-\\w+)+|\\w+'), m)\n regex = [word for word in regex if not re.search('[0-9]d', word)]\n regex = [word for word in regex if not len(str(word)) < 2]\n regex = [word for word in regex if not re.search(\n r'\\b\\d+\\b', word)]\n modified_models.append(' '.join(regex))\n return modified_models", "def get_model_name(ind: int) -> str:\n return fizz_name", "def update_name(name, mapping):\n words_name = name.split(\" \")\n if words_name not in expected:\n for word in words_name:\n if word in mapping:\n name = name.replace(word, mapping[word])\n \n if word == word.lower():\n if word not in allowed_lowercase:\n name = name.replace(word, word.capitalize())\n \n if words_name[0] not in expected:\n if words_name[0] not in mapping:\n if words_name[0] == \"Fernando\":\n name = \"Avenida \" + 
name\n elif words_name[0] == \"rua\":\n pass\n else:\n name = \"Rua \" + name\n\n return name", "def setName(self, *args):\n return _libsbml.ModelCreator_setName(self, *args)", "def update_street_name(name, mapping):\r\n m = street_type_re.search(name)\r\n if m:\r\n street_type = m.group()\r\n if street_type in list(mapping.keys()):\r\n better_street_type = mapping[street_type]\r\n name = street_type_re.sub(better_street_type, name)\r\n return name", "def set_model_name(self, model_name: str = \"355M\") -> None:\n self.model_name = model_name", "def namehack(field):\n if field.endswith((\"attribute\", \"views\")):\n return field + \"__name\"\n else:\n return field", "def update_short_name(name):\n # First verify that the common errors have been fixed\n name = update_street_name(name)\n\n # Find the abbreviation to replace\n m = over_abbr_re.search(name)\n if m:\n if m.group() in abbreviations:\n name = over_abbr_re.sub(abbreviations[m.group()], name)\n\n return name", "def build_model_name(cls, name='modelName', output_name='output'):\n obj = cls(name)\n obj.exporter = 'generate_model_name'\n obj.output_name = output_name\n return obj", "def change_pkg_name(self):\n\n sender = self.sender()\n self.change_data()\n self.full_ed_lines[7].setText(sender.text() + '_node')", "def set_model_name(self, name):\n self.model_name = name", "def reverse_update_source_names(apps, schema_editor):\n Source = apps.get_model(\"vast_pipeline\", \"Source\")\n while Source.objects.filter(name__startswith=\"J\").exists():\n # do the updates in transaction batches of 1000 in case the source table is large\n with transaction.atomic():\n for source in Source.objects.filter(name__startswith=\"J\")[:1000]:\n source.name = (\n f\"ASKAP_{deg2hms(source.wavg_ra, precision=2)}\"\n f\"{deg2dms(source.wavg_dec, precision=2)}\"\n ).replace(\":\", \"\")\n source.save()", "def setName(self, attributeIndex, newName) -> None:\n ...", "def rename_multiindex(idx, feature_names_out, idx_name=\"index\"):\n if feature_names_out == \"multiindex\":\n return idx\n elif feature_names_out == \"flat\":\n return flatten_multiindex(idx)\n elif feature_names_out == \"original\":\n if idx.get_level_values(-1).is_unique:\n return idx.get_level_values(-1)\n else:\n raise ValueError(\n 'Error, resulting index names when using \"original\" naming '\n f\"for {idx_name} contains non-unique elements.\"\n )\n elif feature_names_out == \"auto\":\n original = idx.get_level_values(-1)\n idx_out = original.copy().values.astype(\"str\")\n if original.is_unique:\n return pd.Index(idx_out)\n\n flat = flatten_multiindex(idx)\n duplicated = original.duplicated(keep=False)\n\n idx_out[duplicated] = flat[duplicated]\n return pd.Index(idx_out)\n else:\n raise ValueError(\n \"invalid value for feature_names_out in rename_multiindex, \"\n 'must be one of \"flat\", \"multiindex\", \"original\", \"auto\", '\n f\"but found {feature_names_out}\"\n )", "def renameUI(*args, **kwargs)->AnyStr:\n pass", "def correct_naming(obsid, inst):\n cobsid = str(int(float(obsid)))\n if len(cobsid) == 5:\n return \n\n lobsid = mcf.add_leading_zero(obsid, 5)\n \n for sdir in ['secondary', 'analysis']:\n\n cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/hrcf* >' + zspace\n os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n for ent in data:\n atemp = re.split('\\/', ent)\n fname = atemp[-1]\n mc = re.search(lobsid, fname)\n if mc is not None:\n continue\n else:\n atemp = re.split('hrcf', fname)\n btemp = re.split('_', atemp[1])\n sobs = btemp[0]\n new = 
fname.replace(sobs, lobsid)\n full = '/data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/' + new\n\n cmd = 'mv ' + ent + ' ' + full\n os.system(cmd)", "def reset_name_labels(infr):\n infr.print('reset_name_labels', 1)\n orig_names = infr.get_node_attrs('orig_name_label')\n infr.set_node_attrs('name_label', orig_names)", "def model_name(spec_name):\n return \"spec_\" + spec_name", "def associate_renames(df_renames, tr, node):\n # If any part of the node string is in the index of the rename dataframe\n # then build the original name.\n if any(new_nm.lower() in node.lower() for new_nm in df_renames.index):\n row_index = list(\n filter(lambda x: x.lower() in node, df_renames.index)\n )\n old_name = df_renames.loc[row_index].to_numpy()\n row_index = [x.lower() for x in row_index]\n old_name = [x.lower() for x in chain(*old_name)]\n new_old_tup = zip(row_index, old_name)\n # take the original name and the current name and use the current name\n # as a template to build up the old name.\n original_name = reduce(\n lambda new, kv: new.replace(*kv), new_old_tup, node\n )\n if node == original_name:\n row_index = list(filter(lambda x: x in node, df_renames.index))\n old_name = df_renames.loc[row_index].to_numpy()\n new_old_tup = zip(row_index, chain(*old_name))\n original_name = reduce(\n lambda new, kv: new.replace(*kv), new_old_tup, node\n )\n\n # Get the ID of node and the ID of the original node name that was\n # generated above.\n original_id = tr.get_uml_id(name=original_name)\n tr.uml_id.update({node: original_id})\n return {\"original_name\": original_name, \"original_id\": original_id}\n else:\n return {\"original_name\": None, \"original_id\": None}", "def set_name(self, name):\n if name == 'PositiveInteger' :\n self.name = 'Integer'\n self.output = False\n elif name == 'NaturalNumber' :\n self.name = 'Integer'\n self.output = False\n elif name == 'TimeAndDate' :\n self.name = 'DateTime'\n self.output = False\n elif name == 'Real' :\n self.name = 'Float'\n self.output = False\n elif name == 'Percentage':\n self.name = 'Float'\n self.output = False\n elif name == 'Identifier45':\n self.name = 'String'\n self.length = 45\n self.output = False\n elif name == 'Identifier90':\n self.name = 'String'\n self.length = 90\n self.output = False\n else :\n # print \"Not converting %s to base type\" % (name)\n self.name = name", "def reverseName(self, locatorGroup):\r\n locatorList = cmds.listRelatives(locatorGroup)\r\n\r\n eyeLocators = []\r\n earLocators = []\r\n\r\n for i in locatorList:\r\n if \"Eye_Coord\" in i:\r\n eyeLocators.append(i)\r\n if \"Ear_Coord\" in i:\r\n earLocators.append(i)\r\n\r\n\r\n # We first check if there is more then one eye or not. If there is, we have to reorder\r\n points = 8\r\n TempRename = []\r\n if len(eyeLocators) > points:\r\n # We first rename all the eye locators to a default name to prevent name clashing\r\n for i in range(0, len(eyeLocators)):\r\n RenameObj = cmds.rename(eyeLocators[i], 'TempEyeCoord#')\r\n TempRename.append(RenameObj)\r\n\r\n # We reorder the eye from right to left\r\n for i in range((len(eyeLocators)/points)-1 , -1 , -1):\r\n for j in range(0, points):\r\n cmds.rename(TempRename[j + (i * points)], 'Eye_Coord#')\r\n\r\n # We then check if there is more then one ear or not. 
If there is, we have to reorder\r\n points = 5\r\n TempRename = []\r\n if len(earLocators) > points:\r\n # We first rename all the ear locators to a default name to prevent name clashing\r\n for i in range(0, len(earLocators)):\r\n RenameObj = cmds.rename(earLocators[i], 'TempEarCoord#')\r\n TempRename.append(RenameObj)\r\n\r\n # We reorder the ear from right to left\r\n for i in range((len(earLocators) / points) - 1, -1, -1):\r\n for j in range(0, points):\r\n cmds.rename(TempRename[j + (i * points)], 'Ear_Coord#')", "def _should_be_renamed(old_name, new_name):\n # type: (str, str) -> bool\n\n # There's no point to rename into default name\n if _is_default_name(new_name):\n return False\n\n # Strip prefixes and check if names are the same\n return old_name.lstrip('_') != new_name.lstrip('_')", "def change_name(self, item):\n # Get the new name.\n new_name = str(item.text())\n if not new_name or not self.item_name:\n return None\n\n # See if the name was actually changed.\n if new_name == self.item_name:\n return None\n\n # If it was, change the name in the list/tree view and in Maya.\n if not new_name:\n item.setText(self.item_name)\n self.item_name = cmds.rename(self.item_name, new_name)\n item.setText(self.item_name)", "def name(self, new_name):\n self.rename(new_name)", "def setName(self, *args):\n return _libsbml.ExternalModelDefinition_setName(self, *args)", "def generate_rename_direct(self, prefix):\n return \"#define %s%s %s\" % (prefix, self.__name, self.__rename)", "def fix_name_table(font):\n modified = False\n name_records = font_data.get_name_records(font)\n\n copyright_data = name_records[0]\n years = re.findall('20[0-9][0-9]', copyright_data)\n year = min(years)\n copyright_data = u'Copyright %s Google Inc. All Rights Reserved.' % year\n\n if copyright_data != name_records[0]:\n print('Updated copyright message to \"%s\"' % copyright_data)\n font_data.set_name_record(font, 0, copyright_data)\n modified = True\n\n for name_id in [1, 3, 4, 6]:\n record = name_records[name_id]\n for source in NAME_CORRECTIONS:\n if source in record:\n oldrecord = record\n record = record.replace(source, NAME_CORRECTIONS[source])\n break\n if record != name_records[name_id]:\n font_data.set_name_record(font, name_id, record)\n print('Updated name table record #%d from \"%s\" to \"%s\"' % (\n name_id, oldrecord, record))\n modified = True\n\n trademark_names = ['Noto', 'Arimo', 'Tinos', 'Cousine']\n trademark_name = None\n font_family = name_records[1]\n for name in trademark_names:\n if font_family.find(name) != -1:\n trademark_name = name\n break\n if not trademark_name:\n print('no trademarked name in \\'%s\\'' % font_family)\n else:\n trademark_line = TRADEMARK_TEMPLATE % trademark_name\n if name_records[7] != trademark_line:\n old_line = name_records[7]\n font_data.set_name_record(font, 7, trademark_line)\n modified = True\n print('Updated name table record 7 from \"%s\" to \"%s\"' % (old_line, trademark_line))\n\n if name_records[11] != NOTO_URL:\n font_data.set_name_record(font, 11, NOTO_URL)\n modified = True\n print('Updated name table record 11 to \"%s\"' % NOTO_URL)\n\n if name_records[_LICENSE_ID] != _SIL_LICENSE:\n font_data.set_name_record(font, _LICENSE_ID, _SIL_LICENSE)\n modified = True\n print('Updated license id')\n\n if name_records[_LICENSE_URL_ID] != _SIL_LICENSE_URL:\n font_data.set_name_record(font, _LICENSE_URL_ID, _SIL_LICENSE_URL)\n modified = True\n print('Updated license url')\n\n # TODO: check preferred family/subfamily(16&17)\n\n return modified", "def rename(self, 
name: str):\n self.doc['name'] = name", "def _update_column_name(self, column, idx, old_name, name):\n dtype = self.dtype\n # Updating the names on the dtype should suffice\n dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def ChangeName(self, newName):\n if newName != \"\":\n newPath = self.format + os.sep + \"playlists\" + os.sep + newName + \".txt\"\n os.replace(self.path, newPath)\n self.path = newPath", "def re_name(name,new_name):\n\n try:\n os.rename(config_tools.full_dest+name,config_tools.full_dest+new_name)\n except OSError:\n print(f\"Не удалось переименовать {name}\")\n else:\n print(f\"{name} успешно переименновавано в {new_name}\")", "def final_rename(understat_no_similar, fpl_no_similar, join = 'inner'): \n name_mapper = {'Adrián':'Adrián Bernabé', # Contains both seasons corrections\n 'Alisson':'Alisson Ramses Becker',\n 'Allan':'Allan Marques Loureiro',\n 'André Gomes':'André Filipe Tavares Gomes',\n 'Angelino':'José Ángel Esmorís Tasende',\n 'Bernard':'Bernard Anício Caldeira Duarte', # Everton\n 'Bernardo Silva':'Bernardo Mota Veiga de Carvalho e Silva', # Manchester City\n 'Bernardo':'Bernardo Fernandes da Silva Junior', # \n 'Borja Bastón':'Borja González Tomás',\n 'Chicharito':'Javier Hernández Balcázar',\n 'David Luiz':'David Luiz Moreira Marinho', \n 'Ederson':'Ederson Santana de Moraes',\n 'Emerson':'Emerson Palmieri dos Santos',\n 'Fabinho':'Fabio Henrique Tavares',\n 'Felipe Anderson':'Felipe Anderson Pereira Gomes',\n 'Fred':'Frederico Rodrigues de Paula Santos', # Manchester United\n 'Hélder Costa': 'Hélder Wander Sousa de Azevedo e Costa', # Leeds\n 'Joelinton':'Joelinton Cássio Apolinário de Lira', # Chelsea\n 'Jonny':'Jonathan Castro Otto', # Wolves\n 'Jorginho':'Jorge Luiz Frello Filho', # Chelsea\n 'Jota':'José Ignacio Peleteiro Romallo',\n 'Kepa':'Kepa Arrizabalaga',\n 'Kiko Femenía':'Francisco Femenía Far',\n 'Lucas Moura':'Lucas Rodrigues Moura da Silva',\n 'Pedro': 'Pedro Rodríguez Ledesma', # Chelsea\n 'Raphinha':'Raphael Dias Belloli',\n 'Ricardo Pereira':'Ricardo Domingos Barbosa Pereira',\n 'Rodri':'Rodrigo Hernandez',\n 'Rúben Dias':'Rúben Santos Gato Alves Dias',\n 'Rúben Vinagre':'Rúben Gonçalo Silva Nascimento Vinagre',\n 'Semi Ajayi':'Oluwasemilogo Adesewo Ibidapo Ajayi',\n 'Trézéguet':'Mahmoud Ahmed Ibrahim Hassan', # Aston Villa\n 'Wesley':'Wesley Moraes',\n 'Willian':'Willian Borges Da Silva',\n }\n understat_no_similar['player_name'] = understat_no_similar['player_name'].map(name_mapper)\n manual_merge = pd.merge(fpl_no_similar, understat_no_similar, left_on=['player_name', 'kickoff_time'],\n right_on=['player_name', 'date'], how=join) # Merge using player name and date of game\n return manual_merge", "def __set_name(self):\n table_name = self.get_table_name()\n record, timestamp = self.__get_max_timestamp()\n self.name = \"%s_%s_%s\" % (table_name, record, timestamp)", "def fitarg_rename(fitarg, ren):\n tmp = ren\n if isinstance(ren, str):\n ren = lambda x: tmp + '_' + x\n ret = {}\n prefix = ['limit_', 'fix_', 'error_', ]\n for k, v in fitarg.items():\n vn = k\n pf = ''\n for p in prefix:\n if k.startswith(p):\n vn = k[len(p):]\n pf = p\n newvn = pf + ren(vn)\n ret[newvn] = v\n return ret", "def get_model_name(pipe) -> str:\n # https://stackoverflow.com/questions/14596884/remove-text-between-and\n model_name = pipe.named_steps.get(\"regressor\").__class__.__name__\n model_name = re.sub(r\"[\\(\\[].*?[\\)\\]]\", \"\", model_name)\n return model_name", "def 
mangle(raw_name: str) -> str:\n\n # Handle names with '.'.\n if '.' in raw_name:\n res = []\n for name in raw_name.split('.'):\n if invalid_identifier.search(name):\n res.append(mangle(name))\n else:\n res.append(name)\n return '.'.join(res)\n\n name = raw_name.lstrip('_')\n underscores = '_' * (len(raw_name) - len(name))\n return underscores + 'hyx_' + _mangle_re.sub(_match, name)", "def rename(self, identifier):\n self._line[7] = self._speaker = identifier", "def attr_namer(name, renames=renames):\n if name in renames:\n return renames[name]\n return name", "def rename_bindnames(tqry, li_adjust):\n for bindname, attrname in li_adjust:\n from_ = \"%(\" + bindname + \")s\"\n to_ = \"%(\" + attrname + \")s\"\n tqry = tqry.replace(from_, to_)\n return tqry", "def mangle_name(name):\n import re\n try:\n return re.sub('_+','_',re.sub('[^\\w_]','_',name).lower()).rstrip('_')\n except TypeError:\n raise TypeError(\n 'Trying to mangle name with invalid type of: ' + str(type(name)))", "def rename(ctx, input_file, output_file):\n ctx.ensure_object(dict)\n ctx.obj[\"reader\"] = PFBReader(input_file)\n ctx.obj[\"writer\"] = PFBWriter(output_file)" ]
[ "0.6066185", "0.57070076", "0.56384206", "0.5627522", "0.5619943", "0.56025624", "0.55501103", "0.5547868", "0.5511047", "0.54787534", "0.5460824", "0.5460824", "0.5460381", "0.5453813", "0.54244596", "0.53756005", "0.53688747", "0.5359618", "0.53477144", "0.5337874", "0.5324656", "0.5323677", "0.530052", "0.5275786", "0.52736783", "0.52571064", "0.52363616", "0.5226877", "0.522081", "0.5203459", "0.5197168", "0.5196763", "0.5196763", "0.519577", "0.51931244", "0.5189812", "0.51845104", "0.51753485", "0.5144874", "0.51347697", "0.51298887", "0.51211774", "0.5108633", "0.50992453", "0.50904846", "0.50725824", "0.5071338", "0.50676197", "0.5066788", "0.5063055", "0.5061545", "0.5059417", "0.50500983", "0.504518", "0.5043046", "0.5031018", "0.50223345", "0.5017828", "0.50155246", "0.50134337", "0.50058204", "0.5003995", "0.50010693", "0.50000507", "0.49779993", "0.49779493", "0.4975909", "0.4973682", "0.49684364", "0.49615604", "0.49577686", "0.49394235", "0.4934727", "0.4932984", "0.4932767", "0.49310797", "0.49310562", "0.4921925", "0.49218345", "0.49048197", "0.48967233", "0.48924798", "0.48914322", "0.48799878", "0.48792824", "0.48792425", "0.4878988", "0.4877065", "0.48759744", "0.4867598", "0.48632", "0.48620692", "0.4856957", "0.48494896", "0.4847258", "0.48417652", "0.48302287", "0.48266003", "0.4820236", "0.48181832" ]
0.6582319
0
create or update customer
def create_or_update_customer(entity):
	try:
		# suffix the organisation with "(C)" when it clashes with an existing supplier/customer group name
		organisation = entity.get('organisation').replace("'","")
		organisation = "%s(C)"%organisation if is_supplier_or_customer_group(organisation) else organisation

		name = frappe.db.get_value('Customer', organisation)
		if not name:
			customer = frappe.new_doc("Customer")
			customer.customer_name = organisation
		else:
			customer = frappe.get_doc("Customer", name)

		customer.entity_id = entity.get('entity_id')
		customer.customer_type = 'Company'
		# map the incoming group to an existing Customer Group (or create one), avoiding clashes with Customer names
		if entity.get('group'):
			if entity.get('group').strip() == 'General':
				customer.customer_group = 'All Customer Groups'
			elif frappe.db.get_value('Customer Group', entity.get('group').strip()):
				customer.customer_group = entity.get('group').strip() or 'All Customer Groups'
			elif frappe.db.get_value('Customer', entity.get('group').strip()):
				customer.customer_group = 'All Customer Groups'
			else:
				customer.customer_group = create_customer_group(entity.get('group').strip())

		customer.territory = 'Australia'
		customer.customer_status = 'Existing'
		customer.modified_date = entity.get('updated_at')
		customer.save(ignore_permissions=True)

		# restore the display name without the "(C)" suffix once the record exists
		if "(C)" in customer.customer_name:
			frappe.db.set_value("Customer", customer.name, "customer_name", organisation.replace("(C)", ""))

		create_or_update_contact(customer, entity)
		get_addresses(entity.get('entity_id'))

		# return status
		return {
			entity.get("entity_id"): {
				"operation": "Customer Created" if not name else "Customer Updated",
				"name": customer.name,
				"modified_date": entity.get("updated_at")
			}
		}
	except Exception as e:
		docname = entity.get('entity_id')
		response = entity

		log_sync_error("Customer", docname, response, e, "create_new_customer")
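A minimal usage sketch for the function above. The `sync_customers` wrapper, the batch loop, and the exact shape of the `entity` dict are illustrative assumptions; only `create_or_update_customer` itself comes from the document.

# Hypothetical caller (assumption): push a batch of source-system customer
# records through create_or_update_customer and collect the per-entity status.
def sync_customers(entities):
	results = {}
	for entity in entities:
		# assumed entity shape, based on the fields the function reads:
		# {"entity_id": "17", "organisation": "Acme Pty Ltd", "group": "Wholesale",
		#  "firstname": "Jane", "lastname": "Doe", "email": "jane@example.com",
		#  "updated_at": "2020-01-01 00:00:00"}
		status = create_or_update_customer(entity)
		if status:
			results.update(status)
	return results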
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_customer_update(self):\n # first performe create\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"email\": self.customer_data[\"email\"],\n \"phone\": self.customer_data[\"phone\"]\n }\n self._update_model(\"customer\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def updateCustomer(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('update_customer', params)", "def edit_customer(customer_id, password, name, email, phone):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n UPDATE Customers\n SET password=?, customer_name=?, phone=?, email=?\n WHERE id_customer=?\n \"\"\",\n (password, name, phone, email, customer_id))", "def update_customer(customer_id, login, name, email, phone=\"\", permission=0):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n UPDATE Customers\n SET login=?, customer_name=?, phone=?, email=?, perm=?\n WHERE id_customer=?\n \"\"\",\n (login, name, phone, email, permission, customer_id))", "def test_update_customer(self):\n # create a customer to update \n test_customer = self._create_customers(\"Alex\")\n resp = self.app.post(\n \"/customers\", json=test_customer.serialize(), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n \n # update the customer\n new_customer = resp.get_json()\n logging.debug(new_customer)\n new_customer[\"address\"] = \"unknown\"\n resp = self.app.put(\n \"/customers/{}\".format(new_customer[\"id\"]),\n json=new_customer,\n content_type=\"application/json\",\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n updated_customer = resp.get_json()\n self.assertEqual(updated_customer[\"address\"], \"unknown\")", "def update(self):\n return self._api.update_customer(**to_dict(self))", "def update_customer(cls, customer_data):\n customer_instance = cls.get_customer(customer_data['email'])\n for field_name, values in customer_data:\n setattr(customer_instance, field_name, values)\n customer_instance.save()\n return customer_instance", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n customer.save()\n except Exception as unknown_error:\n print(unknown_error)", "def update_customer(self):\n customer = self.customer\n name = self.display_name or self.name or \"\"\n email = self.billing_email or self.email or \"\"\n\n if stripe.api_key != \"sk_test_xxxx\":\n try:\n stripe.Customer.modify(customer.id, name=name, email=email)\n except Exception:\n logger.exception(\"Error syncing customer with Stripe\")\n\n customer.name = name\n customer.email = email\n customer.save()", "def post(self):\n data = request.json\n return save_new_customer(data=data)", "def edit_customer(cls, api, id, **data):\n return api.update_customer(id, **data)", "def save(self, customer):\n\t\t\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\tcursor.execute(\"SELECT * FROM Customers\")\n\t\t\t\tcustomers = cursor.fetchall()\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\tCREATE TABLE IF NOT EXISTS 
`Customers`\n\t\t\t\t\t(\n\t\t\t\t\t\tcustomer_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\t\t\t\tfirst_name TEXT NOT NULL,\n\t\t\t\t\t\tlast_name TEXT NOT NULL,\n\t\t\t\t\t\temail TEXT NOT NULL,\n\t\t\t\t\t\tphone_number TEXT NOT NULL,\n\t\t\t\t\t\tcity TEXT NOT NULL,\n\t\t\t\t\t\tstate TEXT NOT NULL,\n\t\t\t\t\t\tpostal_zip INTEGER NOT NULL,\n\t\t\t\t\t\taddress TEXT NOT NULL,\n\t\t\t\t\t\tis_active BOOLEAN NOT NULL,\n\t\t\t\t\t\tCONSTRAINT name_unique UNIQUE (first_name, last_name, email, phone_number, city, state, postal_zip, address)\n\t\t\t\t\t)\n\t\t\t\t\"\"\")\n\n\t\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO Customers VALUES (null, \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\")\n\t\t\t\"\"\".format(\n\t\t\t\t\t\tcustomer.get_first_name(), \n\t\t\t\t\t\tcustomer.get_last_name(), \n\t\t\t\t\t\tcustomer.get_email(), \n\t\t\t\t\t\tcustomer.get_phone_number(),\n\t\t\t\t\t\tcustomer.get_city(),\n\t\t\t\t\t\tcustomer.get_state(),\n\t\t\t\t\t\tcustomer.get_postal_zip(),\n\t\t\t\t\t\tcustomer.get_address(),\n\t\t\t\t\t\tcustomer.get_active_status()\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def save(self, customer):\n\t\t\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\tcursor.execute(\"SELECT * FROM Customers\")\n\t\t\t\tcustomers = cursor.fetchall()\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\tCREATE TABLE IF NOT EXISTS `Customers`\n\t\t\t\t\t(\n\t\t\t\t\t\tcustomer_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\t\t\t\tfirst_name TEXT NOT NULL,\n\t\t\t\t\t\tlast_name TEXT NOT NULL,\n\t\t\t\t\t\temail TEXT NOT NULL,\n\t\t\t\t\t\tphone_number TEXT NOT NULL,\n\t\t\t\t\t\tcity TEXT NOT NULL,\n\t\t\t\t\t\tstate TEXT NOT NULL,\n\t\t\t\t\t\tpostal_zip INTEGER NOT NULL,\n\t\t\t\t\t\taddress TEXT NOT NULL,\n\t\t\t\t\t\tis_active BOOLEAN NOT NULL,\n\t\t\t\t\t\tCONSTRAINT name_unique UNIQUE (first_name, last_name, email, phone_number, city, state, postal_zip, address)\n\t\t\t\t\t)\n\t\t\t\t\"\"\")\n\n\t\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO Customers VALUES (null, \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\")\n\t\t\t\"\"\".format(\n\t\t\t\t\t\tcustomer.get_first_name(), \n\t\t\t\t\t\tcustomer.get_last_name(), \n\t\t\t\t\t\tcustomer.get_email(), \n\t\t\t\t\t\tcustomer.get_phone_number(),\n\t\t\t\t\t\tcustomer.get_city(),\n\t\t\t\t\t\tcustomer.get_state(),\n\t\t\t\t\t\tcustomer.get_postal_zip(),\n\t\t\t\t\t\tcustomer.get_address(),\n\t\t\t\t\t\tcustomer.get_active_status()\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def update_customer(connection, customer_id, customer):\n connection.command_path = \"customer/{0}\".format(customer_id)\n extra_headers = {\n connection.header_key: connection.token,\n 'Content-Type': 'text/xml'\n }\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n customer_data = _build_customer_payload(customer)\n res = requests.put(url, headers=extra_headers,\n data=customer_data,\n verify=verify_ssl)\n if res.status_code == 400 and res.content == b'The Customer name must be unique.':\n raise DuplicateCustomerException(res.content)\n elif res.status_code == 400 and b'not a valid country code' in res.content:\n raise InvalidCountryCodeException(res.content)\n elif res.status_code == 200:\n return customers.parse_customer(res.content)\n else:\n raise UnExpectedCustomerException(res.content)", "def add_customer(insert_dict):\n return cr.add_customer(insert_dict)", "def test_update_customer(self):\n set_up_db()\n 
add_customer(*self.test_customer)\n update_customer_credit(1, 500000.00)\n self.assertEqual(500000.00, Customer.get_by_id(1).credit_limit)", "def update(customer, **data):\n if isinstance(customer, resources.Customer):\n customer = customer.id\n\n http_client = HttpClient()\n response, _ = http_client.patch(routes.url(routes.CUSTOMER_RESOURCE, resource_id=customer), data)\n return resources.Customer(**response)", "def put(self, customer_id):\n data = request.json\n return edit_customer(customer_id=customer_id, data=data)", "def add_customer(customer_id, name, lastname, homeaddress, phone_number, email, status, credit_limit):\n try:\n with customer_db.transaction():\n new_customer_mi = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n homeaddress=homeaddress,\n phone_number=phone_number,\n email=email,\n status=status,\n credit_limit=credit_limit\n )\n logger.debug(\"Added customer %s to %s\", new_customer_mi, customer_db.database)\n return new_customer_mi\n except Exception as e:\n logger.error(\"Error creating customer_id %s: %s\", customer_id, e)", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n print('Adding new customer, Customer ID {}...'.format(customer_id))\n try:\n Customer.get_by_id(customer_id)\n print('Customer ID {} is already in use'.format(customer_id))\n except Exception as ex:\n if \"instance matching query does not exist\" in str(ex):\n try:\n new_customer = Customer.create(customer_ID=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info('Added new customer, Customer ID %s', customer_id)\n except IntegrityError:\n print('Incorrect format, customer {} not saved'\n .format(customer_id))", "def createCustomer(sender, instance, **kwargs):\n Customer.objects.get_or_create(user=instance)", "def test_activate_customer(self):\n # create a customer to activate\n body = {\n \"name\": \"Kendall\",\n \"address\": \"333 Bedford Street\",\n \"phone_number\": \"555-555-3333\",\n \"email\": \"[email protected]\",\n \"credit_card\": \"VISA\"\n }\n resp_create = self.app.post('/customers',\n json=body,\n content_type='application/json')\n self.assertEqual(resp_create.status_code, status.HTTP_201_CREATED)\n self.assertEqual(resp_create.get_json()['active'], True)\n customer_id = resp_create.get_json()[\"id\"]\n\n # deactivate the customer\n logging.debug(customer_id)\n resp_deactivate = self.app.put(\"/customers/{}/deactivate\".format(customer_id),\n json=body,\n content_type=\"application/json\")\n self.assertEqual(resp_deactivate.status_code, status.HTTP_200_OK)\n self.assertEqual(resp_deactivate.get_json()[\"active\"], False)\n\n # activate the customer\n logging.debug(customer_id)\n resp_activate = self.app.put(\"/customers/{}/activate\".format(customer_id),\n json=body,\n content_type=\"application/json\")\n self.assertEqual(resp_activate.status_code, status.HTTP_200_OK)\n self.assertEqual(resp_activate.get_json()[\"active\"], True)", "def create_customer(cls, api, **data):\n return api.create_customer(**data)", "def add_customer(customer_id, first_name, last_name, home_address, phone_number,\n email_address, is_active, credit_limit):\n try:\n LOGGER.info('Successfully connected to the database')\n\n with DATABASE.transaction():\n new_customer = Customer.create(customer_id=customer_id,\n 
first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n is_active=is_active,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info(\"Customer added successfully\")\n\n except IntegrityError as error:\n LOGGER.info(error)\n LOGGER.info('Error occurred')", "def createCustomer(self, **params):\n return self.__req('create_customer', params)", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n logger.info(\n f\"Successfully added customer {customer_id} with {credit_limit}\"\n )\n customer.save()\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to added customer {customer_id}. {unknown_error}\"\n )\n print(unknown_error)", "def test_update_customer_success(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n\n email= '[email protected]'\n customer.email = email\n customer.save()\n\n self.assertEqual(customer.__str__(), email)", "def store_customer(self, name):\n pass", "def add_customer(login, password, name, phone, email):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n INSERT INTO Customers\n (login,password,customer_name,phone,email)\n VALUES(?,?,?,?,?)\n \"\"\",\n (login, password, name, phone, email))", "def create_customer(data):\n mandatory_params = ['customer_name', 'mobile_number']\n result = api_utils.check_required_params(mandatory_params, data)\n if result:\n return result\n mobile_number = db_helper.mobile_number_unique(data['mobile_number'])\n if not mobile_number:\n return api_utils.error(\"There already is a customer with \\\n mobile number {} found\".format(data['mobile_number']), 404)\n\n new_customer = db_helper.add_new_customer(data['customer_name'],\n mobile_number)\n return jsonify({'new_customer': new_customer})", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except Exception as unknown_error:\n print(f'Error. Cutomer {customer_id} does not exist. 
{unknown_error}')\n raise ValueError", "def update_customer(customer_id, credit_limit):\n try:\n with database.transaction():\n customer_update = Customer.get(Customer.customer_id == customer_id)\n LOGGER.info('Current limit: %s', customer_update.credit_limit)\n customer_update.credit_limit = credit_limit\n LOGGER.info('New credit limit: %s', customer_update.credit_limit)\n\n return True\n\n except DoesNotExist as err:\n\n LOGGER.warning('Customer ID: %s does not exist', customer_id)\n LOGGER.warning(err)\n\n return False", "def add_customer(*, customer_id, name=None, lastname=None, home_address=None,\n phone_number=None, email_address=None, status=None,\n credit_limit=None):\n with DATABASE.transaction():\n try:\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=name,\n last_name=lastname,\n address=home_address,\n phone=phone_number,\n email=email_address,\n is_active=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n LOGGER.info('Database add successful: (%s, %s)', lastname, name)\n return new_customer\n except pw.IntegrityError:\n LOGGER.warning('Database add error: (%s, %s)', lastname, name)", "def add_customer(self, info, dup):\n if not dup:\n self.cursor.execute(\"INSERT INTO customerpersonal VALUES (%s,%s)\", (int(info['phone']), info['address']))\n\n self.cursor.execute(\"INSERT INTO customercredentials VALUES (%s,%s,%s,%s,%s,%s)\",\n (info['loginID'], info['firstName'], info['lastName'], info['salt'],\n info['key'], int(info['phone'])))\n self.db.commit()", "def insert_customer(self):\n if self.check_user():\n return False\n else:\n cursor = self.db.cursor()\n cursor.execute(\n \"INSERT INTO costumers (dni, costumername, costumerlastname, costumeraddress, costumerpostcode, costumertlfnumber, costumerbirth) VALUES (?, ?, ?, ?, ?, ?, ?)\",\n (self.dni, self.name, self.last_name, self.address, self.postal_code, self.t_number, self.date_birth))\n self.db.commit()\n return True", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n # database.transaction; all work given to database gets done or none of it\n with cm.DATABASE.transaction():\n try:\n # .create inserts the data into the database\n new_customer = cm.Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n # .save() will write the data to the database\n new_customer.save()\n LOGGER.info(\"Added customer [%s]\", customer_id)\n except pw.IntegrityError:\n LOGGER.error(\"Customer [%s] not added to database!\", customer_id)\n raise pw.IntegrityError", "def add_customer(customer_id, name, lastname, home_address, phone_number, email_address, status,\n credit_limit):\n init_database()\n try:\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n active_status=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n logging.info('New customer, ID %s, added successfully.', customer_id)\n return True\n except peewee.IntegrityError as exc:\n logging.error('Error creating new customer with ID %s: %s.', customer_id, exc)\n return False\n finally:\n database.close()", "def add_customer(\n customer_id,\n name,\n last_name,\n home_address,\n phone_number,\n email_address,\n status,\n credit_limit,\n):\n 
LOGGER.info(\"Adding new customer, %s %s to database\", name, last_name)\n try:\n Customers.create(\n customer_id=customer_id,\n name=name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n LOGGER.info(\"Added new customer %s %s to database\", name, last_name)\n except IntegrityError as e_val:\n LOGGER.warning(\"Customer %s already exists\", customer_id)\n LOGGER.warning(e_val)", "def update(self, customerguid, name=\"\", login=\"\", password=\"\", email=\"\", address=\"\", vat=\"\", jobguid=\"\", executionparams=None):", "def add_customer(customer_id, first, last, addr, phone, email, status, limit):\n try:\n LOGGER.info('Creating customer record')\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=first,\n last_name=last,\n home_address=addr,\n phone_number=phone,\n email_address=email,\n status=status,\n credit_limit=limit\n )\n new_customer.save()\n LOGGER.info('Added customer: %s', new_customer.customer_id)\n except IntegrityError as err:\n LOGGER.warning('Error creating = ID: %s', customer_id)\n LOGGER.warning(err)\n\n return Customer", "def test_customer_create(self):\n self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])", "def test_invoice_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # create another customer\n id_other = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id_other:\n # then performe update\n data = self.invoice_data\n data[\"customer_id\"] = id_other\n self._update_model(\"invoice\", id, data, [])\n self.assertIsNotNone(id_other)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def add_customer(customer_id,\n name,\n lastname,\n home_address,\n phone_number,\n email,\n status,\n credit_limit):\n # pylint: disable = W0703\n try:\n with DB.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email=email,\n status=status.lower(),\n credit_limit=credit_limit)\n new_customer.save()\n logging.info('Customer(s) successfully added')\n\n except Exception as error:\n LOGGER.info(f'Error creating = {name}')\n LOGGER.info(error)", "def create_or_update_contact(customer, entity):\n\tname = frappe.db.get_value('Contact', { 'entity_id': entity.get('entity_id') })\n\tif not name:\n\t\tcontact = frappe.new_doc('Contact')\n\telse:\n\t\tcontact = frappe.get_doc(\"Contact\", name)\n\n\tif not entity.get('firstname'):\n\t\treturn\n\t\n\tcontact.first_name = entity.get('firstname')\n\tcontact.last_name = entity.get('lastname')\n\tcontact.customer = customer.name\n\tcontact.customer_name = customer.customer_name\n\tcontact.entity_id = entity.get('entity_id')\n\tcontact.email_id = entity.get('email')\n\tcontact.save(ignore_permissions=True)", "def update(self):\n schema = load_customer_schema(self.request)\n for key, value in schema.iteritems():\n setattr(self.customer, key, value)\n return {'klant': self.customer}", "def test_update_customer_credit(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n update_customer_credit(user_1['customer_id'], 5000.00)\r\n 
query = Customer.get(Customer.customer_id == user_1['customer_id'])\r\n self.assertEqual(5000.00, query.customer_limit)\r\n\r\n # Test for non-existant customer\r\n with self.assertRaises(ValueError):\r\n update_customer_credit('456879', 5000.00)\r\n\r\n # Test for non-float value inputted\r\n with self.assertRaises(TypeError):\r\n update_customer_credit(user_1['customer_id'], '$20')\r\n drop_db()", "def test_update_stripe_customer_id(self):\n pass", "def test_update_no_customer(self):\n set_up_db()\n with self.assertRaises(ValueError):\n update_customer_credit(2, 5.50)", "def update_customer_credit(cust_id, credit_limit):\n update_query = Customer.update(credit_limit=credit_limit) \\\n .where(Customer.customer_id == cust_id)\n if not update_query.execute():\n raise ValueError(\"Record does not exist\")\n return True", "def update_customer_credit(customer_id, credit_limit):\n init_database()\n try:\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n return True\n except peewee.DoesNotExist:\n logging.error(\"Customer ID %s doesn't exist in database.\", customer_id)\n raise ValueError('Customer ID does not exist in database.')\n finally:\n database.close()", "def test_create_customer(self):\n create_customer_url = reverse(\"customer_list\")\n\n customer_info = {\"first_name\": \"Denny\", \"last_name\": \"Wayne\"}\n\n response = self.client.post(\n create_customer_url, data=customer_info, format=\"json\"\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 5)\n self.assertEqual(Customer.objects.get(pk=5).first_name, \"Denny\")\n self.assertEqual(Customer.objects.get(pk=5).last_name, \"Wayne\")", "def test_update_customer_invalid_payload(self):\n update_customer_url = reverse(\"customer_detail\", kwargs={\"pk\": 1})\n\n payload = {\"first_name\": \"Dennis\", \"last_name\": \"\", \"is_active\": True}\n\n response = self.client.put(update_customer_url, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_add_customer(self):\n set_up_db()\n add_customer(*self.test_customer)\n test_customer = Customer.get_by_id(1)\n self.assertEqual(\"Bruce\", test_customer.name)\n self.assertEqual(\"Wayne\", test_customer.last_name)\n self.assertEqual(\"1007 Mountain Drive, Gotham\", test_customer.home_address)\n self.assertEqual(\"228-626-7699\", test_customer.phone_number)\n self.assertEqual(\"[email protected]\", test_customer.email)\n self.assertEqual(True, test_customer.status)\n self.assertEqual(200000.00, test_customer.credit_limit)", "def test_update_customer_fails(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n\n with self.assertRaises(IntegrityError):\n customer.email = None\n customer.save()", "def test_set_existing_customer(self):\n request = self.factory.get('/', follow=True)\n request.user = self.bart\n request.session = {'session_key': 'bart1234'}\n self.cm.process_request(request)\n self.assertEqual(request.customer, self.bart.customer)", "def test_create_customer_success(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n self.assertEqual(customer.__str__(), customer_data['email'])", "def test_create_new_customer(client, db_session):\n # Arrange\n customer_data = {\"name\": \"Customer 1\", \"isActive\": True}\n\n # Act\n response = client.post(\"api/customers/\", json=customer_data)\n response_data = 
response.get_json()\n\n # Assert\n assert response.status_code == status.HTTP_201_CREATED\n assert response_data[\"name\"] == customer_data[\"name\"]", "def customer_put_ajax(h):\n global html\n html = h\n ##\n #Created By\n ##\n updated_by = html.req.session.get('username')\n if updated_by is None:\n updated_by = \"SuperAdmin\" \n\n details_list = [\n 'user_id',\n 'user_name',\n 'password',\n 'first_name',\n 'last_name',\n 'designation',\n 'company_name',\n 'groups',\n 'mobile_no',\n 'email_id',\n 'city_id',\n 'state_id',\n 'country_id',\n 'address',\n # 'created_by', put created by manually\n # 'is_customer',\n 'telephone_no',\n 'fax',\n 'usage',\n 'purpose'\n ]\n user_details = {}\n user_details[\"updated_by\"] = updated_by #created by what main user ?\n user_details[\"is_customer\"] = 1 #yes this is a customer\n for details in details_list:\n if html.var(details) != None and len(html.var(details)) > 0:\n user_details[details] = html.var(details)\n else:\n if details == \"usage\":\n user_details[details] = 0\n elif details == \"password\":\n user_details[\"password\"] = \"\"\n else:\n user_details[details] = \"unknown\"\n\n result = customer_put(user_details)\n html.write(json.dumps(result))", "def customer(self, customer):\n\n self._customer = customer", "def customer(self, customer):\n\n self._customer = customer", "def test_update_customer_credit(_update_customer_credit):\n for customer in _update_customer_credit:\n bo.add_customer(\n customer[0],\n customer[1],\n customer[2],\n customer[3],\n customer[4],\n customer[5],\n customer[6],\n customer[7]\n )\n\n bo.update_customer_credit(\"598\", 110)\n updated_cust = bo.CustomerCredit.select().join(\n bo.Customer).where(bo.Customer.customer_id == '598')\n assert updated_cust[0].credit_limit == 110\n\n with pytest.raises(ValueError) as execinfo:\n bo.update_customer_credit(\"11111\", 456)\n assert 'NoCustomer' in str(execinfo.value)\n\n bo.delete_customer('598')\n\n assert bo.list_active_customers() == 0", "def test_create_customer(self):\n url = reverse('customers-list')\n data = {\n 'first_name': self.customer_first_name,\n 'last_name': self.customer_last_name,\n 'email': self.customer_email\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 1)\n self.assertEqual(Customer.objects.get().first_name, 'John')", "def test_update_customer_valid_payload(self):\n update_customer_url = reverse(\"customer_detail\", kwargs={\"pk\": 1})\n\n payload = {\"first_name\": \"Dennis\", \"last_name\": \"Ng'ang'a\", \"is_active\": True}\n\n response = self.client.put(update_customer_url, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n customer = Customer.objects.get(pk=1)\n self.assertEqual(customer.last_name, \"Ng'ang'a\")", "def update_customer_status():\n data = user_obj.update_customer_status(request.forms)\n return data", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n logger.info(\n f\"Successfully updated customer {customer_id} credit limit\"\n )\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to update customer {customer_id}\"\n \" credit limit. {unknown_error}\"\n )\n print(f'Error. Cutomer {customer_id} does not exist. 
{unknown_error}')\n raise ValueError", "def create_or_update_address(address, customer):\n\tname = frappe.db.get_value('Address', { 'entity_id': address.get('entity_id') })\n\tif not name:\n\t\taddr = frappe.new_doc('Address')\n\t\taddr.address_title = \"{} {} {}\".format(\n\t\t\taddress.get(\"firstname\"),\n\t\t\taddress.get(\"lastname\"),\n\t\t\taddress.get(\"entity_id\")\n\t\t)\n\telse:\n\t\taddr = frappe.get_doc(\"Address\", name)\n\n\taddr.address_type = get_address_type(address).get('type')\n\taddr.entity_id = address.get('entity_id')\n\taddr.address_line1 = address.get('street')[0]\n\taddr.address_line2 = address.get('street')[1] if len(address.get('street')) > 1 else \"\"\n\taddr.city = address.get('city')\n\taddr.country = frappe.db.get_value('Country', { 'code': address.get('country_id') })\n\taddr.state = address.get('region')\n\taddr.pincode = address.get('postcode')\n\taddr.phone = address.get('telephone') or '00000'\n\taddr.fax = address.get('fax')\n\taddr.customer = customer\n\taddr.customer_name = address.get('firstname')+' '+address.get('lastname')\n\taddr.is_primary_address = get_address_type(address).get('is_primary_address')\n\taddr.is_shipping_address = get_address_type(address).get('is_shipping_address')\n\n\taddr.save(ignore_permissions=True)", "def ht_get_stripe_customer(account, cc_token=None, cc_card=None, cust=None):\n\n\tif (account.stripe_cust is not None):\n\t\tprint 'ht_get_stripe_customer_id(): found customer', account.stripe_cust\n\t\tstripe.api_key = sc_server.config['STRIPE_SECRET']\n\t\tstripe_cust = stripe.Customer.retrieve(account.stripe_cust)\n\t\tprint 'ht_get_stripe_customer_id(): update customer,' + str(stripe_cust.get('email')) + ', w/ info(' + str(cc_token) + ', ' + str(cc_card) + ')'\n\t\tstripe_cust.cards.create(card=cc_token)\n\t\treturn account.stripe_cust\n\n\tprint 'ht_get_stripe_customer_id: customer does not exist, create'\n\ttry:\n\t\tstripe.api_key = sc_server.config['STRIPE_SECRET']\n\n\t\tht_metadata = {}\n\t\tht_metadata['ht_account'] = account.userid\n\n\t\tprint 'ht_get_stripe_customer_id: customer info cc_token: ' + str(cc_token) + ' cc_card: ' + str(cc_card)\n\t\tstripe_customer = stripe.Customer.create(card=cc_token, description=str(account.userid), metadata=ht_metadata, email=account.email)\n\t\tstripe_cust\t= stripe_customer['id']\n\t\tstripe_card\t= stripe_customer['default_card']\n\t\tprint 'ht_get_stripe_customer_id: New Customer (%s, %s)' % (stripe_cust, stripe_card)\n\t\tpp(stripe_cust)\n\n\t\tprint 'ht_get_stripe_customer_id: Update Account'\n\t\taccount.stripe_cust = stripe_cust\n\t\tdb_session.add(account)\n\t\tdb_session.commit()\n\texcept Exception as e:\n\t\t# problems with customer create\n\t\tprint type(e), e\n\t\tdb_session.rollback()\n\n\tprint 'ht_get_stripe_customer_id:', stripe_cust\n\treturn stripe_cust", "def post(self, request, id):\n form = CallRegisterForm(request.POST or None)\n if form.is_valid():\n object = get_object_or_404(CallRegister, pk=id)\n # Getting only ONE customer from db\n # that matches passed name\n # MUST BE CHANGED\n CallRegister.objects.filter(pk=id).update(\n customer = get_object_or_404(ClientDetails, pk=request.POST['customer-id']),\n complaint_nature = form.cleaned_data['complaint_nature'],\n brand = form.cleaned_data['brand'],\n product_name = form.cleaned_data['product_name'],\n product_serial = form.cleaned_data['product_serial'],\n product_coverage = form.cleaned_data['product_coverage'],\n appointment_date = form.cleaned_data['appointment_date'],\n appointment_time = 
form.cleaned_data['appointment_time'],\n edited_by = self.request.user,\n edit_datetime = datetime.datetime.now()\n )\n messages.add_message(request, messages.INFO, 'Success - Call detials edited successfully!')\n return redirect(reverse('calls:call_detail_view', kwargs={'id':id}))\n messages.add_message(request, messages.INFO, 'Failed - Invalid details')\n return redirect(reverse('calls:call_edit_view', kwargs={'id':id}))", "def test_newCustomer(self):\n\t\tdashboardPage = DashboardPage(self.driver)\n\t\tdashboardPage.goToOnboard()\n\n\n\t\tdashboardPage.createCustomer(USER_NAME, S3FOLDER)\n\t\tdashboardPage.goToCustomerList()\n\t\tdashboardPage.sortRecentCustomer()\n\n\t\tinitialId = dashboardPage.getId()\n\t\teditPage = dashboardPage.goToEditPage() \n\t\tcheckId, checkName, checkS3Folder, maxSize, panoMaxSize, checkBox = editPage.getParameters()\n\n\n\t\tself.assertEqual(initialId, checkId)\n\t\tself.assertEqual(checkName, USER_NAME)\n\t\tself.assertEqual(checkS3Folder, S3FOLDER)\n\t\tself.assertEqual(maxSize, MAX_SIZE)\n\t\tself.assertEqual(panoMaxSize, PANO_MAX_SIZE)\n\t\tself.assertEqual(CHECK_BOX, checkBox)", "def update_customer_credit(customer_id, credit_limit):\n try:\n customer = cm.Customers.get(cm.Customers.customer_id == customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except cm.DoesNotExist:\n raise ValueError", "def add_customer(self, Customer):\n self._customer_repo.add_customer(Customer)", "def customer_group_customer_put(user_id, group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n user_group_user_details = {}\n user_group_user_details[\"user_id\"] = user_id\n user_group_user_details[\"group_id\"] = group_id\n\n query = \"\"\"\n Update `users_groups`\n SET `group_id` = \\\"%(group_id)s\\\"\n WHERE `user_id` = \\\"%(user_id)s\\\" \n \"\"\" %(user_group_user_details)\n cursor = db.cursor()\n result = {\"success\" : 0, \"message\" : \"Customer's Group is not updated\"}\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer updated Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer can not be updated in the Group. 
Error \\\"\\'%s\\'\\\" \\\n Query \\\"\\'%s\\'\\\" \" % (e, query) }\n finally:\n cursor.close()\n db.close()\n return result", "def update_customer_credit(customer_id, credit_limit):\n customer = search_customer(customer_id)\n if customer is None:\n raise ValueError(f'Could not find customer for update with id '\n f'{customer_id}.')\n customer.credit_limit = credit_limit\n customer.save()", "def test_deactivate_customer(self):\n #create a customer to deactivate\n body = {\n \"name\": \"Robin\",\n \"address\": \"222 Bleeker Street\",\n \"phone_number\": \"555-555-2222\",\n \"email\": \"[email protected]\",\n \"credit_card\": \"VISA\"\n }\n \n #test_customer = self._create_customers(\"Alex\")\n #logging.debug(test_customer)\n #test_customer.create() \n resp_create = self.app.post('/customers',\n json=body,\n content_type='application/json')\n self.assertEqual(resp_create.status_code, status.HTTP_201_CREATED)\n self.assertEqual(resp_create.get_json()['active'], True)\n customer_id = resp_create.get_json()[\"id\"]\n\n # deactivate the customer\n logging.debug(customer_id)\n resp_deactivate = self.app.put(\"/customers/{}/deactivate\".format(customer_id),\n content_type=\"application/json\")\n self.assertEqual(resp_deactivate.status_code, status.HTTP_200_OK)\n self.assertEqual(resp_deactivate.get_json()[\"active\"], False)", "def save_object(self, data):\n return Customer(**data)", "def customer_created_handler(event):\n obj = event.obj\n\n # submit customer after creation\n obj.workflow.submit()", "def test_customer_detail(self):\n # first performing create\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then performing detail\n self._detail_model(\"customer\", self.customer_data, id, [\"name\", \"email\", \"phone\"])\n \n self.assertIsNotNone(id)", "def add_customer(db_url: str):\n db_url = \"{}/{}\".format(db_url, \"user_api\")\n engine = create_engine(db_url, echo=True)\n session = sessionmaker(engine)()\n customer = Customer()\n session.add(customer)\n session.commit()\n return customer.id", "def test_add_customer(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n query = Customer.get(Customer.customer_id == user_1['customer_id'])\r\n self.assertEqual(user_1['name'], query.customer_name)\r\n self.assertEqual(user_1['lastname'], query.customer_last_name)\r\n self.assertEqual(user_1['home_address'], query.customer_address)\r\n self.assertEqual(user_1['phone_number'], query.customer_phone)\r\n self.assertEqual(user_1['email_address'], query.customer_email)\r\n self.assertEqual(user_1['status'], query.customer_status)\r\n self.assertEqual(user_1['credit_limit'], query.customer_limit)\r\n\r\n # add another person\r\n add_customer(**user_2)\r\n query = Customer.get(Customer.customer_id == user_2['customer_id'])\r\n self.assertEqual(user_2['name'], query.customer_name)\r\n self.assertEqual(user_2['lastname'], query.customer_last_name)\r\n self.assertEqual(user_2['home_address'], query.customer_address)\r\n self.assertEqual(user_2['phone_number'], query.customer_phone)\r\n self.assertEqual(user_2['email_address'], query.customer_email)\r\n self.assertEqual(user_2['status'], query.customer_status)\r\n self.assertEqual(user_2['credit_limit'], query.customer_limit)\r\n\r\n # add a duplicate person\r\n with self.assertRaises(ValueError):\r\n add_customer(**user_2)\r\n drop_db()", "def test_customer_delete(self):\n # first performe create\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if 
id:\n # then performe delete\n self._delete_model(\"customer\", id)\n self.assertIsNotNone(id)", "def customer_post(user_details):\n now = datetime.datetime.now()\n f = '%Y-%m-%d %H:%M:%S'\n insert_time = now.strftime(f)\n\n user_details[\"creation_time\"] = insert_time\n\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n INSERT INTO `users`\n ( `user_id`, \n `first_name`, \n `last_name`, \n `designation`, \n `company_name`, \n `mobile_no`, \n `address`, \n `city_id`, \n `state_id`,\n `country_id`, \n `email_id`, \n `is_customer`, \n `telephone_no`, \n `fax`, \n `usage`, \n `purpose`\n ) \n VALUES \n ( \\\"%(user_id)s\\\",\n \\\"%(first_name)s\\\",\n \\\"%(last_name)s\\\",\n \\\"%(designation)s\\\",\n \\\"%(company_name)s\\\",\n \\\"%(mobile_no)s\\\",\n \\\"%(address)s\\\",\n \\\"%(city_id)s\\\",\n \\\"%(state_id)s\\\",\n \\\"%(country_id)s\\\",\n \\\"%(email_id)s\\\",\n %(is_customer)s,\n \\\"%(telephone_no)s\\\",\n \\\"%(fax)s\\\",\n %(usage)s,\n \\\"%(purpose)s\\\"\n )\n \"\"\" %(user_details)\n\n cursor = db.cursor()\n result = {\"success\" : 1, \"message\" : \"Customer can not be added\"}\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer added Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer can not be added. Error \\\"\\'%s\\'\\\" \" % (e) }\n finally:\n cursor.close()\n db.close()\n\n #relate customer to the customer group\n result = customer_group_customer_post(user_details[\"user_id\"], user_details[\"groups\"])\n\n #insert customer into login table\n\n customer_login_post(user_details)\n\n return result", "def get_customer(self) -> djstripe.models.Customer:\n if self.customer_id:\n return self.customer\n\n name = self.display_name or self.name or \"\"\n email = self.billing_email or self.email or \"\"\n\n if stripe.api_key != \"sk_test_xxxx\":\n try:\n customer = stripe.Customer.create(name=name, email=email)\n self.customer = djstripe.models.Customer.sync_from_stripe_data(customer)\n except Exception:\n logger.exception(\"Error creating customer on Stripe\")\n else:\n self.customer = djstripe.models.Customer.objects.create(\n id=shortuuid.uuid(), name=name, email=email\n )\n\n self.save()\n return self.customer", "def mutate(self, info, **kwargs):\n email = kwargs.get('email')\n password = kwargs.get('password')\n complete_name = kwargs.get('complete_name')\n user = get_user_model()(email=email)\n user.set_password(password)\n user.is_service = True\n user.save()\n service = Service.objects.create(user=user)\n service = Service(\n complete_name=complete_name,\n email=email,\n user=user,\n )\n service.save()\n\n return CreateService(service=service)", "def test_customer_creation():\n agent = AgentFactory()\n customer = CustomerFactory(agent=agent)\n assert agent == customer.agent\n\n customer.name = 'customer test name 1'\n customer.customer_type = 'hom'\n customer.save()\n assert customer.name == 'customer test name 1'\n\n customer.name = 'customer test name 2'\n customer.customer_type = 'oth'\n customer.save()\n assert customer.name == 'customer test name 2'", "def test_associate_customer_on_signup(self):\n # is this necessary, or is it handled by login logic anyway?\n pass", "def customer():\n customer = stripe.Customer.create(\n description=\"User created by pytest test_payments.py\",\n email=generate_random_email(),\n address={\"country\": \"DK\"},\n )\n yield customer\n customer.delete()", "def test_get_customer(self):\n # get the id of a customer\n 
test_customer = self._create_customers(\"Alex\")\n logging.debug(test_customer)\n test_customer.create() \n resp = self.app.get(\n \"/customers/{}\".format(test_customer.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_customer.name)", "def create_user(data):\n return woo_request_helper().post_details(wc_endpoint='customers', params=data)", "def update_customer_credit(customer_id, credit_limit):\n try:\n with customer_db.transaction():\n customer = Customer.select().where(Customer.customer_id == customer_id).get()\n customer.credit_limit = credit_limit\n customer.save()\n logger.info(\"Updated credit limit of customer ID %s to %s\", customer_id, credit_limit)\n except Customer.DoesNotExist as e:\n logger.error(\"Error updating credit limit for customer %s: %s\", customer_id, e)\n # Absolutely could not get pylint to recognize peewee's DoesNotExist error type, so raise ValueError instead\n raise ValueError(\"NoCustomer\")", "def _create_customers(self, customer_name=\"Alex\"):\n test_customer = Customer(\n name=customer_name,\n address=\"Washington Square Park\",\n phone_number=\"555-555-1234\",\n email=\"[email protected]\",\n credit_card=\"VISA\",\n active = True\n )\n return test_customer", "def customer_login_put(user_details):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n UPDATE `user_login` \n SET `old_password`= `password`, \n `password` = SHA('%(password)s'), \n `change_password_date` = NOW() \n WHERE `user_id` = \\\"%(user_id)s\\\"\n \"\"\" % (user_details)\n\n cursor = db.cursor()\n result = {\"success\" : 0, \"message\" : \"Customer Login not updated\"}\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Login updated Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer Login can not be created. Error \\\"\\'%s\\'\\\" \" % (e) }\n finally:\n cursor.close()\n db.close()\n\n return result", "def test_delete_customer(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n delete_customer(user_1['customer_id'])\r\n self.assertEqual({}, search_customer(user_1['customer_id']))\r\n drop_db()", "def billCustomer(self, **params):\n self.__requireParams(params, ['id', 'amount'])\n return self.__req('bill_customer', params)", "def customer(self, customer):\n if customer is None:\n raise ValueError(\"Invalid value for `customer`, must not be `None`\") # noqa: E501\n\n self._customer = customer", "def customer_update_coordinate(request):\n\n # Check if the request type if POST\n if request.method == \"POST\":\n # Deserialize the JSON because it will be in bytes\n body = json.loads(request.body)\n # Make success true\n body[\"success\"] = True\n result = Coordinates.objects.filter(user_id=body['customer_id'])\n if not result.exists() or result[0].user_id.role != str(Role.customer):\n # Make success false if something goes wrong\n body[\"success\"] = False\n # Return the body JSON\n return JsonResponse(body)\n # The result variable is immutable. 
So, put it to a new coordinates\n # object\n coordinates = result[0]\n coordinates.latitude = body[\"latitude\"]\n coordinates.longitude = body[\"longitude\"]\n # Save the coordinates object\n coordinates.save()\n\n # Return the body JSON\n return JsonResponse(body)\n else:\n # Return method not allowed\n return HttpResponse(status=405)", "def update_customer_credit(customer_id, credit_limit):\n with cm.DATABASE.transaction():\n try:\n a_customer = cm.Customer.get(\n cm.Customer.customer_id == customer_id)\n a_customer.credit_limit = credit_limit\n a_customer.save()\n LOGGER.info(\"Updating customer [%s] credit limit to $%s\",\n customer_id, credit_limit)\n except pw.DoesNotExist:\n LOGGER.warning(\"Error updating credit limit for customer [%s]!\",\n customer_id)\n raise ValueError", "def customer_profile(request):\r\n\r\n if request.method == \"GET\":\r\n user = request.user\r\n context = {\"user\": request.user}\r\n return render(request, 'customer_profile.html', context)\r\n\r\n elif request.method == \"POST\":\r\n\r\n if 'edit' in request.POST:\r\n user = request.user\r\n form = {\"formA\": UserCustomerFormA(instance = user), \"formB\": UserCustomerFormB(instance = user.customer)}\r\n context = {\"user\": request.user,\r\n \"form\": form}\r\n return render(request, 'customer_profile.html', context)\r\n\r\n else:\r\n req=request.POST\r\n form_user = {\"last_name\": req[\"last_name\"]}\r\n form_cust = {\"phone_number\": req[\"phone_number\"], \"street_address\": req[\"street_address\"], \"city\": req[\"city\"], \"state\": req[\"state\"], \"zipcode\": req[\"zipcode\"]}\r\n\r\n\r\n user_form = UserCustomerFormA(form_user)\r\n\r\n if user_form.is_valid():\r\n with connection.cursor() as cursor:\r\n cursor.execute(\"UPDATE auth_user SET last_name=%s WHERE id=%s\", [req[\"last_name\"], request.user.id])\r\n\r\n customer_form = UserCustomerFormB(form_cust)\r\n\r\n if customer_form.is_valid():\r\n with connection.cursor() as cursor:\r\n cursor.execute(\"UPDATE website_customer SET phone_number=%s, street_address=%s, city=%s, state=%s, zipcode=%s WHERE id=%s\", [req[\"phone_number\"], req[\"street_address\"], req[\"city\"], req[\"state\"], req[\"zipcode\"], request.user.customer.id])\r\n\r\n\r\n user = User.objects.raw(\"Select * From auth_user where id=%s\",[request.user.id])\r\n context = {\"user\": user[0]}\r\n return render(request, 'customer_profile.html', context)", "async def assign_customer_table(self, customer_id, table_id):\n if customer_id in self.waiting_list['customers']:\n # update db\n await self.db_customers.update(\n table_id=table_id,\n where={\n 'id': customer_id\n }\n )\n\n # send please_sit \n await self.restaurant.events_for_client.put(\n {\n \"name\": \"please_sit\",\n \"payload\": {\n \"customer_id\": customer_id,\n \"table_id\": table_id\n }\n }\n )", "def create(self):\n schema = load_customer_schema(self.request)\n customer = models.Klant(**schema)\n self.request.db.add(customer)\n self.request.db.flush()\n return {'klant': customer}", "def add_customers(current_customers, new_customer_list):\n for new in new_customer_list:\n new_id = _get_next_cust_id()\n current_customers[new_id] = new\n customer_cases[new_id] = {}" ]
[ "0.7436413", "0.7397227", "0.7354975", "0.71516573", "0.7110927", "0.710495", "0.7101501", "0.70412105", "0.69803005", "0.69173807", "0.686979", "0.68632066", "0.68632066", "0.6809742", "0.6789764", "0.6761391", "0.6745711", "0.67418706", "0.67362934", "0.6710319", "0.67056274", "0.6701821", "0.6653577", "0.66515476", "0.66486007", "0.6639992", "0.6614949", "0.65890306", "0.6561591", "0.6460951", "0.64338213", "0.64309824", "0.6402042", "0.6400776", "0.63984215", "0.6386957", "0.6374605", "0.63386345", "0.6318502", "0.6305202", "0.6289629", "0.6268674", "0.6260759", "0.624003", "0.6228417", "0.6194634", "0.616847", "0.61345863", "0.61289454", "0.60947096", "0.60940135", "0.6082572", "0.60620934", "0.60553503", "0.6022215", "0.60162115", "0.60125417", "0.6011018", "0.5998328", "0.5998328", "0.5992947", "0.59893966", "0.59580266", "0.59485096", "0.5942918", "0.59341806", "0.5929495", "0.5880494", "0.58737737", "0.58689916", "0.5867525", "0.5855054", "0.58184355", "0.5803608", "0.57664657", "0.5751895", "0.5739604", "0.57107604", "0.5708072", "0.5684908", "0.56830984", "0.5669277", "0.56281906", "0.562799", "0.56050104", "0.55980885", "0.5597572", "0.55941063", "0.5581853", "0.55794805", "0.55781776", "0.5560928", "0.5552437", "0.55498415", "0.5536999", "0.5493043", "0.54898894", "0.5480231", "0.54772854", "0.54762363" ]
0.7588754
0
create or update the customer Contact
def create_or_update_contact(customer, entity): name = frappe.db.get_value('Contact', { 'entity_id': entity.get('entity_id') }) if not name: contact = frappe.new_doc('Contact') else: contact = frappe.get_doc("Contact", name) if not entity.get('firstname'): return contact.first_name = entity.get('firstname') contact.last_name = entity.get('lastname') contact.customer = customer.name contact.customer_name = customer.customer_name contact.entity_id = entity.get('entity_id') contact.email_id = entity.get('email') contact.save(ignore_permissions=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_or_update_customer(entity):\n\ttry:\n\t\torganisation = entity.get('organisation').replace(\"'\",\"\")\n\t\torganisation = \"%s(C)\"%organisation if is_supplier_or_customer_group(organisation) else organisation\n\t\tname = frappe.db.get_value('Customer', organisation)\n\t\tif not name:\n\t\t\tcustomer = frappe.new_doc(\"Customer\")\n\t\t\tcustomer.customer_name = organisation\n\t\telse:\n\t\t\tcustomer = frappe.get_doc(\"Customer\", name)\n\n\t\tcustomer.entity_id = entity.get('entity_id')\n\t\tcustomer.customer_type = 'Company'\n\t\tif entity.get('group'):\n\t\t\tif entity.get('group').strip() == 'General':\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer Group', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = entity.get('group').strip() or 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telse:\n\t\t\t\tcustomer.customer_group = create_customer_group(entity.get('group').strip())\n\t\tcustomer.territory = 'Australia'\n\t\tcustomer.customer_status = 'Existing'\n\t\tcustomer.modified_date = entity.get('updated_at')\n\t\tcustomer.save(ignore_permissions=True)\n\t\tif \"(C)\" in customer.customer_name:\n\t\t\tfrappe.db.set_value(\"Cusomer\", customer.name, \"customer_name\", organisation.replace(\"(C)\", \"\"))\n\n\t\tcreate_or_update_contact(customer, entity)\n\t\tget_addresses(entity.get('entity_id'))\n\n\t\t# return status\n\t\treturn {\n\t\t\tentity.get(\"entity_id\"): {\n\t\t\t\t\"operation\": \"Customer Created\" if not name else \"Customer Updated\",\n\t\t\t\t\"name\": customer.name,\n\t\t\t\t\"modified_date\": entity.get(\"updated_at\")\n\t\t\t}\n\t\t}\n\texcept Exception, e:\n\t\tdocname = entity.get('entity_id')\n\t\tresponse = entity\n\t\tlog_sync_error(\"Customer\", docname, response, e, \"create_new_customer\")", "def test_customer_update(self):\n # first performe create\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"email\": self.customer_data[\"email\"],\n \"phone\": self.customer_data[\"phone\"]\n }\n self._update_model(\"customer\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def test_update_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']\n\n org.update_contact(TestContactInfo.contact2)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact2['email']", "def create(self,contact: Contact) -> bool:\n try:\n contact_new=ContactSet(name=contact.name,birthdate=contact.birthdate\n ,contact_type=contact.contact_type, description=contact.description, phone=contact.phone)\n db.session.add(contact_new)\n db.session.commit()\n return True\n except Exception as ex:\n app.logger.error('Error creating a new Contact. 
{}'.format(ex))\n return False", "def update_contact(self, context, payload):\n\n if context.get('headers').get('api_key') is None or context.get('headers').get('app_id') is None:\n raise Exception(\"Please provide Api-Key and Api-Appid\")\n \n # Set headers\n headers = {\n \"Api-Key\": context.get('headers').get('api_key'),\n \"Api-Appid\": context.get('headers').get('app_id'),\n \"Content-Type\": \"application/json\"\n }\n payload[\"id\"] = payload.get(\"contact_id\")\n response = requests.request(\"PUT\", f'{self.url}Contacts', headers=headers, data=payload).text\n response = json.loads(response)\n response = response[\"data\"][\"attrs\"]\n return response", "def edit_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def update_customer(cls, customer_data):\n customer_instance = cls.get_customer(customer_data['email'])\n for field_name, values in customer_data:\n setattr(customer_instance, field_name, values)\n customer_instance.save()\n return customer_instance", "async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],\n phone: Optional[str], active: bool) -> str:\n q = \"\"\"insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)\"\"\"\n q_args = (name, email, phone, active)\n contact_id = await dbcon.operation(q, q_args)\n return contact_id", "def post(self):\n return Contacts().create_one(request.get_json())", "def update_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._update_row_in_db(Contact.table_name, Contact.columns, self.values_with_uid)", "def updateAccountContact(self,contact, accountId, contactId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/contacts/{contactId}?responseFields={responseFields}\", \"PUT\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"contactId\", contactId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(contact).execute();\r\n\t\treturn self.client.result();", "def edit_customer(customer_id, password, name, email, phone):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n UPDATE Customers\n SET password=?, customer_name=?, phone=?, email=?\n WHERE id_customer=?\n \"\"\",\n (password, name, phone, email, customer_id))", "def do_addContact(self, line):\n\t\tif not(self.db is None):\n\t\t\tcont = self.db.contact\n\t\t\tcontact_info = {\n\t\t\t\t'first_name': input(\"First name: \"),\n\t\t\t\t'surname': input(\"Surname: \"),\n\t\t\t\t'company': input(\"Company: \"),\n\t\t\t\t'address': input(\"Address: \"),\n\t\t\t\t'telephone': input(\"Telephone: \"),\n\t\t\t\t'email': input(\"Email: \")\n\t\t\t}\n\t\t\tcont.insert_one(contact_info)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def test_update_customer(self):\n # create a customer to update \n test_customer = self._create_customers(\"Alex\")\n resp = self.app.post(\n \"/customers\", json=test_customer.serialize(), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n \n # update the customer\n new_customer = resp.get_json()\n logging.debug(new_customer)\n new_customer[\"address\"] = \"unknown\"\n resp = self.app.put(\n 
\"/customers/{}\".format(new_customer[\"id\"]),\n json=new_customer,\n content_type=\"application/json\",\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n updated_customer = resp.get_json()\n self.assertEqual(updated_customer[\"address\"], \"unknown\")", "def create_or_update_address(address, customer):\n\tname = frappe.db.get_value('Address', { 'entity_id': address.get('entity_id') })\n\tif not name:\n\t\taddr = frappe.new_doc('Address')\n\t\taddr.address_title = \"{} {} {}\".format(\n\t\t\taddress.get(\"firstname\"),\n\t\t\taddress.get(\"lastname\"),\n\t\t\taddress.get(\"entity_id\")\n\t\t)\n\telse:\n\t\taddr = frappe.get_doc(\"Address\", name)\n\n\taddr.address_type = get_address_type(address).get('type')\n\taddr.entity_id = address.get('entity_id')\n\taddr.address_line1 = address.get('street')[0]\n\taddr.address_line2 = address.get('street')[1] if len(address.get('street')) > 1 else \"\"\n\taddr.city = address.get('city')\n\taddr.country = frappe.db.get_value('Country', { 'code': address.get('country_id') })\n\taddr.state = address.get('region')\n\taddr.pincode = address.get('postcode')\n\taddr.phone = address.get('telephone') or '00000'\n\taddr.fax = address.get('fax')\n\taddr.customer = customer\n\taddr.customer_name = address.get('firstname')+' '+address.get('lastname')\n\taddr.is_primary_address = get_address_type(address).get('is_primary_address')\n\taddr.is_shipping_address = get_address_type(address).get('is_shipping_address')\n\n\taddr.save(ignore_permissions=True)", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def update_customer(self):\n customer = self.customer\n name = self.display_name or self.name or \"\"\n email = self.billing_email or self.email or \"\"\n\n if stripe.api_key != \"sk_test_xxxx\":\n try:\n stripe.Customer.modify(customer.id, name=name, email=email)\n except Exception:\n logger.exception(\"Error syncing customer with Stripe\")\n\n customer.name = name\n customer.email = email\n customer.save()", "async def post(self):\n await self.handle_request(self.contacts_new_api, 1)", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n customer.save()\n except Exception as unknown_error:\n print(unknown_error)", "def add_contact(self, contact):\n self.db.insert_contact(contact)\n return self.update_contacts()", "def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])", "def updateCustomer(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('update_customer', params)", "def test_update_customer_success(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n\n email= 
'[email protected]'\n customer.email = email\n customer.save()\n\n self.assertEqual(customer.__str__(), email)", "def _setcontact(id, name=None, address=None, phone=None, email=None):\n try:\n if name is not None:\n r.set(\"uid:\" + id + \":name\", name)\n if address is not None: \n r.set(\"uid:\" + id + \":address\", address)\n if phone is not None: \n r.set(\"uid:\" + id + \":phone\", phone)\n if email is not None: \n r.set(\"uid:\" + id + \":email\", email)\n\n return True\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def post(self):\n data = request.json\n return save_new_customer(data=data)", "def create_contact(self, context, payload):\n\n if context.get('headers').get('api_key') is None or context.get('headers').get('app_id') is None:\n raise Exception(\"Please provide Api-Key and Api-Appid\")\n \n # Set headers\n headers = {\n \"Api-Key\": context.get('headers').get('api_key'),\n \"Api-Appid\": context.get('headers').get('app_id'),\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.request(\"POST\", f'{self.url}Contacts', headers=headers, data=payload).text\n response = json.loads(response)\n response = response[\"data\"]\n return response", "def add_contact(self, request, **kwargs):\n if request.data is None:\n return Response({'message': 'Invalid contact details'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('first_name') is None:\n return Response({'message': 'First name not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n contact_data = request.data.get('contact')\n for data in contact_data:\n print(data.get('phone'))\n try:\n parse_number = phonenumbers.parse(data.get('phone'), None)\n except Exception:\n return Response({'details': 'Invalid Phonenumber'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n if not phonenumbers.is_valid_number(parse_number):\n return Response({'details': 'Invalid Phonenumber entered'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n new_contact_data = ContactCreationAndUpdationMixin().create(request.data)\n group = self.get_object()\n group.contacts.add(new_contact_data)\n serializer_data = ContactSerializer(new_contact_data) \n return Response(serializer_data.data)", "def add_contact_to_db_by_one(name, email, module_db_id, contact_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n if contact_id:\n try:\n contact = Contact.objects.get(id=contact_id, list_owner_id=module_db_id)\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n except Contact.DoesNotExist:\n pass\n else:\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.status = 1\n contact.save()\n success = True\n except Exception as e:\n print(e.args)\n\n return success, name, email", "def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']", "def create_contact(contact, party_type, party, email):\n\tcontact = contact.split(' ')\n\n\tcontact = frappe.get_doc({\n\t\t'doctype': 'Contact',\n\t\t'first_name': contact[0],\n\t\t'last_name': len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('email_ids', dict(email_id=email, 
is_primary=1))\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()", "def add_contact_to_db(name, email, module_db_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n else:\n success = False\n except Exception as e:\n print(e.args)\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 0\n contact.save()\n success = True\n else:\n success = False\n\n return success, name, email", "def add_contact_to_db(self):\n self.init_db(self._testing)\n\n # make sure that the object is not in the db\n assert self.uid == \"\"\n\n self._insert_row_into_db(Contact.table_name, Contact.columns, self.values)\n\n # update this objects uid\n self.uid = self._get_id_of_last_row(Contact.table_name)", "def add_customer(customer_id, first_name, last_name, home_address, phone_number,\n email_address, is_active, credit_limit):\n try:\n LOGGER.info('Successfully connected to the database')\n\n with DATABASE.transaction():\n new_customer = Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n is_active=is_active,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info(\"Customer added successfully\")\n\n except IntegrityError as error:\n LOGGER.info(error)\n LOGGER.info('Error occurred')", "def update(self):\n return self._api.update_customer(**to_dict(self))", "def contact(request):\n email = request.GET.get(\"email\")\n version = get_version_or_leave(request, \"contact\", email)\n\n if version == '1':\n\n return contact_v1(request)\n\n else:\n\n api_access_logging(\n request,\n \"contact\",\n email,\n \"400\",\n \"4\",\n None\n )\n return Response(\n {\n \"error_code\": \"4\",\n \"detail\": errors_for_customers[\"4\"]\n },\n status=status.HTTP_400_BAD_REQUEST\n )", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n print('Adding new customer, Customer ID {}...'.format(customer_id))\n try:\n Customer.get_by_id(customer_id)\n print('Customer ID {} is already in use'.format(customer_id))\n except Exception as ex:\n if \"instance matching query does not exist\" in str(ex):\n try:\n new_customer = Customer.create(customer_ID=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info('Added new customer, Customer ID %s', customer_id)\n except IntegrityError:\n print('Incorrect format, customer {} not saved'\n .format(customer_id))", "def update_contact(self,name):\n update_choice = input(\"What part of the contact would you like to\"+\n \" modify? Enter name, number, email, or zipcode. 
\")\n find_contact = self.pull_one_contact(name)[1]\n \n if update_choice == \"name\":\n new_name = input(\"Please enter the updated name as\"+ \n \" firstname lastname: \")\n self.contacts[find_contact][0] = new_name\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Name: {new_name}\")\n \n elif update_choice == \"number\":\n new_number = input(\"Please enter the updated number: \")\n self.contacts[find_contact][1] = new_number\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Number: {new_number}\")\n \n elif update_choice == \"email\":\n new_email = input(\"Please enter the updated email: \") \n self.contacts[find_contact][2] = new_email\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Email: {new_email}\")\n \n elif update_choice == \"zipcode\":\n new_zipcode = input(\"Please enter the updated zipcode: \")\n self.contacts[find_contact][3] = new_zipcode\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Zipcode: {new_zipcode}\")\n \n else:\n sys.exit() \n self.save()", "def addAccountContact(self,contact, accountId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/contacts?responseFields={responseFields}\", \"POST\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(contact).execute();\r\n\t\treturn self.client.result();", "def test_save_contact(self):\n self.new_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 1)", "def create_or_enable(self, name, email, **kwargs):\n try:\n return self.create(name=name, email=email, **kwargs)\n except FreshDeskClient.APIError as e:\n contacts = self.get_list(\n state='deleted', query='email is {}'.format(email))\n if ((len(contacts) > 0) and\n (e.resp.status_code == HTTP_ALREADY_EXISTS)):\n contact = contacts[0]\n self.update(contact['id'], name=name, deleted=False, **kwargs)\n # update local version as well\n contact[name] = name\n contact.update(kwargs)\n\n return contact\n else:\n raise", "def edit_customer(cls, api, id, **data):\n return api.update_customer(id, **data)", "def post(self, request, id):\n form = CallRegisterForm(request.POST or None)\n if form.is_valid():\n object = get_object_or_404(CallRegister, pk=id)\n # Getting only ONE customer from db\n # that matches passed name\n # MUST BE CHANGED\n CallRegister.objects.filter(pk=id).update(\n customer = get_object_or_404(ClientDetails, pk=request.POST['customer-id']),\n complaint_nature = form.cleaned_data['complaint_nature'],\n brand = form.cleaned_data['brand'],\n product_name = form.cleaned_data['product_name'],\n product_serial = form.cleaned_data['product_serial'],\n product_coverage = form.cleaned_data['product_coverage'],\n appointment_date = form.cleaned_data['appointment_date'],\n appointment_time = form.cleaned_data['appointment_time'],\n edited_by = self.request.user,\n edit_datetime = datetime.datetime.now()\n )\n messages.add_message(request, messages.INFO, 'Success - Call detials edited successfully!')\n return redirect(reverse('calls:call_detail_view', kwargs={'id':id}))\n messages.add_message(request, messages.INFO, 'Failed - Invalid details')\n return redirect(reverse('calls:call_edit_view', kwargs={'id':id}))", "async def update_contact(dbcon: 
DBConnection, contact_id: int, data: Dict[str, str]) -> None:\n\n async def _run(cur: Cursor) -> None:\n for key, value in data.items():\n if key not in ['name', 'email', 'phone', 'active']:\n raise errors.IrisettError('invalid contact key %s' % key)\n q = \"\"\"update contacts set %s=%%s where id=%%s\"\"\" % key\n q_args = (value, contact_id)\n await cur.execute(q, q_args)\n\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n await dbcon.transact(_run)", "def edit_contact(self):\n edit_data = input(\"Enter the first name of user you want to edit\\n\")\n\n for contact in self.contact_list:\n if contact.first_name == edit_data:\n user_input = int(input(\n \"Enter the number that you want to edit field in details\"\n \" \\n 1. First Name 2. Last name 3. Address 4. city 5. state 6.zip 7. Phone number 8.Email \\n\"))\n if user_input == 1:\n first_name = input(\"Enter new first name\\n\")\n contact.first_name = first_name\n elif user_input == 2:\n last_name = input(\"Enter new last name\\n\")\n contact.last_name = last_name\n elif user_input == 3:\n address = input(\"Enter new address\\n\")\n contact.address = address\n elif user_input == 4:\n city = input(\"Enter new city\\n\")\n contact.city = city\n elif user_input == 5:\n state = input(\"Enter new state\\n\")\n contact.state = state\n elif user_input == 6:\n zip = input(\"Enter new zip\\n\")\n contact.zip = zip\n elif user_input == 7:\n phone_number = input(\"Enter new phone number\\n\")\n contact.phone_number = phone_number\n elif user_input == 8:\n email = input(\"Enter new email\\n\")\n contact.email = email\n else:\n print(\"Please enter a valid input\")\n else:\n print(\"Please enter a valid name\")", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n # database.transaction; all work given to database gets done or none of it\n with cm.DATABASE.transaction():\n try:\n # .create inserts the data into the database\n new_customer = cm.Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n # .save() will write the data to the database\n new_customer.save()\n LOGGER.info(\"Added customer [%s]\", customer_id)\n except pw.IntegrityError:\n LOGGER.error(\"Customer [%s] not added to database!\", customer_id)\n raise pw.IntegrityError", "def test_customer_create(self):\n self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])", "def edit_contact(_id: str):\n # ------- GET ------------\n title = 'Edit contact -- ' + _id\n doc = numbers.find_one(ObjectId(_id), projection={'_id': False, 'create_time': False, 'update_time': False})\n # convert array in to string separated by coma\n doc = {key: (', '.join(val) if type(val) == list else val) for key, val in doc.items()}\n form = SaveNumber(**doc)\n # ========================\n\n if form.validate_on_submit():\n web_logging.debug('request to edit _id= {}'.format(_id))\n # prepare for mongo\n form_recieved = Contact()\n form_recieved.from_form(form.data)\n form_recieved.update_time = datetime.utcnow()\n\n result = form_recieved.mongo_update(_id)\n match_count, modified_count = result\n flash(\"match_count= {}, modified_count= {}\".format(match_count, modified_count))\n return redirect(url_for('add_edit_number'))\n\n return render_template('save_contact.html', title=title, 
form=form)", "def contact(request):\n ContactMessage.objects.create(\n datetime=saturn.now(),\n name=request.data['name'],\n email=request.data['email'],\n body=request.data['body']\n )\n\n return Response({'success': True})", "def save(self, customer):\n\t\t\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\tcursor.execute(\"SELECT * FROM Customers\")\n\t\t\t\tcustomers = cursor.fetchall()\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\tCREATE TABLE IF NOT EXISTS `Customers`\n\t\t\t\t\t(\n\t\t\t\t\t\tcustomer_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\t\t\t\tfirst_name TEXT NOT NULL,\n\t\t\t\t\t\tlast_name TEXT NOT NULL,\n\t\t\t\t\t\temail TEXT NOT NULL,\n\t\t\t\t\t\tphone_number TEXT NOT NULL,\n\t\t\t\t\t\tcity TEXT NOT NULL,\n\t\t\t\t\t\tstate TEXT NOT NULL,\n\t\t\t\t\t\tpostal_zip INTEGER NOT NULL,\n\t\t\t\t\t\taddress TEXT NOT NULL,\n\t\t\t\t\t\tis_active BOOLEAN NOT NULL,\n\t\t\t\t\t\tCONSTRAINT name_unique UNIQUE (first_name, last_name, email, phone_number, city, state, postal_zip, address)\n\t\t\t\t\t)\n\t\t\t\t\"\"\")\n\n\t\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO Customers VALUES (null, \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\")\n\t\t\t\"\"\".format(\n\t\t\t\t\t\tcustomer.get_first_name(), \n\t\t\t\t\t\tcustomer.get_last_name(), \n\t\t\t\t\t\tcustomer.get_email(), \n\t\t\t\t\t\tcustomer.get_phone_number(),\n\t\t\t\t\t\tcustomer.get_city(),\n\t\t\t\t\t\tcustomer.get_state(),\n\t\t\t\t\t\tcustomer.get_postal_zip(),\n\t\t\t\t\t\tcustomer.get_address(),\n\t\t\t\t\t\tcustomer.get_active_status()\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def save(self, customer):\n\t\t\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\tcursor.execute(\"SELECT * FROM Customers\")\n\t\t\t\tcustomers = cursor.fetchall()\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\tCREATE TABLE IF NOT EXISTS `Customers`\n\t\t\t\t\t(\n\t\t\t\t\t\tcustomer_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\t\t\t\tfirst_name TEXT NOT NULL,\n\t\t\t\t\t\tlast_name TEXT NOT NULL,\n\t\t\t\t\t\temail TEXT NOT NULL,\n\t\t\t\t\t\tphone_number TEXT NOT NULL,\n\t\t\t\t\t\tcity TEXT NOT NULL,\n\t\t\t\t\t\tstate TEXT NOT NULL,\n\t\t\t\t\t\tpostal_zip INTEGER NOT NULL,\n\t\t\t\t\t\taddress TEXT NOT NULL,\n\t\t\t\t\t\tis_active BOOLEAN NOT NULL,\n\t\t\t\t\t\tCONSTRAINT name_unique UNIQUE (first_name, last_name, email, phone_number, city, state, postal_zip, address)\n\t\t\t\t\t)\n\t\t\t\t\"\"\")\n\n\t\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO Customers VALUES (null, \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\")\n\t\t\t\"\"\".format(\n\t\t\t\t\t\tcustomer.get_first_name(), \n\t\t\t\t\t\tcustomer.get_last_name(), \n\t\t\t\t\t\tcustomer.get_email(), \n\t\t\t\t\t\tcustomer.get_phone_number(),\n\t\t\t\t\t\tcustomer.get_city(),\n\t\t\t\t\t\tcustomer.get_state(),\n\t\t\t\t\t\tcustomer.get_postal_zip(),\n\t\t\t\t\t\tcustomer.get_address(),\n\t\t\t\t\t\tcustomer.get_active_status()\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n 
self.assertEqual(contact2['address'], to_update_value)", "def customer_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n customer_form = CustomerForm()\n return render_to_response('customer_form.html', {'form': customer_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n customer_form = CustomerForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if customer_form.is_valid():\n of = customer_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('customer_form.html', \n {'form': customer_form, 'form_errors': customer_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def test_projects_id_contacts_put(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def update_customer(customer_id, login, name, email, phone=\"\", permission=0):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n UPDATE Customers\n SET login=?, customer_name=?, phone=?, email=?, perm=?\n WHERE id_customer=?\n \"\"\",\n (login, name, phone, email, permission, customer_id))", "def create_customer(data):\n mandatory_params = ['customer_name', 'mobile_number']\n result = api_utils.check_required_params(mandatory_params, data)\n if result:\n return result\n mobile_number = db_helper.mobile_number_unique(data['mobile_number'])\n if not mobile_number:\n return api_utils.error(\"There already is a customer with \\\n mobile number {} found\".format(data['mobile_number']), 404)\n\n new_customer = db_helper.add_new_customer(data['customer_name'],\n mobile_number)\n return jsonify({'new_customer': new_customer})", "def add_customer(customer_id, first, last, addr, phone, email, status, limit):\n try:\n LOGGER.info('Creating customer record')\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=first,\n last_name=last,\n home_address=addr,\n phone_number=phone,\n email_address=email,\n status=status,\n credit_limit=limit\n )\n new_customer.save()\n LOGGER.info('Added customer: %s', new_customer.customer_id)\n except IntegrityError as err:\n LOGGER.warning('Error creating = ID: %s', customer_id)\n LOGGER.warning(err)\n\n return Customer", "def get_or_update_contact(request, **kwargs):\n contact_id = kwargs['id']\n contact = private.Contact()\n if request.method.lower() == 'get':\n data = contact.get_contact(contact_id)\n return JsonResponse(data)\n elif request.method.lower() == 'post':\n data = json.loads(request.body)\n try:\n contact.check_and_update(pk=contact_id, name=data.get('name'))\n except exception.ContactException as ex:\n return JsonResponse({'error_message': ex.message})\n elif request.method.lower() == 'delete':\n contact.delete(pk=contact_id)\n return JsonResponse({'success_message': 'Contact updated successfully.'})", "def test_invoice_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", 
self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # create another customer\n id_other = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id_other:\n # then performe update\n data = self.invoice_data\n data[\"customer_id\"] = id_other\n self._update_model(\"invoice\", id, data, [])\n self.assertIsNotNone(id_other)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def add_customer(customer_id, name, lastname, homeaddress, phone_number, email, status, credit_limit):\n try:\n with customer_db.transaction():\n new_customer_mi = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n homeaddress=homeaddress,\n phone_number=phone_number,\n email=email,\n status=status,\n credit_limit=credit_limit\n )\n logger.debug(\"Added customer %s to %s\", new_customer_mi, customer_db.database)\n return new_customer_mi\n except Exception as e:\n logger.error(\"Error creating customer_id %s: %s\", customer_id, e)", "def post(self):\n data = json.loads(request.data.decode())\n contact = Contacts(\n Email=data[\"email\"],\n TechRider=data[\"techRider\"],\n InputList=data[\"inputList\"],\n Backline=data[\"backline\"],\n Created=get_datetime(),\n )\n db.session.add(contact)\n db.session.commit()\n\n # The RFC 7231 spec says a 201 Created should return an absolute full path\n server = socket.gethostname()\n contents = \"Location: {}{}{}\".format(\n server,\n url_for(\"ContactsView:index\"),\n contact.ContactsID\n )\n\n return make_response(jsonify(contents), 201)", "def test_save_contact(self):\n # .save_contact() is the save to contact function.\n # Test would check if an addition has been made to our contact list\n self.new_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 1)", "def test_update_contact_association(self):\n patient1 = self.create_patient({'mobile_number': '12223334444'})\n patient2 = self.create_patient()\n subject_number = patient1.subject_number\n node = self.create_xml_patient({'Subject_Number': subject_number,\n 'Mobile_Number': '43332221111'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertNotEqual(patient.pk, patient2.pk)\n self.assertEqual(patient.pk, patient1.pk)\n self.assertNotEqual(patient.contact.pk, patient2.contact.pk)\n self.assertEqual(patient.contact.pk, patient1.contact.pk)\n self.assertEqual(patient.mobile_number, '+43332221111')\n self.assertEqual(patient.contact.phone, '+43332221111')", "def add_customer(customer_id, name, lastname, home_address, phone_number, email_address, status,\n credit_limit):\n init_database()\n try:\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n active_status=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n logging.info('New customer, ID %s, added successfully.', customer_id)\n return True\n except peewee.IntegrityError as exc:\n logging.error('Error creating new customer with ID %s: %s.', customer_id, exc)\n return False\n finally:\n database.close()", "def add_customer(\n customer_id,\n name,\n last_name,\n home_address,\n phone_number,\n email_address,\n status,\n credit_limit,\n):\n LOGGER.info(\"Adding new customer, %s %s to 
database\", name, last_name)\n try:\n Customers.create(\n customer_id=customer_id,\n name=name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n LOGGER.info(\"Added new customer %s %s to database\", name, last_name)\n except IntegrityError as e_val:\n LOGGER.warning(\"Customer %s already exists\", customer_id)\n LOGGER.warning(e_val)", "def create_contact(contact, party_type, party):\n\tcontact = contact\t.split(\" \")\n\n\tcontact = frappe.get_doc({\n\t\t\"doctype\":\"Contact\",\n\t\t\"first_name\":contact[0],\n\t\t\"last_name\": len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()", "def createCustomer(sender, instance, **kwargs):\n Customer.objects.get_or_create(user=instance)", "def contact(request):\n name = request.POST.get('name', '')\n email = request.POST.get('email', '')\n msg = request.POST.get('msg', '')\n ContactUs.objects.create(name=name, email=email, msg=msg)\n messages.success(request, 'Submitted, Thank you!')\n return HttpResponseRedirect('/')", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n logger.info(\n f\"Successfully added customer {customer_id} with {credit_limit}\"\n )\n customer.save()\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to added customer {customer_id}. {unknown_error}\"\n )\n print(unknown_error)", "def create_contact_info(query, user, number):\n\n data = {\n 'startDate': str_date(get_date()),\n 'number': number,\n 'user': user\n }\n\n fb.patch(query, data)", "def add_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() not in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact already exists'))", "def contact():\n if request.method == \"POST\":\n mongo.db.contact.insert_one(request.form.to_dict())\n\n return jsonify(success=True)\n\n return render_template(\"contact.html\", page_title=\"Contact Us\")", "def save_contract(bid):\n title = 'Create contract -- ' + bid\n doc = data_active.find_one({'bid': bid})\n if doc is None:\n # error exit from function\n web_logging.error('bid= {} not found in \"data_active\"'.format(bid))\n flash('bid= {} not found in \"data_active\"'.format(bid))\n return redirect(url_for('lists'))\n\n # take only 10 chars as number\n doc['phone'] = doc['phone'].replace('-', '')\n doc['phone'] = doc['phone'][len(doc['phone'])-10:]\n\n search_number = numbers.find_one({'numbers': {'$eq': doc['phone']}})\n contract = Contract()\n contract.from_lists(doc)\n\n if search_number is None:\n # no contacts with such number, call create new contact form\n form_doc = {'city': doc['location'], 'numbers': doc['phone'], 'comment': doc['comment'],\n 'loc_comments': doc['comment']}\n info = '--------- New Contact ------------'\n else:\n form_doc = Contact().to_form(search_number)\n info = '========= Contact already known, please check ======'\n\n form_doc.update(contract.as_dict())\n web_logging.debug('data for form= {}'.format(form_doc))\n form = Transaction(**form_doc)\n\n if form.validate_on_submit():\n 
contact_info = Contact()\n contact_info.from_form(form.data)\n\n contract_info = Contract()\n contract_info.from_form(form.data)\n\n if contact_info.contact_id is None:\n # contact is new\n contact_info.create_time = datetime.utcnow()\n contact_info.update_time = contact_info.create_time\n web_logging.debug('inserting contact_info= {}'.format(contact_info.as_dict()))\n web_logging.debug('inserting contract_info= {}'.format(contract_info))\n flash('inserting contact_info= {}, contract_info= {}'.format(contact_info.as_dict(), contract_info))\n result_contract = contract_info.mongo_insert()\n result_contact = contact_info.mongo_insert()\n # add contact id into document\n result_contract_upd = contracts.update_one({'_id': ObjectId(result_contract)},\n {'$set': {'contact': ObjectId(result_contact)}})\n result_contact_upd = numbers.update_one({'_id': ObjectId(result_contact)},\n {'$addToSet': {'contracts': ObjectId(result_contract)}})\n else:\n # contact already exists\n contact_info.update_time = datetime.utcnow()\n web_logging.debug('inserting contact_info= {}'.format(contact_info.as_dict()))\n web_logging.debug('inserting contract_info= {}'.format(contract_info))\n flash('updating contact_info= {}, creating contract_info= {}'.format(contact_info.as_dict(), contract_info))\n result_contract = contract_info.mongo_insert()\n result_contact = numbers.update_one({'_id': ObjectId(contact_info.contact_id)},\n {'$addToSet': {'contracts': ObjectId(result_contract)}})\n\n return redirect('/contracts')\n\n return render_template('contract.html', title=title, form=form, info=info)", "def test_update_contact_no_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n\n with pytest.raises(BusinessException) as exception:\n org.update_contact(TestContactInfo.contact2)\n assert exception.value.code == Error.DATA_NOT_FOUND.name", "def contact(session, contact_factory):\n contact_factory.get()", "def new_contact(self, context, payload):\n\n data = OntraportContact(\n contact_id= payload[\"data\"].get(\"id\"),\n first_name= payload[\"data\"].get(\"firstname\"),\n last_name= payload[\"data\"].get(\"lastname\"),\n email_address= payload[\"data\"].get(\"email\"),\n date= payload[\"data\"].get(\"date\"),\n office_phone= payload[\"data\"].get(\"office_phone\"),\n company= payload[\"data\"].get(\"company\"),\n title= payload[\"data\"].get(\"title\"),\n country= payload[\"data\"].get(\"country\"),\n zip_code= payload[\"data\"].get(\"zip\"),\n owner= payload[\"data\"].get(\"owner\"),\n unique_id= payload[\"data\"].get(\"unique_id\"),\n profile_image= payload[\"data\"].get(\"profile_image\")\n )\n return data.__dict__", "def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE 
NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> {contact.email}\\n\\n\")\"\"\"", "def put(self, customer_id):\n data = request.json\n return edit_customer(customer_id=customer_id, data=data)", "def createCustomer(self, **params):\n return self.__req('create_customer', params)", "def add_contact():\n return 'add contact'", "def create_customer(cls, api, **data):\n return api.create_customer(**data)", "def test_create_customer_success(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n self.assertEqual(customer.__str__(), customer_data['email'])", "def create_contact_on_google(self, info):\n\n\t\twith open('client.pickle') as pickle_file:\n\t\t\tclient = pickle.load(pickle_file)\n\n\t\t#create contact in google\n\t\tnew_contact = gdata.contacts.data.ContactEntry()\n\n\t\t# Set the contact's name.\n\t\tnew_contact.name = gdata.data.Name( given_name=gdata.data.GivenName(text=info['name']), family_name=gdata.data.FamilyName(text=info['name']),\n\t\t\tfull_name=gdata.data.FullName(text=info['name']))\n\n\t\tnew_contact.content = atom.data.Content(text='Notes')\n\n\t\t# Set the contact's email addresses.\n\t\tnew_contact.email.append(gdata.data.Email(address=info['email'], primary='true', rel=gdata.data.WORK_REL, display_name=info['name']))\n\n\t\t# Set the contact's phone numbers.\n\t\tnew_contact.phone_number.append(gdata.data.PhoneNumber(text=info['phone'], rel=gdata.data.WORK_REL, primay='true'))\n\n\t\tcontact_entry = client.CreateContact(new_contact)\n\t\twebnotes.errprint(\"Contact's ID: %s\" % contact_entry.id.text)\n\n\t\twebnotes.conn.set_value(\"Contact\",self.name,\"contct_id\", contact_entry.id.text)", "def test_contact_exists(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n contact_exists = Contact.contact_exist(254711223344)\n self.assertTrue(contact_exists)", "def save_object(self, data):\n return Contact(**data)", "def add_customer(self, info, dup):\n if not dup:\n self.cursor.execute(\"INSERT INTO customerpersonal VALUES (%s,%s)\", (int(info['phone']), info['address']))\n\n self.cursor.execute(\"INSERT INTO customercredentials VALUES (%s,%s,%s,%s,%s,%s)\",\n (info['loginID'], info['firstName'], info['lastName'], info['salt'],\n info['key'], int(info['phone'])))\n self.db.commit()", "def contact_create(request):\n\n if request.method == \"POST\":\n form = ContactForm(request.POST)\n profile_form = ContactProfileForm(request.POST)\n\n if form.is_valid() and profile_form.is_valid():\n contact = form.save()\n\n # Populate the required 'contact' field before saving\n profile = profile_form.save(commit=False)\n profile.contact = contact\n profile.save()\n\n messages.success(request, _(\"The contact %(name)s was successfully created\") % \n {'name': unicode(contact)})\n return HttpResponseRedirect(reverse(\"moderation.views.contact\", args=(contact.pk,)))\n else:\n form_initial = {'phone_number': request.GET.get('phone_number', '')}\n form = ContactForm(initial=form_initial)\n\n profile_initial = {}\n if \"facility\" in request.GET:\n facility = get_object_or_404(Facility, pk=request.GET['facility'])\n profile_initial['facility'] = facility.pk\n profile_form = ContactProfileForm(initial=profile_initial)\n\n return render_to_response(\"contact_create.html\", \n { 'form': form, \n 'profile_form': profile_form,\n },\n context_instance=RequestContext(request))", "def test_updateContact(self):\n qs = 
Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n to_update_value = 'address 2'\n contact2.address = to_update_value\n contact2.save()\n # refresh from db\n contact3 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact3.address, to_update_value)", "def Commit(self):\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)\n self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, \"ABORTING TRANSACTION***********\")\n acm.AbortTransaction()", "def add_customer(login, password, name, phone, email):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n INSERT INTO Customers\n (login,password,customer_name,phone,email)\n VALUES(?,?,?,?,?)\n \"\"\",\n (login, password, name, phone, email))", "def showEditContact(self):", "def update(self, customerguid, name=\"\", login=\"\", password=\"\", email=\"\", address=\"\", vat=\"\", jobguid=\"\", executionparams=None):", "def post(self):\n post_data = request.get_json()\n\n # decode token and check if expired\n token = post_data.get('odoo_contact_token')\n odoo_contact_id, expiration_date = decode_token(token)\n\n if datetime.now() > expiration_date:\n return {\n \"error_id\": \"alumni_register_link_expired_error\",\n \"message\": \"Unauthorized: Registration link is expired.\"\n }, 401\n\n # check if such odoo user exists\n filter_list = []\n filter_list.append(['id', '=', odoo_contact_id])\n from app.controllers.odoo_controller import OdooController\n try:\n contacts_number = OdooController.count_number_of_odoo_contacts_by_filter_list(filter_list)\n except OdooIsDeadError as err:\n abort(503, err, error_id='odoo_connection_error')\n\n if contacts_number == 0:\n return {\n \"error_id\": \"odoo_contact_not_found_error\",\n \"message\": \"Odoo contact not found.\"\n }, 404\n\n # create alumni user\n from app.controllers.alumni_controller import AlumniController\n post_data.update({'odoo_contact_id': odoo_contact_id})\n response = AlumniController.create_alumni_user(post_data)\n\n \n if response[1] == 201:\n # delete record in alumni invite status\n from app.controllers.alumni_invite_status_controller import AlumniInviteStatusController\n AlumniInviteStatusController.delete_invite_status_record(odoo_contact_id)\n\n # send email for confirmation\n receiver_email = response[0]['email']\n alumni_uuid = response[0]['alumni_uuid']\n send_confirmation_email(receiver_email, alumni_uuid)\n\n return response", "def add_customer(customer_id,\n name,\n lastname,\n home_address,\n phone_number,\n email,\n status,\n credit_limit):\n # pylint: disable 
= W0703\n try:\n with DB.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email=email,\n status=status.lower(),\n credit_limit=credit_limit)\n new_customer.save()\n logging.info('Customer(s) successfully added')\n\n except Exception as error:\n LOGGER.info(f'Error creating = {name}')\n LOGGER.info(error)", "def test_add_contacts(self):\n response = self.contacts.add(\"alex\", \"0708913841\")\n self.assertEqual(response, \"Successfully added contacts\" )", "def test_create_customer(self):\n url = reverse('customers-list')\n data = {\n 'first_name': self.customer_first_name,\n 'last_name': self.customer_last_name,\n 'email': self.customer_email\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 1)\n self.assertEqual(Customer.objects.get().first_name, 'John')", "def test_find_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n found_contact = Contact.find_by_phone(254711223344)\n\n self.assertEqual(found_contact.email, test_contact.email)", "def update_contact(self, uuid, name, urns, fields, groups):\n payload = self._build_params(uuid=uuid, name=name, urns=urns, fields=fields, group_uuids=groups)\n return Contact.deserialize(self._post('contacts', None, payload))", "def customer_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n customer_reference = get_object_or_404(Customer, id=id,company=company)\n customer_form = CustomerForm(instance=customer_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('customer_form.html',{'form':customer_form, 'info': customer_reference},context_instance=RequestContext(request))\n else:\n customer_form = CustomerForm(request.POST, instance=customer_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if customer_form.is_valid():\n customer_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('customer_form.html', \n {'form': customer_form, 'form_errors': customer_form.errors, 'info': customer_reference},\n context_instance=RequestContext(request))", "def test_update_customer(self):\n set_up_db()\n add_customer(*self.test_customer)\n update_customer_credit(1, 500000.00)\n self.assertEqual(500000.00, Customer.get_by_id(1).credit_limit)", "def customer_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n customer_reference = get_object_or_404(Customer, id=id,company=company)\n\n return render_to_response('customer_form.html', \n {'details': customer_reference,'info':customer_reference},\n context_instance=RequestContext(request))" ]
[ "0.7233565", "0.68543464", "0.66533446", "0.6599077", "0.6465166", "0.64515483", "0.64510775", "0.6444469", "0.6425489", "0.6370573", "0.63516873", "0.6306901", "0.62915707", "0.62861454", "0.62610596", "0.6254461", "0.6238027", "0.62310433", "0.62062687", "0.61987454", "0.61920035", "0.6190888", "0.6183589", "0.61717117", "0.6165592", "0.6165463", "0.614933", "0.61366814", "0.61314833", "0.6129482", "0.61277723", "0.6113039", "0.6073676", "0.6070029", "0.6065068", "0.60567707", "0.60492295", "0.60398936", "0.6035747", "0.60261494", "0.6008276", "0.59908676", "0.5977017", "0.59760875", "0.5962137", "0.59617364", "0.5960984", "0.5960895", "0.5951143", "0.5951143", "0.59507024", "0.5950539", "0.59254616", "0.59171766", "0.59119654", "0.5897066", "0.58881146", "0.588764", "0.58848673", "0.58816886", "0.5879751", "0.5879613", "0.58755493", "0.5875331", "0.58735526", "0.5867091", "0.58660537", "0.583622", "0.5834976", "0.5803809", "0.5772748", "0.5772571", "0.5770246", "0.5769868", "0.5760847", "0.57552534", "0.57550883", "0.5750972", "0.57486844", "0.5743718", "0.57196057", "0.5713698", "0.5702911", "0.57027334", "0.5701373", "0.56948364", "0.56888014", "0.5686512", "0.5679837", "0.5678459", "0.56537795", "0.5653438", "0.5646422", "0.5621427", "0.5618304", "0.5614649", "0.56093556", "0.5603143", "0.5601653", "0.55975044" ]
0.81380326
0
create or update the address
def create_or_update_address(address, customer): name = frappe.db.get_value('Address', { 'entity_id': address.get('entity_id') }) if not name: addr = frappe.new_doc('Address') addr.address_title = "{} {} {}".format( address.get("firstname"), address.get("lastname"), address.get("entity_id") ) else: addr = frappe.get_doc("Address", name) addr.address_type = get_address_type(address).get('type') addr.entity_id = address.get('entity_id') addr.address_line1 = address.get('street')[0] addr.address_line2 = address.get('street')[1] if len(address.get('street')) > 1 else "" addr.city = address.get('city') addr.country = frappe.db.get_value('Country', { 'code': address.get('country_id') }) addr.state = address.get('region') addr.pincode = address.get('postcode') addr.phone = address.get('telephone') or '00000' addr.fax = address.get('fax') addr.customer = customer addr.customer_name = address.get('firstname')+' '+address.get('lastname') addr.is_primary_address = get_address_type(address).get('is_primary_address') addr.is_shipping_address = get_address_type(address).get('is_shipping_address') addr.save(ignore_permissions=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_address(self, new_address, ):\n self.address.append(new_address)\n self.save()", "def update_address(cls, address_data):\n address_instance = cls.objects.get(email=address_data['customer']['email'])\n address_data = address_data.get('addresses')\n for field_name, values in address_data:\n setattr(address_instance, field_name, values)\n address_instance.save()\n return address_instance.save()", "def test_update_shipping_address(self):\n self.cim.update_shipping_address(\n customer_profile_id=u\"222\",\n customer_address_id=u\"444\",\n first_name=u\"pippo\",\n phone=u\"415-415-4154\"\n )", "def create_address(self, address: str) -> Optional[Address]:\n raise NotImplemented", "def add_address(self, address_list=None):\n sql = u' INSERT INTO address_TBL ' \\\n u'(line_1, line_2, city, county, country, billing_address, main_address, client_company_ID) ' \\\n u'VALUES (%s, %s, %s, %s, %s, %s, %s, %s);'\n if address_list is None:\n address_list = self.data_set['address']\n\n c, conn = connection(self.schema)\n\n try:\n for address in address_list:\n if address['line_2'] is None:\n address['line_2'] = 'NULL'\n if address['billing'] is None:\n address['billing'] = 0\n if address['default'] is None:\n address['default'] = 0\n\n data = (address['line_1'],\n address['line_2'],\n address['city'],\n address['county'],\n address['country'],\n address['billing'],\n address['default'],\n self.id)\n\n c.execute(sql, data)\n finally:\n conn_close(c, conn)", "def put(self, request, *args, **kwargs):\n\n payload = request.data\n\n instance = self.get_object()\n\n # validate request data body length is zero raise error message\n if len(payload) == 0:\n return APIResponse({'message': NOT_FOUND_JSON_DATA}, HTTP_400_BAD_REQUEST)\n\n # create store address serializers object\n serializer = self.serializer_class(data=payload, partial=True, context={'request': request})\n\n # check admin address serializers is valid\n if not serializer.is_valid():\n return APIResponse(serializer.errors, HTTP_400_BAD_REQUEST)\n\n validated_data = serializer.validated_data\n\n # get last transaction save point id\n sid = transaction.savepoint()\n\n try:\n # update address\n instance = serializer.update(instance, validated_data)\n except Exception as err:\n logger.error(\"Unexpected error occurred : %s.\", err.args[0])\n # roll back transaction if any exception occur while update address\n transaction.savepoint_rollback(sid)\n return APIResponse({\"message\": err.args[0]}, HTTP_400_BAD_REQUEST)\n\n # convert model object into json\n data = AddressViewSerializer(instance).data\n data['message'] = UPDATE_ADDRESS\n\n return APIResponse(data, HTTP_OK)", "def store_address(intent, session):\n sess_data = session.setdefault('attributes', {})\n if not sess_data.get('add_address') and \\\n not sess_data['next_step'] == 'store_address':\n raise RuntimeError('Something went wrong.')\n\n data = {sess_data['which']: dict(latitude=str(sess_data['latitude']),\n longitude=str(sess_data['longitude']),\n address=str(sess_data['full_address']))}\n success = database.update_user_data(session['user']['userId'], **data)\n if not success:\n return reply.build(\"I'm sorry, something went wrong and I could't \"\n \"store the address.\", is_end=True)\n else:\n return reply.build(\"Okay, I've saved your %s \"\n \"address.\" % sess_data['which'],\n is_end=True)", "def process_address():\n #get address info from form\n user_details = request.form\n #validate address with google geocoding\n update_details = apiapijoyjoy.validate_address(user_details)\n 
#update ino in db\n dbwrangler.newaddress(update_details)\n \n return redirect(\"/\")", "def post(self, request, format=None):\n success = False\n try:\n line1=request.data[\"line1\"]\n district=request.data[\"district\"]\n state=request.data[\"state\"]\n pincode=request.data[\"pincode\"]\n branch=request.data[\"branch\"]\n address_obj = Address(line1=line1,district=district,\n state=state,pincode=pincode,branch=Branch.objects.get(pk=branch))\n address_obj.save()\n address_string = district+\", \"+state+\", \"+pincode\n if address_obj.id:\n location_coordinates = GeolocationApi.get_lat_lng(address_string)\n geolocation_obj = Geolocation(address=address_obj,\n lat=location_coordinates[\"latitude\"],\n lng=location_coordinates[\"latitude\"])\n geolocation_obj.save()\n success=True\n except Exception as e:\n success=False\n print(e)\n return Response(success)", "def add_new_address(self, address: dict) -> None:\n self.new_address_button.click()\n\n self.address_form.select_location(address['address'])\n self.address_form.label_input.fill(address['name'])\n\n self.address_form.save_button.click()", "def update(self, addr, replace=False):\n if self.ignorer.ignore_address(addr[1]):\n return False\n try:\n with self.connect() as c:\n cur = c.cursor()\n if replace:\n present = cur.execute(\"SELECT 1 FROM AddressBook WHERE address = ?\", [addr[1]])\n if present:\n cur.execute(\"UPDATE AddressBook SET name = ? WHERE address = ?\", addr)\n else:\n cur.execute(\"INSERT INTO AddressBookView VALUES(?,?)\", addr)\n else:\n cur.execute(\"INSERT INTO AddressBookView VALUES(?,?)\", addr)\n return True\n except sqlite3.IntegrityError:\n return False", "def test_create_shipping_address(self):\n self.cim.create_shipping_address(\n customer_profile_id=100,\n ship_phone=u'415-415-4154',\n ship_first_name=u'valentino'\n )", "def address(self, new_address):\n house_num, street_name, apt_num = new_address\n self._address.house_num = house_num\n self._address.street_name = street_name\n self._address.apt_num = apt_num", "def post(self, request, *args, **kwargs):\n request_data = request.data\n\n # create address serializers object\n serializer = self.serializer_class(data=request_data, context={'request': request})\n\n # check address serializers is valid\n if not serializer.is_valid():\n return APIResponse(serializer.errors, HTTP_400_BAD_REQUEST)\n\n validated_data = serializer.validated_data\n\n try:\n # verified address\n verifiedAddressRep = LobAddressVerification.verify_address(validated_data)\n except Exception as err:\n return APIResponse({\"message\": err.args[0]}, HTTP_400_BAD_REQUEST)\n\n print(\"======verifiedAddressRep=========\", verifiedAddressRep)\n # get last transaction save point id\n sid = transaction.savepoint()\n\n # try:\n # # save admin address\n # instance = serializer.create(validated_data)\n # except Exception as err:\n # logger.error(\"Unexpected error occurred : %s.\", err)\n # # roll back transaction if any exception occur while add new address\n # transaction.savepoint_rollback(sid)\n # return APIResponse({\"message\": err.args[0]}, HTTP_400_BAD_REQUEST)\n #\n # # convert model object into json\n # data = AddressViewSerializer(instance).data\n # data['message'] = ADD_ADDRESS\n\n return APIResponse(\"data\", HTTP_201_CREATED)", "def get_or_create(self):\n if self.is_valid():\n address, created = Address.objects.get_or_create(\n street=self.cleaned_data['street'],\n city=self.cleaned_data['city'],\n state=self.cleaned_data['state'],\n country=self.cleaned_data['country'],\n 
postal_code=self.cleaned_data['postal_code'],\n )\n return address\n return None", "def test_client_address_update(self):\n pass", "def save(self, *args, **kwargs):\n # Save a hash of the address fields so we can check whether two\n # addresses are the same to avoid saving duplicates\n # self.hash = self.generate_hash()\n\n # Ensure that each user only has one default shipping address\n # and billing address\n self._ensure_defaults_integrity()\n super(UserAddress, self).save(*args, **kwargs)", "def set_address(self, address):\n pass", "def address_to_update(id,con,cur):\n\n psql=f\"\"\"select line1,line2,city,state,postal_code,country from address where extern_client_id='{id}'\"\"\"\n cur.execute(psql)\n record=cur.fetchall()\n \n address={'line1':record[0][0],\n 'line2':record[0][1],\n 'city':record[0][2],\n 'state':record[0][3],\n 'postal_code':record[0][4],\n 'country':record[0][5]}\n return address", "def test_create_new_address_with_permission(self):\n data = dict(\n address_line1='random address 2',\n postal_code='RAN DOM',\n city='random city',\n state_province=self.random_state_province.iso_code,\n country=self.random_country.iso_code,\n )\n\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.post(\n reverse('location:addresses'),\n data,\n format='json',\n )\n\n data['address_line2'] = ''\n data['state_province'] = dict(\n name=self.random_state_province.name,\n iso_code=self.random_state_province.iso_code\n )\n data['country'] = dict(\n name=self.random_country.name,\n iso_code=self.random_country.iso_code\n )\n\n res = json.loads(response.content)\n del res['id']\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(res, data)", "def test_client_address_create(self):\n pass", "def put(self, *args, **kwargs):\n\n addr = EtherAddress(args[0])\n\n if 'desc' in kwargs:\n self.service.update(addr, kwargs['desc'])\n else:\n self.service.update(addr)", "def update(self, update):\n\n params = shlex.split(update)\n if params[0] in self.addr:\n self.addr[params[0]].update(*params)\n\n else:\n a = Addr(self)\n # add both name and IP address\n self.addr[params[0]] = a\n self.addr[params[1]] = a\n a.update(*params)\n self.notify(\"addrmap_added\", *[a], **{})", "def set_address(self, address):\n if address == \"\":\n self.address = Address(\"\", \"\", \"\")\n else:\n self.address = address", "def insert_address_info(address,client_id,extern_client_id,con,cur):\n psql_address=f\"\"\" insert into address \n (extern_id,line1,line2,city,postal_code,state,country,client_id,extern_client_id)\n values \n {\n address.id,\n address.line1,\n address.line2,\n address.city,\n address.postal_code,\n address.state,\n address.country,\n client_id,\n extern_client_id,};\"\"\"\n psql=psql_address\n cur.execute(psql)\n con.commit()", "def add_address(intent, session):\n slots = intent.get('slots')\n sess_data = session.setdefault('attributes', {})\n sess_data['add_address'] = True\n sess_data.setdefault('next_step', 'which')\n if sess_data['next_step'] == 'which':\n if slots['which_address'].get('value') in ORIGIN_NAMES:\n sess_data['which'] = 'origin'\n sess_data['next_step'] = 'num_and_name'\n return reply.build(\"Okay, storing your origin address. 
\"\n \"What's the street number and name?\",\n reprompt=\"What's the street number and name?\",\n persist=sess_data,\n is_end=False)\n elif slots['which_address'].get('value') in DEST_NAMES:\n sess_data['which'] = 'destination'\n sess_data['next_step'] = 'num_and_name'\n return reply.build(\"Okay, storing your destination address. \"\n \"What's the street number and name?\",\n reprompt=\"What's the street number and name?\",\n persist=sess_data,\n is_end=False)\n else:\n sess_data['next_step'] = 'which'\n return reply.build(\"Would you like to set the address here or at \"\n \"your destination?\",\n reprompt='You can say \"here\" or \"destination\".',\n persist=sess_data,\n is_end=False)\n elif sess_data['next_step'] == 'num_and_name':\n if slots['address_street'].get('value'):\n num = slots.get('address_number', {}).get('value', '')\n direction = slots.get('direction', {}).get('value', '')\n st = slots.get('address_street', {}).get('value', '')\n sess_data['spoken_address'] = (('%s %s %s' %\n (num, direction, st))\n .replace(' ', ' ')\n .strip())\n sess_data['next_step'] = 'zip'\n return reply.build(\"Got it. Now what's the zip code? \"\n \"You can tell me \"\n \"to skip it if you don't know.\",\n reprompt=\"What's the zip code?\",\n persist=sess_data,\n is_end=False)\n else:\n return reply.build(\"Please say a street number and street name.\",\n reprompt=\"What's the street number and name?\",\n persist=sess_data,\n is_end=False)\n elif sess_data['next_step'] == 'zip':\n if not slots['address_number'].get('value'):\n return reply.build(\"I need the zip code now.\",\n reprompt=\"What's the zip code?\",\n persist=sess_data,\n is_end=False)\n sess_data['next_step'] = 'check_address'\n sess_data['zip_code'] = slots['address_number']['value']\n return add_address(intent, session)\n elif sess_data['next_step'] == 'check_address':\n if sess_data['zip_code']:\n # Assume that network subscribers are always interested\n # in in-state addresses, but not necessarily in the city.\n addr = '%s, %s, %s' % (sess_data['spoken_address'],\n config.default_state,\n sess_data['zip_code'])\n else:\n # Without a zip code, assume the network's home city\n # to add necessary specificity.\n addr = '%s, %s, %s' % (sess_data['spoken_address'],\n config.default_city,\n config.default_state)\n lat, lon, full_address = geocoding.get_lat_lon(addr)\n if full_address.endswith(\", USA\"):\n # We don't need to keep the country name.\n full_address = full_address[:-5]\n\n if full_address.lower().startswith(\"%s, %s\" %\n (config.default_city.lower(),\n config.default_state.lower())):\n # If the geocoding fails to find a specific address,\n # it will return a generic city location.\n sess_data['next_step'] = 'num_and_name'\n return reply.build(\"I'm sorry, I heard the address \\\"%s\\\", \"\n \"but I can't figure out where that is. \"\n \"Try a different address, something I can \"\n \"look up on the map.\" % addr,\n reprompt=\"What's the street number and name?\",\n persist=sess_data,\n is_end=False)\n\n sess_data['latitude'], sess_data['longitude'] = lat, lon\n sess_data['full_address'] = full_address\n sess_data['next_step'] = 'store_address'\n return reply.build(\"Thanks! 
Do you want to set \"\n \"your %s address to %s?\" %\n (sess_data['which'],\n location.text_to_speech(full_address)),\n reprompt=\"Is that the correct address?\",\n persist=sess_data,\n is_end=False)\n elif sess_data['next_step'] == 'store_address':\n # The user should have said \"yes\" or \"no\" after\n # being asked if we should store the address.\n # Only get here if they didn't.\n full_address = sess_data['full_address']\n return reply.build(\"Sorry, I didn't understand that. \"\n \"Do you want to set \"\n \"your %s address to %s?\" %\n (sess_data['which'],\n location.text_to_speech(full_address)),\n reprompt=\"Is that the correct address?\",\n persist=sess_data,\n is_end=False)\n else:\n return reply.build(\"I'm sorry, I got confused. What do you mean?\",\n persist=sess_data,\n is_end=False)", "def generateNewAddress(self, currency):\n pass", "def test_set_address(self):\n s1 = System()\n s1.set_address(\"101 St James Rd\")\n self.assertEqual(s1.get_address(), \"101 St James Rd\")", "def save_object(self, data):\n return Address(**data)", "def test_address_other_parameters():\n address = lob.Address.create(name='Siddharth Saha', address_line1='104, Printing Boulevard',\n address_line2='Sunset Town', email='[email protected]',\n address_city='Boston', address_state='MA', address_country='US',\n address_zip='12345')\n print address.to_dict()", "def add(self, address):\r\n return http.Request('PUT', '{0}/{1}'.format(\r\n self.get_url(), address)), parsers.parse_json", "def createUserAddress(self,username):\n addr = self.api.getnewaddress()\n dct = {'USER_NAME':username, 'ADDRESS':addr}\n print dct\n address = self.getAdminAddress()\n txd = self.api.publishfrom(address, 'users_addresses',username,self.bin2hex(json.dumps(dct)))\n print txd\n return addr", "def test_address_other_parameters():\n address = lob.Address.create(name = 'Siddharth Saha', address_line1 = '104, Printing Boulevard',\n address_line2 = 'Sunset Town', email = '[email protected]', \n address_city = 'Boston', address_state = 'MA', address_country = 'US',\n address_zip = '12345')\n print address.to_dict()", "def test_new_empty_shipping_address(self):\r\n self.original = self.env[\"res.partner\"].create({\r\n \"is_company\": False,\r\n \"type\": 'delivery',\r\n \"lastname\": \"\",\r\n \"firstname\": \"\"})", "def update_one_address(update_dict,id,id_address,con,cur):\n psql=\"update address set \"\n psql_update=\"\"\n for (key,value) in update_dict.items():\n psql_update=f\"{key}='{value}',\"+psql_update\n\n condition=f\" where extern_id='{id_address}' and extern_client_id='{id}';\"\n psql=psql+psql_update[:-1]+condition\n cur.execute(psql)\n con.commit()", "def update_one_address(update_dict,id,id_address,con,cur):\n psql=\"update address set \"\n psql_update=\"\"\n for (key,value) in update_dict.items():\n psql_update=f\"{key}='{value}',\"+psql_update\n\n condition=f\" where extern_id='{id_address}' and extern_client_id='{id}';\"\n psql=psql+psql_update[:-1]+condition\n cur.execute(psql)\n con.commit()", "def save_address(data):\n print(\"Saving address.\")\n firebase_uid = data['session'].split('/')[-1]\n db = firebase.database()\n contexts = data['queryResult']['outputContexts']\n for i in contexts:\n if 'address_data' in i['name']:\n context = i\n break\n\n pincode = str(int(context[\"parameters\"][\"pincode\"]))\n address = context[\"parameters\"][\"address\"]\n temp = {\n \"Main\": address,\n \"Pincode\": pincode\n }\n db.child(\"user_data\").child(firebase_uid).child(\"Address\").set(temp)\n print(\"Address saved. 
Checking if mobile number is present.\")\n response = check_mobile(data)\n print(\"Response from Check Mobile function = \")\n pprint.pprint(response)\n return response", "def test82_GenNewAddress(self):\n payload = {\n 'id': 0,\n 'params': {'amount': 100.0, 'qr_code': False, 'gen_new': False},\n 'jsonrpc': '2.0',\n 'method': 'create_order'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(res['receiving_address'], 'mjPS9N4T6cjcWLvdkv4jtCrzNA6C6qm8uv')\n self.assertEqual(res['amount'], '0.2860001')\n self.assertTrue(res['exact_amount'])\n order_id = res['order_id']\n payload = {\n 'id': 0, 'params': {'bindings':{'receiving_address': 'mjPS9N4T6cjcWLvdkv4jtCrzNA6C6qm8uv'}},\n 'jsonrpc': '2.0',\n 'method': 'get_address'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result'][0]\n self.assertEqual(res['keypath'], '0/0/4')\n self.assertEqual(res['max_tx'], config.MAX_LEAF_TX)\n self.assertTrue(res['special_digits'] > 0)", "def _update_nro_address(oracle_cursor, nr, event_id, change_flags):\n if not nr.applicants:\n return\n applicant_info = nr.applicants[0]\n\n if change_flags['is_changed__applicant'] or change_flags['is_changed__address']:\n\n # find request_party ID\n oracle_cursor.execute(\"\"\"\n SELECT party_id, address_id\n FROM request_party\n WHERE request_id = :request_id\n AND end_event_id IS NULL\n AND party_type_cd='APP'\n FOR UPDATE\n \"\"\",\n request_id=nr.requestId\n )\n row = oracle_cursor.fetchone()\n rp_id = int(row[0])\n address_id = int(row[1])\n\n # set end event for old request_party instance\n oracle_cursor.execute(\"\"\"\n UPDATE request_party\n SET end_event_id = :event_id\n WHERE party_id = :party_id\n \"\"\",\n event_id=event_id,\n party_id=rp_id)\n\n if change_flags['is_changed__address']:\n # get next address ID\n oracle_cursor.execute(\"\"\"select address_seq.NEXTVAL@global_address from dual\"\"\")\n row = oracle_cursor.fetchone()\n address_id = int(row[0])\n\n # create new address record\n oracle_cursor.execute(\"\"\"\n INSERT INTO address@global_address(addr_id, application_cd, state_province_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, country_type_cd)\n VALUES (:address_id, :application_cd, :state_province_cd, :postal_cd, :addr_line_1, :addr_line_2, :addr_line_3, :city, :country_type_cd)\n \"\"\",\n address_id=address_id,\n application_cd='AB',\n state_province_cd=applicant_info.stateProvinceCd,\n postal_cd=applicant_info.postalCd,\n addr_line_1=applicant_info.addrLine1,\n addr_line_2=applicant_info.addrLine2,\n addr_line_3=applicant_info.addrLine3,\n city=applicant_info.city,\n country_type_cd=applicant_info.countryTypeCd\n )\n\n # create new record for request party instance\n oracle_cursor.execute(\"\"\"\n INSERT INTO request_party (party_id, request_id, party_type_cd, last_name, first_name, middle_name, \n phone_number, fax_number, email_address, address_id, start_event_id, contact, \n client_first_name, client_last_name, decline_notification_ind) \n VALUES (request_party_seq.nextval, :request_id, 'APP', :last_name, :first_name, :middle_name, \n :phone_number, :fax_number, :email_address, :address_id, :event_id, :contact, \n :client_first_name, :client_last_name, :decline_notification_ind)\n \"\"\",\n request_id=nr.requestId,\n last_name=applicant_info.lastName,\n first_name=applicant_info.firstName,\n middle_name=applicant_info.middleName,\n phone_number=applicant_info.phoneNumber,\n fax_number=applicant_info.faxNumber,\n 
email_address=applicant_info.emailAddress,\n address_id=address_id,\n event_id=event_id,\n contact=applicant_info.contact,\n client_first_name=applicant_info.clientFirstName,\n client_last_name=applicant_info.clientLastName,\n decline_notification_ind=applicant_info.declineNotificationInd\n )", "async def test_update_organization_address(client):\n address = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='PATCH',\n path='/v1/addresses/{address_id}'.format(address_id=56),\n headers=headers,\n json=address,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_update__endtoend__4(\n address_book, FieldFactory, UpdateablePersonFactory,\n PostalAddressFactory, browser):\n field_name = FieldFactory(\n address_book, IPostalAddress, 'Choice', u'distance',\n values=[u'< 50 km', u'>= 50 km']).__name__\n PostalAddressFactory(UpdateablePersonFactory(address_book),\n **{field_name: '>= 50 km', 'set_as_default': True})\n\n browser.login('mgr')\n browser.keyword_search(KEYWORD, apply='Update')\n browser.getControl('field').displayValue = ['postal address -- distance']\n browser.getControl('Next').click()\n assert ['No value', '< 50 km', '>= 50 km'] == browser.getControl(\n 'new value').displayOptions\n browser.getControl('new value').displayValue = ['< 50 km']\n browser.getControl('operation').displayValue = [\n 'replace existing value with new one']\n browser.getControl('Next').click()\n # Update sets the value to '< 50 km':\n assert ('<td>Tester</td><td><50km</td>' in\n browser.contents_without_whitespace)", "def build_address(record):\n pass", "def edit_address(self, new_label: str) -> None:\n self.address_form.label_input.fill(new_label)\n self.address_form.save_button.click()", "def test_client_address_partial_update(self):\n pass", "def address(self, address: object):\n\n self._address = address", "def add_address(self, **kwargs):\n addressitem = AddressItem(**kwargs)\n self.addresses.append(addressitem)\n # TODO check uniqueness of email addresses", "def test81_GenNewAddress(self):\n payload = {\n 'id': 0,\n 'params': {'amount': 100.0, 'qr_code': False, 'gen_new': True, 'order_id': 'DUMMY_ORD_3'},\n 'jsonrpc': '2.0',\n 'method': 'create_order'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(res['amount'], '0.286')\n self.assertEqual(res['exact_amount'], False)\n self.assertEqual(res['receiving_address'], 'miXzTXvkEsfVmkwMjLCHfXAjboodrgQQ9Z')", "def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['address'], to_update_value)", "def edit_address(self) -> object:\n self.edit_button.click()\n\n if 'admin' not in self.driver.current_url:\n return WebAddressForm(self).wait_for_component_to_be_present()\n return AdminAddressForm(self).wait_for_component_to_be_present()", "def upsert_location(self, location):", "def address(self, address: str):\n\n self._address = address", "async def create_deposit_address(self, code: str, params={}):\n await self.load_markets()\n request = {\n 'op_renew': 
1,\n }\n return await self.fetch_deposit_address(code, self.extend(request, params))", "def test_create_address(self): \n url = reverse('v1:addresses-list', args=[1])\n profile = ProfileFactory.create()\n \n address_data = {'profile' : str(profile.id),\n 'street_address' : 'test street',\n 'city' : 'test city',\n 'state' : 'test state', \n 'zip_code' : 'test zip',\n 'country' : 'test country',\n }\n\n # Check Anonymous User should return 403\n response = self.client.post(url, address_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n # Profile Owner User\n self.client.credentials(Authorization='Bearer ' + 'regularusertoken')\n response = self.client.post(url, address_data, format='json')\n response_data = response.json()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n \n self.assertIsNotNone(response_data[\"profile\"])\n self.assertEqual(ProfileAddress.objects.get(profile=response_data['profile']).street_address, 'test street')\n self.assertEqual(ProfileAddress.objects.get(profile=response_data['profile']).city, address_data['city'])\n self.assertEqual(len(ProfileAddress.objects.all()), 1)\n self.assertEqual(Profile.objects.get(pk=profile.id).id, uuid.UUID(response_data['profile']))", "def address1(self, address1):\n\n self._address1 = address1", "def set_addresses(cls, records, name, value=None):\n Party = Pool().get('party.party')\n\n for record in records:\n Party.write([record.party], {'addresses': value})", "def create_order(self, request):\n data = request.data\n address_id = request.query_params[\"address_id\"]\n # data._mutable = True\n data[\"user\"] = request.user.id\n data[\"address\"] = address_id\n serializer = OrderCreateSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({\"result\": serializer.data, \"message\": \"Done\", \"status\": True},\n status=status.HTTP_201_CREATED)\n return Response({\"result\": serializer.errors, \"message\": \"Done\", \"status\": False},\n status=status.HTTP_400_BAD_REQUEST)", "def add_address(self, address_item):\r\n self.addresses_to_validate.append(address_item)", "def insert(self, name, address, city, state, zipcode, hour, phone, rating, image):\r\n pass", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def addStake(self, address: str, value_USDC: int, value_eXRD: int, value_LP: int):\n addr = address.lower()\n balances = self.addresses[addr] if addr in self.addresses else []\n balances.append({\n 'USDC': value_USDC,\n 'eXRD': value_eXRD,\n 'LP': value_LP\n })\n self.addresses[addr] = balances", "def set_GOST_address(address=None):\n if not address:\n address = get_address_from_file()\n else:\n f = open(address_file_path, 'w')\n f.writelines([])\n f.close()\n f = os.open(address_file_path, os.O_RDWR)\n os.write(f, bytes(address, 'utf-8'))\n os.fsync(f)\n os.close(f)\n if test_connection(address, verbose=False):\n return address\n else:\n return False", "def test_create_new_address_incorrect_state_province(self):\n data = dict(\n address_line1='random address 2',\n postal_code='RAN DOM',\n city='random city',\n 
state_province=self.random_state_province.iso_code,\n country=self.random_country2.iso_code,\n )\n\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.post(\n reverse('location:addresses'),\n data,\n format='json',\n )\n\n res = json.loads(response.content)\n\n err = {\n 'detail': 'The StateProvince should be linked to the Country'\n }\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(res, err)", "def createAddressSet(self) -> ghidra.program.model.address.AddressSet:\n ...", "async def update_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been updated successfully..!\")\n else:\n await ctx.send(\"There is no email address configured, \"\n \"Please use add command to add one..!\")\n return", "def change_name(self, address, name):\n with self.connect() as c:\n cur = c.cursor()\n cur.execute(\"UPDATE AddressBook SET name = '{}' WHERE address = '{}'\".format(name, address))\n return True", "def insert_address_info_company(address,company_id,extern_company_id,con,cur):\n psql_address=f\"\"\" insert into address \n (extern_id,line1,line2,city,postal_code,state,country,company_id,extern_client_id)\n values \n {\n address.id,\n address.line1,\n address.line2,\n address.city,\n address.postal_code,\n address.state,\n address.country,\n company_id,\n extern_company_id,};\"\"\"\n psql=psql_address\n cur.execute(psql)\n print(psql)\n con.commit()", "def __editAddress(self):\n idx = self.bookmarksTree.currentIndex()\n idx = idx.sibling(idx.row(), 1)\n self.bookmarksTree.edit(idx)", "def address_1(self, address_1):\n\n self._address_1 = address_1", "def check_and_add(email, name):\n\n key = ndb.Key(AddressEntry, email)\n model = key.get()\n # we only have a problem if a model for the given email exists AND the name is different\n if not model is None:\n if model.name != name:\n jdict = model.to_json_dict()\n jdict[\"requested_name\"] = name\n return False, jdict\n\n model = AddressEntry(\n id=email,\n email=email,\n name=name\n )\n model.put()\n return True, model.to_json_dict()", "def test_updateContact(self):\n qs = Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n to_update_value = 'address 2'\n contact2.address = to_update_value\n contact2.save()\n # refresh from db\n contact3 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact3.address, to_update_value)", "def ship_to_pickup_address(self, address):\n self._set('shipping', 'pickup_address_id', address.id)", "async def add_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list.keys():\n await ctx.send(\n \"There is already an email address configured, \"\n \"Please use update command to 
update it..!\")\n return\n else:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been configured successfully..!\")", "def set_address(self, address):\n self._java_ref.setAddress(address)", "def test_ipam_ip_addresses_update(self):\n pass", "def test_new_empty_invoice_address(self):\r\n self.original = self.env[\"res.partner\"].create({\r\n \"is_company\": False,\r\n \"type\": 'invoice',\r\n \"lastname\": \"\",\r\n \"firstname\": \"\"})", "def add_streets():\n add_from_file('street', STREETS_FNAME)", "def site_address_etl():\r\n with arcetl.ArcETL(\"Site Addresses\") as etl:\r\n etl.extract(dataset.SITE_ADDRESS.path(\"maint\"))\r\n # Clean maintenance values.\r\n transform.clear_nonpositive(etl, field_names=[\"house_nbr\"])\r\n transform.clean_whitespace(\r\n etl,\r\n field_names=[\r\n \"house_suffix_code\",\r\n \"pre_direction_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n \"city_name\",\r\n \"landuse\",\r\n \"maptaxlot\",\r\n \"account\",\r\n ],\r\n )\r\n transform.force_uppercase(\r\n etl,\r\n field_names=[\r\n \"house_suffix_code\",\r\n \"pre_direction_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n \"maptaxlot\",\r\n \"valid\",\r\n \"archived\",\r\n ],\r\n )\r\n transform.clear_non_numeric_text(etl, field_names=[\"account\"])\r\n etl.transform(\r\n arcetl.attributes.update_by_function,\r\n field_name=\"landuse\",\r\n function=(lambda x: x if is_numeric(x) else \"0\"),\r\n )\r\n transform.force_yn(etl, field_names=[\"archived\"], default=\"N\")\r\n transform.force_yn(etl, field_names=[\"valid\"], default=\"Y\")\r\n transform.add_missing_fields(etl, dataset.SITE_ADDRESS, tags=[\"pub\"])\r\n # Assign geometry attributes.\r\n coordinate_system_xy_keys = {\r\n 2914: {\"x\": \"x_coordinate\", \"y\": \"y_coordinate\"},\r\n 4326: {\"x\": \"longitude\", \"y\": \"latitude\"},\r\n }\r\n for spatial_reference_id, xy_key in coordinate_system_xy_keys.items():\r\n for axis, key in xy_key.items():\r\n etl.transform(\r\n arcetl.attributes.update_by_geometry,\r\n field_name=key,\r\n spatial_reference_item=spatial_reference_id,\r\n geometry_properties=[\"centroid\", axis],\r\n )\r\n # Assign overlays.\r\n overlay_kwargs = [\r\n # City attributes.\r\n {\r\n \"field_name\": \"geocity\",\r\n \"overlay_field_name\": \"inccityabbr\",\r\n \"overlay_dataset_path\": dataset.INCORPORATED_CITY_LIMITS.path(),\r\n },\r\n {\r\n \"field_name\": \"annexhist\",\r\n \"overlay_field_name\": \"annexnum\",\r\n \"overlay_dataset_path\": dataset.ANNEXATION_HISTORY.path(\"pub\"),\r\n },\r\n # Have to do overlay rather than join because some lack codes.\r\n {\r\n \"field_name\": \"yearanx\",\r\n \"overlay_field_name\": \"annexyear\",\r\n \"overlay_dataset_path\": dataset.ANNEXATION_HISTORY.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"ugb\",\r\n \"overlay_field_name\": \"ugbcity\",\r\n \"overlay_dataset_path\": dataset.UGB.path(\"pub\"),\r\n },\r\n # Planning & zoning attributes.\r\n {\r\n \"field_name\": \"greenwy\",\r\n \"overlay_field_name\": \"greenway\",\r\n \"overlay_dataset_path\": dataset.WILLAMETTE_RIVER_GREENWAY.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"nodaldev\",\r\n \"overlay_field_name\": \"nodearea\",\r\n \"overlay_dataset_path\": dataset.NODAL_DEVELOPMENT_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"plandes_id\",\r\n \"overlay_field_name\": 
\"plandes_id\",\r\n \"overlay_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"sprsvcbndy\",\r\n \"overlay_field_name\": \"is_inside\",\r\n \"overlay_dataset_path\": dataset.SPRINGFIELD_HANSEN_EXTENT.path(),\r\n },\r\n # Public safety attributes.\r\n {\r\n \"field_name\": \"ambulance_district\",\r\n \"overlay_field_name\": \"asacode\",\r\n \"overlay_dataset_path\": dataset.AMBULANCE_SERVICE_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"firedist\",\r\n \"overlay_field_name\": \"fireprotprov\",\r\n \"overlay_dataset_path\": dataset.FIRE_PROTECTION_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"police_beat\",\r\n \"overlay_field_name\": \"CAD\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.LCOG_GIS_PROJECTS,\r\n \"Public_Safety\\\\PSAPS\\\\CLPSAP\\\\SunGard_CAD\\\\Maintained_Layers\",\r\n \"Maintained_Layers.gdb\\\\Fire_Law_Tow\\\\law_beat\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"psap_code\",\r\n \"overlay_field_name\": \"psap_code\",\r\n \"overlay_dataset_path\": dataset.PSAP_AREA.path(\"pub\"),\r\n },\r\n # Election attributes.\r\n {\r\n \"field_name\": \"electionpr\",\r\n \"overlay_field_name\": \"precntnum\",\r\n \"overlay_dataset_path\": dataset.ELECTION_PRECINCT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"ccward\",\r\n \"overlay_field_name\": \"ward\",\r\n \"overlay_dataset_path\": dataset.CITY_WARD.path(),\r\n },\r\n {\r\n \"field_name\": \"clpud_subdivision\",\r\n \"overlay_field_name\": \"SUBDIVISIO\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.LCOG_GIS_PROJECTS,\r\n \"UtilityDistricts\\\\CentralLincolnPUD\\\\Redistricting2012\",\r\n \"CLPUD_Subdivisions.shp\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"cocommdist\",\r\n \"overlay_field_name\": \"commrdist\",\r\n \"overlay_dataset_path\": (\r\n dataset.COUNTY_COMMISSIONER_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n {\r\n \"field_name\": \"epud\",\r\n \"overlay_field_name\": \"boardid\",\r\n \"overlay_dataset_path\": dataset.EPUD_SUBDISTRICT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"hwpud_subdivision\",\r\n \"overlay_field_name\": \"BoardZone\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.LCOG_GIS_PROJECTS,\r\n \"UtilityDistricts\\\\HecetaWaterPUD\\\\NewBoardSubzones\",\r\n \"HecetaData.gdb\",\r\n \"ScenarioB\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"lcczone\",\r\n \"overlay_field_name\": \"lccbrdzone\",\r\n \"overlay_dataset_path\": dataset.LCC_BOARD_ZONE.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"senatedist\",\r\n \"overlay_field_name\": \"sendist\",\r\n \"overlay_dataset_path\": dataset.STATE_SENATOR_DISTRICT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"strepdist\",\r\n \"overlay_field_name\": \"repdist\",\r\n \"overlay_dataset_path\": (\r\n dataset.STATE_REPRESENTATIVE_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n {\r\n \"field_name\": \"swcd\",\r\n \"overlay_field_name\": \"swcdist\",\r\n \"overlay_dataset_path\": (\r\n dataset.SOIL_WATER_CONSERVATION_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n {\r\n \"field_name\": \"swcdzone\",\r\n \"overlay_field_name\": \"swczone\",\r\n \"overlay_dataset_path\": (\r\n dataset.SOIL_WATER_CONSERVATION_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n # Education attributes.\r\n {\r\n \"field_name\": \"schooldist\",\r\n \"overlay_field_name\": \"district\",\r\n \"overlay_dataset_path\": dataset.SCHOOL_DISTRICT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"elem\",\r\n \"overlay_field_name\": \"attend\",\r\n \"overlay_dataset_path\": dataset.ELEMENTARY_SCHOOL_AREA.path(\"pub\"),\r\n },\r\n {\r\n 
\"field_name\": \"middle\",\r\n \"overlay_field_name\": \"attend\",\r\n \"overlay_dataset_path\": dataset.MIDDLE_SCHOOL_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"high\",\r\n \"overlay_field_name\": \"attend\",\r\n \"overlay_dataset_path\": dataset.HIGH_SCHOOL_AREA.path(\"pub\"),\r\n },\r\n # Transportation attributes.\r\n {\r\n \"field_name\": \"ltddist\",\r\n \"overlay_field_name\": \"LTD\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\ltd\\\\2012 LTD Boundary.shp\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"ltdridesrc\",\r\n \"overlay_field_name\": \"LTD\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\ltd\\\\2015 RideSource Boundary.shp\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"cats\",\r\n \"overlay_field_name\": \"CATSBNDY\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\eug\\\\catsbndy.shp\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"trans_analysis_zone\",\r\n \"overlay_field_name\": \"TAZ_NUM\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\MTAZ16.shp\"\r\n ),\r\n },\r\n # Natural attributes.\r\n {\r\n \"field_name\": \"firmnumber\",\r\n \"overlay_field_name\": \"firm_pan\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\FIRMPanel\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"soilkey\",\r\n \"overlay_field_name\": \"mukey\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\soils\\\\Soils.gdb\\\\Soil\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"wetland\",\r\n \"overlay_field_name\": \"WET_TYPE\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\eug\\\\Wetland\\\\wetlands.shp\"\r\n ),\r\n },\r\n # Census attributes.\r\n {\r\n \"field_name\": \"ctract\",\r\n \"overlay_field_name\": \"TRACT\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA,\r\n \"federal\\\\census\\\\lane\\\\2010\",\r\n \"lc_census2010.gdb\\\\lc_tracts2010\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"blockgr\",\r\n \"overlay_field_name\": \"BlockGroup\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA,\r\n \"federal\\\\census\\\\lane\\\\2010\",\r\n \"lc_census2010.gdb\\\\lc_blockgroups2010\",\r\n ),\r\n },\r\n # Other district attributes.\r\n {\r\n \"field_name\": \"neighbor\",\r\n \"overlay_field_name\": \"NEIBORHD\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA,\r\n \"boundary\\\\districts\\\\eug\",\r\n \"Boundary.gdb\\\\EugNeighborhoods\",\r\n ),\r\n },\r\n ]\r\n for kwargs in overlay_kwargs:\r\n etl.transform(\r\n arcetl.attributes.update_by_overlay,\r\n overlay_central_coincident=True,\r\n **kwargs\r\n )\r\n # Override overlays for special cases.\r\n for override in OVERRIDE_ATTRS:\r\n for kwargs in OVERRIDE_ATTRS[override].get(\"overlay_kwargs\", []):\r\n etl.transform(\r\n arcetl.attributes.update_by_value,\r\n dataset_where_sql=OVERRIDE_ATTRS[override].get(\"where_sql\"),\r\n **kwargs\r\n )\r\n # Clean overlay values.\r\n transform.clean_whitespace(\r\n etl, field_names=[\"police_beat\", \"wetland\", \"ctract\", \"blockgr\", \"neighbor\"]\r\n )\r\n transform.force_uppercase(etl, field_names=[\"cats\", \"ltddist\", \"ltdridesrc\"])\r\n # Set default overlay values where missing.\r\n transform.force_yn(\r\n etl,\r\n field_names=[\"greenwy\", \"sprsvcbndy\", \"cats\", \"ltddist\", \"ltdridesrc\"],\r\n default=\"N\",\r\n )\r\n # Remove invalid overlay values.\r\n 
transform.clear_nonpositive(etl, field_names=[\"ctract\", \"blockgr\"])\r\n etl.transform(\r\n arcetl.attributes.update_by_function,\r\n field_name=\"neighbor\",\r\n function=(lambda x: x if x and int(x) != 99 else None),\r\n )\r\n # Assign joinable field values after overlays.\r\n join_kwargs = [\r\n # Core attributes.\r\n {\r\n \"field_name\": \"pre_direction\",\r\n \"join_field_name\": \"description\",\r\n \"join_dataset_path\": dataset.STREET_DIRECTION.path(),\r\n \"on_field_pairs\": [(\"pre_direction_code\", \"code\")],\r\n },\r\n {\r\n \"field_name\": \"street_type\",\r\n \"join_field_name\": \"description\",\r\n \"join_dataset_path\": dataset.STREET_TYPE.path(),\r\n \"on_field_pairs\": [(\"street_type_code\", \"code\")],\r\n },\r\n {\r\n \"field_name\": \"unit_type\",\r\n \"join_field_name\": \"description\",\r\n \"join_dataset_path\": dataset.UNIT_TYPE.path(),\r\n \"on_field_pairs\": [(\"unit_type_code\", \"code\")],\r\n },\r\n {\r\n \"field_name\": \"city_name_abbr\",\r\n \"join_field_name\": \"CityNameAbbr\",\r\n \"join_dataset_path\": dataset.CITY.path(),\r\n \"on_field_pairs\": [(\"city_name\", \"CityName\")],\r\n },\r\n # Extended attributes.\r\n {\r\n \"field_name\": \"five_digit_zip_code\",\r\n \"join_field_name\": \"zip_code\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n # Any addresses not assigned zip from USPS gets an overlay zip.\r\n {\r\n \"field_name\": \"five_digit_zip_code\",\r\n \"dataset_where_sql\": \"five_digit_zip_code is null\",\r\n \"join_field_name\": \"zip_code_overlay\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"four_digit_zip_code\",\r\n \"join_field_name\": \"plus_four_code\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_delivery_point_code\",\r\n \"join_field_name\": \"delivery_point_code\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"postal_carrier_route\",\r\n \"join_field_name\": \"carrier_route\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_is_cmra\",\r\n \"join_field_name\": \"is_cmra\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_is_vacant\",\r\n \"join_field_name\": \"is_vacant\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_has_mail_service\",\r\n \"join_field_name\": \"has_mail_service\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"landuse_desc\",\r\n \"join_field_name\": \"ludesc\",\r\n \"join_dataset_path\": dataset.LAND_USE_CODES_DETAILED.path(\"pub\"),\r\n \"on_field_pairs\": [(\"landuse\", \"landusec\")],\r\n },\r\n {\r\n \"field_name\": \"usecode\",\r\n \"join_field_name\": \"usecode\",\r\n \"join_dataset_path\": dataset.LAND_USE_CODES_DETAILED.path(\"pub\"),\r\n \"on_field_pairs\": [(\"landuse\", \"landusec\")],\r\n },\r\n {\r\n \"field_name\": 
\"usedesc\",\r\n \"join_field_name\": \"ucname\",\r\n \"join_dataset_path\": dataset.LAND_USE_CODES_USE_CODES.path(\"pub\"),\r\n \"on_field_pairs\": [(\"usecode\", \"usecode\")],\r\n },\r\n # A&T attributes.\r\n {\r\n \"field_name\": \"tca\",\r\n \"join_field_name\": \"tax_code_overlay\",\r\n \"join_dataset_path\": dataset.ADDRESS_ASSESS_TAX_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n # City attributes.\r\n {\r\n \"field_name\": \"geocity_name\",\r\n \"join_field_name\": \"inccityname\",\r\n \"join_dataset_path\": dataset.INCORPORATED_CITY_LIMITS.path(),\r\n \"on_field_pairs\": [(\"geocity\", \"inccityabbr\")],\r\n },\r\n {\r\n \"field_name\": \"ugb_city_name\",\r\n \"join_field_name\": \"ugbcityname\",\r\n \"join_dataset_path\": dataset.UGB.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ugb\", \"ugbcity\")],\r\n },\r\n # Planning & zoning attributes.\r\n {\r\n \"field_name\": \"nodaldev_name\",\r\n \"join_field_name\": \"nodename\",\r\n \"join_dataset_path\": dataset.NODAL_DEVELOPMENT_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"nodaldev\", \"nodearea\")],\r\n },\r\n {\r\n \"field_name\": \"plandesjuris\",\r\n \"join_field_name\": \"planjuris\",\r\n \"join_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n \"on_field_pairs\": [(\"plandes_id\", \"plandes_id\")],\r\n },\r\n {\r\n \"field_name\": \"plandes\",\r\n \"join_field_name\": \"plandes\",\r\n \"join_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n \"on_field_pairs\": [(\"plandes_id\", \"plandes_id\")],\r\n },\r\n {\r\n \"field_name\": \"plandesdesc\",\r\n \"join_field_name\": \"plandesnam\",\r\n \"join_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n \"on_field_pairs\": [(\"plandes_id\", \"plandes_id\")],\r\n },\r\n # Public safety attributes.\r\n {\r\n \"field_name\": \"ambulance_service_area\",\r\n \"join_field_name\": \"asa\",\r\n \"join_dataset_path\": dataset.AMBULANCE_SERVICE_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ambulance_district\", \"asacode\")],\r\n },\r\n {\r\n \"field_name\": \"ambulance_service_provider\",\r\n \"join_field_name\": \"provider\",\r\n \"join_dataset_path\": dataset.AMBULANCE_SERVICE_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ambulance_district\", \"asacode\")],\r\n },\r\n {\r\n \"field_name\": \"fire_protection_provider\",\r\n \"join_field_name\": \"fpprovname\",\r\n \"join_dataset_path\": dataset.FIRE_PROTECTION_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"firedist\", \"fireprotprov\")],\r\n },\r\n {\r\n \"field_name\": \"psap_name\",\r\n \"join_field_name\": \"psap_name\",\r\n \"join_dataset_path\": dataset.PSAP_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"psap_code\", \"psap_code\")],\r\n },\r\n {\r\n \"field_name\": \"emergency_service_number\",\r\n \"join_field_name\": \"emergency_service_number\",\r\n \"join_dataset_path\": dataset.EMERGENCY_SERVICE_NUMBER.path(),\r\n \"on_field_pairs\": [\r\n # City used as proxy for police.\r\n (\"geocity\", \"city_limits\"),\r\n (\"ambulance_district\", \"asa_code\"),\r\n (\"firedist\", \"fire_district\"),\r\n (\"psap_code\", \"psap_code\")\r\n ],\r\n },\r\n {\r\n \"field_name\": \"emergency_service_number\",\r\n \"join_field_name\": \"emergency_service_number\",\r\n \"join_dataset_path\": dataset.EMERGENCY_SERVICE_NUMBER.path(),\r\n \"on_field_pairs\": [\r\n # City used as proxy for police.\r\n (\"geocity\", \"city_limits\"),\r\n (\"ambulance_district\", \"asa_code\"),\r\n (\"firedist\", \"fire_district\"),\r\n ],\r\n \"dataset_where_sql\": \"emergency_service_number is 
null\",\r\n },\r\n # Election attributes.\r\n {\r\n \"field_name\": \"city_councilor\",\r\n \"join_field_name\": \"councilor\",\r\n \"join_dataset_path\": dataset.CITY_WARD.path(),\r\n \"on_field_pairs\": [(\"ccward\", \"ward\")],\r\n },\r\n {\r\n \"field_name\": \"cocommdist_name\",\r\n \"join_field_name\": \"cmdistname\",\r\n \"join_dataset_path\": dataset.COUNTY_COMMISSIONER_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"cocommdist\", \"commrdist\")],\r\n },\r\n {\r\n \"field_name\": \"county_commissioner\",\r\n \"join_field_name\": \"commrname\",\r\n \"join_dataset_path\": dataset.COUNTY_COMMISSIONER_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"cocommdist\", \"commrdist\")],\r\n },\r\n {\r\n \"field_name\": \"eweb_commissioner_name\",\r\n \"join_field_name\": \"eweb_commissioner_name\",\r\n \"join_dataset_path\": dataset.EWEB_COMMISSIONER.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ccward\", \"city_council_ward\")],\r\n },\r\n {\r\n \"field_name\": \"state_representative\",\r\n \"join_field_name\": \"repname\",\r\n \"join_dataset_path\": dataset.STATE_REPRESENTATIVE_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"strepdist\", \"repdist\")],\r\n },\r\n {\r\n \"field_name\": \"state_senator\",\r\n \"join_field_name\": \"senname\",\r\n \"join_dataset_path\": dataset.STATE_SENATOR_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"senatedist\", \"sendist\")],\r\n },\r\n # Education attributes.\r\n {\r\n \"field_name\": \"schooldist_name\",\r\n \"join_field_name\": \"names\",\r\n \"join_dataset_path\": dataset.SCHOOL_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"schooldist\", \"district\")],\r\n },\r\n {\r\n \"field_name\": \"elem_name\",\r\n \"join_field_name\": \"elem_school\",\r\n \"join_dataset_path\": dataset.ELEMENTARY_SCHOOL_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"elem\", \"attend\")],\r\n },\r\n {\r\n \"field_name\": \"middle_name\",\r\n \"join_field_name\": \"middle_school\",\r\n \"join_dataset_path\": dataset.MIDDLE_SCHOOL_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"middle\", \"attend\")],\r\n },\r\n {\r\n \"field_name\": \"high_name\",\r\n \"join_field_name\": \"high_school\",\r\n \"join_dataset_path\": dataset.HIGH_SCHOOL_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"high\", \"attend\")],\r\n },\r\n # Natural attributes.\r\n {\r\n \"field_name\": \"firmprinted\",\r\n \"join_field_name\": \"panel_printed\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\FIRMPanel\"\r\n ),\r\n \"on_field_pairs\": [(\"firmnumber\", \"firm_pan\")],\r\n },\r\n {\r\n \"field_name\": \"firm_community_id\",\r\n \"join_field_name\": \"com_nfo_id\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\CommunityInfo\"\r\n ),\r\n \"on_field_pairs\": [(\"geocity\", \"community_code\")],\r\n },\r\n {\r\n \"field_name\": \"firm_community_post_firm_date\",\r\n \"join_field_name\": \"in_frm_dat\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\CommunityInfo\"\r\n ),\r\n \"on_field_pairs\": [(\"geocity\", \"community_code\")],\r\n },\r\n {\r\n \"field_name\": \"soiltype\",\r\n \"join_field_name\": \"musym\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\soils\\\\Soils.gdb\\\\MUAggAtt\"\r\n ),\r\n \"on_field_pairs\": [(\"soilkey\", \"mukey\")],\r\n },\r\n # Other district attributes.\r\n {\r\n \"field_name\": \"neighborhood_name\",\r\n \"join_field_name\": \"NAME\",\r\n \"join_dataset_path\": 
os.path.join(\r\n path.REGIONAL_DATA,\r\n \"boundary\\\\districts\\\\eug\\\\Boundary.gdb\\\\EugNeighborhoods\",\r\n ),\r\n \"on_field_pairs\": [(\"neighbor\", \"NEIBORHD\")],\r\n },\r\n ]\r\n for kwargs in join_kwargs:\r\n etl.transform(arcetl.attributes.update_by_joined_value, **kwargs)\r\n # Clean join values.\r\n transform.clean_whitespace(etl, field_names=[\"neighborhood_name\"])\r\n # Remove Metro Plan designations, per City of Eugene request.\r\n transform.clear_all_values(\r\n etl,\r\n field_names=[\"plandes\", \"plandesdesc\"],\r\n dataset_where_sql=\"plandesjuris = 'MTP'\",\r\n )\r\n # Remove +4 ZIP where initial ZIP is missing.\r\n transform.clear_all_values(\r\n etl,\r\n field_names=[\"four_digit_zip_code\"],\r\n dataset_where_sql=\"five_digit_zip_code is null\",\r\n )\r\n # Assign constants.\r\n constant_kwargs = [\r\n {\"field_name\": \"state_code\", \"value\": \"OR\"},\r\n {\"field_name\": \"state_name\", \"value\": \"Oregon\"},\r\n {\"field_name\": \"county_name\", \"value\": \"Lane\"},\r\n ]\r\n for kwargs in constant_kwargs:\r\n etl.transform(arcetl.attributes.update_by_value, **kwargs)\r\n # Override constants for special cases.\r\n for override in OVERRIDE_ATTRS:\r\n for kwargs in OVERRIDE_ATTRS[override].get(\"constant_kwargs\", []):\r\n etl.transform(\r\n arcetl.attributes.update_by_value,\r\n dataset_where_sql=OVERRIDE_ATTRS[override].get(\"where_sql\"),\r\n **kwargs\r\n )\r\n # Build values from functions.\r\n function_kwargs = [\r\n {\r\n \"field_name\": \"street_name_full\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"pre_direction_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"city_state_zip\",\r\n \"function\": city_state_zip,\r\n \"kwarg_field_names\": [\"city_name\", \"state_code\", \"five_digit_zip_code\"],\r\n },\r\n {\r\n \"field_name\": \"concat_address_no_unit\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"house_nbr\",\r\n \"house_suffix_code\",\r\n \"street_name_full\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"concat_address\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"concat_address_no_unit\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"concat_address_no_direction\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"house_nbr\",\r\n \"house_suffix_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"concat_address_full\",\r\n \"function\": concat_address_full,\r\n \"kwarg_field_names\": [\r\n \"concat_address\",\r\n \"city_name\",\r\n \"state_code\",\r\n \"five_digit_zip_code\",\r\n \"four_digit_zip_code\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"mapnumber\",\r\n \"function\": (lambda x: x[:8] if x else None),\r\n \"arg_field_names\": [\"maptaxlot\"],\r\n },\r\n {\r\n \"field_name\": \"taxlot\",\r\n \"function\": (lambda x: x[-5:] if x else None),\r\n \"arg_field_names\": [\"maptaxlot\"],\r\n },\r\n {\r\n \"field_name\": \"maptaxlot_hyphen\",\r\n \"function\": maptaxlot_separated,\r\n \"arg_field_names\": [\"maptaxlot\"],\r\n },\r\n ]\r\n for kwargs in function_kwargs:\r\n etl.transform(\r\n arcetl.attributes.update_by_function, field_as_first_arg=False, **kwargs\r\n )\r\n # Take care of addresses flagged not to update in publication.\r\n ids = {}\r\n id_set_kwargs = {\r\n \"in_publication\": {\"dataset_path\": dataset.SITE_ADDRESS.path(\"pub\")},\r\n 
\"in_transform\": {\"dataset_path\": etl.transform_path},\r\n \"no_update\": {\r\n \"dataset_path\": dataset.ADDRESS_ISSUES.path(),\r\n \"dataset_where_sql\": \"update_publication = 0\",\r\n },\r\n }\r\n for key, kwargs in id_set_kwargs.items():\r\n ids[key] = set(\r\n _id\r\n for _id, in arcetl.attributes.as_iters(\r\n field_names=\"site_address_gfid\", **kwargs\r\n )\r\n )\r\n ids[\"rollback\"] = ids[\"no_update\"] & ids[\"in_transform\"] & ids[\"in_publication\"]\r\n ids[\"hold\"] = ids[\"no_update\"] & (ids[\"in_transform\"] - ids[\"in_publication\"])\r\n rollback_features = [\r\n feat\r\n for feat in arcetl.attributes.as_dicts(dataset.SITE_ADDRESS.path(\"pub\"))\r\n if feat[\"site_address_gfid\"] in ids[\"rollback\"]\r\n ]\r\n # Strip OIDs (not part of update).\r\n for feat in rollback_features:\r\n del feat[\"oid@\"]\r\n if rollback_features:\r\n etl.transform(\r\n arcetl.features.update_from_dicts,\r\n update_features=rollback_features,\r\n id_field_names=\"site_address_gfid\",\r\n field_names=rollback_features[0].keys(),\r\n delete_missing_features=False,\r\n )\r\n etl.transform(\r\n arcetl.features.delete_by_id,\r\n delete_ids=ids[\"hold\"],\r\n id_field_names=\"site_address_gfid\",\r\n )\r\n LOG.info(\"%s addresses held from publication\", len(ids[\"hold\"]))\r\n LOG.info(\"%s addresses rolled-back from publication\", len(ids[\"rollback\"]))\r\n if any([ids[\"hold\"], ids[\"rollback\"]]):\r\n send_publication_issues_message()\r\n etl.load(dataset.SITE_ADDRESS.path(\"pub\"))\r\n send_new_lincom_address_message()", "def form_valid(self, ppform, address_form,cuform):\n addr = address_form.save()\n cuformo = cuform.save()\n ppform.save()\n self.object.address = addr\n self.object.user = cuformo\n self.object.save()\n\n return HttpResponseRedirect(self.get_success_url())", "def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2", "def address(self):\n ...", "def test_create_new_address_without_permission(self):\n data = dict(\n address_line1='random address 1',\n postal_code='random postal',\n city='random city',\n state_province=self.random_state_province.name,\n country=self.random_country.name,\n )\n\n self.client.force_authenticate(user=self.user)\n\n response = self.client.post(\n reverse('location:addresses'),\n data,\n format='json',\n )\n\n content = {\"detail\": \"You are not authorized to create a new address.\"}\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def add_place(name, country, city, street):\n place = Place(name=name, country=country, city=city, street=street)\n session.add(place)\n session.commit()", "def address(self, address):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\")\n\n self._address = address", "def test_ipam_ip_addresses_create(self):\n pass", "def update_or_create_delivery(self, orderitem_data):", "def open_edit_address(self, address: dict) -> None:\n row = self.addresses_list.surface_address_row(address)\n\n row.open_kebab_menu()\n row.kebab_menu.edit_address()", "def add_addressitem(self, addressitem):\n self.addresses.append(addressitem)", "def create(cls, address, location='Default', dynamic=False):\n from smc.elements.helpers import location_helper\n location_ref = location_helper(location)\n address = [{'address': address,\n 'dynamic': dynamic,\n 'location_ref': location_ref}]\n return {'contact_addresses': address}", "def _set_address(self, v, 
load=False):\n try:\n t = YANGDynClass(v,base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"address must be of a type compatible with base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__address = t\n if hasattr(self, '_set'):\n self._set()", "def post(self):\n return self.get_request_handler(request.headers).create_new_address(request)", "def test_11_individual_1_address(self):\n with mock_api(individual_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999254')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999254'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Name of the billing address\n self.assertEqual(partner.name, 'Ferreira Margaux')\n self.assertEqual(partner.type, 'default')\n # billing address merged with the partner\n self.assertEqual(len(partner.child_ids), 0)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 1)\n address_bind = partner.magento_address_bind_ids[0]\n self.assertEqual(address_bind.magento_id, '9999253',\n msg=\"The merged address should be the \"\n \"billing address\")", "def test_user_address_relationship(self):\r\n user = self._create_test_user()\r\n addresses = [self._create_test_address() for x in range(3)]\r\n user.addresses += addresses\r\n self.db.session.commit()\r\n for a in addresses:\r\n assert a in user.addresses", "def update(self, request, phone):\n try:\n attrs = self.flatten_dict(request.POST)\n #if self.exists(**attrs):\n #return rc.DUPLICATE_ENTRY\n #else:\n endpoint = Endpoint.objects.get(uid__exact=phone, site__name__exact=request.user)\n if attrs.get('effective_caller_id_name'):\n endpoint.effective_caller_id_name = attrs.get('effective_caller_id_name')\n if attrs.get('password'):\n endpoint.password = attrs.get('password')\n if attrs.get('description'):\n endpoint.description = attrs.get('description')\n if attrs.get(\"enabled\") == \"false\":\n endpoint.enable = False\n elif attrs.get(\"enabled\") == \"true\":\n endpoint.enable = True\n if attrs.get(\"enable\") == \"false\":\n endpoint.enable = False\n elif attrs.get(\"enable\") == \"true\":\n endpoint.enable = True\n endpoint.save()\n return endpoint\n except:\n return rc.NOT_HERE" ]
[ "0.7209289", "0.70491314", "0.6748431", "0.660723", "0.66056013", "0.6600469", "0.65181816", "0.65090024", "0.64898705", "0.64765316", "0.6472879", "0.6464529", "0.6418298", "0.6395395", "0.63934857", "0.63818413", "0.6379002", "0.6343427", "0.634245", "0.6290266", "0.628957", "0.6285275", "0.62660414", "0.6241175", "0.6217752", "0.62130046", "0.62060106", "0.618207", "0.61816704", "0.61797255", "0.61691064", "0.61187845", "0.611765", "0.6074352", "0.60488766", "0.60488766", "0.60185975", "0.59886086", "0.5935827", "0.59205276", "0.58919144", "0.5884149", "0.5868755", "0.5841162", "0.58356595", "0.5831204", "0.58168274", "0.5785156", "0.57778156", "0.57435507", "0.57167745", "0.5631729", "0.5607708", "0.56049055", "0.55717903", "0.55641675", "0.5563722", "0.55550635", "0.5552731", "0.5552731", "0.5552731", "0.5552731", "0.5552731", "0.5552731", "0.5552731", "0.5552731", "0.553188", "0.5529352", "0.5524285", "0.552324", "0.55101645", "0.55070573", "0.5499726", "0.54964596", "0.54905444", "0.5481382", "0.5480066", "0.5472277", "0.5463503", "0.54583496", "0.5455836", "0.5442827", "0.5431956", "0.5428772", "0.54192585", "0.5418978", "0.5417812", "0.54150504", "0.54095703", "0.54002136", "0.53851646", "0.5382179", "0.5373939", "0.5355671", "0.53418356", "0.53035724", "0.5301058", "0.52992177", "0.5298152", "0.52980494" ]
0.762756
0
Connected to buttons, loads the messages of the corresponding group. Prints the messages in the QTextBox.
def cargar_mensajes_grupo(self, grupo):
    texto = ""
    for mensaje in Mensaje.grupos[grupo]:
        if not self.filtrar_mensajes or mensaje.sospechoso:
            if mensaje.sospechoso:
                texto += f"<p style=\"color: red\">{mensaje}<p/>"
            else:
                texto += f"<p>{mensaje}<p/>"
    self.text_area.setHtml(texto)
    self.title_label.setText(f"Chat grupal: {grupo}")
    self.mostrando_grupo = grupo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "def send_messages(self):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username,\r\n font=self.title_font, bg=self.bg_color, height=2)\r\n user_label.pack(pady=10, padx=50)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=10)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n write_message = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n write_message.pack()\r\n scrollbar_msg.config(command=write_message.yview)\r\n button_speech_rec = Button(self.root, text=\"listen\\nto speech\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.create_speech_thread(write_message))\r\n button_speech_rec.pack(pady=10)\r\n button_send = Button(self.root, text=\"send\", font=self.text_font,\r\n height=2, width=20, command=lambda: self.send(write_message))\r\n button_send.pack(pady=10)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.choose_path)\r\n button_send.pack(pady=10)", "def text_e(self, event):\n directory=os.getcwd()+ '/messages'\n filename=str(self.user)+'_'+str(self.friend)\n text = self.text_send.GetValue()\n messages = mf.addMessage(self.user, self.friend, self.passw, text)\n mf.makeTextFile(self.user, self.friend, self.passw, messages)\n \n self.chat_log.LoadFile('/'.join((directory, filename)))\n self.text_send.SetValue(\"\")\n event.Skip()", "def listener(messages):\n for m in messages:\n chatid = m.chat.id\n print(str(chatid))\n if m.content_type == 'text':\n text = m.text\n tb.send_message(chatid, text)", "def comsume_msg(self, msg_type):", "def modeMsgBox(self, messageText):\n self.createMessage(messageText)", "def send(event=None): # event is passed by binders.\n msg = my_msg.get()\n print(\"This is send: \",type(msg))\n if msg == \"{quit}\":\n root.quit()\n if msg == \"Type your messages here.\" or msg == \"\" :\n pass\n else:\n final_msg = \"You: \" + msg\n msg_list.insert(END, final_msg)\n receive_msg = receive(msg.lower())\n rec_msg = \"Genie: \" + receive_msg\n msg_list.insert(END, rec_msg)\n my_msg.set(\"\")", "def event_loop(self):\n if self.message_counter:\n if not self.msg:\n self.showdialog()\n else:\n self.msg.setText(\n \"COMET encounterd {} error(s)\".format(self.message_counter).ljust(\n 70\n )\n )", "def SendMessage(self, event):\n pass", "def read_messages(self, msg_num):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username, font=self.title_font,\r\n bg=self.bg_color, height=2)\r\n user_label.pack(pady=5, padx=50)\r\n lbl_msg = Label(self.root, text=\"Message \" + str(msg_num), font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_msg.pack(pady=5, padx=10)\r\n self.refresh_button = Button(self.root, text=\"Refresh page\", font=self.text_font,\r\n bg=self.bg_color, command=lambda: self.refresh(msg_num))\r\n self.refresh_button.pack(padx=10, pady=10)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=15)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n text_widget = Text(messages_frame, width=50, height=15, 
font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n text_widget.pack()\r\n scrollbar_msg.config(command=text_widget.yview)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.go_back_read)\r\n button_send.pack(pady=5, side=BOTTOM)\r\n button_send = Button(self.root, text=\"see/close message\\ncontrol panel\",\r\n font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.new_window_messages(button_send))\r\n button_send.pack(pady=5, side=BOTTOM)\r\n if self.msg_list:\r\n if msg_num < len(self.msg_list):\r\n next_msg = Button(self.root, text=\"next message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num + 1))\r\n next_msg.pack(pady=5, padx=5, side=RIGHT)\r\n if msg_num > 1:\r\n previous_msg = Button(self.root, text=\"previous message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num - 1))\r\n previous_msg.pack(pady=5, padx=5, side=LEFT)\r\n text_widget.insert(END, \"from: \" + self.msg_list[msg_num - 1][2] + \"\\n\")\r\n text_widget.tag_add('sender', '1.0', '1.end')\r\n text_widget.tag_config('sender', font='none 14')\r\n\r\n text_widget.insert(END, self.msg_list[msg_num - 1][0])\r\n text_widget.tag_add('msg', '2.0', END)\r\n text_widget.tag_config('msg', font='none 12')\r\n\r\n text_widget.config(state=DISABLED)", "def send_message(self,contato,mensagem):\r\n #Open new chat on whatsapp web\r\n new_msg_button = self.driver.find_element_by_xpath(self.NEW_CHAT)\r\n new_msg_button.click()\r\n sleep(1)\r\n #Search the contact\r\n search_field = self.driver.find_element_by_xpath(self.SEARCH_CONTACT)\r\n search_field.click()\r\n search_field.send_keys(contato)\r\n sleep(1)\r\n #Click on the firts contact with the name that I told \r\n first_contact = self.driver.find_element_by_xpath(self.FIRST_CONTACT)\r\n first_contact.click()\r\n sleep(1.5)\r\n type_field = self.driver.find_element_by_xpath(self.TYPE_MSG)\r\n type_field.click()\r\n type_field.send_keys(mensagem)\r\n send_msg= self.driver.find_element_by_xpath(self.SEND_BUTTON)\r\n send_msg.click()\r\n sleep(1)", "def message_cb(self, msgType, title, message1, message2=\"\", message3=\"\"):\n #print(\"message_cb with %s STARTS\"%msgType)\n result = None\n\n # Display the correct dialogBox according the type\n if msgType == \"OK\" or msgType == \"Error\":\n dialogInfo = xbmcgui.Dialog()\n result = dialogInfo.ok(title, message1, message2,message3)\n elif msgType == \"YESNO\":\n dialogYesNo = xbmcgui.Dialog()\n result = dialogYesNo.yesno(title, message1, message2, message3)\n return result", "def send_message(self,message):\n connected=False\n self.driver_Lock.acquire()\n while(not connected):\n try:\n whatsapp_msg = self.driver.find_element_by_class_name('_2S1VP') #find text box element\n connected=True\n except Exception as exc:\n print(exc)\n sleep(1)\n\n if(isinstance(message,str)): #check if the message is of type string\n whatsapp_msg.send_keys(message) #input message\n whatsapp_msg.send_keys(Keys.SHIFT+Keys.ENTER) #create new line\n\n elif(isinstance(message,list)): #check if the message is of type list\n for line in message: #run through all the lines\n whatsapp_msg.send_keys(line) #input line\n whatsapp_msg.send_keys(Keys.SHIFT+Keys.ENTER) #create new line\n\n whatsapp_msg.send_keys(Keys.SHIFT+Keys.ENTER) #create new line\n whatsapp_msg.send_keys(\"-{}\".format(bot_name)) #add bot name tag\n\n whatsapp_msg.send_keys(Keys.ENTER) #send message\n 
self.driver_Lock.release() #release driver lock", "def update_messages():\n\n scrollbar = Scrollbar(root)\n scrollbar.pack(side=RIGHT, fill=Y)\n listbox = Text(root, wrap =WORD, yscrollcommand=scrollbar.set, background=\"#CCFFCC\", fg=\"black\", selectbackground=\"#003300\",\n highlightcolor=\"#0033CC\")\n\n msgs = []\n run = True\n while run:\n\n time.sleep(0.1) # update every 1/10 of a second\n new_messages = c1.get_messages() # get any new messages from client\n msgs.extend(new_messages) # add to local list of messages\n\n for msg in new_messages: # display new messages\n print(msg)\n #title_label = Label(text=str(msg), bg=\"#CCFFCC\", fg=\"black\", padx=34, pady=5, font=\"comicsansms 9 bold\",borderwidth=3,wraplength=300, relief=SUNKEN)\n #title_label.pack(side=TOP)\n\n listbox.insert(END, str(msg)+'\\n\\n')\n listbox.pack(fill=BOTH, padx=36)\n scrollbar.config(command=listbox.yview)\n\n if msg == \"{quit}\":\n root.destroy()\n run = False\n break", "def definir_mensaje(self, mensaje):\r\n self.mensaje_error.setText(mensaje)", "def mensagem(msg):\n mostra_linha()\n print(msg)\n mostra_linha()", "def sendMessage(driver, msg):\n # select correct input box to type msg\n input_box = driver.find_element(\n By.XPATH, '//*[@id=\"main\"]//footer//div[contains(@class, \"_2S1VP\")]')\n # input_box.clear()\n input_box.click()\n\n action = ActionChains(driver)\n action.send_keys(msg)\n action.send_keys(Keys.RETURN)\n action.perform()", "def on_commitMessageEdit_textChanged(self):\n self.__updateOK()", "def new_window_messages(self, button_see_all_msgs):\r\n # changing the button command to closing the window\r\n button_see_all_msgs.config(command=lambda: self.close_window(button_see_all_msgs))\r\n\r\n # creating the chat Tk object\r\n self.messages_window = Tk()\r\n self.messages_window.resizable(False, False)\r\n self.messages_window.config(bg=self.bg_color)\r\n self.messages_window.protocol(\"WM_DELETE_WINDOW\",\r\n lambda: self.close_window(button_see_all_msgs))\r\n\r\n chat_label = Label(self.messages_window, text=\"Hello \" + self.username +\r\n \"\\nHere are your messages\",\r\n bg=self.bg_color, font=self.title_font)\r\n chat_label.pack(padx=20, pady=10)\r\n chat_frame = Frame(self.messages_window)\r\n chat_frame.pack(padx=15, pady=15)\r\n scrollbar_chat = Scrollbar(chat_frame)\r\n scrollbar_chat.pack(side=RIGHT, fill=Y)\r\n text_chat = Text(chat_frame, width=30, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_chat.set)\r\n text_chat.pack()\r\n scrollbar_chat.config(command=text_chat.yview)\r\n for msg, encryption_data, sender_user in self.msg_list:\r\n text_chat.insert(END, \"from: \" + sender_user + \"\\n\")\r\n text_chat.insert(END, msg + \"\\n\\n\")\r\n text_chat.config(state=DISABLED)", "def showMessage(self):", "def iniciar_method(self):\r\n self.usuario = self.usuario_ingresado.text()\r\n contrasena = self.pass_in.text()\r\n self.senal.emit([self.usuario, contrasena])", "def esconder_mensaje(self):\r\n self.hide()", "def do_message(self, message):\r\n \r\n if not self.display_game:\r\n return\r\n \r\n if SlTrace.trace(\"message\"):\r\n if (self.prev_message is None\r\n or len(message.text) > len(self.prev_message)\r\n or len(message.text) > SlTrace.trace(\"message_len\", default=25) > 25):\r\n SlTrace.lg(f\"{len(message.text)}: {message}\")\r\n self.prev_message = message.text\r\n message.text = message.text[0:SlTrace.trace(\"message_len\", default=25)]\r\n SlTrace.lg(\"do_message(%s)\" % (message.text), \"execute\")\r\n if not self.run:\r\n return\r\n \r\n if (self.mw 
is None or not self.mw.winfo_exists()\r\n or self.msg_frame_base is None\r\n or not self.msg_frame_base.winfo_exists()):\r\n return\r\n \r\n self.wait_message(message)\r\n if self.msg_frame is not None:\r\n self.msg_frame.destroy() # Remove all message frames\r\n self.msg_frame = None\r\n self.msg_frame = Frame(self.msg_frame_base)\r\n self.msg_frame.pack(side=\"top\", expand=NO, fill=NONE)\r\n text = f'{message.text:40}'\r\n color = message.color\r\n font_size = message.font_size\r\n if font_size is None:\r\n font_size=40\r\n time_sec = message.time_sec\r\n\r\n \r\n if (self.mw is None or not self.mw.winfo_exists()\r\n or self.msg_frame is None\r\n or not self.msg_frame.winfo_exists()):\r\n return\r\n \r\n if self.mw is not None and self.mw.winfo_exists():\r\n if self.cur_message is not None:\r\n self.cur_message.destroy()\r\n self.cur_message = None\r\n width = self.get_width()\r\n if width < 500:\r\n width = 500\r\n message.msg = Message(self.msg_frame, text=text, width=width) # Seems to be pixels!\r\n message.msg.config(fg=color, bg='white',\r\n anchor=S,\r\n font=('times', font_size, 'italic'))\r\n message.msg.pack(side=\"top\")\r\n ###message.msg.pack(side=\"bottom\")\r\n self.cur_message = message\r\n if time_sec is not None:\r\n if self.speed_step >= 0:\r\n time_sec = self.speed_step # Modify for view / debugging\r\n end_time = datetime.now() + timedelta(seconds=time_sec)\r\n message.end_time = end_time", "def onPushButton_toMessageTree(self, event):\r\n\t\tMarketDataText = self.MarketData.GetMessage()\r\n\t\tself.MessageTree.SetMessage(MarketDataText)\r\n\t\tself.MessageTree.SetFocus()", "def callback_message( self, conn, mess):\n\n jid = mess.getFrom()\n props = mess.getProperties()\n text = mess.getBody()\n username = self.get_sender_username(mess)\n\n if username not in self.users.keys() + self.invited.keys():\n self.log.info(\"Ignored message from %s.\" % username)\n return\n\n self.log.debug(\"*** props = %s\" % props)\n self.log.debug(\"*** jid = %s\" % jid)\n self.log.debug(\"*** username = %s\" % username)\n self.log.debug(\"*** type = %s\" % type)\n self.log.debug(\"*** text = %s\" % text)\n\n # Ignore messages from before we joined\n if xmpp.NS_DELAY in props: return\n\n # If a message format is not supported (eg. encrypted), txt will be None\n if not text: return\n\n # Remember the last-talked-in thread for replies\n self._JabberBot__threads[jid] = mess.getThread()\n\n if ' ' in text:\n command, args = text.split(' ', 1)\n else:\n command, args = text, ''\n cmd = command\n self.log.debug(\"*** cmd = %s\" % cmd)\n\n # parse operators, commands, etc and if not, dump the message to the chat\n if self.apply_operator(mess, args):\n return\n\n if self.replace_text(username, mess):\n return\n\n if self.commands.has_key(cmd) and cmd != 'help':\n try:\n reply = self.commands[cmd](mess, args)\n except Exception, e:\n reply = traceback.format_exc(e)\n self.log.exception('An error happened while processing a message (\"%s\") from %s: %s\"' % (text, jid, reply))\n else:\n # In private chat, it's okay for the bot to always respond.\n # In group chat, the bot should silently ignore commands it\n # doesn't understand or aren't handled by unknown_command().\n default_reply = 'Unknown command: \"%s\". 
Type \"help\" for available commands.<b>blubb!</b>' % cmd\n if type == \"groupchat\": default_reply = None\n reply = self.unknown_command( mess, cmd, args)\n if reply is None:\n reply = default_reply\n\n if reply:\n self.send_simple_reply(mess,reply)\n\n self.log_to_mini_log(username, text)", "def handle_gui_example_three_intent(self, message):\n self.gui['sampleText'] = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Egestas sed tempus urna et pharetra pharetra massa massa ultricies. Aliquam sem et tortor consequat id porta nibh. Amet est placerat in egestas erat imperdiet sed. Ut ornare lectus sit amet est placerat in egestas erat. Iaculis eu non diam phasellus vestibulum lorem sed risus ultricies. Hac habitasse platea dictumst vestibulum rhoncus est pellentesque. Vulputate eu scelerisque felis imperdiet proin fermentum. Neque convallis a cras semper auctor neque. Pharetra magna ac placerat vestibulum lectus mauris ultrices eros in. Phasellus faucibus scelerisque eleifend donec pretium vulputate. Malesuada bibendum arcu vitae elementum curabitur vitae nunc. Tellus id interdum velit laoreet id donec. Diam donec adipiscing tristique risus nec. Nisi lacus sed viverra tellus in hac habitasse platea. Amet venenatis urna cursus eget nunc scelerisque viverra mauris in. Sit amet nisl suscipit adipiscing bibendum est ultricies. Nec ultrices dui sapien eget mi proin sed. Egestas dui id ornare arcu odio ut sem nulla. Rhoncus aenean vel elit scelerisque. Neque gravida in fermentum et sollicitudin. Pellentesque massa placerat duis ultricies lacus sed. Nunc id cursus metus aliquam eleifend mi. Eu feugiat pretium nibh ipsum consequat nisl. Aenean euismod elementum nisi quis eleifend quam adipiscing vitae. Est ante in nibh mauris cursus mattis. Sagittis eu volutpat odio facilisis mauris sit amet. At consectetur lorem donec massa sapien faucibus. Odio facilisis mauris sit amet. Quis ipsum suspendisse ultrices gravida dictum fusce. Sagittis nisl rhoncus mattis rhoncus urna neque viverra justo nec. Eget mi proin sed libero enim sed faucibus. Interdum velit euismod in pellentesque massa. Et netus et malesuada fames. Velit aliquet sagittis id consectetur purus. Condimentum lacinia quis vel eros donec ac odio tempor orci. Amet consectetur adipiscing elit pellentesque habitant. Eleifend mi in nulla posuere sollicitudin aliquam ultrices sagittis orci. Nisi porta lorem mollis aliquam ut porttitor leo a diam. Egestas integer eget aliquet nibh praesent tristique. Velit scelerisque in dictum non. Id volutpat lacus laoreet non curabitur gravida arcu ac. Suspendisse interdum consectetur libero id faucibus nisl tincidunt eget. Ipsum a arcu cursus vitae congue mauris. Duis at consectetur lorem donec massa. Orci sagittis eu volutpat odio facilisis mauris. Eget mauris pharetra et ultrices neque ornare. Commodo nulla facilisi nullam vehicula ipsum a. Arcu risus quis varius quam quisque. Gravida in fermentum et sollicitudin. Lacus laoreet non curabitur gravida arcu ac tortor dignissim. Netus et malesuada fames ac turpis. Ipsum dolor sit amet consectetur adipiscing. Tellus elementum sagittis vitae et leo duis ut diam quam. Vitae et leo duis ut diam quam nulla. Risus pretium quam vulputate dignissim. Justo laoreet sit amet cursus sit amet dictum sit. Blandit libero volutpat sed cras. Lacus sed viverra tellus in. Ornare lectus sit amet est placerat in egestas erat. Tortor dignissim convallis aenean et tortor at. 
Tempus quam pellentesque nec nam aliquam. Nisi scelerisque eu ultrices vitae auctor eu augue ut lectus. Consequat id porta nibh venenatis cras sed felis eget. Massa enim nec dui nunc mattis enim ut. Dignissim enim sit amet venenatis urna. Ac tincidunt vitae semper quis lectus nulla at. Sed felis eget velit aliquet sagittis. Vel turpis nunc eget lorem dolor sed viverra. Non consectetur a erat nam at lectus. Iaculis eu non diam phasellus vestibulum. Dolor sit amet consectetur adipiscing elit ut aliquam purus sit. Libero justo laoreet sit amet cursus sit. Tellus pellentesque eu tincidunt tortor. Maecenas volutpat blandit aliquam etiam erat velit scelerisque in. Semper risus in hendrerit gravida rutrum quisque non tellus orci. Diam in arcu cursus euismod quis viverra nibh cras pulvinar. Habitasse platea dictumst quisque sagittis purus sit amet volutpat consequat. Elit ut aliquam purus sit. Dui faucibus in ornare quam viverra orci sagittis eu. Purus ut faucibus pulvinar elementum integer. Condimentum lacinia quis vel eros donec ac odio tempor. At in tellus integer feugiat scelerisque varius morbi. Augue eget arcu dictum varius duis. Aliquam sem et tortor consequat id. Bibendum arcu vitae elementum curabitur vitae. Massa sed elementum tempus egestas sed sed. Suscipit adipiscing bibendum est ultricies. Etiam tempor orci eu lobortis.\"\n self.gui.show_page(\"paginationExample.qml\")", "def run_error_messages(self):\r\n self.error = \"\"\r\n #while self.error_queue:\r\n #self.error += (self.error_messages.get(\r\n # self.error_queue.popleft, None\r\n # ) + \" \")\r\n #self.error += self.I_source.query(\"STAT:QUE?\")\r\n #print(self.error)\r\n #self.I_source.write(\"STAT:QUE:CLE\")\r\n #self.message_box.setText(self.error)\r\n #self.message_box.exec_()\r", "def doMessageWindow(msg):\n _loadMsgSettings()\n if settings.has_key(msg):\n return\n global dialog\n dialog = QtGui.QDialog()\n msgDialog = ui.message.Ui_Dialog()\n msgDialog.setupUi(dialog)\n msgDialog.messageLabel.setText(msg)\n dialog.exec_()\n if msgDialog.showAgainCheckBox.isChecked():\n settings[msg] = True\n _saveMsgSettings()", "def direct_message(self, user, msg, num):\n PAUSE = 1\n logging.info('Send message {} to {}'.format(msg,user))\n self.driver.get(self.direct_url)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input')[0].send_keys(user)\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')[0].click() #Edge case to get rid of notification\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button')[0].click()\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button')[0].click()\n time.sleep(PAUSE)\n # The message will be placed and sent\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')[0].send_keys(msg)\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button')[0].click()\n # Special feature involving reacting with heart\n for x in range(num):\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/button[2]')[0].click()\n time.sleep(PAUSE)", "def __addmsg(self, msg: str) -> None:\n # region Docstring\n # endregion\n self.record += msg\n self.textbox.kill()\n self.textbox = UITextBox(\n 
html_text=self.record,\n relative_rect=Rect((0, 0), (self.size[0], self.size[1] - 25)),\n container=self,\n manager=self.ui_manager,\n )", "def notifyUser(self, message):\n\n\n msg = QtWidgets.QMessageBox(self)\n msg.setText(message)\n msg.exec_()", "def initM(self, num):\n prefix = C_Messaging.PREFIX\n if not wait_el_xpath_click(self.driver, C_Messaging.PATH_BTN_CREATE):\n logging.info('{0}: Create new message unsucceed.'.format(prefix))\n self.fail('{0}: Create new message unsucceed.'.format(prefix))\n recipients = wait_el_xpath(self.driver, C_Messaging.PATH_RECIPIENTS)\n action(recipients, Commands.CLEAR)\n action(recipients, Commands.CLICK)\n\n # phone number: 147 8230 5348\n for s in num:\n self.driver.press_keycode(Keycode.get(self, s))\n\n self.driver.press_keycode(Keycode.ENTER)\n\n text_editor = wait_el_xpath(self.driver, C_Messaging.PATH_TEXT_EDITOR)\n return text_editor", "def Dialog(self, usrId: int, message: str, keybaord=None):\n self.session.method(\"messages.send\",\n {\n \"user_id\": usrId,\n \"keyboard\": keybaord,\n \"message\": message\n })", "def MessageBox(self, stringvalue, boxtype=\"error\"):\n messagebox = QMessageBox()\n messagebox.setText(stringvalue)\n messagebox.exec()\n if boxtype == \"error\":\n CoreLoadConfig.ConfigHandler.ErrorWriteLogger(stringvalue,self.LogBrowser)\n if boxtype == \"access\":\n CoreLoadConfig.ConfigHandler.AccessWriteLogger(stringvalue,self.LogBrowser)", "def msg_event(self, event):\r\n pass", "def on_message(mosq, obj, msg):\n print(msg.topic + \" - \" + str(msg.payload))\n nodes = msg.topic.split('/')\n global timeoutstarted\n global timeoutdisplayblocks\n global myLcdManager\n if nodes[0]=='clients':\n if nodes[2]=='configure':\n if str(msg.payload) == 'reboot':\n os.system('reboot')\n else:\n myLcdManager = lcd_manager.LcdManager(sortedlist, config)\n processRoundConfig(str(msg.payload))\n timeoutstarted = 0.0\n timeoutdisplayblocks = 0\n elif nodes[2] == 'instructions':\n myLcdManager.display(str(msg.payload), 20, \"0\")\n #start timer?\n if 'timeout' in roundconfig and roundconfig['timeout'] > 0.0:\n resetBlocks = True\n timeoutstarted = time.time()\n elif nodes[2] == 'timeout':\n roundconfig['timeout'] = float(str(msg.payload))\n elif nodes[2] in controlids:\n ctrlid = nodes[2]\n if nodes[3] == 'enabled':\n if str(msg.payload) == \"0\":\n roundconfig['controls'][ctrlid]['enabled'] = False\n #switch it off\n myLcdManager.display(\" \", config['local']['controls'][ctrlid]['display']['width'], ctrlid)\n else:\n roundconfig['controls'][ctrlid]['enabled'] = True\n #switch it on\n myLcdManager.display(roundconfig['controls'][ctrlid]['name'], config['local']['controls'][ctrlid]['display']['width'], ctrlid)\n elif nodes[3] == 'name':\n if str(msg.payload) == '':\n myLcdManager.clear(ctrlid)\n else:\n myLcdManager.display(str(msg.payload), config['local']['controls'][ctrlid]['display']['width'], ctrlid, False)\n elif nodes[0] == 'server':\n if nodes[1] == 'ready':\n mess = str(msg.payload)\n if mess == 'started':\n myLcdManager = lcd_manager.LcdManager(sortedlist, config)\n client.publish(\"server/register\", json.dumps(config['interface']))\n elif mess == 'ready':\n global hasregistered\n if not hasregistered:\n hasregistered = True\n client.publish(\"server/register\", json.dumps(config['interface']))\n elif mess == 'poweroff':\n os.system('poweroff')", "def set_message(self, node_uuid, index, data):\n try:\n self.lcd.clear()\n self.lcd.message(data)\n except Exception:\n logger.exception('Exception when displaying message')", "def 
handle_message() -> Response:\n commend = request.get_json()[\"message\"][\"text\"]\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n\n if commend == \"/start\":\n txt = \"Welcome to the shopping bot.\"+'\\n'+\"please enter category, or type popular to get the most popular searches \"\n elif str(commend).lower() in items:\n order[0] = str(commend)\n txt = \"choose color\"\n elif str(commend).lower() in colors:\n if order[0] == 0:\n txt = \"choose category\"\n order[1] = str(commend)\n txt = \"choose size\"\n elif str(commend).lower() in size:\n order[2] = str(commend)\n rec(chat_id, order)\n txt = get_url(order)\n elif str(commend).lower() == \"popular\":\n txt = get_popular(orders_dic)\n else:\n txt = \"try again\"\n # print(orders_dic)\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n print(chat_id)\n requests.get(f\"https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={chat_id}&text={txt}\")\n return Response(\"Success\")", "def run_chat(self, auto_send_receipts=False):\n for message in self.receive_messages():\n print(message)\n\n if message.payment:\n for func in self._payment_handlers:\n func(message.source, message.payment)\n continue\n\n if not message.text:\n continue\n\n for _, regex, func in self._chat_handlers:\n match = re.search(regex, message.text)\n if not match:\n continue\n\n try:\n reply = func(message, match)\n except Exception as e: # noqa - We don't care why this failed.\n print(e)\n continue\n\n if isinstance(reply, tuple):\n stop, reply = reply\n else:\n stop = True\n\n\n # In case a message came from a group chat\n group_id = message.group_info.get(\"groupId\")\n\n # mark read and get that sweet filled checkbox\n try:\n if auto_send_receipts and not group_id:\n self.send_receipt(recipient=message.source, timestamps=[message.timestamp])\n\n if group_id:\n self.send_group_message(recipient_group_id=group_id, text=reply)\n else:\n self.send_message(recipient=message.source, text=reply)\n except Exception as e:\n print(e)\n\n if stop:\n # We don't want to continue matching things.\n break", "def stand_by_msg(msg: str = \"\"):\n print(msg)\n input(\"Pressez une touche pour continuer...\")", "def text_message(self, update, context):\n # check mode\n if self.adding_meals:\n # text from the message is retrieved\n typed_meal = update.message.text\n # we get the instance from the meal list. It might be None\n meal = self.meal_list.get(typed_meal)\n try:\n # might produce an AttributeError if ingridients is None\n # every ingridient in the meal is checked\n for ingridient in meal.ingridients:\n # if it's already in self.list the quantity increases\n if ingridient.name in self.list.keys():\n self.list[ingridient.name][1] += 1\n else:\n # the instance is added to the list\n self.list[ingridient.name] = [ingridient, 1]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except AttributeError:\n to_write = MESSAGES[\"meal_error\"]\n # message is send\n self.send_message(update, context, to_write)\n # check mode\n elif self.adding_ingridients:\n # text from the message is retrieved\n typed_ingridient = update.message.text\n # we get the instance from the ingridients list. 
It might be None\n ingridient = self.ingridients.get(typed_ingridient)\n try:\n # might produce an AttributeError if ingridients is None\n # if it's already in self.list the quantity increases\n if ingridient.name in self.list.keys():\n self.list[ingridient.name][1] += 1\n else:\n # the instance is added to the list\n self.list[ingridient.name] = [ingridient, 1]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except AttributeError:\n to_write = MESSAGES[\"add_ingridient_error\"]\n # message is send\n self.send_message(update, context, to_write)\n # check mode\n elif self.removing_ingridients:\n # text from the message is retrieved\n typed_ingridient = update.message.text\n try:\n # might produce a KeyError if typed_meal is not in self.list\n # decreases amounot of the ingridient\n self.list[typed_ingridient][1] -= 1\n # remove igridient from list when the quantity is 0\n if self.list[typed_ingridient][1] == 0:\n del self.list[typed_ingridient]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except KeyError:\n to_write = MESSAGES[\"remove_ingridient_error\"]\n # message is send\n self.keyboard = \"remove_ingridients\"\n self.send_message(update, context, to_write)", "def handle_groupchat_message(self, msg):\n self.xmpp.event('groupchat_message', msg)\n self.xmpp.event(\"muc::%s::message\" % msg['from'].bare, msg)", "def client_message_handler(self, message, client):\n LOG.debug(f\"Разбираем сообщение: {message}\")\n if (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_PRESENCE\n and s.KEY_TIME in message\n and s.KEY_USER in message\n ):\n if message[s.KEY_USER][s.KEY_ACCOUNT_NAME] not in self.names.keys():\n self.names[message[s.KEY_USER][s.KEY_ACCOUNT_NAME]] = client\n MSG.send(client, s.RESPONSE_200)\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Имя пользователя уже занято.\"\n MSG.send(client, response)\n self.clients.remove(client)\n client.close()\n return\n # Если это сообщение, то добавляем его в очередь сообщений.\n # Ответ не требуется.\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_MESSAGE\n and s.KEY_TIME in message\n and s.KEY_TO in message\n and s.KEY_FROM in message\n and s.KEY_MESSAGE in message\n ):\n self.messages.append(message)\n return\n # Если клиент выходит\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_EXIT\n and s.KEY_ACCOUNT_NAME in message\n ):\n self.clients.remove(self.names[message[s.KEY_ACCOUNT_NAME]])\n self.names[message[s.KEY_ACCOUNT_NAME]].close()\n del self.names[message[s.KEY_ACCOUNT_NAME]]\n return\n # Иначе отдаём Bad request\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Запрос не корректен\"\n MSG.send(client, response)\n return", "def message(self, msg):\n if msg['type'] in ('chat', 'normal'):\n msg.reply(\"Thanks for sending\\n%(body)s\" % msg).send()", "def send_message(self, message:str):\n self.chat.click()\n text_box = self.chat.find_element_by_xpath(\"//div[@class='_2_1wd copyable-text selectable-text' and @data-tab='6']\")\n text_box.click()\n text_box.send_keys(message)\n time.sleep(0.1)\n send_button = self.chat.find_element_by_xpath(\"//button[@class='_1E0Oz']\")\n send_button.click()", "def text(message):\n global list_messages\n room = session.get('room')\n msg = session.get('name') + ':' + message['msg']\n list_messages.append(msg)\n addNewMsg(message,session)\n 
print ('size of list_messages ' + str(len(list_messages)) + ', session ' + str(session))\n emit('message', {'msg': msg}, room=room)", "def Event(self, gui):\n while True:\n event, values = gui.read()\n if event == sg.WIN_CLOSED: # if user closes window or clicks cancel\n break\n if event == 'Entrar':\n if values['user']=='Celso' and values['senha']=='35316':\n print(\"Ola, %s\" % values['user'] ) \n pass", "def handle_gui_example_one_intent(self, message):\n self.gui.show_text(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec placerat varius turpis porta scelerisque. Nam feugiat, lectus a ultricies tempus, mi sem tempor felis, vitae laoreet nisi ipsum vitae mauris.\")", "def handle_text_messages(self, update, context):\n\n # Split user input into single words\n words = set(update.message.text.lower().split())\n logging.debug(f'Received message: {update.message.text}')\n\n # For debugging: Log users that received something from bot\n chat_user_client = update.message.from_user.username\n if chat_user_client == None:\n chat_user_client = update.message.chat_id\n\n\n # Possibility: received command from menu_trigger\n for Trigger in self.menu_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.show_menu(update, context)\n logging.info(f'{chat_user_client} checked out the menu!')\n\n return\n\n\n # Possibility: received command from loan_stats_trigger\n for Trigger in self.loan_stats_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n #self.send_textfile('under_construction.txt', update, context)\n self.show_loan_stats(update, context)\n self.send_signature(update, context)\n logging.info(f'{chat_user_client} got loan stats!')\n\n return\n\n # Possibility: received command from il_trigger\n for Trigger in self.il_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.show_il(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get IL info!')\n\n return\n\n # Possibility: received command from assets_trigger\n for Trigger in self.assets_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.self.show_assets(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get asset info!')\n\n return", "def list_messages(self):", "async def on_message(self, msg: Message):\r\n from_contact = msg.talker()\r\n text = msg.text()\r\n type = msg.type()\r\n room = msg.room()\r\n #\r\n username = from_contact.name\r\n if username=='KFu':\r\n print('message from myself')\r\n return\r\n # 不处理群消息\r\n # if room is None:\r\n if msg.type() == Message.Type.MESSAGE_TYPE_IMAGE:\r\n\r\n print('__image')\r\n image_file_box = await msg.to_file_box()\r\n filename='p'+str(time.time())+'.jpg'\r\n\r\n await image_file_box.to_file(file_path=filename,overwrite=True)\r\n inputdata=\"#pic#\"+filename\r\n bot = self.bm.run(username, inputdata)\r\n if bot is not None:\r\n # print('bot',bot)\r\n # print('bot replys',bot.replys[-1])\r\n # print('bot.replys_index',bot.replys_index)\r\n for i in range(bot.replys_index):\r\n bot, rdict = self.tm.run(bot)\r\n print('rdict',rdict)\r\n\r\n if len(list(rdict.keys()))==0:continue\r\n if list(rdict.keys())[0] == \"str\":\r\n print('reply str')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n print('ready')\r\n await conversation.ready()\r\n 
print(list(rdict.values())[0])\r\n await conversation.say(list(rdict.values())[0])\r\n elif list(rdict.keys())[0] == \"pic\" or 'mov':\r\n print('reply pic/mov')\r\n\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n\r\n await conversation.ready()\r\n try:\r\n file_box = FileBox.from_file(list(rdict.values())[0])\r\n except Exception as e:\r\n print('file box error',e)\r\n file_box='嗯嗯'\r\n await conversation.say(file_box)\r\n\r\n elif msg.type() == Message.Type.MESSAGE_TYPE_TEXT:\r\n inputdata = \"#str#\" + msg.text()\r\n print('————text')\r\n\r\n bot = self.bm.run(username, inputdata)\r\n if bot is not None:\r\n # print('bot', bot)\r\n # print('bot replys',bot.replys[-1])\r\n # print('bot.replys_index',bot.replys_index)\r\n for i in range(bot.replys_index):\r\n bot, rdict = self.tm.run(bot)\r\n print('rdict',rdict)\r\n if len(list(rdict.keys()))==0:continue\r\n if list(rdict.keys())[0] == \"str\":\r\n print('reply str')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n\r\n await conversation.ready()\r\n print('rdict[splitNum:]',list(rdict.values())[0])\r\n await conversation.say(list(rdict.values())[0])\r\n elif list(rdict.keys())[0] == \"pic\" or 'mov':\r\n print('reply pic/mov')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n\r\n await conversation.ready()\r\n try:\r\n file_box = FileBox.from_file(list(rdict.values())[0])\r\n except Exception as e:\r\n print('file box error',e)\r\n file_box='嗯嗯'\r\n await conversation.say(file_box)\r\n else:\r\n print('__new for dict')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n await conversation.ready()\r\n await conversation.say('暂时不支持这种类型的消息哦')", "def sendText(self, messageText):\n # send an event, which will set the text on the\n #print \"got a message\"\n messenger.send(\"setText\", [messageText])", "def __init__(self, name, email, status = \"offline\", message = \"\"):\n\n #Used to keep track of the borderwidth when highlighted\n self._border_width = 0\n \n self._layer = gui.Layer()\n self._border = gui.Layer()\n \n self._border.blending(False)\n self._border.set_background_color(\"black\")\n \n self._layer.set_border_type(\"flat\")\n \n self._email = email\n self._status = gui.TextBox(text=\"N\", \n pos=(0,0), \n width = globals.MEMBER_STATUS_WIDTH)\n \n self._nick = name\n self._nickname = gui.TextBox(pos=(0,1), \n width = globals.MEMBER_NAME_WIDTH, \n justify=\"left\")\n self.set_nickname(name)\n \n self._mess = message\n self._message = gui.TextBox(pos=(0,2), \n width = globals.MEMBER_MESSAGE_WIDTH, \n justify=\"left\")\n self.set_message(message)\n \n self._selected = False\n \n self.set_status(status)\n \n self._layer.add(self._status)\n self._layer.add(self._nickname)\n self._layer.add(self._message)\n \n self._border.add(self._layer)\n \n self._group = \"\"\n self._window = None\n \n self._conv = []\n \n #bindings\n self._status.bind(gui.globals.CLICKED, self.clicked)\n self._nickname.bind(gui.globals.CLICKED, self.clicked)\n self._message.bind(gui.globals.CLICKED, self.clicked)\n \n self._status.bind(gui.globals.DBL_CLICKED, self.start_conversation)\n self._nickname.bind(gui.globals.DBL_CLICKED, self.start_conversation)\n self._message.bind(gui.globals.DBL_CLICKED, self.start_conversation)", "def _send_message(self, e: Event):\n\n message = self.message_text.get(\"1.0\", 'end-1c').replace('\\n', \"\")\n\n if len(message) > 0:\n self.add_message_to_chat('you: ' + message)\n 
self._clear_message_text()\n self.connection_socket.send(bytes('them: ' + message, 'utf-8'))", "async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def __init__(self, controller):\n self.controller = controller\n\n self.top = tkinter.Tk()\n self.top.title(\"Chatter\")\n\n self.input = tkinter.StringVar() # For the messages to be sent.\n self.input.set(\"\")\n\n \"\"\"Message box\"\"\"\n messages_frame = tkinter.Frame(self.top)\n scrollbar = tkinter.Scrollbar(messages_frame)\n self.msg_list = tkinter.Listbox(messages_frame, height=15, width=60, yscrollcommand=scrollbar.set)\n scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)\n self.msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\n self.msg_list.pack()\n messages_frame.pack()\n\n\n\n\n \"\"\"Input box and send button\"\"\"\n entry_field = tkinter.Entry(self.top, textvariable=self.input)\n entry_field.bind(\"<Return>\", self.controller.msg_to_send)\n entry_field.pack()\n send_button = tkinter.Button(self.top, text=\"Send\", command=lambda: self.controller.msg_to_send(self.input))\n send_button.pack()\n \"\"\"Send file button\"\"\"\n file_button = tkinter.Button(self.top, text=\"Send file\", command=lambda: self.controller.find_file())\n file_button.pack()\n\n \"\"\"On closing the window\"\"\"\n self.top.protocol(\"WM_DELETE_WINDOW\", self.controller.close)", "def _process_messages(self):\r\n \r\n self._print(\"%s: Starting _process messages, looking out for special messages:\" \\\r\n % (self._clientnr))\r\n \r\n # Set some expected messages.\r\n expected = {}\r\n expected['clientconfirm'] = cb.CLIENTCONFIRM[:cb.CLIENTCONFIRM.find('_')]\r\n expected['waitwhat'] = cb.WAITWHATCLIENT[:cb.WAITWHATCLIENT.find('_')]\r\n \r\n for key in expected.keys():\r\n self._print(\"%s: Special message '%s': '%s'\" % \\\r\n (self._clientnr, key, expected[key]))\r\n \r\n # Run idefinitively\r\n while True:\r\n \r\n # Get new incoming commands.\r\n cmds = self.udp.getCommands()\r\n self._print(\"%s: Found %d new UDP commands.\" % \\\r\n (self._clientnr, len(cmds)))\r\n # Add new commands to the queue.\r\n for c in cmds:\r\n # Parse the message.\r\n target, message, clienttime = c.text.split('|')\r\n self._print(\"%s: Found message (%s to %s, t=%s) '%s'\" % \\\r\n (self._clientnr, c.ip, target, clienttime, message))\r\n # Only process messages from the server.\r\n if c.ip == self._servernr:\r\n # Check if this is a client confirmation message.\r\n if expected['clientconfirm'] in message:\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Only process the messages that were directed at this client.\r\n elif target in ['None', str(self._clientnr)]:\r\n # Check if this is a confused message to find out what\r\n # the client is waiting for.\r\n if expected['waitwhat'] in message:\r\n self._print(\"%s: Received '%s' from server\" % \\\r\n (self._clientnr, message))\r\n # Parse the waitwhat message, which looks like this:\r\n # 'waitwhatclient_expected=%s'\r\n msg, xpctd = message.split('_')\r\n xpctd = xpctd[xpctd.find('=')+1:]\r\n # Re-send the last version of the expected message.\r\n if xpctd in self._lastmessage.keys():\r\n 
self._outgoing.append(self._lastmessage[xpctd])\r\n self._print(\"%s: Resending the last version of expected message '%s': '%s'\" % \\\r\n (self._clientnr, xpctd, self._lastmessage[xpctd]))\r\n else:\r\n self._print(\"%s: Do not have a last version of expected message '%s'\" % \\\r\n (self._clientnr, xpctd))\r\n else:\r\n # Add the message to the queue.\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Chuck a message out if the queue is getting too long.\r\n if len(self._incoming) > self._maxincominglen:\r\n self._incominglock.acquire()\r\n delmsg = self._incoming.pop(0)\r\n self._incominglock.release()\r\n self._print(\"%s: Removed message '%s' from the incoming queue\" \\\r\n % (self._clientnr, delmsg))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't for me (%s)\" \\\r\n % (self._clientnr, message, self._clientnr))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't from the server (%s)\" \\\r\n % (self._clientnr, message, self._servernr))\r\n \r\n # Process outgoing commands.\r\n while len(self._outgoing) > 0:\r\n # Send a message to the server.\r\n self._outgoinglock.acquire()\r\n message = self._outgoing.pop(0)\r\n self._outgoinglock.release()\r\n self._print(\"%s: Sending '%s' to %s\" % \\\r\n (self._clientnr, message, self._servernr))\r\n msg = 'cmd,%s|%s' % (self._servernr, message)\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n for i in range(self._message_reps):\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n # Store the message in the 'last sent' dict.\r\n if '_' in message:\r\n m = message[:message.find('_')]\r\n else:\r\n m = message\r\n self._lastmessage[m] = message", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def handleMessage(msg):", "def on_Add_new_class_button_clicked(self):\n Add_new_class = Shangke_message()\n Add_new_class.exec_()\n result = Shangke_message.result\n # print(result)\n self.lineEdit.setText(result['Cname'])\n self.lineEdit_2.setText(result['Sclass'])\n self.lineEdit_3.setText(result['ClassTime'])\n self.lineEdit_4.setText(result['Tno'])\n self.lineEdit_6.setText(result['Date'])", "def absenden(self):\n\n message = self.textFeld.toPlainText()\n self.c.send(message)\n self.textFeld.clear()", "def joingroup_command(update,context):\n update.message.reply_text('Want to chat with other CTF players or ask questions to admins? 
Use the following channel:\\r\\nhttps://t.me/joinchat/CYsj-xwzlFqIbQPPeo04bw')", "def sendmessage(self):\n \n self.message.parentItem = self.rxtxcontroller.transmittable.rootItem\n self.message.can_id = self.idInput.toPlainText()\n self.message.dlc = self.lengthInput.value()\n self.message.cycle_time = self.cycleInput.toPlainText()\n self.message.time = int(round(time.time() * 1000))\n self.message.rxtx = \"TX\"\n self.message.count = 1\n self.message.data = self.dataInput.toPlainText()\n self.accept()", "def submit(self, msg):\n if len(msg) == 0:\n return\n self.prompt_win.clear()\n self.prompt_win.addstr(\"> \")\n self.refresh_prompt()\n if not self.client:\n self.add_msg(\"Error: Not Connected to Server\")\n self.refresh_prompt()\n return\n self.add_msg(\"You: \" + msg)\n self.client.send(msg)", "def text_message(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\n f\"Thank you for sending: {update.message.text},\\n\" +\n f\"but I am waiting only for images...\")", "def display_messages(self, layout):", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def juguemos(self):\r\n msje = {\"status\": \"queremos_jugar\",\r\n \"data\": {\"nada\": None}}\r\n sleep(0.1)\r\n self.server_signal_2.emit(msje)", "def show_msgdialog(self):\n log_msg = log.getBufferAsString()\n if not log_msg:\n return\n\n # initialise message dialog\n msg_dialog = msgdialog.MessageDialog(None, -1, \"\")\n msg_dialog.msg_list.InsertColumn(0, \"\")\n\n # clear dialog and show new messages\n msg_dialog.msg_list.Freeze()\n msg_dialog.msg_list.DeleteAllItems()\n for line in log_msg.split('\\n'):\n msg_dialog.msg_list.Append([line, ])\n msg_dialog.msg_list.SetColumnWidth(0, -1)\n msg_dialog.msg_list.Thaw()\n msg_dialog.ShowModal()\n msg_dialog.Destroy()", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "async def send_commands(ans: Message):\n await ans.answer(all_commands)", "def draw_message_box(self):\r\n length = len(self.__lines)\r\n\r\n # Build the Dialogue Box\r\n background = pygame.transform.scale(BACKGROUND_PNG, (WINDOW_WIDTH, WINDOW_HEIGHT // 3))\r\n rect = background.get_rect()\r\n rect.center = (WINDOW_WIDTH // 2, 2 * WINDOW_HEIGHT // 3 + 100)\r\n SCREEN.blit(background, 
rect)\r\n\r\n for offset in range(length):\r\n draw_text_abs(self.__lines[offset], 24, WINDOW_WIDTH // 2, 2 * WINDOW_HEIGHT // 3 + (offset * 45 + 50))\r\n\r\n pygame.display.update()", "def show_messages(self):\n for msg in self.messages:\n print msg['text']", "def MultiMessage(self, *args, **kwargs):\n pass", "def show_data(self, msg):\n\n message = msg\n # self.ECGWin.append(message)\n self.getter.get(message)\n # self.ECGWin.append(msg2)\n # self.ECGWin.append(msg3)", "def main():\n root = tkinter.Tk()\n delegate = ReceiveMessages()\n mqtt_client = com.MqttClient(delegate)\n mqtt_client.connect_to_ev3()\n\n progress_bar = setup_gui(root, mqtt_client)\n delegate.progress_bar = progress_bar\n\n root.mainloop()", "async def on_ready(self):\n self.send_message = self.bot.get_cog('Text').send_message", "def test_im_chat_messages(self):\n pass", "def createDialogBox(self, x=-0.1, y=-0.85, texts=['Insert Dialog Here'], \n textColors=['orange'],displayNextMessage=False):\n if self.dialogBox == None:\n if globals.isTutorial:\n texts[0] = \" ====================== Cosmica Tutorial Step: %s of %s ======================\\n\\n%s\" % (globals.tutorialStep, globals.tutorialTotalSteps, texts[0])\n self.dialogBox = dialogbox.DialogBox(path=self.guiMediaPath, x=x, y=y, texts=texts, textColors=textColors)\n self.dialogBox.setMyMode(self)\n self.gui.append(self.dialogBox)", "def __init__(self, widget, texto):\n super(Nuevo, self).__init__(\n flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT\n )\n self.modelo = widget.modelo\n self.campos = Formulario(self.modelo)\n self.set_title('Nuevo ' + self.modelo._meta.verbose_name)\n self.set_defaults(widget.defaults)\n for e in self.campos:\n hbox = gtk.HBox(False, 0)\n self.vbox.pack_start(hbox)\n hbox.pack_start(e.label)\n hbox.pack_start(e.widget)\n self.campos[0].set_text(texto)\n but_guardar = gtk.Button('Guardar')\n self.action_area.pack_start(but_guardar)\n but_guardar.connect('clicked', self.guardar)\n self.show_all()", "async def send_interactive(\n self, messages: Iterable[str], box_lang: str = None, timeout: int = 15\n) -> List[discord.Message]:\n messages = tuple(messages)\n ret = []\n\n for idx, page in enumerate(messages, 1):\n if box_lang is None:\n msg = await self.send(page)\n else:\n msg = await self.send(box(page, lang=box_lang))\n ret.append(msg)\n n_remaining = len(messages) - idx\n if n_remaining > 0:\n if n_remaining == 1:\n plural = \"\"\n is_are = \"is\"\n else:\n plural = \"s\"\n is_are = \"are\"\n\n omega = SPECIAL_AUTHOR_CASES.get(self.author.id, OMEGA)\n query = await self.send(\n \"There {} still {} message{} remaining. 
\"\n f\"Type {random.choice(omega)} to continue.\"\n \"\".format(is_are, n_remaining, plural)\n )\n try:\n resp = await self.bot.wait_for(\n \"message\",\n check=MessagePredicate.lower_contained_in(FULL_MORE_LIST, self),\n timeout=timeout,\n )\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n break\n else:\n try:\n await self.channel.delete_messages((query, resp))\n except (discord.HTTPException, AttributeError):\n # In case the bot can't delete other users' messages,\n # or is not a bot account\n # or channel is a DM\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n return ret", "def receive_message(self, message):", "def safe_message_dialog(self, markup, msgtype=gtk.MESSAGE_ERROR):\n gtk.gdk.threads_enter()\n mbox = gtk.MessageDialog(type=msgtype, buttons=gtk.BUTTONS_OK)\n mbox.set_markup(markup)\n mbox.run()\n mbox.destroy()\n gtk.gdk.threads_leave()", "def msg(self, text=None, from_obj=None, session=None, options=None, **kwargs):\n super(Bot, self).msg(text=text, from_obj=from_obj, session=session, options=options, **kwargs)", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "async def _mafia_chat(self, ctx: Context, *mafias: discord.Member):\n\n guild: discord.Guild = ctx.guild\n\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n )\n }\n\n for user in mafias:\n overwrites[user] = discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True\n )\n\n channel = await guild.create_text_channel(\n \"mafia-chat\", overwrites=overwrites\n )\n\n await ctx.send(_(\"Created {}!\").format(channel.mention))", "def message(self, text):\n\n if( rpi_device ):\n self.clear()\n for char in text:\n if char == '\\n' or char == '^':\n self.cmd(0xC0) # new line\n else:\n self.cmd(ord(char),True)", "def multiple_messages(self, messages):\n for message in messages:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)", "def send_to_gui(self, message):\n message.on_thread_side()\n self.queue.put(message)\n self.sig.set()\n logger.debug(\"Message %r has been send to GUI\", message.message_id)", "def SendMessage(self, kind, message):\r\n \r\n self.messageWindow.SendMessage(kind, message)", "def Message(self, *args, **kwargs):\n pass", "def _initilize_message(self):\n\n message_label = ttk.Label(\n self._frame, text=\"Are you sure you want to delete this recipe?\")\n\n message_label.grid(row=0, column=0, columnspan=2, padx=5, pady=5)", "def main():\n\n bus_controller = BusController()\n steve = TelegramController(\"990223452:AAHrln4bCzwGpkR2w-5pqesPHpuMjGKuJUI\")\n message_sender = MessagesSender()\n db = DBManager()\n gui = GUI()\n\n message_sender.connect(bus_controller=bus_controller)\n bus_controller.connect(telegram_bot=steve, message_sender=message_sender)\n steve.connect(bus_controller=bus_controller, gui=gui, message_sender=message_sender, data_base=db)\n gui.connect(bus_controller=bus_controller, telegram_controller=steve, message_sender=message_sender, data_base=db)\n\n message_sender.start()\n bus_controller.start()\n steve.start()\n gui.start()", "def new_message_from_conn(self, friend, msg):\n print(\"new_msg signal activated with friend\",friend,\"and msg\",msg)\n\n if not self.stack.get_child_by_name(friend):\n new_chat_window = chat_layout.ChatLayout(orientation=Gtk.Orientation.VERTICAL,friend=friend)\n 
new_chat_window.show_all()\n self.stack.add_titled(new_chat_window, friend, friend)\n\n child = self.move_to_child(friend)\n child.append_friend_text(msg)", "def show_message_dialog(text):\n\n dialog = QDialog()\n interface = messageGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n dialog.exec_()", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def process_messages(self):\n pass" ]
[ "0.6545797", "0.645299", "0.625715", "0.6236866", "0.61671734", "0.61267936", "0.6124985", "0.61188024", "0.6083885", "0.6070145", "0.6039385", "0.59454966", "0.59306055", "0.59265524", "0.5921751", "0.58650035", "0.58130616", "0.5776519", "0.57635653", "0.57403535", "0.57364553", "0.57331175", "0.5727542", "0.57095647", "0.5707396", "0.5658448", "0.5637678", "0.56269133", "0.56265396", "0.56238693", "0.5621812", "0.5618424", "0.56152236", "0.5597813", "0.5570093", "0.55629826", "0.55609053", "0.55495733", "0.5544009", "0.55360556", "0.55315965", "0.55211705", "0.55014205", "0.5494995", "0.5493266", "0.54878116", "0.54822576", "0.54808575", "0.54716986", "0.54632217", "0.5452209", "0.54504424", "0.54460853", "0.54406995", "0.5429427", "0.5428891", "0.542516", "0.54200095", "0.541989", "0.5417246", "0.54165465", "0.54065144", "0.54062915", "0.5405177", "0.539243", "0.5382288", "0.53761077", "0.5369001", "0.5367789", "0.53653383", "0.53619677", "0.5361851", "0.5356673", "0.5352545", "0.5350373", "0.53499913", "0.5340799", "0.53406405", "0.5337496", "0.53328985", "0.5328562", "0.5321897", "0.5320602", "0.53109664", "0.5306381", "0.5302771", "0.5302771", "0.5302771", "0.529782", "0.5296352", "0.52884245", "0.52854455", "0.5276559", "0.52750534", "0.5271989", "0.52711606", "0.52697915", "0.5263037", "0.525801", "0.52561253" ]
0.63634753
2
Connected to a QCheckBox; changes the filter state and refreshes the messages
def filterToggleEvent(self, event):
    self.filtrar_mensajes = event
    if self.mostrando_grupo:
        self.cargar_mensajes_grupo(self.mostrando_grupo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_filterCheckBox_clicked(self):\n self.__enableFindButton()", "def comb_box_click(self):\n\n if self.checkBox.isChecked():\n self.completer.setFilterMode(Qt.MatchStartsWith)\n else:\n self.completer.setFilterMode(Qt.MatchContains)", "def GetCheckButtonSelect(self, selectList, time, title=\"Filter rosbag\", fname=\"\",app=None):\n \n if app is None:\n app = QApplication(sys.argv)\n win = QWidget()\n layout=QGridLayout()\n layoutRow=0\n\n # Topic title\n label = QLabel(\"Filter by topic name:\")\n layout.addWidget(label,layoutRow,0)\n layoutRow=layoutRow+1\n\n # Topic uncheck/check all\n def setAllTopicState(checkboxs, state):\n for c in checkboxs:\n c.setChecked(state)\n btn=QPushButton(\"Select all\")\n btn.clicked.connect(lambda: setAllTopicState(checkboxs, True))\n layout.addWidget(btn,layoutRow, 0)\n btn=QPushButton(\"Unselect all\")\n btn.clicked.connect(lambda: setAllTopicState(checkboxs, False))\n layout.addWidget(btn,layoutRow, 1)\n layoutRow=layoutRow+1\n \n\n # Topic checkbox\n checked = SimplePyQtGUIKit.loadCacheTopicFilter(selectList)\n checkboxs=[]\n i=0\n for select in selectList:\n checkbox=QCheckBox(select)\n checkbox.setChecked(select in checked)\n layout.addWidget(checkbox,layoutRow,0, 1, 2)\n layoutRow=layoutRow+1\n checkboxs.append(checkbox)\n i+=1\n\n # Text time start\n title_start = QLabel(\"Start time:\")\n layout.addWidget(title_start, layoutRow, 0)\n\n textedit_start = QLineEdit(str(time[0]))\n layout.addWidget(textedit_start, layoutRow, 1)\n layoutRow+=1\n\n # Text time end\n title_end = QLabel(\"End time:\")\n layout.addWidget(title_end, layoutRow, 0)\n\n textedit_end = QLineEdit(str(time[1]))\n layout.addWidget(textedit_end, layoutRow, 1)\n layoutRow+=1\n \n # Button OK\n global _SimplePyQTGUIKit_validated\n _SimplePyQTGUIKit_validated = False\n def validate():\n global _SimplePyQTGUIKit_validated\n _SimplePyQTGUIKit_validated = True\n app.quit()\n btn=QPushButton(\"OK\")\n btn.clicked.connect(validate)\n layout.addWidget(btn,layoutRow,0, 1, 2)\n layoutRow=layoutRow+1\n\n win.setLayout(layout)\n win.setWindowTitle(title+\" \"+fname)\n win.show()\n app.exec_()\n\n if _SimplePyQTGUIKit_validated:\n result={}\n for (checkbox, select) in zip(checkboxs, selectList):\n result[select]=checkbox.isChecked()\n SimplePyQtGUIKit.saveCacheTopicFilter(result)\n tStart = eval(str(textedit_start.text()))\n tEnd = eval(str(textedit_end.text()))\n\n return (result, (tStart, tEnd))\n else:\n return tuple()", "def CheckBoxClicked(self,chkb):\r\n\r\n print(\"{} Selecionado.\", format(chkb.text()))", "def selection_changed(self):\n filter_name = self.cb_selectes_filter.currentText()\n\n # show filter description\n self.infob.setText(filters_dict[filter_name].__doc__)\n #print(self.selected_filter)\n\n self.compute(filter_name)\n # afficher\n self.update_display()\n\n self.commit()", "def handler_search_changed(self, widget):\n #we set the current language filter to the button's label\n self.filter_on = widget.get_text()\n text = widget.get_text()\n #we update the filter, which updates in turn the view\n self.emit('search-show',text)\n #self.log_filter.refilter()", "def __trigger_filter(self, act):\n filter_actions = act.parent().actions()\n # manage menu check state\n if act is self.act_filter_all:\n # copy check state of \"All\" action to other actions\n [a.setChecked(act.isChecked()) for a in filter_actions if a is not act]\n\n elif act.isChecked():\n # if all items are checked or unchecked make sure the \"All\" option is checked\n 
self.act_filter_all.setChecked(len(list(set([a.isChecked() for a in filter_actions if a is not self.act_filter_all]))) == 1)\n\n elif not act.isChecked():\n # if any other action is unchecked uncheck \"All\" action as well\n self.act_filter_all.setChecked(False)\n\n # trigger proxy filter with new check states\n filter_data = [a.data() for a in filter_actions if a.isChecked() and a.data() is not None]\n self.proxy_model.set_current_filter([a.data() for a in filter_actions if a.isChecked()])\n # save settings to ~/.nuke/uistate.ini\n self.settings.setValue(SettingsKeys.filter, filter_data)", "def _filter(self, __button):\r\n# WARNING: Refactor _filter; current McCabe Complexity metric = 54.\r\n _criteria = []\r\n _inputs = []\r\n _compound = []\r\n\r\n # Read the user inputs for the different fields that can be used to\r\n # filter with.\r\n _criteria.append(self.cmbCriteriaID.get_active_text())\r\n _inputs.append(self.txtFilterID.get_text())\r\n _compound.append(self.cmbCompound1.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCategory.get_active_text())\r\n _inputs.append(self.cmbFilterCategory.get_active())\r\n _compound.append(self.cmbCompound2.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaType.get_active_text())\r\n _inputs.append(self.cmbFilterType.get_active())\r\n _compound.append(self.cmbCompound3.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaStatus.get_active_text())\r\n _inputs.append(self.cmbFilterStatus.get_active())\r\n _compound.append(self.cmbCompound4.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCriticality.get_active_text())\r\n _inputs.append(self.cmbFilterCriticality.get_active())\r\n _compound.append(self.cmbCompound5.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAge.get_active_text())\r\n _inputs.append(self.txtFilterAge.get_text())\r\n _compound.append(self.cmbCompound6.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLifeCycle.get_active_text())\r\n _inputs.append(self.cmbFilterLifeCycle.get_active())\r\n _compound.append(self.cmbCompound7.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaShortDesc.get_active_text())\r\n _inputs.append(self.txtFilterShortDesc.get_text())\r\n _compound.append(self.cmbCompound8.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLongDesc.get_active_text())\r\n _inputs.append(self.txtFilterLongDesc.get_text())\r\n _compound.append(self.cmbCompound9.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRemarks.get_active_text())\r\n _inputs.append(self.txtFilterRemarks.get_text())\r\n _compound.append(self.cmbCompound10.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAnalysis.get_active_text())\r\n _inputs.append(self.txtFilterAnalysis.get_text())\r\n _compound.append(self.cmbCompound11.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTest.get_active_text())\r\n _inputs.append(self.txtFilterTest.get_text())\r\n _compound.append(self.cmbCompound12.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTestCase.get_active_text())\r\n _inputs.append(self.txtFilterTestCase.get_text())\r\n _compound.append(self.cmbCompound13.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestBy.get_active_text())\r\n _inputs.append(self.cmbFilterRequestBy.get_active_text())\r\n _compound.append(self.cmbCompound14.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestDate.get_active_text())\r\n _inputs.append(self.txtFilterRequestDate.get_text())\r\n 
_compound.append(self.cmbCompound15.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewBy.get_active_text())\r\n _inputs.append(self.cmbFilterReviewBy.get_active_text())\r\n _compound.append(self.cmbCompound16.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewDate.get_active_text())\r\n _inputs.append(self.txtFilterReviewDate.get_text())\r\n _compound.append(self.cmbCompound17.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveBy.get_active_text())\r\n _inputs.append(self.cmbFilterApproveBy.get_active_text())\r\n _compound.append(self.cmbCompound18.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveDate.get_active_text())\r\n _inputs.append(self.txtFilterApproveDate.get_text())\r\n _compound.append(self.cmbCompound19.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseBy.get_active_text())\r\n _inputs.append(self.cmbFilterCloseBy.get_active_text())\r\n _compound.append(self.cmbCompound20.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseDate.get_active_text())\r\n _inputs.append(self.txtFilterCloseDate.get_text())\r\n _compound.append(self.cmbCompound21.get_active_text())\r\n\r\n _inputs.append(self.chkFilterAccepted.get_active())\r\n _compound.append(self.cmbCompound22.get_active_text())\r\n\r\n _inputs.append(self.chkFilterReviewed.get_active())\r\n\r\n _criteria.append(self.cmbCriteriaAssembly.get_active_text())\r\n _model = self.cmbAssembly.get_model()\r\n _row = self.cmbAssembly.get_active_iter()\r\n if _row is not None:\r\n _text = int(_model.get_value(_row, 1))\r\n else:\r\n _text = 0\r\n _inputs.append(_text)\r\n _compound.append(self.cmbCompound23.get_active_text())\r\n\r\n # Build the query from the user-provided inputs.\r\n if all(_c is None for _c in _criteria):\r\n query = None\r\n elif Configuration.RTK_MODULES[0] == 1:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id={0:d} AND \".format(\r\n self._revision_id)\r\n else:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id=0 AND \"\r\n\r\n if _criteria[0] is not None and _criteria[0] != '':\r\n query = query + \"fld_incident_id\" + _criteria[0] + _inputs[0]\r\n if _compound[0] is not None and _compound[0] != '':\r\n query = query + \" \" + _compound[0] + \" \"\r\n\r\n if _criteria[1] is not None and _criteria[1] != '':\r\n query = query + \"fld_incident_category\" + _criteria[1] + \\\r\n str(_inputs[1])\r\n if _compound[1] is not None and _compound[1] != '':\r\n query = query + \" \" + _compound[1] + \" \"\r\n\r\n if _criteria[2] is not None and _criteria[2] != '':\r\n query = query + \"fld_incident_type\" + _criteria[2] + \\\r\n str(_inputs[2])\r\n if _compound[2] is not None and _compound[2] != '':\r\n query = query + \" \" + _compound[2] + \" \"\r\n\r\n if _criteria[3] is not None and _criteria[3] != '':\r\n query = query + \"fld_status\" + _criteria[3] + str(_inputs[3])\r\n if _compound[3] is not None and _compound[3] != '':\r\n query = query + \" \" + _compound[3] + \" \"\r\n\r\n if _criteria[4] is not None and _criteria[4] != '':\r\n query = query + \"fld_criticality\" + _criteria[4] + str(_inputs[4])\r\n if _compound[4] is not None and _compound[4] != '':\r\n query = query + \" \" + _compound[4] + \" \"\r\n\r\n if _criteria[5] is not None and _criteria[5] != '':\r\n query = query + \"fld_incident_age\" + _criteria[5] + str(_inputs[5])\r\n if _compound[5] is not None and _compound[5] != '':\r\n query = query + \" \" + _compound[5] + \" \"\r\n\r\n if _criteria[6] is not None and _criteria[6] != 
'':\r\n query = query + \"fld_life_cycle\" + _criteria[6] + str(_inputs[6])\r\n if _compound[6] is not None and _compound[6] != '':\r\n query = query + \" \" + _compound[6] + \" \"\r\n\r\n if _criteria[21] is not None and _criteria[21] != '':\r\n query = query + \"fld_hardware_id\" + _criteria[21] + \\\r\n str(_inputs[23])\r\n if _compound[22] is not None and _compound[22] != '':\r\n query = query + \" \" + _compound[22] + \" \"\r\n\r\n if _criteria[7] is not None and _criteria[7] != '':\r\n query = query + \"fld_short_description \" + _criteria[7] + \\\r\n \" '%\" + _inputs[7] + \"%'\"\r\n if _compound[7] is not None and _compound[7] != '':\r\n query = query + \" \" + _compound[7] + \" \"\r\n\r\n if _criteria[8] is not None and _criteria[8] != '':\r\n query = query + \"fld_long_description \" + _criteria[8] + \\\r\n \" '%\" + _inputs[8] + \"%'\"\r\n if _compound[8] is not None and _compound[8] != '':\r\n query = query + \" \" + _compound[8] + \" \"\r\n\r\n if _criteria[9] is not None and _criteria[9] != '':\r\n query = query + \"fld_remarks \" + _criteria[9] + \\\r\n \" '%\" + _inputs[9] + \"%'\"\r\n if _compound[9] is not None and _compound[9] != '':\r\n query = query + \" \" + _compound[9] + \" \"\r\n\r\n if _criteria[10] is not None and _compound[10] != '':\r\n query = query + \"fld_analysis \" + _criteria[10] + \\\r\n \" '%\" + _inputs[10] + \"%'\"\r\n if _compound[10] is not None and _compound[10] != '':\r\n query = query + \" \" + _compound[10] + \" \"\r\n\r\n if _criteria[11] is not None and _compound[11] != '':\r\n query = query + \"fld_test_found \" + _criteria[11] + \\\r\n \" '%\" + _inputs[11] + \"%'\"\r\n if _compound[11] is not None and _compound[11] != '':\r\n query = query + \" \" + _compound[11] + \" \"\r\n\r\n if _criteria[12] is not None and _compound[12] != '':\r\n query = query + \"fld_test_case \" + _criteria[12] + \\\r\n \" '%\" + _inputs[12] + \"%'\"\r\n if _compound[12] is not None and _compound[12] != '':\r\n query = query + \" \" + _compound[12] + \" \"\r\n\r\n if _criteria[13] is not None and _compound[13] != '':\r\n query = query + \"fld_request_by\" + _criteria[13] + \\\r\n \"'\" + _inputs[13] + \"'\"\r\n if _compound[13] is not None and _compound[13] != '':\r\n query = query + \" \" + _compound[13] + \" \"\r\n\r\n if _criteria[14] is not None and _compound[14] != '':\r\n query = query + \"fld_request_date\" + _criteria[14] + \\\r\n str(datetime.strptime(_inputs[14], \"%Y-%m-%d\").toordinal())\r\n if _compound[14] is not None and _compound[14] != '':\r\n query = query + \" \" + _compound[14] + \" \"\r\n\r\n if _criteria[15] is not None and _compound[15] != '':\r\n query = query + \"fld_reviewed_by\" + _criteria[15] + \\\r\n \"'\" + _inputs[15] + \"'\"\r\n if _compound[15] is not None and _compound[15] != '':\r\n query = query + \" \" + _compound[15] + \" \"\r\n\r\n if _criteria[16] is not None and _compound[16] != '':\r\n query = query + \"fld_reviewed_date\" + _criteria[16] + \\\r\n str(datetime.strptime(_inputs[16], \"%Y-%m-%d\").toordinal())\r\n if _compound[16] is not None and _compound[16] != '':\r\n query = query + \" \" + _compound[16] + \" \"\r\n\r\n if _criteria[17] is not None and _compound[17] != '':\r\n query = query + \"fld_approved_by\" + _criteria[17] + \\\r\n \"'\" + _inputs[17] + \"'\"\r\n if _compound[17] is not None and _compound[17] != '':\r\n query = query + \" \" + _compound[17] + \" \"\r\n\r\n if _criteria[18] is not None and _compound[18] != '':\r\n query = query + \"fld_approved_date\" + _criteria[18] + \\\r\n 
str(datetime.strptime(_inputs[18], \"%Y-%m-%d\").toordinal())\r\n if _compound[18] is not None and _compound[18] != '':\r\n query = query + \" \" + _compound[18] + \" \"\r\n\r\n if _criteria[19] is not None and _compound[19] != '':\r\n query = query + \"fld_complete_by\" + _criteria[19] + \\\r\n \"'\" + _inputs[19] + \"'\"\r\n if _compound[19] is not None and _compound[19] != '':\r\n query = query + \" \" + _compound[19] + \" \"\r\n\r\n if _criteria[20] is not None and _compound[20] != '':\r\n query = query + \"fld_complete_date\" + _criteria[20] + \\\r\n str(datetime.strptime(_inputs[20], \"%Y-%m-%d\").toordinal())\r\n if _compound[20] is not None and _compound[20] != '':\r\n query = query + \" \" + _compound[20] + \" \"\r\n\r\n if _inputs[21]:\r\n query = query + \"fld_accepted=%d\" % 1\r\n if _compound[21] is not None and _compound[21] != '':\r\n query = query + \" \" + _compound[21] + \" \"\r\n\r\n if _inputs[22]:\r\n query = query + \"fld_reviewed=%d\" % 1\r\n\r\n self._modulebook.request_filter_incidents(self._revision_id, query)", "def action_checkbox(self):\n self.checkbox_online_var = not self.checkbox_online_var", "def on_comboBox_centrale_currentIndexChanged(self, index):\n\n nbr_ligne = self.tableWidget_select_sondes.rowCount()\n for ligne in reversed(range(nbr_ligne)):\n self.tableWidget_select_sondes.removeRow(ligne) \n \n nom_centrale = self.comboBox_centrale.currentText()\n \n id_centrale = [x[0] for x in self.centrales if x[1] == nom_centrale][0]\n \n sondes_centrale = [x for x in self.sondes_centrales if x[6] == id_centrale] \n \n# \n for sonde in reversed(sondes_centrale): \n self.tableWidget_select_sondes.insertRow(0) \n check = QtGui.QCheckBox(self.tableWidget_select_sondes)\n \n if sonde[0] in self.donnees_caracterisation[\"MOYENS_MESURE\"][\"ID_SONDES_CENTRALES\"]:\n check.setChecked(True)\n check.setEnabled(False)\n\n \n self.tableWidget_select_sondes.setCellWidget(0, 1, check)\n item = QtGui.QTableWidgetItem(str(sonde[1]))\n self.tableWidget_select_sondes.setItem(0, 0, item)", "def on_checkBox_duotoukai_stateChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def accept(self):\n # self.selectedoptions = [\"ID\", \"Sub district\"]\n self.selectedoptions = [\"ID\", \"District\"]\n model = self.lstLayers.model()\n for i in range(model.rowCount()):\n item = model.item(i)\n if item.checkState() == Qt.Checked:\n self.selectedoptions.append(item.text())\n QDialog.accept(self)", "def filter(self, observable):", "def setChecked(self,selected,flag=True):\n if flag:\n qtflag = QtCore.Qt.Checked\n else:\n qtflag = QtCore.Qt.Unchecked\n \n for s in selected:\n for i in self.input.findItems(s,QtCore.Qt.MatchExactly):\n i.setCheckState(qtflag)", "def itemStateChanged(self, event):\n checkBox = event.getItemSelectable()\n is_selected = (event.getStateChange() == ItemEvent.SELECTED)\n result_field = self.resultFieldCheckBoxes[checkBox]\n self.selected_result_fields[result_field['id']] = is_selected", "def cbTriggered(self, value):\n global selectedCheckboxes\n selectedCheckboxes=value", "def on_checkBox_duotouping_stateChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def on_checkBox_duotoukai_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def populate_filter(self, src, col, tbl):\r\n sub_cats = set([c for l in kit.SQL_pull(col, tbl) for c in l])\r\n select = {g: tk.BooleanVar() for g in sub_cats}\r\n for key in select:\r\n select[key].set(True)\r\n \r\n line = 1\r\n for g in 
sub_cats:\r\n line += 1\r\n tk.Checkbutton(src,\r\n text = g,\r\n variable = select[g],\r\n command = lambda x = tbl,\r\n y = False,\r\n z = select: self.checked(x, y, z),\r\n **jt.filter_style).grid(row = line, \r\n sticky = tk.W)\r\n \r\n return select", "def on_filter_instances(self):\n self._set_filter_value(\n 'filterInstances', self.filter_instances_btn.isChecked())", "def on_check_append_messages_toggled(self, checkBox):\n\t\tself.logView.set_append_messages(checkBox.get_active())\n\t\tself.emit('append-messages-changed')", "def on_checkBox_kongtoukai_stateChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def _onCheckBox(self, widget):\n widget.setStateCheck(not widget.getStateCheck())", "def on_show_wrong_name(self):\n self._set_filter_value(\n 'showWrongNameState', self.wrong_name_btn.isChecked())", "def on_checkBox_duotouping_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def on_pushButtonCheck_clicked(self, checked):\n\n # get the file name to open\n #cslDir = self.settings.value('CslDir', '') # default = ''\n #options = QFileDialog.Options()\n #fileName, _ = QFileDialog.getOpenFileName(self,\n # \"Open csl file\",\n # cslDir,\n # \"csl Files (*.csl);;All Files (*)\",\n # options = options)\n\n fileName = 'C:/Users/steph/OneDrive/Documents/QtPython/ArchiveEditor/ArchiveEditor/G4AUClarge.csl'\n\n print(fileName)\n\n if fileName:\n\n qApp.setOverrideCursor(Qt.WaitCursor)\n qApp.processEvents(QEventLoop.AllEvents)\n\n self.display() # blank line\n self.display('Checking:', fileName, colour='darkgreen')\n self.display()\n\n head, tail = os.path.split(fileName)\n self.settings.setValue('CslDir', head)\n self.setWindowTitle(TITLE + ' - ' + tail)\n\n self.pushButtonCheck.setEnabled(False)\n\n self.checked = self.checkBoxSimilarLocators.isChecked()\n\n #qApp.setOverrideCursor(Qt.WaitCursor)\n\n\n self.createReport(fileName, self.checked)\n\n #qApp.restoreOverrideCursor()", "def on_selection_button_clicked(self, widget):\n #we set the current language filter to the button's label\n self.current_filter_language = widget.get_label()\n print(\"%s language selected!\" % self.current_filter_language)\n #we update the filter, which updates in turn the view\n self.language_filter.refilter()", "def on_source_checked(self, source_item):\n # don't bother for non-checked events\n if not source_item.has_checked_changed():\n return\n # leave checked for as long as search is running\n if self.is_searching():\n source_item.setCheckState(Qt.Checked)\n return\n # keep to the same if exporting\n if self.is_exporting():\n if source_item.is_checked:\n source_item.setCheckState(Qt.Checked)\n else:\n source_item.setCheckState(Qt.Unchecked)\n return\n is_checked = source_item.current_state()\n for key, geometry in source_item.geometries.iteritems():\n if is_checked:\n geometry.enable_source(source_item.title)\n else:\n geometry.disable_source(source_item.title)\n for key, type_entry in source_item.type_entries.iteritems():\n if is_checked:\n type_entry.enable_source(source_item.title, self.geometries)\n else:\n type_entry.disable_source(source_item.title, self.geometries)\n source_item.update_checked()", "def eventFilter(self, qobject, event):\n return False", "def ok(self):\n # Save parameters\n self.runschematisations = ['geen']\n schematisationModel = self.listSchematisations.model()\n for index in range(schematisationModel.rowCount()):\n item = schematisationModel.item(index)\n if item.isCheckable() and item.checkState() == 
QtCore.Qt.Checked:\n self.runschematisations.append(item.text())\n\n self.runwaterlevels = []\n waterlevelsModel = self.listWaterlevels.model()\n for index in range(waterlevelsModel.rowCount()):\n item = waterlevelsModel.item(index)\n if item.isCheckable() and item.checkState() == QtCore.Qt.Checked:\n self.runwaterlevels.append(item.text())\n\n self.succeeded = True\n self.accept()", "def _add_checkbox(self, text, state_changed, tooltip, checked=True,\n enabled=True, button_label=True):\n cbox = QtWidgets.QCheckBox('' if button_label else text, self)\n self.control.layout().addWidget(cbox)\n btn = None\n if button_label:\n btn = QtWidgets.QPushButton(text, self)\n self.control.layout().addWidget(btn)\n\n def cb(checked, cbox=cbox, state_changed=state_changed):\n state_changed(cbox.isChecked(), one_shot=True)\n\n btn.clicked.connect(cb)\n btn.setToolTip(tooltip)\n cbox.setChecked(checked)\n cbox.setEnabled(enabled)\n cbox.stateChanged.connect(state_changed)\n cbox.setToolTip(tooltip)\n self.control.layout().addItem(QtWidgets.QSpacerItem(20, 0))\n return cbox", "def action_checkbox(self, obj):\n if self.check_concurrent_action:\n return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME,\n force_str(\"%s,%s\" % (obj.pk, get_revision_of_object(obj))))\n else: # pragma: no cover\n return super().action_checkbox(obj)", "def prepare_filter(self, ):\n if not self._parent.connected():\n return\n papers = self._parent.model.list_papers([\"name\"])\n upps = map(lambda a: (a[\"id\"], a[\"name\"]), papers)\n accounts = self._parent.model.list_accounts([\"name\"])\n uaccs = map(lambda a: (a[\"id\"], a[\"name\"]), accounts)\n self.dialog.update_widget(count_range = self._parent.model.get_deals_count_range(),\n price_range = self._parent.model.get_deals_price_range(),\n comm_range = self._parent.model.get_deals_commission_range(),\n volume_range = self._parent.model.get_deals_volume_range(),\n stock_list = upps,\n accounts_list = uaccs)", "def on_checkBox_kongtoukai_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def setup_fq_checkboxes(self):\n checked_fqs = self.get_settings_value(\"checkedfqs\", [])\n if len(checked_fqs) > 0: # else there is not saved state... 
take gui defaults\n for checkbox in self.fq_checkboxes.keys():\n ls_type = self.fq_checkboxes[checkbox]\n checkbox.setChecked(ls_type.name in checked_fqs)", "def __doSearch(self):\n if (\n self.__replaceMode and\n not e5App().getObject(\"ViewManager\").checkAllDirty()\n ):\n return\n \n self.__cancelSearch = False\n \n if self.filterCheckBox.isChecked():\n fileFilter = self.filterEdit.text()\n fileFilterList = [\n \"^{0}$\".format(filter.replace(\".\", r\"\\.\").replace(\"*\", \".*\"))\n for filter in fileFilter.split(\";\")\n ]\n filterRe = re.compile(\"|\".join(fileFilterList))\n \n if self.projectButton.isChecked():\n if self.filterCheckBox.isChecked():\n files = [self.project.getRelativePath(file)\n for file in\n self.__getFileList(\n self.project.getProjectPath(), filterRe)]\n else:\n files = []\n if self.sourcesCheckBox.isChecked():\n files += self.project.pdata[\"SOURCES\"]\n if self.formsCheckBox.isChecked():\n files += self.project.pdata[\"FORMS\"]\n if self.interfacesCheckBox.isChecked():\n files += self.project.pdata[\"INTERFACES\"]\n if self.protocolsCheckBox.isChecked():\n files += self.project.pdata[\"PROTOCOLS\"]\n if self.resourcesCheckBox.isChecked():\n files += self.project.pdata[\"RESOURCES\"]\n elif self.dirButton.isChecked():\n if not self.filterCheckBox.isChecked():\n filters = []\n if self.sourcesCheckBox.isChecked():\n filters.extend(\n [\"^{0}$\".format(\n assoc.replace(\".\", r\"\\.\").replace(\"*\", \".*\"))\n for assoc in list(\n Preferences.getEditorLexerAssocs().keys())\n if assoc not in self.formsExt + self.interfacesExt +\n self.protocolsExt])\n if self.formsCheckBox.isChecked():\n filters.append(self.filterForms)\n if self.interfacesCheckBox.isChecked():\n filters.append(self.filterInterfaces)\n if self.protocolsCheckBox.isChecked():\n filters.append(self.filterProtocols)\n if self.resourcesCheckBox.isChecked():\n filters.append(self.filterResources)\n filterString = \"|\".join(filters)\n filterRe = re.compile(filterString)\n files = self.__getFileList(\n os.path.abspath(self.dirPicker.currentText()),\n filterRe)\n elif self.openFilesButton.isChecked():\n vm = e5App().getObject(\"ViewManager\")\n vm.checkAllDirty()\n files = vm.getOpenFilenames()\n \n self.findList.clear()\n QApplication.processEvents()\n QApplication.processEvents()\n self.findProgress.setMaximum(len(files))\n \n # retrieve the values\n reg = self.regexpCheckBox.isChecked()\n wo = self.wordCheckBox.isChecked()\n cs = self.caseCheckBox.isChecked()\n ct = self.findtextCombo.currentText()\n if reg:\n txt = ct\n else:\n txt = re.escape(ct)\n if wo:\n txt = \"\\\\b{0}\\\\b\".format(txt)\n flags = re.UNICODE\n if not cs:\n flags |= re.IGNORECASE\n try:\n search = re.compile(txt, flags)\n except re.error as why:\n E5MessageBox.critical(\n self,\n self.tr(\"Invalid search expression\"),\n self.tr(\"\"\"<p>The search expression is not valid.</p>\"\"\"\n \"\"\"<p>Error: {0}</p>\"\"\").format(str(why)))\n self.stopButton.setEnabled(False)\n self.findButton.setEnabled(True)\n self.findButton.setDefault(True)\n return\n # reset the findtextCombo\n if ct in self.searchHistory:\n self.searchHistory.remove(ct)\n self.searchHistory.insert(0, ct)\n self.findtextCombo.clear()\n self.findtextCombo.addItems(self.searchHistory)\n Preferences.Prefs.settings.setValue(\n \"FindFileDialog/SearchHistory\",\n self.searchHistory[:30])\n \n if self.__replaceMode:\n replTxt = self.replacetextCombo.currentText()\n if replTxt in self.replaceHistory:\n self.replaceHistory.remove(replTxt)\n self.replaceHistory.insert(0, 
replTxt)\n self.replacetextCombo.clear()\n self.replacetextCombo.addItems(self.replaceHistory)\n Preferences.Prefs.settings.setValue(\n \"FindFileDialog/ReplaceHistory\",\n self.replaceHistory[:30])\n \n if self.dirButton.isChecked():\n searchDir = self.dirPicker.currentText()\n if searchDir in self.dirHistory:\n self.dirHistory.remove(searchDir)\n self.dirHistory.insert(0, searchDir)\n self.dirPicker.clear()\n self.dirPicker.addItems(self.dirHistory)\n Preferences.Prefs.settings.setValue(\n \"FindFileDialog/DirectoryHistory\",\n self.dirHistory[:30])\n \n # set the button states\n self.stopButton.setEnabled(True)\n self.stopButton.setDefault(True)\n self.findButton.setEnabled(False)\n \n # now go through all the files\n self.__populating = True\n self.findList.setUpdatesEnabled(False)\n progress = 0\n breakSearch = False\n occurrences = 0\n fileOccurrences = 0\n for file in files:\n self.__lastFileItem = None\n found = False\n if self.__cancelSearch or breakSearch:\n break\n \n self.findProgressLabel.setPath(file)\n \n if self.projectButton.isChecked():\n fn = os.path.join(self.project.ppath, file)\n else:\n fn = file\n # read the file and split it into textlines\n try:\n text, encoding, hashStr = Utilities.readEncodedFileWithHash(fn)\n lines = text.splitlines(True)\n except (UnicodeError, IOError):\n progress += 1\n self.findProgress.setValue(progress)\n continue\n \n # now perform the search and display the lines found\n count = 0\n for line in lines:\n if self.__cancelSearch:\n break\n \n count += 1\n contains = search.search(line)\n if contains:\n occurrences += 1\n found = True\n start = contains.start()\n end = contains.end()\n if self.__replaceMode:\n rline = search.sub(replTxt, line)\n else:\n rline = \"\"\n line = self.__stripEol(line)\n if len(line) > 1024:\n line = \"{0} ...\".format(line[:1024])\n if self.__replaceMode:\n if len(rline) > 1024:\n rline = \"{0} ...\".format(line[:1024])\n line = \"- {0}\\n+ {1}\".format(\n line, self.__stripEol(rline))\n self.__createItem(file, count, line, start, end,\n rline, hashStr)\n \n if self.feelLikeCheckBox.isChecked():\n fn = os.path.join(self.project.ppath, file)\n self.sourceFile.emit(fn, count, \"\", start, end)\n QApplication.processEvents()\n breakSearch = True\n break\n \n QApplication.processEvents()\n \n if found:\n fileOccurrences += 1\n progress += 1\n self.findProgress.setValue(progress)\n \n if not files:\n self.findProgress.setMaximum(1)\n self.findProgress.setValue(1)\n \n resultFormat = self.tr(\"{0} / {1}\", \"occurrences / files\")\n self.findProgressLabel.setPath(resultFormat.format(\n self.tr(\"%n occurrence(s)\", \"\", occurrences),\n self.tr(\"%n file(s)\", \"\", fileOccurrences)))\n \n self.findList.setUpdatesEnabled(True)\n self.findList.sortItems(self.findList.sortColumn(),\n self.findList.header().sortIndicatorOrder())\n self.findList.resizeColumnToContents(1)\n if self.__replaceMode:\n self.findList.header().resizeSection(0, self.__section0Size + 30)\n self.findList.header().setStretchLastSection(True)\n self.__populating = False\n \n self.stopButton.setEnabled(False)\n self.findButton.setEnabled(True)\n self.findButton.setDefault(True)\n \n if breakSearch:\n self.close()", "def on_action_triggered(self):\n #self.model.clear()\n #self.model.setFilter(u\"代码 = '000002'\")\n #self.model.select()\n # TODO: not implemented yet\n #self.tableView_2.setFreezeNum(2)\n QMessageBox.warning(self,'warning', u\"权限不够\")\n #print str(self.model.data(self.model.index(1, 0)).toString().toUtf8())\n #raise NotImplementedError", 
"def selecionar_registros_is_checked(self):\n state = self.Registers_Checkbox.isChecked()\n\n self.Registers_Field.setEnabled(state)", "def filter(self):\n _filter = self.ask_filter.text()\n if _filter:\n self.parent().artists = dmla.list_artists(_filter)\n else:\n self.parent().artists = self.parent().all_artists\n self.parent().artist_filter = _filter\n self.parent().do_select()", "def notify_wizard(self):\n self.emit_datachanged()\n #self.emit(SIG(\"condition_update\"), self._conds or None)", "def DoCheck(self,event):\r\n index = event.GetSelection()\r\n item = self.items[index]\r\n if self.list.IsChecked(index):\r\n self.data.check(item)\r\n else:\r\n self.data.uncheck(item)\r\n #self.list.SetSelection(index)\r", "def check_box(self, grid: object, name: str, xposition: int, yposition: int,\n synchronize: bool = False, xspan: int = 1, yspan: int = 1) -> QtWidgets.QCheckBox:\n label = QtWidgets.QLabel()\n label.setText(TR().tr(name) + ':')\n grid.addWidget(label, yposition, xposition, 1, 1)\n\n input = QtWidgets.QCheckBox()\n input.setObjectName(name)\n if synchronize:\n self.synchronize(input)\n grid.addWidget(input, yposition, xposition + 1, yspan, xspan)\n input.stateChanged.connect(self.data_changed)\n\n return input", "def on_pushButton_clicked(self):\n # TODO: not implemented yet\n startD = self.dateEdit.date().toPyDate()\n endD = self.dateEdit_2.date().toPyDate()\n codes = str(self.lineEdit.text().toUtf8())\n #name = str(self.lineEdit_2.text().toUtf8())\n \n #self.model.setTable('baseinfo')\n filter = u\"日期 >= '%s' and 日期 <= '%s'\" %(startD, endD)\n if codes:\n ids = codes.split('|')\n if len(ids) > 1:\n filter = filter + u\" and (代码 = '%s'\" %ids[0]\n for id in ids[1:]: \n id = id.strip()\n filter = filter + u\" or 代码 = '%s'\" %id\n filter = filter + u')'\n else: \n filter = filter + u\" and 代码 = '%s'\" %ids[0]\n #filter += ' limit 2000'\n \n self.filter = filter\n# print filter\n self.model.queryBasic(codes, startD, endD)", "def on_searchin_changed(self):\r\n\r\n self.check_searchin()", "def on_type_check(self, type_item):\n # don't bother for non-checked events\n if not type_item.has_checked_changed():\n return\n # leave checked while search is running\n if self.is_searching():\n type_item.setCheckState(Qt.Checked)\n return\n # keep to same if exporting\n if self.is_exporting():\n if type_item.is_checked:\n type_item.setCheckState(Qt.Checked)\n else:\n type_item.setCheckState(Qt.Unchecked)\n return\n type_item.update_checked()", "def metodo_poderes(self):\r\n if self.usain_nebolt.isChecked():\r\n self.poderes[0] = True\r\n elif not self.usain_nebolt.isChecked():\r\n self.poderes[0] = False\r\n\r\n if self.limpiessa.isChecked():\r\n self.poderes[1] = True\r\n elif not self.limpiessa.isChecked():\r\n self.poderes[1] = False\r\n\r\n if self.sin_rastro.isChecked():\r\n self.poderes[2] = True\r\n elif not self.sin_rastro.isChecked():\r\n self.poderes[2] = False\r\n\r\n if self.cervessa.isChecked():\r\n self.poderes[3] = True\r\n elif not self.cervessa.isChecked():\r\n self.poderes[3] = False\r\n\r\n if self.del_trio.isChecked():\r\n self.poderes[4] = True\r\n elif not self.del_trio.isChecked():\r\n self.poderes[4] = False\r\n\r\n if self.nebcoins.isChecked():\r\n self.poderes[5] = True\r\n elif not self.nebcoins.isChecked():\r\n self.poderes[5] = False\r\n sleep(0.1)\r\n poderes = {\"status\": \"nuevo_poder\",\r\n \"data\": self.poderes}\r\n self.server_signal_2.emit(poderes)", "def toggled_idiom(self):\n QtGui.qApp.removeTranslator(self.translator)\n #print(\"Cambiando idioma 
a\")\n if self.actionCatala.isChecked() and self.actionCatala.isEnabled():\n #print(\"Catala\")\n cargado = self.translator.load('dcsmonitor_ca.qm')\n \n self.actionCatala.setEnabled(0)\n self.actionEspanol.setChecked(0)\n self.actionEspanol.setEnabled(1)\n self.actionEnglish.setChecked(0)\n self.actionEnglish.setEnabled(1)\n self.actionFrench.setChecked(0)\n self.actionFrench.setEnabled(1)\n \n elif self.actionEspanol.isChecked() and self.actionEspanol.isEnabled():\n #print(\"Espanol\")\n cargado = self.translator.load('dcsmonitor_es.qm')\n \n self.actionEspanol.setEnabled(0)\n self.actionCatala.setChecked(0)\n self.actionCatala.setEnabled(1)\n self.actionEnglish.setChecked(0)\n self.actionEnglish.setEnabled(1)\n self.actionFrench.setChecked(0)\n self.actionFrench.setEnabled(1)\n \n elif self.actionEnglish.isChecked() and self.actionEnglish.isEnabled():\n #print(\"English\")\n cargado = self.translator.load('dcsmonitor_en.qm')\n \n self.actionEnglish.setEnabled(0)\n self.actionCatala.setChecked(0)\n self.actionCatala.setEnabled(1)\n self.actionEspanol.setChecked(0)\n self.actionEspanol.setEnabled(1)\n self.actionFrench.setChecked(0)\n self.actionFrench.setEnabled(1)\n \n elif self.actionFrench.isChecked() and self.actionFrench.isEnabled():\n #print(\"French\")\n cargado = self.translator.load('dcsmonitor_fr.qm')\n \n self.actionFrench.setEnabled(0)\n self.actionCatala.setChecked(0)\n self.actionCatala.setEnabled(1)\n self.actionEspanol.setChecked(0)\n self.actionEspanol.setEnabled(1)\n self.actionEnglish.setChecked(0)\n self.actionEnglish.setEnabled(1)\n else:\n #print(\"error\")\n cargado = 0\n \n if cargado == 1:\n QtGui.qApp.installTranslator(self.translator)\n self.retranslateUi(self)\n #print(\"Idioma cambiado\")\n \n \n \n #self.setupUi(self)", "def on_filterEdit_textEdited(self, text):\n self.__enableFindButton()", "def __updateOK(self):\n enabled = True\n if self.idButton.isChecked():\n enabled = self.idEdit.text() != \"\"\n elif self.tagButton.isChecked():\n enabled = self.tagCombo.currentText() != \"\"\n elif self.branchButton.isChecked():\n enabled = self.branchCombo.currentText() != \"\"\n elif self.remoteBranchButton.isChecked():\n enabled = self.remoteBranchCombo.currentText() != \"\"\n \n enabled &= (self.commitGroupBox.isChecked() and\n self.commitMessageEdit.toPlainText() != \"\")\n \n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)", "def uiCheckboxChecked(checkbox):\n\n return clibui.uiCheckboxChecked(checkbox)", "def _paramFixCheck(self, default_value: bool = False) -> QtWidgets.QCheckBox:\n widget = QtWidgets.QCheckBox('')\n widget.setChecked(default_value)\n widget.setToolTip(\"when fixed, the parameter will be fixed to the \"\n \"initial guess value during fitting\")\n return widget", "def eventFilter(self, QObject, QEvent): # real signature unknown; restored from __doc__\r\n return False", "def flag(i, com, xi, yi):\r\n chk = Checkbutton(window1, variable=list_cb[i], command=com)\r\n chk.focus()\r\n chk.place(x=xi, y=yi)\r\n return", "def bool_checkbox(init: bool = False, descr: str = '', data_type: type[Data] = Data):\n\n class StdInpWidget_BoolCheckBox(StdInputWidgetBase, QCheckBox):\n def __init__(self, params):\n StdInputWidgetBase.__init__(self, params)\n QCheckBox.__init__(self)\n\n # tooltip\n self.setToolTip(self.__doc__)\n\n self.stateChanged.connect(self.state_changed)\n\n # initial value\n with self._prevent_update:\n self.setChecked(init)\n\n @property\n def val(self) -> data_type:\n return data_type(self.isChecked())\n\n def 
load_from(self, val: Data):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n def state_changed(self, _):\n self.on_widget_val_changed(self.val)\n\n def val_update_event(self, val: Data):\n if isinstance(val.payload, bool):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n StdInpWidget_BoolCheckBox.__doc__ = descr\n\n return StdInpWidget_BoolCheckBox", "def storeCheckBoxValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.getCheckBoxValue(self.sender())\n\t\tself.storeValue(category, attr, value)", "def __init__(self, parent=None):\n super().__init__(parent, modal=True)\n self.setWindowTitle(\"Half-Elf Stat Selection\")\n self.resize(400, 100)\n layout = qtw.QGridLayout()\n self.setLayout(layout)\n label = qtw.QLabel('Select 2 abilities to improve by +1:')\n label.setAlignment(qtc.Qt.AlignCenter)\n layout.addWidget(label, 0, 0, 1, 3)\n self.num_checked = 0\n self.str_checkbox = qtw.QCheckBox('Strength')\n self.dex_checkbox = qtw.QCheckBox('Dexterity')\n self.con_checkbox = qtw.QCheckBox('Constitution')\n self.int_checkbox = qtw.QCheckBox('Intelligence')\n self.wis_checkbox = qtw.QCheckBox('Wisdom')\n self.checkboxes = [\n self.str_checkbox, self.dex_checkbox, self.con_checkbox,\n self.int_checkbox, self.wis_checkbox\n ]\n for checkbox in self.checkboxes:\n checkbox.toggled.connect(self.adjust_nums_checked)\n\n layout.addWidget(self.str_checkbox, 1, 0)\n layout.addWidget(self.dex_checkbox, 1, 1)\n layout.addWidget(self.con_checkbox, 1, 2)\n layout.addWidget(self.int_checkbox, 2, 0)\n layout.addWidget(self.wis_checkbox, 2, 1)\n reset_checkboxes = qtw.QPushButton('Clear', self)\n self.submit_btn = qtw.QPushButton('Ok', self, clicked=self.accept)\n self.submit_btn.setDisabled(True)\n layout.addWidget(reset_checkboxes, 3, 1)\n layout.addWidget(self.submit_btn, 3, 2)\n reset_checkboxes.clicked.connect(self.remove_checks)", "def check_state(self, *args, **kwargs):\n\n sender = self.sender()\n validator = sender.validator()\n\n # get validator state\n if type(sender) == QLineEdit:\n state = validator.validate(sender.text(), 0)[0]\n elif type(sender) == QComboBox:\n state = validator.validate(sender.currentText(), 0)[0]\n\n # associate state with color\n if state == QtGui.QValidator.Acceptable:\n color = oPB.OPB_GREEN\n elif state == QtGui.QValidator.Intermediate:\n color = oPB.OPB_YELLOW\n else:\n color = oPB.OPB_RED\n if type(validator) == ScriptFileValidator:\n self._parent.msgbox(sender.text() + \"@@\" +\n translate(\"MainWindow\", \"The script has to be inside the CLIENT_DATA folder of the package!\"),\n oPB.MsgEnum.MS_ERR)\n\n # set background color accoring to state\n if type(sender) == QLineEdit:\n sender.setStyleSheet('QLineEdit { background-color: %s }' % color)\n elif type(sender) == QComboBox:\n sender.setStyleSheet('QComboBox { background-color: %s }' % color)", "def setValue(self,val):\n if val:\n self.input.setCheckState(QtCore.Qt.Checked)\n else:\n self.input.setCheckState(QtCore.Qt.Unchecked)", "def update_checked(self):\n self._is_checked = self.checkState() == Qt.Checked", "def update_checked(self):\n self._is_checked = self.checkState() == Qt.Checked", "def update_checked(self):\n self._is_checked = self.checkState() == Qt.Checked", "def stateChanged(self, obj, box):\n logger.debug(\"checkbox state changed\")\n if(box.isChecked()==False):\n logger.debug(\"deselect: %s\" % obj)\n cmds.select(obj, d=True) #deselect object\n else:\n logger.debug(\"%s is checked\" % obj)", "def filter_subjects(self):\n\n 
paramPanelWindow = param_panel.Param_panel()\n paramPanelWindow.setMaximumHeight(800)\n paramPanelWindow.setMaximumWidth(600)\n paramPanelWindow.setWindowTitle(\"Select the subjects to show in the subjects list\")\n paramPanelWindow.lbBehaviors.setText(\"Subjects\")\n\n for w in [paramPanelWindow.lwSubjects, paramPanelWindow.pbSelectAllSubjects,\n paramPanelWindow.pbUnselectAllSubjects,\n paramPanelWindow.pbReverseSubjectsSelection, paramPanelWindow.lbSubjects,\n paramPanelWindow.cbIncludeModifiers,\n paramPanelWindow.cbExcludeBehaviors, paramPanelWindow.frm_time]:\n w.setVisible(False)\n\n # subjects filtered\n filtered_subjects = [self.twSubjects.item(i, SUBJECT_NAME_FIELD_IDX).text() for i in\n range(self.twSubjects.rowCount())]\n\n for subject in [self.pj[SUBJECTS][x][\"name\"] for x in sorted_keys(self.pj[SUBJECTS])]:\n\n paramPanelWindow.item = QListWidgetItem(subject)\n if subject in filtered_subjects:\n paramPanelWindow.item.setCheckState(Qt.Checked)\n else:\n paramPanelWindow.item.setCheckState(Qt.Unchecked)\n\n paramPanelWindow.lwBehaviors.addItem(paramPanelWindow.item)\n\n if paramPanelWindow.exec_():\n if self.observationId and set(paramPanelWindow.selectedBehaviors) != set(filtered_subjects):\n self.projectChanged = True\n self.load_subjects_in_twSubjects(paramPanelWindow.selectedBehaviors)\n # update subjects pad\n if hasattr(self, \"subjects_pad\"):\n self.subjects_pad.filtered_subjects = [self.twSubjects.item(i, SUBJECT_NAME_FIELD_IDX).text()\n for i in range(self.twSubjects.rowCount())]\n self.subjects_pad.compose()", "def on_combobox2_changed(self, source=None, event=None):\n\t\tpattern = dict_filter[self.combobox2.get_model()[self.combobox2.get_active()][0]]\n\t\tif not pattern:\tself.treeview1.set_model(self.model1)\t\t# switch used model since one supports sorting only\n\t\telse:\t\tself.treeview1.set_model(self.modelfilter1)\t# and the other filtering only - none of them both\r\n\t\tself.treeview1.set_search_column(self.search_colid)\t\t# re-enable searching in 'URL' column\n\t\tself.modelfilter1.refilter()\t\t\t\t\t# apply filter conditions\n\t\tself.statusbar1.push(0, \"Other filter selected.\")", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def actualizar_poderes(self, event):\r\n self.usain_nebolt.setChecked(event[0])\r\n self.limpiessa.setChecked(event[1])\r\n self.sin_rastro.setChecked(event[2])\r\n self.cervessa.setChecked(event[3])\r\n self.del_trio.setChecked(event[4])\r\n self.nebcoins.setChecked(event[5])", "def _check_state(self):\n if (self.stock_checker.isChecked() or self.future_checker.isChecked()) and self.name.buddy.text():\n self.btn_ok.setEnabled(True)\n self.btn_ok.setDefault(True)\n else:\n self.btn_ok.setEnabled(False)", "def addCheck(self,text):\n grid = self.layout()\n nr,nc = grid.rowCount(),grid.columnCount()\n check = QtGui.QCheckBox(text)\n grid.addWidget(check,nr,1)\n return check", "def printToTerminal(self, state):\n\n sender = self.sender()\n if state == Qt.Checked:\n print(\"{} Selecionado.\".format(sender.text()))\n else:\n print(\"{} Recusado.\".format(sender.text()))", "def kinnectTB1Checked(self, state):\n if state == QtCore.Qt.Checked:\n print('Show TB1 Kinnect Selected')\n # # release video capture\n # self.cap = cv2.VideoCapture(0)\n # # read image in BGR format\n # ret, img = self.cap.read()\n # image = QtGui.QImage(img, img.shape[1], img.shape[0],\n # img.shape[1] * img.shape[2],\n # QtGui.QImage.Format_RGB888)\n # pixmap = QtGui.QPixmap()\n # 
pixmap.convertFromImage(image.rgbSwapped())\n # self.simulationWidget.setPixmap(pixmap)\n else:\n print('Hide Kinnect TB1 Unchecked')\n # self.cap.release()", "def on_checkBox_kongtouping_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def onItemChanged(item=None):\n if item:\n selector.blockSignals(True)\n if item.data(50) == \"Unchecked\":\n item.setCheckState(QtCore.Qt.CheckState(1))\n item.setData(50, \"Partially\")\n elif item.data(50) == \"Partially\":\n item.setCheckState(QtCore.Qt.CheckState(2))\n item.setData(50, \"Checked\")\n else:\n item.setCheckState(QtCore.Qt.CheckState(0))\n item.setData(50, \"Unchecked\")\n selector.blockSignals(False)\n enabled = []\n partially = []\n unchecked = []\n for index in range(selector.count()):\n if selector.item(index).checkState() == QtCore.Qt.Checked:\n enabled.append(selector.item(index).data(32))\n elif (selector.item(index).checkState() ==\n QtCore.Qt.PartiallyChecked):\n partially.append(selector.item(index).data(32))\n else:\n unchecked.append(selector.item(index).data(32))\n p.SetString(\"Enabled\", \",\".join(enabled))\n p.SetString(\"Partially\", \",\".join(partially))\n p.SetString(\"Unchecked\", \",\".join(unchecked))\n onWorkbenchActivated()", "def on_geometry_check(self, geometry_item):\n # don't bother for non-checked events\n if not geometry_item.has_checked_changed():\n return\n # leave checked while search is running\n if self.is_searching():\n geometry_item.setCheckState(Qt.Checked)\n return\n # keep to the same if exporting\n if self.is_exporting():\n if geometry_item.is_checked:\n geometry_item.setCheckState(Qt.Checked)\n else:\n geometry_item.setCheckState(Qt.Unchecked)\n return\n is_checked = geometry_item.current_state()\n for key, type_entry in geometry_item.type_entries.iteritems():\n if is_checked:\n type_entry.enable_geometry(geometry_item.title, self.sources)\n else:\n type_entry.disable_geometry(geometry_item.title, self.sources)\n geometry_item.update_checked()", "def on_filter_settings_triggered(self):\n\n FilterSettings(self, self.control).show()", "def on_action_clicked(self, content):\n checked = content['checked']\n self.set_guarded(checked=checked)\n self.clicked(checked)", "def getResult(self):\n self.show(modal=True)\n self.exec_()\n b = self.clickedButton()\n if not b: # b == 0 or b is None\n b = self.defaultButton()\n if b:\n res = str(b.text())\n else:\n res = ''\n if self.checks:\n return res,[c.isChecked() for c in self.checks]\n else:\n return res", "def paint(self, painter, option, index):\n if self.verbose: print('myCheckBoxDelegate.paint()')\n #print(' option:', option, 'index:', index)\n #print(' index.data():', type(index.data()), index.data())\n #HasCheckIndicator = QtWidget.QStyleOptionViewItem.HasCheckIndicator\n # options.HasCheckIndicator returns hex 4, value of enum\n # how do i query it?\n #print(' ', option.ViewItemFeatures().HasCheckIndicator) # returns PyQt5.QtWidgets.QStyleOptionViewItem.ViewItemFeature\n #print(' ', option.features)\n #print(' ', index.data(QtCore.Qt.CheckStateRole) )\n #state = index.data(QtCore.Qt.CheckStateRole)\n #print(' state:', state, 'option.HasCheckIndicator:', option.HasCheckIndicator)\n self.drawCheck(painter, option, option.rect, QtCore.Qt.Unchecked if int(index.data()) == 0 else QtCore.Qt.Checked)", "def filter(self, *args, **kwargs):", "def cobroNC(self):\n if self.total_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El saldo restante a pagar es cero\")\n else:\n self.rbtnNC.setChecked(True)\n 
totalFactura = self.total_a_pagar\n numero,ok = QtGui.QInputDialog.getText(self,\"Cobro c/Nota de Crédito\",\"Ingrese número de Nota de Crédito\")\n if ok:\n notaCredito = NotaCreditoModel.getNotaCredito(self.padre.sesion,int(numero))\n if notaCredito == None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Nota de Crédito ingresada no existe\")\n elif notaCredito.getTotal(self.padre.sesion) < totalFactura:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto de la Nota de Credito es insuficiente\")\n elif notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero) < totalFactura:\n dif = notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero)\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Nota solo posee $\" + str(dif))\n else:\n temp = [\"Nota de Crédito\",self.total_a_pagar,notaCredito.numero]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar = 0\n self.actualizar_total()\n self.actualizar_tabla()", "def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)", "def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)", "def on_CheckPunish_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def on_pushButton_clicked(self):\n# if self.radioButton_3.isChecked():\n# print(\"恭喜你,答对了\")\n# else:\n# print(\"错误,请再次尝试\")\n\n msgBox = QMessageBox()\n msgBox.setWindowTitle(u'答案')\n msgBox.setText(u\"\\n正确答案为:\\n C\")\n #msgBox.setWindowIcon(QtGui.QIcon(r':/0102.png'))\n #隐藏ok按钮\n msgBox.addButton(QMessageBox.Ok)\n msgBox.button(QMessageBox.Ok).hide()\n #模态对话框\n msgBox.exec_()", "def aoi_button_clicked(self):\n # can't run search during export\n if self.is_exporting():\n self.iface.messageBar().pushMessage(\"Error\", \"Cannot run search while export is running.\",\n level=QgsMessageBar.CRITICAL)\n # can't run multiple search\n elif self.is_searching():\n self.iface.messageBar().pushMessage(\"Error\", \"Cannot run a new search while a search is running.\",\n level=QgsMessageBar.CRITICAL)\n else:\n self.bbox_tool.reset()\n self.iface.mapCanvas().setMapTool(self.bbox_tool)", "def checkbox(self, label, initial=False, handler=None, **kwargs):\n handler = self._changed_handler(handler)\n cb = wx.CheckBox(self, label=label)\n #cb.span = 2\n cb.SetValue(initial)\n cb.Bind(wx.EVT_CHECKBOX, handler)\n self.pack(\"\", cb, **kwargs)\n return cb", "def combobox_change(self):\n full_name = self.CB_employee.currentText()\n # if the selected item is not empty, gets the name and assigns the id to the class. Also sets the checkbox to DB value\n if full_name != \"\":\n first_name, last_name = full_name.split()\n self.employee_id, enabled = self.ms.c.execute(\"SELECT ID, enabled FROM employees WHERE first_name = ? 
and last_name = ?\",(first_name, last_name)).fetchone()[0:2]\n self.LE_first_name.setText(first_name)\n self.LE_last_name.setText(last_name)\n if enabled:\n self.CHB_active.setChecked(True)\n else:\n self.CHB_active.setChecked(False)\n # if none is selected erase all fields\n else:\n self.LE_first_name.setText(\"\")\n self.LE_last_name.setText(\"\")\n self.CHB_active.setChecked(False)\n self.employee_id = 0", "def filter_form_valid(self, filter_form):\n return True", "def check_state(self, *args, **kwargs):\n\n # TODO: Implement from\n # http://stackoverflow.com/questions/27159575/pyside-modifying-widget-colour-at-runtime-without-overwriting-stylesheet\n\n sender = self.sender()\n validator = sender.validator()\n state = validator.validate(sender.text(), 0)[0]\n if state == QtGui.QValidator.Acceptable:\n color = 'none' # normal background color\n elif state == QtGui.QValidator.Intermediate:\n color = '#fff79a' # yellow\n else:\n color = '#f6989d' # red\n sender.setStyleSheet('QLineEdit { background-color: %s }' % color)", "def _checkstate(self, queryset):\n for presup in queryset.filter(estado='borrador').exclude(fecha_realizado=None):\n if ((presup.fecha_realizado + timedelta(days=21) < datetime.now().date() and presup.tipo != 'asistencia') or\n (presup.fecha_realizado + timedelta(days=60) < datetime.now().date() and presup.tipo == 'asistencia')):\n presup._toState_cancelado(obs=\"Registro automático: Presupuesto vencido\")", "def _set_action_checkable(self, action, index):\n checkedindex = index.sibling(index.row(), self.checked_column)\n checkedflags = checkedindex.flags()\n action.setCheckable(checkedflags & QtCore.Qt.ItemIsUserCheckable)", "def __init__(self,name,value,*args,**kargs):\n if 'text' in kargs:\n text = kargs['text']\n else:\n text = str(name)\n kargs['text'] = '' # Force no label\n self.input = QtGui.QCheckBox(text)\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(value)\n self.layout().insertWidget(1,self.input)", "def buscarFactura(self):\n if not self.lineNumeroFac.isEnabled() and self.tableNC.rowCount() != 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Ya se ha seleccionado una factura\")\n elif not self.lineNumeroFac.isEnabled():\n self.lineNumeroFac.setEnabled(True)\n self.lineNumeroFac.clear()\n self.limpiarTabla(self.tableFactura)\n else:\n self.numeroFacturaActual=str(self.lineNumeroFac.text())\n if len(self.numeroFacturaActual)==0:\n self.showMsjEstado(\"No se ha ingresado numero de factura\")\n else:\n self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)\n if self.facturaSeleccionada==None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura seleccionada no existe\")\n elif self.facturaSeleccionada.getObra() != None and self.facturaSeleccionada.getObra() != self.obraSocial:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Obra Social seleccionada no corresponde con la factura\")\n elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():\n QtGui.QMessageBox.information(self,\"Aviso\",\"El tiempo permitido para el reintegro ha expirado\")\n elif self.facturaSeleccionada.estaLiquidada(self.sesion):\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura se encuentra liquidada a la Obra Social\")\n elif self.facturaSeleccionada.getNC()!=None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura ya posee una Nota de Crédito\")\n else:\n self.lineNumeroFac.setEnabled(False)\n if self.facturaSeleccionada.getObra() == None:\n 
self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.obraSocial, self.sesion),\n [\"producto\",\"cantidad\",\"importe\"])\n else:\n self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetallesSinDescuento(self.sesion),\n [\"producto\",\"cantidad\",\"importe\"])", "def ui_update(self):\n self.ctrls['refilter'].setEnabled(self.is_action_enabled)", "def on_searchEdit_textChanged(self, txt):\n self.searchButton.setEnabled(bool(txt))", "def change_clicked(self):\n # only runs if the CB is not empy\n if self.CB_employee.currentText() == \"\":\n standartbox(\"No employee selected!\", parent=self, devenvironment=self.devenvironment)\n else:\n # the method of the ms can interrupt it the entry is not correct, therefore it returns only true if it ran without only error\n runfill = self.ms.add_employee_clicked(self.LE_first_name, self.LE_last_name, update=True, id=self.employee_id, checkbox_object=self.CHB_active)\n if runfill:\n self.CB_employee.clear()\n self.LE_first_name.clear()\n self.LE_last_name.clear()\n self.fill_combobox()\n self.employee_id = 0\n self.ms.employee_id = 0\n self.ms.employee_name = \"\"\n self.ms.employee_first_name = \"\"\n self.ms.employee_last_name = \"\"", "def add_check_box(self, name, caption, value=False, label=None, add_indicator=None, location=(None,0)):\n widget=QtWidgets.QCheckBox(self)\n widget.setText(_translate(self.name,caption,None))\n widget.setObjectName(_fromUtf8(self.name+\"_\"+name))\n widget.setChecked(value)\n return self.add_simple_widget(name,widget,label=label,add_indicator=add_indicator,location=location)", "def buscarOs(self):\n\n if self.lineRazon.isEnabled():\n self.filtrarObra()\n\n elif not self.lineRazon.isEnabled() and (self.tableNC.rowCount() != 0 or self.tableFactura.rowCount() != 0):\n QtGui.QMessageBox.information(self,\"Aviso\",\"Imposible cambiar de Obra Social. Ya se ha seleccionado\\\n una\")\n else:\n self.gbNotaCredito.setEnabled(False)\n self.gbFactura.setEnabled(False)\n self.lineRazon.clear()\n self.lineRazon.setEnabled(True)\n self.lineCuit.clear()\n self.lineCuit.setEnabled(True)\n self.tableOs.setEnabled(True)", "def _onchange_filter_id(self):\n if self.filter_id:\n domain = safe_eval(self.filter_id.domain)\n domain += ['|', ('website_id', '=', None), ('website_id', '=', self.slider_id.website_id.id),\n ('website_published', '=', True)]\n product_count = self.env['product.template'].sudo().search_count(domain)\n if product_count < 1:\n self.filter_id = False\n raise UserError(_('Sorry! You can not set filter which is content zero product.'))", "def check_le_state(self, *args, **kwargs):\n\t\tsender = self.sender()\n\t\tvalidator = sender.validator()\n\t\tstate = validator.validate(sender.text(), 0)[0]\n\t\tif state == QtGui.QValidator.Acceptable:\n\t\t\tself.btn_next.setEnabled(True)\n\t\telif state == QtGui.QValidator.Intermediate:\n\t\t\tself.btn_next.setEnabled(False)\n\t\telse:\n\t\t\tself.btn_next.setEnabled(False)", "def on_radioButton_clicked(self):\r\n # TODO: not implemented yet\r" ]
[ "0.7090673", "0.6638057", "0.6402959", "0.6243776", "0.6092731", "0.59084094", "0.5837751", "0.5827371", "0.57131046", "0.5701995", "0.568451", "0.5675553", "0.5539249", "0.5522574", "0.55093926", "0.55083394", "0.550656", "0.55010986", "0.5493006", "0.5438975", "0.5406133", "0.5388676", "0.5388201", "0.53816104", "0.53458524", "0.5329506", "0.53159845", "0.5306973", "0.5300607", "0.5298228", "0.5295857", "0.5256925", "0.5250253", "0.5239314", "0.5233293", "0.5216293", "0.52162325", "0.51848763", "0.51609844", "0.5153425", "0.5148804", "0.51469916", "0.5138075", "0.5137483", "0.5131306", "0.5111742", "0.51044303", "0.50911313", "0.50812966", "0.50714135", "0.50581545", "0.50569415", "0.5054121", "0.5054084", "0.50495285", "0.5042583", "0.5040359", "0.5039688", "0.5034965", "0.5034965", "0.5034965", "0.5031064", "0.50159746", "0.50100243", "0.4997559", "0.49932215", "0.4985146", "0.49763846", "0.49707916", "0.4970342", "0.49651566", "0.49576014", "0.49560562", "0.49446592", "0.4942169", "0.4939161", "0.49361387", "0.49337706", "0.4931931", "0.49074328", "0.49074328", "0.48975047", "0.48972198", "0.48828778", "0.48768383", "0.4873207", "0.48721904", "0.48716998", "0.4869841", "0.48623553", "0.4846342", "0.48444286", "0.48380765", "0.4833319", "0.48330536", "0.48325434", "0.4832533", "0.48288137", "0.48177758", "0.48106208" ]
0.57840115
8
Loads package name from command line arguments.
def get_package_name():
	package = None
	try:
		package = os.environ.get('LOCAL_PART', '') + os.environ.get('LOCAL_PART_SUFFIX', '')
		if not package and len(sys.argv) > 1:
			package = sys.argv[-1].lower()
	except Exception,e:
		log.error(str(e))
	finally:
		return package
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_package_init(*args):\n\n if len(args) == 0:\n print 'No package name specified'\n return 1\n packname = args[0]\n\n # Setup build, install, and data directories\n util.ensure_dir_exists(package_path(packname))\n\n # Touch an empty config.py where the user can adjust settings\n config_py_file = open(package_path(packname, 'config.py'), 'w')\n config_py_file.close()\n\n return 0", "def get_package_init_file_name():\n return _PACKAGE_INIT_FILE + FILE_EXTENSION", "def package_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"package_name\")", "def find_name():\n name_file = read_file('__init__.py')\n name_match = re.search(r'^__package_name__ = [\"\\']([^\"\\']*)[\"\\']',\n name_file, re.M)\n if name_match:\n return name_match.group(1)\n raise RuntimeError('Unable to find name string.')", "def main(args):", "def main(args):", "def package(self) -> Optional[pulumi.Input['PackageArgs']]:\n return pulumi.get(self, \"package\")", "def initFromCLI(cls, name=None):\n ap = cls._makeArgumentParser()\n ns = ap.parse_args()\n instance = cls.initFromOptions(ns, name=name)\n return instance", "def main(args=None):", "def main(args=None):", "def FixArgFileName(fileName):\n import os\n\n path, fname = os.path.split(fileName)\n if len(path) == 0:\n path = os.curdir\n path = os.path.abspath(path)\n # must check that the command line arg's path is in sys.path\n for syspath in sys.path:\n if os.path.abspath(syspath) == path:\n break\n else:\n sys.path.append(path)\n return os.path.splitext(fname)[0]", "def load_args():\n parser = argparse.ArgumentParser(description=\"Classify and predict digits using the mnist dataset\")\n parser.add_argument('mode', help='the mode to run in: fit, model or predict')\n parser.add_argument('--algo', help='which algorithm to use: RandomForest, KNN')\n return parser.parse_args()", "def cli(ctx: click.Context) -> None:\n pkg = read_package_from_file(PACKAGE_PATH)\n ctx.obj = pkg", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def get_class_name_from_pkg_name(opts):\n pkg_name = opts[\"package\"]\n return \"\".join(map(str.capitalize, pkg_name.split(\"_\")))", "def init_package(self):\n package_name = self.args.name\n if package_name is None:\n msg = 'an package name must provide for enzi init'\n logging.error(msg)\n raise SystemExit(BASE_ESTRING + msg)\n\n initializer = ProjectInitialor(package_name)\n initializer.init()", "def get_package_name(x):\n return re.search(r\"^(\\w|-)*\", x).group()", "def main():\n licensify(_parse_args())", "def pkg_name() -> None:\n print(\"mypkg\")", "def load(args):\n subprocess.check_call([\"/bin/launchctl\", \"load\"] + values.get(args))", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n assert 0, \"'{0}' not found in '{1}'\".format(key, module_path)", "def run_from_argv(self, argv):\r\n self.progname = argv[0]\r\n super(Command, self).run_from_argv(argv)", "def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]", "def get_alert_source_module(alert_source_command_line_arg):\n\n return ALERT_SOURCE_MAPPING[alert_source_command_line_arg]", "def get_command_name(args):\n\n # First argument would always be atlas or manage.py, i.e the calling interface\n if len(args) < 2:\n CommandError.print_to_err(f\"Name of command missing. 
Valid commands are - {VALID_COMMANDS}\")\n\n return args[1]", "def package_name(self):", "def main(args=None):\n pass", "def entry_point() -> None:\n args = parse_args()\n print(hello(args.name))", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n assert False, \"'{0}' not found in '{1}'\".format(key, module_path)", "def getRegisteredPackageName(*args):\n return _libsbml.SBMLExtensionRegistry_getRegisteredPackageName(*args)", "def parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='ml-1m', help='which dataset to use')\n args = parser.parse_args()\n main(args)", "def main_argv():\n main_parse_args(sys.argv[1:])", "def main_argv():\n main_parse_args(sys.argv[1:])", "def get_cli_string():\n return os.path.basename(sys.argv[0]) + \" \" + \" \".join(sys.argv[1:])", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n raise KeyError(\"'{0}' not found in '{1}'\".format(key, module_path))", "def get_argument_module_name(arg, dim):\n return \"arg_%s_dim%s\" % (arg.name, dim)", "def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the library file (e.g. 'db')\")", "def get_package_name(pkg, rem):\n flavor = rem.os.package_type\n\n try:\n return _PACKAGE_MAP[pkg][flavor]\n except KeyError:\n return None", "def set_python_main(option, opt_str, value, parser):\n\tmain = (value, option.python_loader)\n\tsetattr(parser.values, option.dest, main)\n\t# only terminate parsing if not interspersing arguments\n\tif not parser.allow_interspersed_args:\n\t\tparser.rargs.insert(0, '--')", "def get_package_name(name):\n name = _strip_package_name(name)\n return name", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv[1:]", "def get_package_name():\n return try_get_project_property('packageName')", "def main() -> None:\n init(args=sys.argv[1:])", "def parse_arguments(args):", "def __init__(self, package_name, base_path):\n self.package_name = package_name\n self.base_path = base_path", "def _get_arg_name(self, arg, variable_name):", "def name(self):\n\t\treturn self.args[0]", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def get_args_from_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--country_code\", type=str, default='US')\n args = parser.parse_args()\n return args", "def parse_cmdline(args):\n usage = \"usage: %prog [options] <name> <snpfile> <human asm build No> \" +\\\n \"<database>\"\n parser = OptionParser(usage)\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\",\n action=\"store_true\", default=False,\n help=\"Give verbose output\")\n return parser.parse_args()", "def get_package_name(self):\n return self.name + '-' + self.version", "def package_name(self):\n return self._package_name", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv", "def hotfix_deepobs_argparse():\n sys.argv = sys.argv[:1]", "def arg_name(name):\n return \"--\" + name.replace('_', '-')", "def 
_parse_module_name(program_param):\n if program_param and program_param.endswith(\".py\"):\n return program_param[:-3]\n return program_param", "def main():\n args = parse_args()\n process_args(args)", "def interpret_argument(worker, name):\n if os.path.isdir(name):\n return _discover_enclosing_packages(name, [])\n\n if os.path.isfile(name):\n base, extension = os.path.splitext(name)\n if extension != '.py':\n print('Error - test file lacks .py extension: {0}'.format(name))\n return\n directory, name = os.path.split(base)\n return _discover_enclosing_packages(directory, [name])\n\n with worker:\n worker.call(import_modules, [name])\n module_paths = dict(worker.call(list_module_paths))\n if name in module_paths:\n return None, name\n\n print('Error - can neither open nor import: {0}'.format(name))\n exit(1)", "def load_processor(name: str):\n matches = [\n ep\n for ep in pkg_resources.iter_entry_points(ENTRY_POINT_NAME)\n if ep.name == name\n ]\n if not matches:\n raise ValueError(\n \"entry point '{}' for group '{}' not found\".format(name, ENTRY_POINT_NAME)\n )\n return matches[0].load()", "def import_main(name):\n config.MAIN_MODULE_NAME = name\n return importlib.import_module(name)", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def main(argv):\n parsed = parse_args(argv)\n instream = sys.stdin\n name = parsed.name\n if parsed.input_file != \"-\":\n instream = open(parsed.input_file, 'r')\n name = parsed.input_file.split('.')[1]\n print pfm_as_meme_str(parse_scer_pfm(instream, handle_passed=True), name)", "def load(cls, name):\n try:\n return importlib.import_module(cls._plugins[name])\n except Exception as err:\n print(\"** could not load command [%s]:\\n%s\" % (name, err))", "def parse_command_line(argv):\n import argparse\n parser = argparse.ArgumentParser(\n description=\"\"\"\\\nShow information about a list of scales in Neuroglancer \"info\" JSON file format\n\"\"\")\n parser.add_argument(\"info_file\", nargs=\"?\", default=\"./info\",\n help=\"JSON file containing the information\")\n args = parser.parse_args(argv[1:])\n return args", "def get_component_name(arguments, environ, shell_runner):\n arg = arguments.get('--component-name')\n if arg is not None:\n return arg\n env_var = environ.get('COMPONENT_NAME', None)\n if env_var is not None:\n return env_var\n return get_component_name_from_git(shell_runner)", "def __init__(self, name, progname):\n self.name = name\n self.progname = progname\n self.progargs = \"\"", "def main():\n\n # get set shortnames from input\n sets = [i.lower() for i in sys.argv[1:]]\n\n # populate sets by shortname\n populate(sets)", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def find_program(name):\r\n return name", "def cli(args): # noqa; pylint: disable=unused-argument", "def _compute_program_name():\n program_path = os.path.abspath(sys.argv[0])\n if os.path.exists(program_path):\n return os.path.basename(program_path)\n else:\n match = 
re.match(r\"^.*(?:\\.egg|\\.tar|\\.tar\\.gz)(?=/)\", program_path, re.IGNORECASE)\n if (match is not None) and os.path.exists(match.group(0)):\n # python script is embedded in egg\n return os.path.basename(program_path)\n else:\n return \"unknown\"", "def nameCommand(*args, annotation: AnyStr=\"\", command: Script=None, data1: AnyStr=\"\", data2:\n AnyStr=\"\", data3: AnyStr=\"\", default: bool=True, sourceType: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def arg_parse(args):\r\n path = find_arg('-file', args)\r\n names = args\r\n if path is not None:\r\n names.remove('-file')\r\n names.remove(path)\r\n if len(names) is 0:\r\n names = ['Player']\r\n return names, path", "def load_cmdline_args(subp, parents=[]):\n extra_p = subp.add_parser('extra', help='Additional algorithms and tools')\n extra_sub = extra_p.add_subparsers()\n\n for importer, modname, ispkg in pkgutil.iter_modules(__path__,\n prefix=__name__ + '.'):\n\n logger.debug(\"Found submodule {0} (is a package: {1})\"\n .format(modname, ispkg))\n\n try:\n mod = importlib.import_module(modname, 'inettopology.asmap.extra')\n\n except ImportError as e:\n logger .warn(\"Unable to import '{0}'. [Error: {1}]\"\n .format(modname, e.message))\n\n else:\n if callable(mod.__argparse__):\n mod.__argparse__(extra_sub, parents)", "def parseArguments():\n desc = \"Program that writes a pkl for the topology object.\"\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument(\"path\", type=str, help=\"Path where the topology files are stored\")\n args = parser.parse_args()\n\n return args.path", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the charm or bundle\")", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the charm or bundle\")", "def _package_root(name):\n return name.split('.', 1)[0]", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names", "def main(self, args=None):\n self._configure_parser()\n argcomplete.autocomplete(self.parser)\n args = self.parser.parse_args(args)\n logwood.basic_config(\n format='%(timestamp).6f %(level)-5s %(name)s: %(message)s',\n level=self._get_loglevel(args),\n )\n\n if not args.command:\n self.parser.print_usage()\n sys.exit(1)\n else:\n command = self._commands[args.command]\n if command.needs_app:\n app = Application()\n self.register_application_components(args, app)\n command.__call__(args, app)\n else:\n command.__call__(args)", "def name_option(args, run):\n run.experiment_info[\"name\"] = args\n run.run_logger = run.root_logger.getChild(args)", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Text]:", "def package(self):\n if self.method == 'buildNotification':\n return self.params[1]['name']\n if self.method in ('createImage', 'image', 'livecd'):\n return self.params[0]\n if self.method == 'indirectionimage':\n return self.params[0]['name']\n # params[0] is the source URL for these tasks:\n if self.method not in ('build', 'buildArch', 'buildContainer',\n 'buildMaven', 'buildSRPMFromSCM', 'maven'):\n return None\n # (I wish there was a better way to do this.)\n source = self.params[0]\n o = urlparse(source)\n # build tasks can load an SRPM from a \"cli-build\" tmpdir:\n if source.endswith('.src.rpm'):\n srpm = os.path.basename(source)\n (name, version, release) = srpm.rsplit('-', 2)\n # Note we're throwing away version and release here. They could be\n # useful eventually, maybe in a \"Package\" class.\n return name\n # or an allowed SCM:\n elif o.scheme:\n package = os.path.basename(o.path)\n if package.endswith('.git'):\n package = package[:-4]\n if self.method == 'buildContainer':\n package += '-container'\n return package\n raise ValueError('could not parse source \"%s\"' % source)", "def get_plugin_name(filename):\n m = PLUGIN_PACKAGE_RE.search(filename or '')\n if m:\n return m.group(1)\n else:\n return None", "def load():\n out = load_as_root_module()\n parser = create_parser(os.path.basename(sys.argv[0]))\n opts = parser.parse_args(sys.argv[1:])\n load_env(opts, out.opt)\n\n return out", "def SBMLExtensionRegistry_getRegisteredPackageName(*args):\n return _libsbml.SBMLExtensionRegistry_getRegisteredPackageName(*args)", "def command_line():\n version = ' '.join([__version__, __build__])\n parser = ArgumentParser(\n prog='moniker',\n description='Simple batch file renaming tool.',\n )\n parser.add_argument(\n '-v', '--version', action='version',\n version=\"%s v%s\" % (basename(sys.argv[0]), version)\n )\n parser.add_argument(\n '--depth',\n type=int,\n default=0,\n metavar='depth',\n help='Tiers of file heiarcy explored',\n )\n parser.add_argument(\n '--replace',\n nargs=2,\n default=('', ''),\n metavar='replace',\n help='glob pattern to match'\n )\n parser.add_argument(\n 'directory',\n default='.',\n help='target directory root',\n )\n return parser", "def main():\r\n if len(sys.argv) == 2:\r\n if sys.argv[1] == 'branch_name':\r\n print branch_name()\r\n elif sys.argv[1] == 'plat_id':\r\n print plat_id()\r\n else:\r\n print plat_id()\r\n print branch_name()\r\n return", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def __init__(self, *args):\n self.env = os.environ.copy()\n \"\"\"Environment variables (:class:`dict`)\"\"\"\n command = \"modulecmd python \"+' '.join(args)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n retval = p.communicate()\n self._parse(retval)", "def __init__(self, packagename):\n self.packagename = packagename\n self._getversion()", "def import_item(name):\n if sys.version_info < (3,):\n if not isinstance(name, bytes):\n name = name.encode()\n parts = name.rsplit('.', 1)\n if len(parts) == 2:\n # called 
with 'foo.bar....'\n package, obj = parts\n module = __import__(package, fromlist=[obj])\n try:\n pak = getattr(module, obj)\n except AttributeError:\n raise ImportError('No module named %s' % obj)\n return pak\n else:\n # called with un-dotted string\n return __import__(parts[0])", "def pypi_package(self, file):\n\n file = os.path.basename(file)\n match = RE_PACKAGE_NAME.search(file)\n if not match:\n raise RuntimeError('[ERROR] Invalid package name, %s' & file)\n split = (match.group(\"pkg\"), match.group(\"rest\"))\n to_safe_name = pkg_resources.safe_name\n\n if len(split) != 2 or not split[1]:\n raise RuntimeError('[ERROR] Invalid package name, %s' & file)\n\n return to_safe_name(split[0]).lower()", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def initialise(self, args, environ):", "def get_word():\n return ' '.join(sys.argv[1:])", "def load_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batchsize\",\n help=\"the batch size of the dataloader\",\n type=int,\n default=2,\n required=False)\n parser.add_argument(\"--validsize\",\n help=\"percentage that training set split into validation set\",\n type=float,\n default=1/6,\n required=False)\n parser.add_argument(\"--input-prefix\",\n help=\"file for saving the data (.gz file)\",\n type=str,\n default=\"../mnistPC\",\n required=False)\n parser.add_argument(\"--demo\",\n help=\"if demo is true, then only load small number of images\",\n type=bool,\n default=False,\n required=False)\n parser.add_argument(\"--test\",\n help=\"if test is true, then load data from test dataset\",\n type=bool,\n default=False,\n required=False)\n parser.add_argument(\"--epochs\",\n help=\"number of epochs\",\n type=int,\n default=5,\n required=False)\n parser.add_argument(\"--learning_rate\",\n help=\"learning rate of the model\",\n type=float,\n default=1e-2,\n required=False)\n parser.add_argument(\"--num_classes\",\n help=\"number of classes for classification\",\n type=int,\n default=10,\n required=False)\n parser.add_argument(\"--num_neighbors\",\n help=\"num of neighbors for the network\",\n type=int,\n default=15,\n required=False)\n parser.add_argument(\"--cudas\",\n help=\"cuda numbera to use, if use cpu, please enter -1\",\n type=str,\n default=\"0/1/2/3\",\n required=False)\n args = parser.parse_args()\n return args" ]
[ "0.56937206", "0.5565932", "0.5559221", "0.555798", "0.5511743", "0.5511743", "0.549686", "0.54937357", "0.5469482", "0.5469482", "0.54630214", "0.5454337", "0.54044306", "0.5401598", "0.539499", "0.539345", "0.53648907", "0.5363433", "0.5363248", "0.5354271", "0.53508025", "0.5331103", "0.53309596", "0.53285885", "0.53205895", "0.5302353", "0.5293142", "0.5293068", "0.5267956", "0.52528286", "0.5219619", "0.52099484", "0.52099484", "0.51882607", "0.5186254", "0.5167949", "0.51674557", "0.5166698", "0.5156776", "0.5141017", "0.5122469", "0.51214594", "0.5120438", "0.51201904", "0.5112987", "0.51009506", "0.50965023", "0.5093989", "0.5093937", "0.50771564", "0.50765777", "0.50765383", "0.5061237", "0.50601673", "0.50524586", "0.504936", "0.50361633", "0.5032261", "0.50301605", "0.5019144", "0.5015788", "0.50075847", "0.4995892", "0.4994049", "0.49903038", "0.49870777", "0.49826014", "0.49681962", "0.49648398", "0.4938789", "0.4933452", "0.49332616", "0.49327922", "0.49288526", "0.49175575", "0.4915223", "0.4909698", "0.49067178", "0.49067178", "0.4899993", "0.4895731", "0.48861068", "0.48803055", "0.4868567", "0.48570263", "0.48504144", "0.48426574", "0.48380417", "0.48349535", "0.48343962", "0.4831523", "0.4822681", "0.48142236", "0.4814001", "0.48096347", "0.47956806", "0.4787002", "0.4786001", "0.47803143", "0.4779317" ]
0.6878033
0
Returns keyword (from supplied package name, if any).
def get_keyword(package):
	try:
		substr = re.search(r'(\S+)_(\S+)', package)
		if substr:
			return substr.groups()
	except Exception,e:
		log.error(str(e))
	return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readable_keyword(s):\n if s and not s.startswith(\"*\") and not s.startswith(\"[\"):\n if s.count(\".\"):\n library, name = s.rsplit(\".\", 1)\n return library + \".\" + name[0:].title()\n else:\n return s\n else:\n return s", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n assert 0, \"'{0}' not found in '{1}'\".format(key, module_path)", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n assert False, \"'{0}' not found in '{1}'\".format(key, module_path)", "def get_package_name(x):\n return re.search(r\"^(\\w|-)*\", x).group()", "def package(self, pkg_name):\n return self._pkgs[pkg_name]", "def package_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"package_name\")", "def get_product_name(self, package_name):\n return package_name", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n raise KeyError(\"'{0}' not found in '{1}'\".format(key, module_path))", "def search_package(package, satellite_connection, satellite_connection_auth):\n package_sat_result = satellite_connection.packages.search.name(satellite_connection_auth, package)\n return package_sat_result", "def get_package_info(package_name):\n r = requests.get(f'https://api.npms.io/v2/search?q={package_name}&size=1')\n response_json = r.json()\n\n if 'results' in response_json:\n result = response_json['results'][0]\n return result['package']", "def find_package(self, package_title):\n metadata = self.get_ckan_metadata()\n results = []\n for id, resource in metadata.items():\n if resource['dataset']['title'] == package_title:\n results.append(resource['dataset'])\n return results[0] if len(results) == 1 else results", "def get_package_name(item: str) -> Union[str, None]:\n return remove_prefix(item, PackageInfoPrefix.PACKAGE)", "def get_package_name(name):\n name = _strip_package_name(name)\n return name", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def package_name(self):", "def get_keyword(arg: str, keyword_set: set) -> Keyword or None:\n arg = arg.lower().lstrip('-')\n for keyword in keyword_set:\n if arg == keyword.keyword or arg in keyword.aliases:\n return keyword\n\n return None", "def get_package_name(pkg, rem):\n flavor = rem.os.package_type\n\n try:\n return _PACKAGE_MAP[pkg][flavor]\n except KeyError:\n return None", "def pkg_name() -> None:\n print(\"mypkg\")", "def keyword(self, keyword):\r\n return keywords.Keyword(self, keyword)", "def get_package_name():\n return try_get_project_property('packageName')", "def package_name(self):\n return self._package_name", "def GetKeyword(self,prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetKeyword(prompt)", "def find_pkg(self, pkg):\n pass", "def get_package_prefix(package_name):\n # This regex checks for a valid package name as defined by REP-127 including the recommended\n # exemptions. 
See https://ros.org/reps/rep-0127.html#name\n if re.fullmatch('[a-zA-Z0-9][a-zA-Z0-9_-]+', package_name, re.ASCII) is None:\n raise ValueError(\n \"'{}' is not a valid package name\".format(package_name))\n try:\n content, package_prefix = get_resource('packages', package_name)\n except LookupError:\n raise PackageNotFoundError(\n \"package '{}' not found, searching: {}\".format(package_name, get_search_paths()))\n return package_prefix", "def package(self) -> Optional[pulumi.Input['PackageArgs']]:\n return pulumi.get(self, \"package\")", "def get_package_name():\n\tpackage = None\n\ttry:\n\t\tpackage = os.environ.get('LOCAL_PART', '') + os.environ.get('LOCAL_PART_SUFFIX', '') \n\t\tif not package and len(sys.argv) > 1:\n\t\t\tpackage = sys.argv[-1].lower()\n\texcept Exception,e:\n\t\tlog.error(str(e))\n\tfinally:\n\t\treturn package", "def package_name(string):\n return 'USymbol' + convert_name(string, False)", "def _get_java_package_name(java_ast: CompilationUnit) -> Optional[str]:\n package: Optional[PackageDeclaration] = java_ast.package\n\n return package.name if package else None", "def get_package(self, __package_id):\n raise NotImplementedError", "def get_package(self, mf_module, name):\n module_data = self.get_module(mf_module)\n\n try:\n return module_data[name]\n except KeyError:\n raise KeyError(f'package {name} not found in module {mf_module}.')", "def find_standard_package(self, pkgname):\n\n try:\n if pkgname == 'antigravity':\n return ()\n result = find_module(pkgname)\n return result\n except ImportError:\n return ()", "def __get_package_name_by_understand_query(self, class_longname: str = None):\n package_entity, package_longname = UnderstandUtility.get_package_of_given_class_2(self.db, class_longname)\n # print(package_entity.longname())\n\n return package_longname", "def query(package:str, path:str=None):\r\n data = read_package_info(package, path)\r\n logging.info(\"{0} - v{1}\".format(data[\"name\"], data['version']))", "def search_closest_nuget_package_name(query):\n url_query = f\"https://azuresearch-usnc.nuget.org/autocomplete?q={query}\"\n query_response = requests.get(url_query).json()\n data = query_response.get(\"data\")\n if data:\n return data[0]", "def keyword_search(keywords):\n try:\n return itunespy.search(keywords)[0]\n except LookupError:\n return None", "def get_spynl_package(name, packages=None):\n if packages is None:\n packages = get_spynl_packages()\n return next(filter(lambda p: p.project_name == name, packages), None)", "def getSpecific(self, keyword, key):", "def get_package(package, create=False):\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(name=package)[0]\n else:\n try:\n package = Package.objects.get(name=package)\n except Package.DoesNotExist:\n package = None\n return package", "def _package(module):\n return (\n module.__name__ if module.__package__ is None else module.__package__\n )", "def _find(self, keyword):\n for tag in self.meta.findall(CN('meta:keyword')):\n if keyword == tag.text:\n return tag\n return None", "def package_name(self) -> str:\n return self.proto.docid", "def spdx_package(self) -> Optional[pulumi.Input['PackageInfoNoteArgs']]:\n return pulumi.get(self, \"spdx_package\")", "def find_name():\n name_file = read_file('__init__.py')\n name_match = re.search(r'^__package_name__ = [\"\\']([^\"\\']*)[\"\\']',\n name_file, re.M)\n if name_match:\n return name_match.group(1)\n raise RuntimeError('Unable to find name string.')", "def get_class_name_from_pkg_name(opts):\n 
pkg_name = opts[\"package\"]\n return \"\".join(map(str.capitalize, pkg_name.split(\"_\")))", "def __getattr__(self, key: str) -> Any:\n return getattr(self._package, key)", "def _package_root(name):\n return name.split('.', 1)[0]", "def getPackageName():\n return _libsbml.MultiExtension_getPackageName()", "def cli(env, keyword, package_type):\n manager = ordering.OrderingManager(env.client)\n table = formatting.Table(COLUMNS)\n\n _filter = {'type': {'keyName': {'operation': '!= BLUEMIX_SERVICE'}}}\n if keyword:\n _filter['name'] = {'operation': '*= %s' % keyword}\n if package_type:\n _filter['type'] = {'keyName': {'operation': package_type}}\n\n packages = manager.list_packages(filter=_filter)\n\n for package in packages:\n table.add_row([\n package['id'],\n package['name'],\n package['keyName'],\n package['type']['keyName']\n ])\n env.fout(table)", "def run_keyword(name, *args):\n BuiltIn().run_keyword(name, *args)", "def get_wheel_package_name(provider_package_id: str) -> str:\n return \"apache_airflow_providers_\" + provider_package_id.replace(\".\", \"_\")", "def name_python_package(self) -> str:\n return f'ba{self.name_compact}'", "def GetKeywordPlanAdGroupKeyword(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __getitem__(self, package):\n\n\t\treturn self._packages.setdefault(\n\t\t\tpackage,\n\t\t\tPackage(package)\n\t\t)", "def get_package(publisher_name, package_name):\n try:\n instance = Package.query.join(Publisher) \\\n .filter(Package.name == package_name,\n Publisher.name == publisher_name).first()\n return instance\n except Exception as e:\n app.logger.error(e)\n return None", "def pypi_package(self, file):\n\n file = os.path.basename(file)\n match = RE_PACKAGE_NAME.search(file)\n if not match:\n raise RuntimeError('[ERROR] Invalid package name, %s' & file)\n split = (match.group(\"pkg\"), match.group(\"rest\"))\n to_safe_name = pkg_resources.safe_name\n\n if len(split) != 2 or not split[1]:\n raise RuntimeError('[ERROR] Invalid package name, %s' & file)\n\n return to_safe_name(split[0]).lower()", "def getKeyword(self, key):\n try:\n return self.raw[0].header[key]\n except:\n return self.raw[0].header['HIERARCH ESO '+key]", "def translate_keyword(keyword):\n define = {\n '运动类型': 'Sport Type',\n '性别': 'Sex',\n '颜色': 'Color',\n '鞋透气指数': 'Shoes breathability index',\n '鞋软硬指数': 'Shoe soft and hard index',\n }\n if keyword in define:\n return define[keyword]\n else:\n return keyword", "def _retreive_package_from_pypi(package_name: str) -> dict:\n\n api_url = f\"https://pypi.org/pypi/{package_name}/json\"\n response = requests.get(api_url)\n\n #\n # handle response\n #\n\n response.raise_for_status()\n json_response = response.json()\n return json_response", "def _hcss_fits_keyword(header, keyword, *args):\n if len(args) > 1:\n raise ValueError('Invalid number of arguments.')\n for k in header.keys():\n if not k.startswith('key.'):\n continue\n if header[k] == keyword:\n return header[k[4:]]\n if len(args) == 1:\n return args[0]\n raise KeyError(\"Keyword '{0}' not found.\".format(keyword))", "def get_package(cls, name: str):\n pkg = Package(name=name)\n return pkg", "def get_package(package, create=False):\n index = PackageIndex.objects.first()\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(index=index, name=package)[0]\n else:\n try:\n package = Package.objects.get(index=index, name=package)\n 
except Package.DoesNotExist:\n package = None\n return package", "def make_key_from_name(datablock):\r\n key = datablock.name\r\n if hasattr(datablock, \"type\"):\r\n key += datablock.type\r\n if hasattr(datablock, \"data\") and hasattr(datablock.data, \"type\"):\r\n key += datablock.data.type\r\n if datablock.library:\r\n key += datablock.library.name\r\n return key", "def pkg(klass, name):\n raise NotImplementedError", "def find_package(xp, **kwargs):\n path = '/search/package'\n if 'schema' not in kwargs:\n kwargs['schema'] = PackageCollection.SCHEMA\n tag_class = {'collection': PackageCollection, 'package': ROPackage}\n return _find(path, xp, tag_class, **kwargs)", "def get_package_name(self):\n return self.name + '-' + self.version", "def getPackageName():\n return _libsbml.FbcExtension_getPackageName()", "def get_keyword(self, collection_id, name):\n sql = \"\"\"SELECT keyword.name, keyword.args, keyword.doc\n FROM keyword_table as keyword\n WHERE keyword.collection_id == ?\n AND keyword.name like ?\n \"\"\"\n cursor = self._execute(sql, (collection_id,name))\n # We're going to assume no library has duplicate keywords\n # While that in theory _could_ happen, it never _should_,\n # and you get what you deserve if it does.\n row = cursor.fetchone()\n if row is not None:\n return {\"name\": row[0],\n \"args\": json.loads(row[1]),\n \"doc\": row[2],\n \"collection_id\": collection_id\n }\n return {}", "def getPackageName(self):\n return _libsbml.MultiPkgNamespaces_getPackageName(self)", "def get_info(self, pkgname):\n for pkg in self.rpc.info(pkgname):\n return pkg", "def getPackageName():\n return _libsbml.QualExtension_getPackageName()", "def getPackageName():\n return _libsbml.LayoutExtension_getPackageName()", "def key_from_req(req):\n if hasattr(req, \"key\"):\n # from pkg_resources, such as installed dists for pip-sync\n key = req.key\n else:\n # from packaging, such as install requirements from requirements.txt\n key = req.name\n\n key = key.replace(\"_\", \"-\").lower()\n return key", "def getpackinfo(package_name: str) -> Dict[str, str]:\n\t# execute command\n\tproc = subprocess.Popen(['pip', 'show', package_name], stdout=subprocess.PIPE)\n\tproc.wait()\n\n\t# get and treate return\n\tlines = proc.stdout.read().decode('utf8')\n\tinfo = {}\n\tlines = list(map(lambda inf: inf.split(': '), lines.split('\\n')))\n\n\t# process retun\n\tfor line in lines:\n\t\tkey = line[0].lower()\n\t\tif not not key and len(key) > 0:\n\t\t\tvalue = line[1]\n\t\t\tif key == 'name':\n\t\t\t\tinfo[key] = value.lower()\n\t\t\telif key == 'requires':\n\t\t\t\tinfo[key] = list(map(lambda x: x.strip(), value.lower().split(','))) if value else []\n\t\t\telif key == 'required-by':\n\t\t\t\tinfo[key] = list(map(lambda x: x.strip(), value.lower().split(','))) if value else []\n\t\t\telse:\n\t\t\t\tinfo[key] = value\n\n\treturn info", "def type(self):\n return 'package'", "def _default_target(package):\n return package[package.rfind('/')+1:]", "def this_is_a_keyword(arg1):\r\n print(arg1)\r\n return 'Whatever'", "def getPackageName(self):\n return _libsbml.SBase_getPackageName(self)", "def getPackageName():\n return _libsbml.CompExtension_getPackageName()", "def cached_dm_find_fits_keyword(key):\n return MODEL.find_fits_keyword(key.upper(), return_result=True)", "def _normalize_package_name(self, name):\n return Prepared.normalize(name)", "def getPackageName(self):\n return _libsbml.ASTBasePlugin_getPackageName(self)", "def get_pkg(pkg_name):\n pkg = Database().db.get_pkg_details(pkg_name, \"\", False)\n pkg = 
PackageDetail.surClass(pkg)\n print('dir: ', dir(pkg))\n \n #print('name:', pkg.nane)\n #print('props.name:', pkg.props.nane)\n return render_template(\"pkg.html\", \n title=\" - \"+pkg_name,\n repos=Database().db.get_repos_names(),\n pkg=pkg)", "def get_name_with_lib(datablock):\r\n text = datablock.name\r\n if datablock.library:\r\n # text += ' (Lib: \"%s\")' % datablock.library.name\r\n text = \"L \" + text\r\n return text", "def get_closest_nuget_package_name(query):\n url_nuget_service = \"https://api.nuget.org/v3/index.json\"\n url_nuget_search = \"\"\n\n api_resources = requests.get(url_nuget_service).json()\n for resource in api_resources.get(\"resources\") or []:\n if resource.get(\"@type\") == \"SearchQueryService\":\n url_nuget_search = resource[\"@id\"]\n break\n\n if url_nuget_search:\n url_query = urljoin(url_nuget_search, f\"?q={query}\")\n query_response = requests.get(url_query).json()\n if query_response.get(\"data\"):\n return query_response[\"data\"][0][\"id\"]", "def getPackageName(self):\n return _libsbml.QualPkgNamespaces_getPackageName(self)", "def package():\n pass", "def translate(self, package):\r\n if not isinstance(package, self._package_type):\r\n return None\r\n if not package.compatible(identity=self._identity, platform=self._platform):\r\n return None\r\n try:\r\n bdist = package.fetch(location=self._install_cache, conn_timeout=self._conn_timeout)\r\n except package.UnreadableLink as e:\r\n TRACER.log('Failed to fetch %s: %s' % (package, e))\r\n return None\r\n return DistributionHelper.distribution_from_path(bdist)", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def __extract_package_version(package_string):\n # remove leading whitespace\n package_string = package_string.strip()\n # create a re parser\n compil = re.compile(r'(?P<name>.+(-[^-])*)-(?P<version>.+)')\n # search package name and version\n search = compil.search(package_string)\n # retrieve result as list\n output = search.groupdict()\n\n return output", "def get_package_description(self, package):\n with self._conn.begin():\n return self._conn.execute(\n \"VALUES (get_package_description(%s))\", (package,)).scalar()", "def get_package(self, package_id):\n return self._package_cache.get(package_id)", "def getPackageName(self):\n return _libsbml.LayoutPkgNamespaces_getPackageName(self)", "def getPackageName(self):\n return _libsbml.FbcPkgNamespaces_getPackageName(self)", "def cypher_itemTag_keyword(self, variable_tagItem=\"tag_item\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagItem}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def get(self, keyword, not_found=None):\n if (type(keyword) != str):\n raise TypeError(\"The key for metadata items must be a string\")\n if (keyword in self._key_set):\n for item in self._metadata:\n if (item.keyword == keyword):\n return item\n return not_found", "def getPackageName():\n return _libsbml.GroupsExtension_getPackageName()", "def getRegisteredPackageName(*args):\n return _libsbml.SBMLExtensionRegistry_getRegisteredPackageName(*args)", "def get_pkg_meta(self, pkg):\n pass", "def cypher_tag_keyword(self, variable_tag=\"tag\"):\n if not self.keyword:\n return \"\"\n return f'({variable_tag}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def keyword_from_meaning(name):\n # Try to adhere to keyword scheme in DICOM (CP850)\n\n # singular/plural alternative 
forms are made plural\n # e.g., “Physician(s) of Record” becomes “PhysiciansOfRecord”\n name = name.replace(\"(s)\", \"s\")\n\n # “Patient’s Name” -> “PatientName”\n # “Operators’ Name” -> “OperatorsName”\n name = name.replace(\"’s \", \" \")\n name = name.replace(\"'s \", \" \")\n name = name.replace(\"s’ \", \"s \")\n name = name.replace(\"s' \", \"s \")\n\n # Mathematical symbols\n name = name.replace(\"%\", \" Percent \")\n name = name.replace(\">\", \" Greater Than \")\n name = name.replace(\"=\", \" Equals \")\n name = name.replace(\"<\", \" Lesser Than \")\n\n name = re.sub(r\"([0-9]+)\\.([0-9]+)\", \"\\\\1 Point \\\\2\", name)\n name = re.sub(r\"\\s([0-9.]+)-([0-9.]+)\\s\", \" \\\\1 To \\\\2 \", name)\n\n name = re.sub(r\"([0-9]+)day\", \"\\\\1 Day\", name)\n name = re.sub(r\"([0-9]+)y\", \"\\\\1 Years\", name)\n\n # Remove category modifiers, such as \"(specimen)\", \"(procedure)\",\n # \"(body structure)\", etc.\n name = re.sub(r\"^(.+) \\([a-z ]+\\)$\", \"\\\\1\", name)\n\n name = camel_case(name.strip())\n\n # Python variables must not begin with a number.\n if re.match(r\"[0-9]\", name):\n name = \"_\" + name\n\n return name" ]
[ "0.63861567", "0.62863207", "0.6277217", "0.62762856", "0.62735116", "0.62571263", "0.6205", "0.6157799", "0.59881186", "0.5964056", "0.5946726", "0.591496", "0.588399", "0.5882375", "0.58692354", "0.58623904", "0.5844938", "0.58448356", "0.58406734", "0.58017486", "0.5789892", "0.5722921", "0.5704586", "0.56961656", "0.5675049", "0.5669365", "0.5662334", "0.5632702", "0.56187546", "0.56077415", "0.5601241", "0.5591853", "0.5591852", "0.559009", "0.557434", "0.5568163", "0.55294967", "0.54764396", "0.5455805", "0.5440107", "0.5430064", "0.5412398", "0.5390241", "0.5378489", "0.53748125", "0.5367363", "0.53653544", "0.5360307", "0.53508574", "0.53381664", "0.5313417", "0.5311658", "0.5311069", "0.5290082", "0.5276388", "0.5275722", "0.5270824", "0.52645475", "0.52491295", "0.5248239", "0.5245515", "0.52438724", "0.5232112", "0.52320635", "0.5221454", "0.52152073", "0.5214563", "0.5212409", "0.52057016", "0.52017367", "0.5197706", "0.5193548", "0.5185549", "0.5184245", "0.51799345", "0.5166864", "0.5166292", "0.5162407", "0.5161843", "0.5155815", "0.5153421", "0.51533896", "0.51482314", "0.5142657", "0.51276034", "0.51193494", "0.5117786", "0.5109814", "0.51029986", "0.51020163", "0.5097411", "0.50904757", "0.50874066", "0.50812024", "0.50781447", "0.50558174", "0.5055369", "0.5036643", "0.50190866", "0.5015145" ]
0.84235466
0
Initialise base schema of the DB for car sharing system.
def initialise_schema(db_name: str, password: str):
    conn = psycopg2.connect(host='localhost', dbname=db_name, user='postgres', password=password)
    cursor = conn.cursor()
    cursor.execute(_query)
    conn.commit()
    conn.close()
    print('Database schema was created successfully!\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def initialise(self):\n\n if self.db_type == 'sqlite':\n try:\n # Attempt to create schema if not present, to cope with fresh DB file\n BaseSQLite.metadata.create_all(self.engine)\n except OperationalError:\n print(\"Error creating database schema, possible invalid path? ('\" + self.db_name + \"'). Quitting\")\n exit()\n elif self.db_type == 'postgres':\n try:\n # ensure that create schema scripts created before create table scripts\n event.listen(BasePostgres.metadata, 'before_create', CreateSchema('datastore_schema'))\n BasePostgres.metadata.create_all(self.engine)\n except OperationalError:\n print(f\"Error creating database({self.db_name})! Quitting\")\n exit()", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def create_db_structure(self):\n logger.info(\"Creating CRH database structure.\")\n CrhDbModel.metadata.create_all(bind=self.engine)", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def init_database(self):\n # init_database(self.engine)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def create_database_structure(self):\n Base.metadata.create_all(self.engine)", "def setup_db(self) -> None:\n conn = mysql.connector.connect(\n user=self.app.config[\"DATABASE_USER\"], password=self.app.config[\"DATABASE_PASSWORD\"],\n host=self.app.config[\"DATABASE_HOST\"], port=self.app.config[\"DATABASE_PORT\"], raise_on_warnings=True\n )\n try:\n cursor = conn.cursor()\n cursor.execute(\n \"CREATE DATABASE IF NOT EXISTS {} CHARACTER SET utf8\".format(self.app.config[\"DATABASE_NAME\"])\n )\n conn.commit()\n except:\n raise\n else:\n with self.DBManager(self.app) as connection:\n for model in sorted(lib.get_subclasses(lib.models.Model), key=lambda x: x.index):\n model.setup_table(connection=connection)\n finally:\n conn.close()", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def init_database(self):\n init_database(self.engine)", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def setup_database(self):\n self.db.setup_database()", "def initialize(self):\n\n cursor 
= self.conn.cursor()\n\n # This table can be used as a parent for a collection of runs\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS RunCollections (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE\n );\"\"\"\n )\n\n # This table holds in which run each appears.\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Runs (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE,\n collection_id INT,\n FOREIGN KEY (collection_id) REFERENCES RunCollections (id) ON DELETE CASCADE);\"\"\"\n )\n\n # This table holds resources, which can be in multiple runs and have multiple varieties\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Resources (\n id INT AUTO_INCREMENT PRIMARY KEY, \n extension VARCHAR(20), \n webpage VARCHAR(30),\n run_id INT NOT NULL,\n FOREIGN KEY (run_id) REFERENCES Runs (id) ON DELETE CASCADE);\"\"\"\n )\n\n cursor.execute(\n 'SELECT Table_name FROM information_schema.tables WHERE table_schema = \"vpntfg0\" AND Table_name LIKE \"%Varieties_%\" ORDER BY Table_name'\n )\n for row in cursor.fetchall():\n self.variety_tables.append(row[0])\n\n cursor.close()\n _logger.info(\"Variety tables are: %s\" % self.variety_tables)\n\n _logger.info(\"Database initialized\")", "def initialize_empty_database(self):\r\n Base.metadata.create_all(self.engine)", "def init_db():\n import cerbereapp.models\n Base.metadata.create_all(bind=engine)", "def init_db() -> None: \n \n Base.metadata.create_all(bind=engine)", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def init_database(db: sa.engine.Connectable):\n\n # setup the Postgres extensions and schema\n db.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\" WITH SCHEMA public;\n \"\"\")\n db.execute(\n ';\\n'.join(\n 'CREATE SCHEMA IF NOT EXISTS {}'.format(s) for s in SCHEMAS.values()\n )\n )\n\n # create the schema from the models\n METADATA.create_all(bind=db)", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()", "def init_db(base):\n base.metadata.create_all(engine, checkfirst=True)", "def _initialize_db():\n conn, c = _get_db_connection()\n\n with open(str(SCHEMA_PATH)) as f:\n c.executescript(f.read())\n\n conn.close()", "def init():\n database.create_tables([Tracker])\n database.commit()", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def initdb():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n self.logger.info('Database was already initialized')\n self.db.close()", "def init_db():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def init_db(self):\n print(\"Initializing database...\", end='')\n self.cursor.execute(\"DROP DATABASE %s\" % self.db.database)\n self.__init__(self.db_name)\n self.cursor.execute(\"USE %s\" % self.db.database)\n\n # Book\n self.cursor.execute(\n \"\"\"CREATE TABLE Book (\n ISBN VARCHAR(13),\n title VARCHAR(300) COLLATE utf8_general_ci,\n publisher VARCHAR(100) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n publicationDate DATE,\n pageCount SMALLINT CHECK(pageCount >= 0),\n stock SMALLINT CHECK(stock >= 0),\n price 
DECIMAL(5,2),\n subject VARCHAR(100),\n avg_rating DECIMAL(4,2) CHECK(avg_rating <= 10.00),\n total_rating_score INT DEFAULT 0,\n num_ratings INT DEFAULT 0,\n PRIMARY KEY (ISBN))\"\"\")\n\n # Author\n self.cursor.execute(\n \"\"\"CREATE TABLE Author (\n ID INT AUTO_INCREMENT,\n name VARCHAR(200) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n PRIMARY KEY (ID))\"\"\")\n\n # CustomerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE CustomerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # CustomerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE CustomerCredentials (\n loginID VARCHAR(30),\n firstName VARCHAR(50) NOT NULL,\n lastName VARCHAR(50) NOT NULL,\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES CustomerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # ManagerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # ManagerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerCredentials (\n loginID VARCHAR(30),\n managerID INT UNIQUE NOT NULL AUTO_INCREMENT,\n firstName VARCHAR(50),\n lastName VARCHAR(50),\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES ManagerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # Comment\n self.cursor.execute(\n \"\"\"CREATE TABLE Comment (\n commentID INT AUTO_INCREMENT,\n ISBN VARCHAR(13) NOT NULL,\n loginID VARCHAR(30) NOT NULL,\n score TINYINT NOT NULL,\n message TEXT,\n veryUseful INT DEFAULT 0,\n useful INT DEFAULT 0,\n useless INT DEFAULT 0,\n avg_usefulness DECIMAL (3,2),\n commentDate DATETIME,\n PRIMARY KEY (commentID),\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # OrderLog\n self.cursor.execute(\n \"\"\"CREATE TABLE OrderLog (\n orderNumber INT AUTO_INCREMENT,\n loginID VARCHAR(30) NOT NULL,\n orderDate DATE,\n PRIMARY KEY (orderNumber),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Return Request\n self.cursor.execute(\n \"\"\"CREATE TABLE ReturnRequest (\n requestID INT AUTO_INCREMENT,\n orderNumber INT NOT NULL,\n requestDate DATE,\n ISBN VARCHAR(13) NOT NULL,\n quantity SMALLINT,\n status VARCHAR(25) DEFAULT 'PENDING',\n PRIMARY KEY (requestID),\n FOREIGN KEY (orderNumber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # # HasKeyword\n # self.cursor.execute(\n # \"\"\"CREATE TABLE HasKeyword (\n # ISBN VARCHAR(13),\n # word VARCHAR(50) COLLATE utf8_general_ci,\n # PRIMARY KEY (ISBN, word),\n # FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n # ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # Wrote\n self.cursor.execute(\n \"\"\"CREATE TABLE Wrote (\n authorID INT,\n ISBN VARCHAR(13),\n PRIMARY KEY (authorID, ISBN),\n FOREIGN KEY (authorID) REFERENCES Author(ID)\n ON UPDATE RESTRICT ON DELETE RESTRICT,\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # ProductOf\n self.cursor.execute(\n \"\"\"CREATE TABLE ProductOf (\n ISBN VARCHAR(13),\n orderNumber INT,\n quantity SMALLINT CHECK(quantity > 0),\n PRIMARY KEY (ISBN, orderNumber),\n FOREIGN KEY 
(ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (orderNUmber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # Trusts\n self.cursor.execute(\n \"\"\"CREATE TABLE Trusts (\n loginID VARCHAR(30),\n otherLoginID VARCHAR(30) CHECK(loginID<>otherLoginID),\n trustStatus VARCHAR(9) CHECK(trustStatus = 'TRUSTED' OR trustStatus = 'UNTRUSTED'),\n PRIMARY KEY (loginID, otherLoginID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (otherLoginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Rates\n self.cursor.execute(\n \"\"\"CREATE TABLE Rates (\n loginID VARCHAR(30),\n commentID INT,\n rating VARCHAR(10) NOT NULL,\n PRIMARY KEY (loginID, commentID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (commentID) REFERENCES Comment(commentID)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\"\n )\n\n print(\"done\")", "def initdb():\n db = getdb()\n\n with open(os.path.join(config.BASE_DIRECTORY, 'schema.sql')) as f:\n db.executescript(f.read())", "def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)", "def initialize_database():\n # Create the schema\n Base.metadata.create_all(engine)\n\n # Create a connection/database session\n session = Session()\n\n # Now, create a few restaurants:\n cupcake = Restaurant(name=\"Cupcakes\")\n five_guys = Restaurant(name=\"Five Guys\")\n ihop = Restaurant(name=\"IHOP\")\n\n # And a few users:\n mike = User(name=\"Mike\")\n ryan = User(name=\"Ryan\")\n\n # And finally a few votes:\n mike.preferences.append(Preference(vote=\"+1\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"+0\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"-0\", restaurant=cupcake))\n\n session.add(mike)\n session.add(ryan)\n session.add(ihop)\n\n session.commit()\n\n session.close()", "def createschema(self):\n def closure(cur):\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS config (\n key varchar(1024) PRIMARY KEY,\n value text\n );\n CREATE TABLE IF NOT EXISTS rooms (\n id serial PRIMARY KEY,\n name text NOT NULL\n );\n CREATE TABLE IF NOT EXISTS slides (\n id serial PRIMARY KEY,\n -- The ordering index of the slide, set to NULL if slide should be hidden\n sequence_no integer NULL UNIQUE,\n -- The room that should be displayed on this slide, set to NULL for master slides aren't associated with a room\n room integer REFERENCES rooms NULL,\n -- The masters are numbered sequentially and defined in content.py\n master integer NOT NULL,\n -- Overrides the title (normally the room name will be used)\n title text NULL,\n -- If max_rows is NULL, use the config default\n max_rows integer NULL\n );\n CREATE TABLE IF NOT EXISTS events (\n id serial PRIMARY KEY,\n room integer REFERENCES rooms NOT NULL,\n begins timestamp NOT NULL,\n ends timestamp NOT NULL,\n name text NOT NULL\n );\n \"\"\")\n \n self.execute(closure)", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n 
c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def init_db(ctx: click.core.Context, force: bool) -> None:\n engine = ctx.obj['engine']\n if force:\n contract.Base.metadata.drop_all(engine)\n contract.Base.metadata.create_all(engine)", "def bootstrap():\n Base.metadata.create_all(engine)", "def init_db():\n # Open connection to the database\n conn = sqlite3.connect(DB_PATH)\n cursor = conn.cursor()\n\n # Open the schema file and execute its SQL code\n with current_app.open_resource('schema.sql') as db_schema:\n cursor.executescript(db_schema.read().decode('utf8'))\n\n # Save (commit) the changes\n conn.commit()\n\n # We can also close the connection if we are done with it.\n conn.close()", "def init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "def _create_database(self):\n self._connect()\n cursor = self._connection.cursor()\n cursor.execute(make_table_creation_command(\"reviews\", FIELD_DESCRIPTIONS))\n self._connection.commit()", "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)", "def setup_db(cls):\n if not cls._dbag:\n\n # due to metaclassery, the __class_ get smunged so let's give it a\n # hint as to the type of document we're really creating\n table_name = cls._Meta.table or cls._class_name.split('.')[-1]\n if not cls._Meta.table:\n cls._Meta.table = cls._class_name.split('.')[-1]\n\n BagDocument._dbag = DictBag(\n fpath=cls._Meta.dbpath,\n table=cls._Meta.table\n )\n\n for i in cls._Meta.indexes:\n if isinstance(i, basestring):\n # convenience for just one col in the index\n 
BagDocument._dbag.ensure_index((i,))\n else:\n BagDocument._dbag.ensure_index(i)", "def init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def __init_database(self):\n from admin.database import init_db\n init_db()", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def init_database():\n database.init(DATABASE_NAME)\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON')\n if not database.table_exists([Customer]):\n database.create_tables([Customer])\n database.close()", "def init_db():\n db.drop_all()\n db.create_all()\n\n print(\"Initialized Connect 4 Database.\")", "def init_db():\n db = get_db()\n with current_app.open_resource('schema.sql') as f:\n db.executescript(f.read().decode('utf8'))", "def init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as fobj:\n db.cursor().executescript(fobj.read())\n db.commit()", "def init_db(self):\n\n # The user can provide a custom string\n if self.database is None:\n self.logger.error(\"You must provide a database url, exiting.\")\n sys.exit(1)\n\n self.engine = create_engine(self.database, convert_unicode=True)\n self.session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=self.engine)\n )\n\n # Database Setup\n Base.query = self.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. Otherwise\n # you will have to import them first before calling init_db()\n import expfactory.database.models\n\n self.Base = Base\n self.Base.metadata.create_all(bind=self.engine)", "def init_db():\n db = get_db()\n\n with current_app.open_resource(\"schema.sql\") as f:\n db.executescript(f.read().decode(\"utf8\"))", "def init_db():\n\twith closing(connect_db()) as db:\n\t\twith app.open_resource('schema.sql', mode='r') as f:\n\t\t\tdb.cursor().executescript(f.read())\n\t\tdb.commit()", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def create_db(self):", "def dbinit( 
*args, **kwargs ):", "def init_sensor_db(self, drop_tables=True):\n logger.debug(\"Creating Database Engine.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n\n if drop_tables:\n logger.debug(\"Drop system table if within the existing database.\")\n Base.metadata.drop_all(db_engine)\n\n logger.debug(\"Creating Sentinel1ASF Database.\")\n Base.metadata.bind = db_engine\n Base.metadata.create_all()", "def initDb(self) -> None:\n try: \n connection = None \n dump = open('db/db.sql')\n sql_str = dump.read() \n connection = self.connect()\n cursor = connection.cursor()\n cursor.executescript(sql_str)\n acs_ports = JsonSettings.parseJson('settings.json','AcsPorts')\n db_ports = JsonSettings.parseJson('settings.json','DbPorts')\n acs_port_names = JsonSettings.getKeys('acs_port_', acs_ports) \n db_port_names = JsonSettings.getKeys('db_port_', db_ports)\n for acs_port_name in acs_port_names:\n cursor.execute(f\"ALTER TABLE ports ADD COLUMN {acs_port_name} INTEGER\")\n for db_port_name in db_port_names:\n cursor.execute(f\"ALTER TABLE ports ADD COLUMN {db_port_name} INTEGER\") \n except Exception as e:\n logging.error(f'{self.cn} Error \\n{e}', exc_info=1)\n finally:\n connection.commit() \n tables = cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name != 'sqlite_sequence'\")\n logging.info(f'{self.cn} Database created with tables:\\n{tables.fetchall()}')\n if connection:\n connection.close()", "def init_db():\n global app\n Promotions.init_db(app)", "def __init__(self):\n self.__db = sqlite3.connect(DB_PATH)\n self.__cur = self.__db.cursor()\n self.__create_tables()", "def initdb():\n db.create_all()", "def initdb():\n db.create_all()", "def init_db():\n # users table\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS users (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"name VARCHAR(255) NOT NULL,\"\n \"email VARCHAR(255) NOT NULL,\"\n \"password VARCHAR(30) NOT NULL,\"\n \"birthdate DATE);\"\n )\n\n # users' phone records table\n cur.execute(\"CREATE TABLE IF NOT EXISTS records (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"ownerID INTEGER,\"\n \"name VARCHAR(255),\"\n \"phone VARCHAR(22),\"\n \"birthdate DATE);\")", "def init_db(conn: Connection) -> None:\n\n logger.info(f\"{Fore.YELLOW}Initializing database ...{Style.RESET_ALL}\")\n\n # Create specified schema if not exists\n if not conn.dialect.has_schema(conn, schema_name):\n logger.info(f\"{Fore.YELLOW}Schema {schema_name} does not exist, creating it ...{Style.RESET_ALL}\")\n conn.execute(schema.CreateSchema(schema_name))\n logger.info(f\"{Fore.GREEN}Schema {schema_name} successfully created !{Style.RESET_ALL}\")\n else:\n logger.info(f\"{Fore.GREEN}Schema {schema_name} was found, continuing database initialization \"\n f\"...{Style.RESET_ALL}\")\n\n # Create tables\n Base.metadata.create_all(bind=conn)\n\n logger.info(f\"{Fore.GREEN}Schema {schema_name} successfully configured !{Style.RESET_ALL}\")", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def 
setUp(self):\n create_table(self.DATABASE_PATH)\n self.model = model.CodeReviewDatabase(self.DATABASE_PATH)", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def __init__(self):\n engine = db_connect()\n create_reals_table(engine)\n self.Session = sessionmaker(bind=engine)", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def _initial_setup(self):\n logger.info(\"Performing initial database setup...\")\n\n # Set up the migration_version table\n self._execute(\n \"\"\"\n CREATE TABLE migration_version (\n version INTEGER PRIMARY KEY\n )\n \"\"\"\n )\n\n # Initially set the migration version to 0\n self._execute(\n \"\"\"\n INSERT INTO migration_version (\n version\n ) VALUES (?)\n \"\"\",\n (0,),\n )\n\n # Set up any other necessary database tables here\n\n logger.info(\"Database setup complete\")", "def init_db():\n\tdb.drop_all()\n\tdb.create_all()\n\n\tprint(\"Initialized Database.\")\n\treturn", "def setUp(self):\n init_db()\n self.client = Client(schema)", "def init():\n print(\"Executing initialization\")\n print(db.dsn)\n cursor = yield momoko.Op(\n db.execute,\n \"\"\"\n DROP SCHEMA public CASCADE;\n CREATE SCHEMA public;\n CREATE TABLE game\n (\n game_id text PRIMARY KEY,\n players integer,\n state bytea,\n timestamp timestamp\n );\n CREATE UNIQUE INDEX ix_game_id\n ON game\n (game_id);\n CREATE INDEX ix_timestamp\n ON game\n (timestamp);\n \"\"\")\n try:\n print(cursor.fetchall())\n except psycopg2.ProgrammingError:\n pass\n io = ioloop.IOLoop.instance()\n io.stop()", "def schema(self):\n pass", "def _initDb(self):\n CREATE_TOKEN_TABLE = '''create table token\n (token text, id int primary key)\n '''\n CREATE_DOCS_TABLE = '''create table docs\n (local_path text, resource_id text primary key, etag text, title text)\n '''\n \n try:\n self.db.execute(CREATE_TOKEN_TABLE)\n self.db.execute(CREATE_DOCS_TABLE)\n except sqlite3.OperationalError, error:\n pass", "def create_db(self):\n\t\tcur, conn = self.open_connection()\n\n\t\t# this creates the meta table\n\t\tcommand = \"\"\" DROP TABLE IF EXISTS main;\n\t\tCREATE TABLE meta (\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tUT_date TEXT,\n\t\t\ttime_of_obs TEXT,\n\t\t\tobject_name TEXT,\n\t\t\tintegration_time FLOAT,\n\t\t\tgrating INTEGER,\n\t\t\tcentral_wavelength FLOAT,\n\t\t\tslit_width INTEGER,\n\t\t\tphase_angle FLOAT,\n\t\t\tcomments TEXT\n\t\t\t);\"\"\"\n\n\t\tcur.executescript(command)\n\n\t\t# this creates the spectra table\n\t\tcommand = \"\"\" DROP TABLE IF EXISTS spectrum;\n\t\tCREATE TABLE spectra (\n\t\t\tspec_id INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tid INTEGER,\n\t\t\twave array,\n\t\t\tspectrum array,\n\t\t\tFOREIGN KEY(id) REFERENCES meta(id)\n\t\t\t);\"\"\"\n\n\t\tcur.executescript(command)\n\n\t\tconn.commit()\n\t\tconn.close()", "def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.Session = sessionmaker(bind=engine)", "def init_db():\n with LoggerApi.app_context():\n db = get_db()\n with LoggerApi.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def _initialize(self):\n query_table = self._cursor.execute(f\"\"\"\n SELECT name\n FROM sqlite_master\n WHERE type='table' AND name='{self._table_name}';\"\"\")\n\n if not query_table.fetchone():\n self._cursor.execute(f\"\"\"\n CREATE TABLE {self._table_name} (\n id char(36),\n term TEXT,\n timestamp BIGINT\n );\"\"\")\n\n self._cursor.execute(f\"\"\"\n CREATE INDEX index_timestamp\n ON 
{self._table_name} (timestamp);\"\"\")\n\n self._conn.commit()", "def initDB():\n global DATABASE\n\n uid0 = generate_resource_uid('Admin1', 0)\n\n DATABASE[\"users\"] = {\n \"Admin1\": {\n \"Type\": \"admin\",\n \"Password\": \"AdminPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": {uid0},\n \"Created\": 1,\n },\n \"User1\": {\n \"Type\": \"user\",\n \"Password\": \"UserPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": set([]),\n \"Created\": 0,\n }\n }\n\n DATABASE[\"resources\"] = {\n uid0: \"Admin1\",\n }", "def startup(self):\n self.load_up_initial_db(TIMESTAMP_PARSE_DICT)\n self.add_numeric_cols()", "def create_all_tables(self):\n pass", "def __init__(self):\n # instantiate logger\n self.log = logging.getLogger('blog')\n # open sqlite db\n db_path=application.config.get('sqlite.path', './db.sqlite')\n self.conn = sqlite3.connect(db_path)\n self.cursor = self.conn.cursor()\n self._create_schema()", "def startConnection(self):\n try:\n self.conn = psycopg2.connect(\"dbname='library' user='postgres' host='localhost' password='Codechef'\")\n # self.conn = psycopg2.connect(\"dbname='library' user='postgres' host='localhost' password='Codechef'\")\n # self.conn = psycopg2.connect(\"dbname='db_b130974cs' user='postgres' host='localhost' password='Codechef'\")\n except:\n print \"I am unable to connect to the database\"\n print \"connected to database...\"\n self.schema = SchemaGraph(self.conn)", "def init_db(cls, app):\n cls.logger.info(\"Initializing database\")\n cls.app = app\n # This is where we initialize SQLAlchemy from the Flask app\n db.init_app(app)\n app.app_context().push()\n db.create_all() # make our sqlalchemy tables", "def init_db(cls, app):\n cls.logger.info(\"Initializing database\")\n cls.app = app\n # This is where we initialize SQLAlchemy from the Flask app\n db.init_app(app)\n app.app_context().push()\n db.create_all() # make our sqlalchemy tables", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")" ]
[ "0.7675053", "0.7396573", "0.73593235", "0.72910017", "0.7221896", "0.71476793", "0.71151453", "0.70588493", "0.70209366", "0.70209366", "0.70208734", "0.7015672", "0.6986679", "0.6958186", "0.6949035", "0.69346476", "0.69117707", "0.6891788", "0.6884205", "0.687781", "0.6866837", "0.68382126", "0.68091273", "0.68090725", "0.6787269", "0.6771588", "0.6755212", "0.6744002", "0.67107624", "0.66786623", "0.6672426", "0.6664615", "0.666328", "0.66471213", "0.66284215", "0.66155136", "0.66069216", "0.6596936", "0.65589476", "0.65565497", "0.65525544", "0.6543165", "0.6539885", "0.653085", "0.6523427", "0.65201944", "0.6513794", "0.6501496", "0.6486465", "0.64851725", "0.6476567", "0.6466857", "0.6466512", "0.64573336", "0.6453588", "0.6445228", "0.64383066", "0.64202136", "0.64196557", "0.6414366", "0.6398597", "0.63973266", "0.63973266", "0.63973266", "0.63973266", "0.63973266", "0.63973266", "0.63973266", "0.638809", "0.637919", "0.63704336", "0.6366467", "0.634908", "0.63485235", "0.6346095", "0.6346095", "0.6338504", "0.6336907", "0.6331746", "0.63243115", "0.6319958", "0.6316302", "0.63137656", "0.63094294", "0.6304965", "0.6304107", "0.6294595", "0.6289527", "0.6276533", "0.62683994", "0.6256081", "0.6250763", "0.6241958", "0.6239885", "0.62358457", "0.6228963", "0.6223339", "0.62220925", "0.6206086", "0.6206086", "0.62032306" ]
0.0
-1
This is my implementation of modularity using the formula directly from Wikipedia.
def get_modularity_other_c(A, cluster_indices):
    # define the number of nodes in the graph and the number of clusters
    n = len(cluster_indices)
    nclusters = max(cluster_indices) + 1
    # define the row sums of the adjacency matrix
    row_sums = [sum(row) for row in A]
    # define one half of the sum of all entries in the adjacency matrix
    m = sum(row_sums) / 2.0
    # define the modularity
    Q = 0
    for i in range(n):
        for j in range(n):
            if cluster_indices[i] == cluster_indices[j]:
                Q += (A[i][j] - row_sums[i] * row_sums[j] / (2*m)) / (2*m)
    return Q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_modularity(modules, degree_table, edges, num_of_edges):\n modularity = 0.0\n for module in modules:\n modularity += calculate_q(module, degree_table, edges, num_of_edges)\n\n return modularity / (2.0 * num_of_edges)", "def modularity():\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q", "def main():\n n = 34\n # create the adjacency matrix\n stripped_lines = Util.get_stripped_lines(g_karate_data.splitlines())\n string_rows = [line.split() for line in stripped_lines if line]\n assert len(string_rows) == n\n for row in string_rows:\n assert len(row) == n\n data_rows = [[float(x) for x in string_row] for string_row in string_rows]\n A = np.array(data_rows)\n # create the ordered module indices\n first_cluster_one_based_indices = [1, 3, 4, 14, 2, 8, 20, 18, 22, 13, 12, 6, 7, 17, 5, 11]\n second_cluster_one_based_indices = [25, 32, 26, 29, 24, 28, 9, 34, 33, 19, 16, 31, 15, 10, 23, 30, 21, 27]\n assert len(first_cluster_one_based_indices + second_cluster_one_based_indices) == n\n assert list(sorted(first_cluster_one_based_indices + second_cluster_one_based_indices)) == range(1, n+1)\n ordered_module_indices = []\n for i in range(n):\n if i+1 in first_cluster_one_based_indices:\n ordered_module_indices.append(0)\n else:\n ordered_module_indices.append(1)\n # print the modularity\n Q = get_modularity_other_b(A, ordered_module_indices)\n print 'modularity calculated using my interpretation of the method of the paper', Q\n Q = get_modularity_other_b2(A, ordered_module_indices)\n print 'modularity calculated using a modification of my interpretation of the method of the paper', Q\n Q = get_modularity_other_c(A, ordered_module_indices)\n print 'modularity calculated using the method on wikipedia', Q\n Q = get_eric_modularity(A, ordered_module_indices)\n print 'modularity calculated using the method eric used:', Q\n print 'expected modularity: .375 +/- .025'", "def compute_modularity(G):\n # convert to undirected graph if necessary\n if isinstance(G, nx.DiGraph):\n G = G.to_undirected(reciprocal=True)\n\n # extract communities\n community_detection = community.greedy_modularity_communities(G)\n # calculate modularity with those communities\n modularity = community.modularity(G, community_detection)\n return modularity", "def neural_modularity_calculator(graph, embedding, means):\n assignments = {}\n for node in graph.nodes():\n positions = means-embedding[node, :]\n values = np.sum(np.square(positions), axis=1)\n index = np.argmin(values)\n assignments[int(node)] = int(index)\n modularity = community.modularity(assignments, graph)\n return modularity, assignments", "def modularity(G, partition):\n m = G.size(weight=\"weight\")\n degrees = dict(G.degree(weight=\"weight\"))\n Q = 0\n for community in partition:\n for u, v in product(community, repeat=2):\n try:\n w = G[u][v].get(\"weight\", 1)\n except KeyError:\n w = 0\n if u == v:\n # Double count self-loop weight.\n w *= 2\n Q += w - degrees[u] * degrees[v] / (2 * m)\n return Q / (2 * m)", "def get_modularity(partition):\n q1 = partition.quality()\n return q1", "def classical_modularity_calculator(graph, embedding, cluster_number=20):\n kmeans = KMeans(n_clusters=cluster_number, random_state=0, n_init=1).fit(embedding)\n assignments = {i: int(kmeans.labels_[i]) for i in range(0, embedding.shape[0])}\n modularity = community.modularity(assignments, graph)\n return modularity, assignments", "def calc_pos_mod(nmodule):\n pass", "def gen_mod(affinities, 
labels):\n\n for aff in affinities:\n yield modularity.get_modularity(aff, labels).sum()", "def mod(p):\n return (p[0]**2 + p[1]**2 + p[2]**2)**0.5", "def modularity_gain(n, c, dnodecomm):\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m", "def calculate_modularity_difference(team_i, team_j):\n # I am calling a dedicated function (i.e. calculate_e_ij)\n e_ij = calculate_e_ij(team_i, team_j)\n # I am returning dq\n return 2 * (e_ij - team_i.a * team_j.a)", "def ModExp(n, k, m):\n a = list(bin(k))[2:]\n a.reverse()\n s = 1\n for i in a:\n if i == '1':\n s = (s * n) % m\n n = (n * n) % m\n return s", "def get_modularity2(adjacency, clusters):\n num_ids = adjacency.shape[0]\n id_to_cluster = get_id_to_cluster(clusters, num_ids)\n S = np.zeros(shape=(adjacency.shape[0], len(clusters))) # S[v,c]=1 iff v is in cluster c\n for id in range(adjacency.shape[0]):\n cluster_id = id_to_cluster[id]\n S[id, cluster_id] = 1\n total_weight = np.sum(adjacency)\n degrees = np.sum(adjacency, axis=1)\n\n C = np.outer(degrees, degrees)\n C = C / total_weight # C[v,w] = deg(v)*deg(w) / 2m\n B = adjacency - C\n M = np.dot(np.dot(S.T, B), S)\n return np.trace(M) / total_weight", "def get_modularity(adjacency, clusters):\n total_weight = np.sum(adjacency)\n e = get_clusters_adjacencies(adjacency, clusters)\n e = e / total_weight\n a = np.sum(e, axis=1)\n return np.sum(e.diagonal() - np.power(a, 2))", "def bulk_modulus():\n\n return 10000.0", "def expMod(b,n,m):\n \"\"\"returns (b^n mod m)\"\"\"\n if n==0:\n return 1\n elif n%2==0:\n return expMod((b*b)%m, n/2, m)\n else:\n return(b*expMod(b,n-1,m))%m", "def numerator(self, ???):", "def __mod__( self, value ):\r\n\t\tif ( type( value ) == type( self ) ):\r\n\t\t\treturnvalue = fraction( self )\r\n\t\t\tif ( returnvalue < 0 ):\r\n\t\t\t\twhile ( returnvalue < -value ): returnvalue += value\r\n\t\t\telse:\r\n\t\t\t\twhile ( returnvalue > value ): returnvlaue -= value\r\n\t\t\treturn returnvalue\r\n\t\telif ( type( value ) in ( types.IntType, types.LongType ) ):\r\n\t\t\treturn fraction( self.numerator % ( value * self.denominator ), self.denominator )\r\n\t\telif ( type ( value ) == types.FloatType ):\r\n\t\t\treturn float( self ) % value\r\n\t\telse: return NotImplemented", "def evansMod(x,n):\n if x%n == 0:\n return 1\n else:\n return 0", "def getDivisors(n):", "def expMod(b,n,m):\r\n \"\"\"returns (b^n mod m)\"\"\"\r\n if n==0:\r\n return 1\r\n elif n%2==0:\r\n return expMod((b*b)%m, n/2, m)\r\n else:\r\n return(b*expMod(b,n-1,m))%m", "def apply_mod(num):\n return num % MODULO", "def modinverse(a: int, m: int) -> int:\n if SHOW_WORKING: print(f\"modinverse(a, m) = modinverse({a}, {m})\")\n if SHOW_WORKING: print(f\"\\tWe want to find some x & y such that {a} * x + {m} * y = 1\")\n\n if a < 0 or m <= 0:\n raise ValueError(\"a must be non-negative and m must be positive\")\n\n if SHOW_WORKING: print(f\"Find gcd(a, b) = gcd({a}, {m})\")\n if m > a:\n if SHOW_WORKING: print(f\"\\tb > a. Set r1[0] := m = {m} and r2[0] := a = {a} so that r1[0] > r2[0\")\n r1s, r2s = [m], [a]\n else:\n if SHOW_WORKING: print(f\"\\ta >= b. Set r1[0] := a = {a} and r2[0] := m = {m} so that r1[0] >= r2[0]\")\n r1s, r2s = [a], [m] \n\n if SHOW_WORKING: print(f\"\\tProceeding with algorithm until r2 hits 0. 
gcd({a}, {m}) will be the ending r1 value:\")\n qs = []\n i = 0\n while r2s[-1] != 0:\n i += 1\n\n if SHOW_WORKING: print(f\"\\t\\tSet q[{i - 1}] := floor(r1[{i - 1}] / r2[{i - 1}]) = floor({r1s[i - 1]} / {r2s[i - 1]}) = floor({round(r1s[i - 1] / r2s[i - 1], 2)}) = {r1s[i - 1] // r2s[i - 1]}\")\n qs.append(r1s[i - 1] // r2s[i - 1])\n\n if SHOW_WORKING: print(f\"\\t\\tSet (r1[{i}], r2[{i}]) := (r2[{i - 1}], r1[{i - 1}] - r2[{i - 1}] * q[{i - 1}]) = ({r2s[i - 1]}, {r1s[i - 1]} - {r2s[i - 1]} * {qs[i - 1]}) = ({r2s[i - 1]}, {r1s[i - 1] - r2s[i - 1] * qs[i - 1]})\")\n r1, r2 = r2s[i - 1], r1s[i - 1] - r2s[i - 1] * qs[i - 1]\n r1s.append(r1)\n r2s.append(r2)\n\n if SHOW_WORKING: print(\"\\t\\t -\")\n \n if SHOW_WORKING: print(f\"\\tStopping condition hit (r2[{i}] = 0). Result of gcd({a}, {m}) is r1[{i}] = {r1s[-1]}\")\n\n if r1s[-1] != 1:\n if SHOW_WORKING: print(f\"\\t{a} has no inverse modulo {m} because gcd({a}, {m}) = {r1s[-1]} != 1 (they must be coprime)\")\n return None\n\n if SHOW_WORKING: print(f\"\\n\\tBegin working backwards:\")\n\n def getnestedexpressionstr(leftstr: str, nestedr1r2q: List[Union[int, List[int]]], rightstr: str) -> str:\n if SHOW_WORKING: print(f\"\\t\\tgetnestedexpressionstr('{leftstr}', {nestedr1r2q}, '{rightstr}')\")\n r1: int = nestedr1r2q[0]\n r2: Union[int, List[int]] = nestedr1r2q[1]\n q: int = nestedr1r2q[2]\n if SHOW_WORKING: print(f\"\\t\\t\\tr1 = {r1}, r2 = {r2}, q = {q}\")\n\n if isinstance(r2, int):\n return f\"{leftstr}{r1} - {r2} * {q}{rightstr}\"\n \n if leftstr == rightstr == '':\n return getnestedexpressionstr(f\"{r1} - (\", r2, f\") * {q}\")\n\n return getnestedexpressionstr(f\"{leftstr}{r1} - (\", r2, f\") * {q}{rightstr}\")\n\n def backtrack(index: int, nestedr1r2q: List[Union[int, List[int]]]) -> List[Union[int, List[int]]]:\n \"\"\"Provided an index and an ordered list representing the r1, r2, and q values of the equation\n r1 - r2 * q, this function returns another list where r2 has been broken down to the parts of \n its equation on the previous indexed equation, e.g. 
if the 3rd and 4th equations from the GCD \n algorithm are:\n (3): r1 - r2 * q2 = 4 - 4 * 1\n (4): r1 - r2 * q2 = 3 - 1 * 3\n then: \n backtrack(4, [3, 1, 3]) -> [3, [4, 3, 1], 3].\n \n This also works when the middle element of the list (the r2 element) is given as a list of parts,\n e.g., if we follow the previous example where additionally equation 2 is:\n (2): r1 - r2 * q2 = 11 - 4 * 2\n then:\n backtrack(3, [3, [4, 3, 1], 3]) -> [3, [4, [11, 4, 2], 1], 3].\"\"\"\n \n if SHOW_WORKING: print(f\"\\t\\tbacktrack({index}, {nestedr1r2q})\")\n\n if index <= 0:\n raise ValueError(\"Can't backtrack from here, please supply a positive index\")\n \n r1: int = nestedr1r2q[0]\n r2: Union[int, List[int]] = nestedr1r2q[1]\n q: int = nestedr1r2q[2]\n\n if index == 1:\n return [r1, [r1s[0], r2s[0], qs[0]], q]\n\n return [r1, backtrack(index - 1, [r1s[index - 1], r2s[index - 1], qs[index - 1]]), q]\n\n if i - 2 > 0:\n expression = backtrack(i - 2, [r1s[i - 2], r2s[i - 2], qs[i - 2]])\n\n nestedexpressionstr: str = getnestedexpressionstr('', expression, '')\n nestedexpressionstr = nestedexpressionstr.replace(str(a), 'a').replace(str(m), 'm')\n\n if SHOW_WORKING: print(f\"\\t\\t{nestedexpressionstr}\")\n if SHOW_WORKING: print(f\"\\t\\t{sympy.simplify(nestedexpressionstr)}\")\n\n x, y = sympy.core.numbers.igcdex(a, m)[:2]\n if SHOW_WORKING: print(f\"\\ta * x + m * y = 1 -> {a} * {x} + {m} * {y} = 1\")\n\n if SHOW_WORKING: print(f\"\\tmodinverse({a}, {m}) = {x}\\t(mod {m}) = {x % m}\")\n \n return x % m", "def recalculate_popularity(self):\n self.voters = 0\n for x in self.votes:\n self.voters += 1\n if x.good:\n self.popularity += 1\n else:\n self.popularity -= 1", "def test_mod():\r\n x, y = fscalars('xy')\r\n fn = gof.DualLinker().accept(\r\n gof.FunctionGraph([x, y], [x % y])).make_function()\r\n for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),\r\n (1, 2), (-1, 2), (1, -2), (-1, -2),\r\n (5, 3), (-5, 3), (5, -3), (-5, -3)\r\n ):\r\n assert fn(a, b) == a % b, (a,)", "def fn(i, x):\n if i == goal: return x == n \n ans = 0 \n if x < n: ans += (n-x) * fn(i+1, x+1) # a new song\n if k < x: ans += (x-k) * fn(i+1, x) # an old song\n return ans % 1_000_000_007", "def get_modularity3(adjacency, clusters):\n\n rows, cols = adjacency.shape\n num_ids = adjacency.shape[0]\n id_to_cluster = get_id_to_cluster(clusters, num_ids)\n degrees = np.sum(adjacency, axis=1)\n total_weight = np.sum(adjacency)\n sum = 0\n for i in range(rows):\n for j in range(cols):\n if id_to_cluster[i] == id_to_cluster[j]:\n sum += adjacency[i, j] - (degrees[i] * degrees[j]) / total_weight\n sum = sum / total_weight\n return sum", "def __ranking_function(self, doc, query_tokens):", "def modExp(a, b, n):\n c = 0\n d = 1\n for bi in bin(b)[2:]:\n c = 2 * c\n d = (d * d) % n\n if bi == '1':\n c += 1\n d = (d * a) % n\n return d", "def mod(num1, num2):\n\n return num1 % num2", "def popularity(self,train = None,test = None,k = 8,nitem = 10):\n train = train or self.traindata\n test = test or self.testdata\n item_popularity = dict()\n for user ,items in train.items():\n for item in items.keys():\n item_popularity.setdefault(item,0)\n item_popularity[item] += 1\n ret = 0\n n = 0\n for user in train.keys():\n rank = self.recommend(user, train, k = k, nitem = nitem)\n for item ,_ in rank.items():\n ret += math.log(1+item_popularity[item])\n n += 1\n return ret / (n * 1.0)", "def ModRev(a, n):\n _n = n\n r = a % _n\n Q = []\n while r:\n Q.append(a // _n)\n a = _n\n _n = r\n r = a % _n\n if _n != 1:\n return None\n x, y = 0, 1\n while Q:\n t = 
x\n x = y\n y = t - Q.pop() * y\n return x % n", "def mod(num1, num2):\n return num1 % num2", "def powmod(x, k, m):\n ans = 1\n while k > 0:\n if odd(k):\n ans = ans * x % m\n k -= 1\n else:\n x = x * x % m\n k /= 2\n return ans", "def modular_inverse(a, m):\n\n def extended_gcd(_a, _b):\n \"\"\" Use the Extended Euclidean algorithm to calculate the \"extended greatest common divisor\".\n It takes as input two positive integers a and b, then calculates the following:\n 1. The greatest common divisor (gcd) between a and b -- that is, the integer number g which is the largest\n integer for which a/g and b/g both are integers (This can also be obtained using math.gcd)\n 2. The integer x and y so that a*x + b*y = gcd(x, y)\n :param _a: Positive integer\n :param _b: Positive integer\n :return: Tuple (gcd, x, y)\n \"\"\"\n previous_remainder, remainder = _a, _b\n current_x, previous_x, current_y, previous_y = 0, 1, 1, 0\n while remainder > 0:\n previous_remainder, (quotient, remainder) = remainder, divmod(\n previous_remainder, remainder)\n current_x, previous_x = previous_x - quotient * current_x, current_x\n current_y, previous_y = previous_y - quotient * current_y, current_y\n # The loop terminates with remainder == 0, x == b and y == -a. This is not what we want, and is because we have\n # walked it through one time \"too many\". Therefore, return the values\n # of the previous round:\n return previous_remainder, previous_x, previous_y\n\n gcd_value, x, y = extended_gcd(a, m)\n if gcd_value != 1:\n return False\n # print('No inverse. gcd (%d, %d) is %d. Decoding is not unique. Choose another key than %d'\n # % (a, m, math.gcd(a, m), a))\n return x % m", "def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret", "def module_calc(level):\n global modules\n global rods\n modules = level*(level-1)/2\n rods = modules*6\n return modules, rods", "def exp_mod( a, b, n):\n r = int(1)\n while(b):\n if(b&1):\n r=(r*a)%n\n a=(a*a)%n\n b>>=1 # b = b>>1\n \n return r", "def __rank__(self) -> int:", "def exp_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n #binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #while len(binn)<len(bina):\n # binn = [0]+binn\n #print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*4+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n nanya = len(bina)+3*len(binn)+1 # debut de \"APOW\" (ce qui doit etre mesuré)\n q = QuantumRegister(n+2, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n expmod(circ, q, # X, a, A, APOW, Y, n, N, binn, lost, lost2)\n [q[i] for i in range(len(bina))],\n b%nbr,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nanya] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n if len(bina)%2:\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n else:\n circ_m = measure(circ, q, [i for i in range(nanya,n)])\n #circ_m = measure(circ, q, [i for i in range(n)])\n return circ_m", "def Probability(rating1, 
rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def random_percentage(m):\n pages = []\n for i in range(m):\n p = 'https://en.wikipedia.org/wiki/Special:Random'\n pages.append(p)\n return find_percentage(pages)", "def calHash(n, m):\n return int(m*BloomFilter.ln2/n)", "def _divmod(num, den, p):\n mod_inverse = pow(den, p - 2, p) # modular version of 1/den\n return num * mod_inverse", "def process_module_mass(module_mass):\n\n return max(0, module_mass // 3 - 2)", "def reciprocal_rank(ranking, references, atk=None):\n for k, prediction in enumerate(ranking[:atk], 1):\n if prediction in references:\n return 1.0 / k\n return 0.0", "def pollard_rho(g: int, h: int, n: int, order: int = None):\n x = {0: 1}\n a = {0: 0}\n b = {0: 0}\n\n import ressources.multGroup as multGroup\n\n if order is None:\n order = multGroup.multiplicativeOrder(g, n)\n\n # from a, b and c, partitioning the field\n def step_xab(x, a, b, g, h, order, n):\n s = x % 3\n\n # S1\n if s == 1:\n x = x * h % n\n b = (b + 1) % order\n\n # S2\n if s == 0:\n x = square_and_multiply(x, 2, n)\n a = 2 * a % order\n b = 2 * b % order\n\n # S3\n if s == 2:\n x = x * g % n\n a = (a + 1) % order\n\n return x, a, b\n\n # returns x, a, b for a given i using memoization\n def get_xab(i):\n\n if i not in x:\n _x, _a, _b = get_xab(i - 1)\n\n x[i], a[i], b[i] = step_xab(_x, _a, _b, g, h, order, n)\n\n return x[i], a[i], b[i]\n\n def naturals_from(i):\n while True:\n # yield is a keyword that is used like return, except the function will return a generator.\n # https://www.google.com/search?client=firefox-b-d&q=yield+python\n yield i\n i += 1\n\n for i in naturals_from(1):\n\n x_i, a_i, b_i = get_xab(i)\n x_2i, a_2i, b_2i = get_xab(2 * i)\n\n if x_i == x_2i:\n\n r = (b_i - b_2i) % order\n\n if r == 0:\n return False\n\n return multGroup.inv(r, order) * (a_2i - a_i) % order", "def modulo(x, y) :\n if (x / y) < 1:\n return x\n else:\n return modulo(x - y, y)", "def repo_popularity(repo):\n data, code, headers = processor.get_repo_popularity(repo)\n response = make_response(data, code)\n for header in headers.keys():\n response.headers[header] = headers[header]\n return response", "def ngram_modscore(references, sentence, n, weight):\r\n references = ngramify(references, n)\r\n sentence = ngramify(sentence, n)\r\n sent_dict = {}\r\n for gram in sentence:\r\n sent_dict[gram] = sent_dict.get(gram, 0) + 1\r\n max_dict = {}\r\n for reference in references:\r\n this_ref = {}\r\n for gram in reference:\r\n this_ref[gram] = this_ref.get(gram, 0) + 1\r\n for gram in this_ref:\r\n max_dict[gram] = max(max_dict.get(gram, 0), this_ref[gram])\r\n in_ref = 0\r\n for gram in sent_dict:\r\n in_ref += min(max_dict.get(gram, 0), sent_dict[gram])\r\n return weight * np.log(in_ref / len(sentence))", "def inverse_mod(a, m):\r\n g, x, y = extended_greatest_common_denominator(a, m)\r\n if g != 1:\r\n raise Exception('modular inverse does not exist')\r\n else:\r\n return x % m", "def _calc_young_modulus(dataset: np.ndarray) -> YoungModulus:\n segment_x_length: int = len(dataset) // 10\n max_derivative_index = np.argmax([\n dataset[i + segment_x_length][1] - dataset[i][1]\n for i in range(len(dataset) - segment_x_length)\n ])\n first_point = dataset[max_derivative_index]\n second_point = dataset[max_derivative_index + segment_x_length]\n modulus = (second_point[1] - first_point[1]) / (second_point[0] - first_point[0])\n return YoungModulus(modulus, first_point, second_point)", "def modular_geometric_sum(x, n, mod):\n if n == 
1:\n return 1 % mod\n elif n % 2 == 0:\n return ((pow(x, n // 2, mod) + 1) * modular_geometric_sum(x, n // 2, mod)) % mod\n else:\n return (pow(x, n - 1, mod) + modular_geometric_sum(x, n - 1, mod)) % mod", "def test_modulo(doctest):", "def mod(num1, num2):\n\n remainder = num1 % num2\n return remainder", "def mod_inverse(base, m):\n\n g, x, y = mod_inverse_iterative(base, m)\n if (g != 1):\n return None\n else:\n return (x % m)", "def advancedStats():", "def resol_modulo(a,b, mod):\r\n\tfor i in range(mod): # Pour tous les nombres du modulo\r\n\t\tif (a*i) % mod == b: # Si a*i modulo mod = b\r\n\t\t\treturn i # Alors on a trouvé ! On renvoit i\r\n\treturn None", "def eggleton_formula(mass_ratio):\n\n two_third = mass_ratio**(2.0/3.0)\n one_third = mass_ratio**(1.0/3.0)\n return 0.49 * two_third / ( 0.6 * two_third + numpy.log(1.0 + one_third))", "def get_modularity_other_b2(A, cluster_indices):\n # define the number of nodes in the graph and the number of clusters\n n = len(cluster_indices)\n nclusters = max(cluster_indices) + 1\n girvan_e = np.zeros((nclusters, nclusters))\n volume = 0\n for i in range(n):\n for j in range(n):\n if i < j:\n weight = A[i][j]\n volume += weight\n a = cluster_indices[i]\n b = cluster_indices[j]\n if a == b:\n girvan_e[a][a] += weight\n else:\n girvan_e[a][b] += weight/2\n girvan_e[b][a] += weight/2\n for a in range(nclusters):\n for b in range(nclusters):\n girvan_e[a][b] /= volume\n girvan_a = [sum(girvan_e[i]) for i in range(nclusters)]\n modularity = sum(girvan_e[i][i] - girvan_a[i]**2 for i in range(nclusters))\n return modularity", "def calculate_modulation_index(self, y, mindom, maxdom, dfrange):\n changes = []\n for j in range(len(y) - 1):\n change = abs(y[j] - y[j + 1])\n changes.append(change)\n modindx = 0. 
if(mindom == maxdom) else np.mean(changes) / dfrange\n print(modindx)\n return modindx", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def __div__(relpath):", "def mod(numbers):\n result = numbers[0]\n for i in numbers[1:]:\n result = result % i\n return result", "def MEAN(strArg, composList, atomDict):\n accum = 0.0\n nSoFar = 0\n for atom, num in composList:\n tStr = strArg.replace('DEADBEEF', atom)\n accum = accum + eval(tStr) * num\n nSoFar = nSoFar + num\n return accum / nSoFar", "def modReducem(self, x, M):\n tmp_mod, tmp_M_bit, tmp_u = self.mod, self.M_bit, self.u\n self.mod = M\n self.M_bit = M.bit_length()\n self.u = (1 << (2 * self.M_bit)) // M\n r = self.modReduce(x)\n # return initial modular, bit size of modular and precompute u\n self.mod, self.M_bit, self.u = tmp_mod, tmp_M_bit, tmp_u\n return r", "def mod_exp(a, b, n):\n result = 1\n while True:\n if b % 2 == 1:\n result = (a * result) % n\n\n b = b / 2\n\n if b == 0:\n break\n\n a = (a * a) % n\n\n return result", "def mod(num1, num2):\n remainder = num1 % num2\n return remainder", "def fn(x, m):\n if x == 31: return int(m > 2)\n ans = fn(x+1, m)\n if freq[x] and masks[x]: \n if x == 1: ans *= 2**freq[x]\n elif not m & masks[x]: ans += freq[x] * fn(x+1, m | masks[x])\n return ans % 1_000_000_007", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def modulation(minima, contrast, distance):\n \n numerator = contrast - minima\n denominator = contrast + minima\n \n return numerator / denominator", "def mod_inverse(base, m):\n g, x, y = mod_inverse_iterative(base, m)\n if g != 1:\n return None\n else:\n return x % m", "def get_modularity_other_a(A, cluster_indices):\n # define the number of nodes in the graph and the number of clusters\n n = len(cluster_indices)\n nclusters = max(cluster_indices) + 1\n # initialize some intermediate variables\n within_cluster = [0] * nclusters\n between_cluster = [0] * nclusters\n volume = 0\n # calculate the intermediate variables\n # i and j are node indices\n # a and b are cluster indices\n 
for i in range(n-1):\n a = cluster_indices[i]\n for j in range(i+1, n):\n b = cluster_indices[j]\n weight = A[i][j]\n volume += weight\n if a == b:\n within_cluster[a] += weight\n else:\n between_cluster[a] += weight\n between_cluster[b] += weight\n # get the modularity from the intermediate variables\n modularity = 0\n for within, between in zip(within_cluster, between_cluster):\n modularity += within/volume - ((within+between) / volume)**2\n return modularity", "def modulus(x, y):\n return x % y", "def rank_by_obscurity():\n obscurities = dict()\n reader = open(\"similarities.txt\", \"r\")\n lines = reader.readlines()\n for line in lines:\n a, b, sim = line.split(\"\\t\")\n if a not in obscurities:\n obscurities[a] = [float(sim), 1.0]\n else:\n obscurities[a] = [obscurities[a][0] + float(sim),\n obscurities[a][1] + 1.0]\n \n ranked = sorted(obscurities.keys(),\n key=lambda x: obscurities[x][0] / obscurities[x][1])\n writer = open(\"obscurities.txt\", \"w\")\n for user in ranked:\n writer.write(user + \"\\n\")", "def myExp(base,exponent,modulus):\n result = 1\n while exponent > 0:\n if exponent & 1 == 1:\n result = (result * base) % modulus\n exponent = exponent >> 1\n base = (base * base) % modulus\n return result", "def aks( n ):\n\n def aks_mod( polynomial , r ):\n \"\"\"\n This function is used in aks.\n polynomial modulo ( x^r - 1 )\n \"\"\"\n aks_mod = polynomial.coefficients\n total = aks_mod[ : r ]\n aks_mod = aks_mod[ r : ]\n while len(aks_mod) - 1 >= r :\n for i in range(r):\n total[i] += aks_mod[i]\n aks_mod = aks_mod[ r : ]\n for i in range(len(aks_mod)):\n total[i] += aks_mod[i]\n return array_poly_mod( total , polynomial.mod )\n\n lg = math.log( n , 2 )\n k = int( lg * lg )\n\n if arith1.powerDetection( n )[ 1 ] != 1: #Power Detection\n print(\" n is not prime \")\n return False\n\n start = 3\n while 1:\n d = arith1.gcd.gcd( start , n )\n if 1 < d < n:\n print(\"n is not prime\")\n return False\n x = n % start\n N = x\n for i in range( 1 , k + 1 ):\n if N == 1:\n break\n N = ( N * x ) % start\n if i == k:\n r = start\n break\n start += 1\n d = arith1.gcd.gcd( r , n )\n if 1 < d < n:\n print(\" n is not prime \")\n return False\n if n <= r:\n print(\" n is prime \")\n return True\n\n e = multiplicative.euler( r ) #Cyclotomic Conguence\n e = math.sqrt( e )\n e = int( e * lg )\n for b in range( 1 , e+1 ):\n f = array_poly_mod( [ b , 1 ] , n )\n total = array_poly_mod( [ 1 ] , n )\n count = n\n while count > 0:\n if count & 1:\n total = total * f\n total = aks_mod( total , r )\n f = f.power()\n f = aks_mod( f , r )\n count = count >> 1\n total_poly = total.coefficients_to_dict()\n if total_poly != { 0 : b , n % r : 1 }:\n print(\" n is not prime \")\n return False\n print(\" n is prime \")\n return True", "def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod", "def weight(self):", "def test_get_modifier_distribution():\n counter = {}\n for i in range(999):\n 
modifier = mockdata.get_modifier(i)\n counter[modifier] = counter.get(modifier, 0) + 1\n assert counter[\"Modifier 1\"] == counter[\"Modifier 2\"]\n assert counter[\"Modifier 1\"] == counter[\"Modifier 3\"]", "def d(n):\n return sum(divisors(n))", "def factorPR(n):\n\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n)))\n\tfor additive in range(1,5):\n\t\tfast=slow=1; i=1\n\t\twhile i<numsteps:\n\t\t\tslow = (slow*slow + additive) % n\n\t\t\ti = i + 1\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tg = gcd(fast-slow,n)\n\t\t\tif (g != 1):\n\t\t\t\tif (g == n):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\treturn g\n\treturn 1", "def Eval(state):\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2", "def test_relevance_with_itself():\n state = gen_state_cgpm(get_data_separated)\n assert np.allclose(state.relevance_probability(2, [2], 1), 1.0)", "def numerator(self,data,gold):\n gold_size = 0.0\n for lead in gold.keys():\n gold_size += len(gold[lead])\n print gold_size\n epsilon = 0.0000000001\n match = 0.0\n not_match = 0.0\n exp_size = 0.0\n for lead in data.keys():\n for hit in data[lead]:\n exp_size += 1\n edge = (lead,hit[0])\n if self.CompareEdges(edge,gold):\n print edge\n match += 1\n else:\n not_match += 1\n\n if match == 0.0:\n match = epsilon\n elif not_match == 0.0:\n not_match = epsilon\n print match\n print not_match\n num1 = match \n num2 = not_match\n return num1/num2", "def diversified_ranking(self):\n self.error_throw('rank')\n instance = Instance(self.table_name)\n instance.addTable(Table(instance,False,'','')) # 'False'->transformed '',''->no describe yet\n if self.import_method == 'mysql': instance = self.mysql_handle(instance)\n elif self.import_method == 'csv': instance = self.csv_handle(instance)\n\n self.rank_partial(instance)\n\n self.rank_method = methods_of_ranking[3] # = 'diversified_ranking'", "def modulus(self):\n return math.sqrt(self._reNum ** 2 + self._imNum ** 2)", "def _naive_greedy_modularity_communities(G):\r\n # First create one community for each node\r\n communities = list([frozenset([u]) for u in G.nodes()])\r\n # Track merges\r\n merges = []\r\n # Greedily merge communities until no improvement is possible\r\n old_modularity = None\r\n new_modularity = modularity(G, communities)\r\n while old_modularity is None or new_modularity > old_modularity:\r\n # Save modularity for comparison\r\n old_modularity = new_modularity\r\n # Find best pair to merge\r\n trial_communities = list(communities)\r\n to_merge = None\r\n for i, u in enumerate(communities):\r\n for j, v in enumerate(communities):\r\n # Skip i=j and empty communities\r\n if j <= i or len(u) == 0 or len(v) == 0:\r\n continue\r\n # Merge communities u and v\r\n trial_communities[j] = u | v\r\n trial_communities[i] = frozenset([])\r\n trial_modularity = modularity(G, trial_communities)\r\n if trial_modularity >= new_modularity:\r\n # Check if strictly better or tie\r\n if trial_modularity > new_modularity:\r\n # Found new best, save modularity and group indexes\r\n new_modularity = trial_modularity\r\n to_merge = (i, j, new_modularity - old_modularity)\r\n elif (\r\n to_merge and\r\n min(i, j) < min(to_merge[0], to_merge[1])\r\n ):\r\n # Break ties by choosing pair with lowest min id\r\n new_modularity = trial_modularity\r\n to_merge = (i, j, new_modularity - old_modularity)\r\n # Un-merge\r\n trial_communities[i] = u\r\n trial_communities[j] = v\r\n if to_merge is not None:\r\n # If the best merge improves modularity, use it\r\n 
merges.append(to_merge)\r\n i, j, dq = to_merge\r\n u, v = communities[i], communities[j]\r\n communities[j] = u | v\r\n communities[i] = frozenset([])\r\n # Remove empty communities and sort\r\n communities = [c for c in communities if len(c) > 0]\r\n for com in sorted(communities, key=lambda x: len(x), reverse=True):\r\n yield com", "def modinv(a, m):\n b = 1\n while not (a * b) % m == 1:\n b += 1\n return b", "def _repetitions(webpage_tree):\n\n metadata = {\n \"runs\": len(webpage_tree),\n \"max_resources_run\": 0,\n # a huge number\n \"min_resources_run\": time() * 99999,\n \"avg_resources_run\": 0,\n \"static_resources\": 0,\n \"dynamic_resources\": 0,\n \"files\": {},\n }\n data = {}\n\n if len(webpage_tree) > 0:\n for run in webpage_tree:\n files_in_run = len(webpage_tree[run])\n if metadata[\"min_resources_run\"] > files_in_run:\n metadata[\"min_resources_run\"] = files_in_run\n if metadata[\"max_resources_run\"] < files_in_run:\n metadata[\"max_resources_run\"] = files_in_run\n metadata[\"avg_resources_run\"] = metadata[\"avg_resources_run\"] + files_in_run\n for f in webpage_tree[run]:\n filename = f.split(os.path.sep)[-1]\n if filename not in data:\n metadata[\"files\"][filename] = {\n \"reps\": 1,\n }\n data[filename] = {\n \"reps\": 1,\n \"hash\": webpage_tree[run][f],\n }\n else:\n metadata[\"files\"][filename][\"reps\"] = (\n metadata[\"files\"][filename][\"reps\"] + 1\n )\n data[filename][\"reps\"] = data[filename][\"reps\"] + 1\n\n metadata[\"avg_resources_run\"] = int(\n metadata[\"avg_resources_run\"] / metadata[\"runs\"]\n )\n\n for f in data:\n if metadata[\"files\"][f][\"reps\"] >= (metadata[\"runs\"] * _REP_TRESHOLD):\n metadata[\"static_resources\"] = (\n metadata[\"static_resources\"] + metadata[\"files\"][f][\"reps\"]\n )\n else:\n metadata[\"dynamic_resources\"] = (\n metadata[\"dynamic_resources\"] + metadata[\"files\"][f][\"reps\"]\n )\n\n return metadata, data", "def divisori(n):\n div=set()\n for i in range(1,int(n**0.5+1)):\n if n%i==0:\n div.add(int(n/i))\n div.add(i)\n return sorted(div)", "def make_division_by(n):\n def division(x):\n assert x != 0, 'Denominator can\\'t be zero'\n return x / n\n return division", "def modularity_spectrum(G):\n import scipy as sp\n\n if G.is_directed():\n return sp.linalg.eigvals(nx.directed_modularity_matrix(G))\n else:\n return sp.linalg.eigvals(nx.modularity_matrix(G))", "def rank():\n return 0", "def log_modulo(a, b, m):\n # find x in form x = np - q for some (n, p)\n # => a^x = b ~ a^np = a^q * b\n a, b = a % m, b % m\n n = isqrt(m) + 1\n # compute all a^q * b\n rhs = {}\n for q in range(n + 1):\n rhs[b] = q\n b = b * a % m\n\n an = powmod(a, n, m)\n lhs = 1\n for p in range(1, n + 1):\n lhs = lhs * an % m\n if lhs in rhs:\n return n * p - rhs[lhs]\n # some numbers do not have discrete log (example: log_2 3 (mod 7))\n return None", "def multiple(a, b):\n \"*** YOUR CODE HERE ***\"\n r1 = a % b\n if (r1 == 0):\n GCD = b\n else:\n r2 = b % r1\n while (r2 != 0):\n r1, r2 = r2, r1 % r2\n GCD = r1\n return (a * b) // GCD", "def nits(self):", "def findSpecialFactor(divisor):\n for i in range(1, 1000):\n prod = i * factor\n if prod % findMod(prod) == i:\n return i", "def get_popularity(rest_data, item_dict):\n max_review_count = rest_data.review_count.max()\n min_review_count = rest_data.review_count.min()\n result = np.zeros((len(rest_data), 2))\n for i in range(len(rest_data)):\n result[i, 0] = item_dict[rest_data.business_id[i]]\n result[i, 1] = (((rest_data.review_count[i] - min_review_count)/(max_review_count - 
min_review_count))*4 + 1)\n result = result[result[:, 0].argsort()]\n return result" ]
[ "0.68370956", "0.67086875", "0.616814", "0.61296153", "0.6121604", "0.60440207", "0.59257174", "0.58213174", "0.5771041", "0.57512736", "0.568411", "0.56509286", "0.56152207", "0.55530596", "0.55520487", "0.55343777", "0.5500709", "0.54053086", "0.53607327", "0.53461915", "0.534586", "0.5322193", "0.5320439", "0.53065926", "0.5303879", "0.53024495", "0.5298034", "0.5216271", "0.5192097", "0.5175296", "0.51752913", "0.517235", "0.5161344", "0.5150398", "0.5116544", "0.51072", "0.5106996", "0.5103637", "0.50946", "0.5075343", "0.5062036", "0.5051803", "0.5042758", "0.50398266", "0.49926335", "0.49884447", "0.49762124", "0.49714318", "0.49708697", "0.49685735", "0.49620754", "0.49608734", "0.49594504", "0.49579507", "0.49501413", "0.49458116", "0.49448216", "0.4943585", "0.49306205", "0.4926789", "0.49262977", "0.49184555", "0.49132192", "0.49131495", "0.49109945", "0.49048436", "0.48928607", "0.488772", "0.48862565", "0.48846358", "0.48813018", "0.48796454", "0.4879459", "0.48778778", "0.48742074", "0.48733097", "0.4872049", "0.48596254", "0.4856464", "0.48532426", "0.48348406", "0.48298514", "0.48229694", "0.481645", "0.48048183", "0.4804672", "0.48038936", "0.48035395", "0.48001263", "0.4797309", "0.47942942", "0.47935206", "0.47924381", "0.47903195", "0.47894144", "0.47814915", "0.4775632", "0.4775257", "0.47744742", "0.47733825", "0.47712827" ]
0.0
-1
This is a modification of the original Girvan-Newman formulation.
def get_modularity_other_b2(A, cluster_indices):
    # define the number of nodes in the graph and the number of clusters
    n = len(cluster_indices)
    nclusters = max(cluster_indices) + 1
    girvan_e = np.zeros((nclusters, nclusters))
    volume = 0
    for i in range(n):
        for j in range(n):
            if i < j:
                weight = A[i][j]
                volume += weight
                a = cluster_indices[i]
                b = cluster_indices[j]
                if a == b:
                    girvan_e[a][a] += weight
                else:
                    girvan_e[a][b] += weight/2
                    girvan_e[b][a] += weight/2
    for a in range(nclusters):
        for b in range(nclusters):
            girvan_e[a][b] /= volume
    girvan_a = [sum(girvan_e[i]) for i in range(nclusters)]
    modularity = sum(girvan_e[i][i] - girvan_a[i]**2 for i in range(nclusters))
    return modularity
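A minimal usage sketch for the function above, assuming get_modularity_other_b2 is already in scope and numpy is imported as np; the toy graph and cluster labels below are illustrative assumptions, not part of the original snippet:

import numpy as np

# Toy graph: nodes 0-2 form one triangle, nodes 3-5 form another,
# and a single edge (2, 3) bridges the two communities.
A = np.zeros((6, 6))
edges = [(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5), (2, 3)]
for i, j in edges:
    A[i][j] = A[j][i] = 1.0

# One cluster index per node, matching the two triangles.
cluster_indices = [0, 0, 0, 1, 1, 1]

# With 7 edges in total (3 inside each triangle, 1 between them),
# the returned modularity works out to 6/7 - 0.5, roughly 0.357.
print(get_modularity_other_b2(A, cluster_indices))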
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def narration_target(self):", "def substantiate():", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def mezclar_bolsa(self):", "def needleman_wunsch1(x,y,lodict=None,gop=-2.5, gep=-1.75, local=False):\n n,m = len(x),len(y)\n dp = np.zeros((n+1,m+1))\n pointers = np.zeros((n+1,m+1),np.int32)\n for i in range(1,n+1):\n dp[i,0] = dp[i-1,0]+(gep if i>1 else gop)\n pointers[i,0]=1\n for j in range(1,m+1):\n dp[0,j] = dp[0,j-1]+(gep if j>1 else gop)\n pointers[0,j]=2\n for i in range(1,n+1):\n for j in range(1,m+1):\n if not lodict:\n if x[i-1] == y[j-1]:\n match = dp[i-1,j-1]+1\n else:\n match = dp[i-1,j-1]-1\n else:\n match = dp[i-1,j-1]+lodict[x[i-1],y[j-1]]\n insert = dp[i-1,j]+(gep if pointers[i-1,j]==1 else gop)\n delet = dp[i,j-1]+(gep if pointers[i,j-1]==2 else gop)\n max_score = max([match,insert,delet])\n dp[i,j] = max_score\n pointers[i,j] = [match,insert,delet].index(max_score)\n alg = []\n i,j = n,m\n while(i>0 or j>0):\n pt = pointers[i,j]\n if pt==0:\n i-=1\n j-=1\n alg = [[x[i],y[j]]]+alg\n if pt==1:\n i-=1\n alg = [[x[i],'-']]+alg\n if pt==2:\n j-=1\n alg = [['-',y[j]]]+alg\n return dp[-1,-1], alg", "def test_born_newton(self):\n\n n0 = 3.4\n omega = 2*np.pi*200e12\n dl = 
0.01\n chi3 = 2.8E-18\n\n width = 1\n L = 5\n L_chi3 = 4\n\n width_voxels = int(width/dl)\n L_chi3_voxels = int(L_chi3/dl)\n\n Nx = int(L/dl)\n Ny = int(3.5*width/dl)\n\n eps_r = np.ones((Nx, Ny))\n eps_r[:, int(Ny/2-width_voxels/2):int(Ny/2+width_voxels/2)] = np.square(n0)\n\n nl_region = np.zeros(eps_r.shape)\n nl_region[int(Nx/2-L_chi3_voxels/2):int(Nx/2+L_chi3_voxels/2), int(Ny/2-width_voxels/2):int(Ny/2+width_voxels/2)] = 1\n\n simulation = Simulation(omega, eps_r, dl, [15, 15], 'Ez')\n simulation.add_mode(n0, 'x', [17, int(Ny/2)], width_voxels*3)\n simulation.setup_modes()\n simulation.add_nl(chi3, nl_region, eps_scale=True, eps_max=np.max(eps_r))\n\n srcval_vec = np.logspace(1, 3, 3)\n pwr_vec = np.array([])\n T_vec = np.array([])\n for srcval in srcval_vec:\n simulation.setup_modes()\n simulation.src *= srcval\n\n # Newton\n simulation.solve_fields_nl(solver_nl='newton')\n E_newton = simulation.fields[\"Ez\"]\n\n # Born\n simulation.solve_fields_nl(solver_nl='born')\n E_born = simulation.fields[\"Ez\"]\n\n # More solvers (if any) should be added here with corresponding calls to assert_allclose() below\n\n assert_allclose(E_newton, E_born, rtol=1e-3)", "def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)", "def nits(self):", "def g(self):\n return 2", "def dis(self):\n return self.nlegomena(2)", "def VerfGanador(N,*J):\n\tJugador=[]\n\tfor x in J:\n\t Jugador.append(x)\n\n\tif Jugador[0] == 17:\n\t\tGanador = True\n\telse:\n\t\tif Jugador[1] == 17:\n\t\t\tGanador = True\n\t\telse:\n\t\t\tif Jugador[2] == 17:\n\t\t\t\tGanador = True\n\t\t\telse:\n\t\t\t\tif Jugador[3] == 17:\n\t\t\t\t\tGanador = True\n\t\t\t\telse:\n\t\t\t\t\tif Jugador[4] == 17:\n\t\t\t\t\t\tGanador = 
True\n\t\t\t\t\telse:\n\t\t\t\t\t\tif Jugador[5] == 17:\n\t\t\t\t\t\t\tGanador = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif Jugador[0] == False and Jugador[1] == False and Jugador[2] == False and Jugador[3] == False and Jugador[4] == False and Jugador[5] == False:\n\t\t\t\t\t\t\t\tGanador = \"Empate\"\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tGanador = \"Siguiente\"\n\treturn Ganador", "def Ncen(self, m):\n pass", "def ner_features(tokens, index, history):\n\n # init the stemmer\n stemmer = SnowballStemmer('english')\n #print tokens\n\n # Pad the sequence with placeholders\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'),\n ('[END2]', '[END2]')]\n history = ['[START2]', '[START1]'] + list(history)\n\n # shift the index with 2, to accommodate the padding\n index += 2\n\n word, pos = tokens[index]\n prevword, prevpos = tokens[index - 1]\n prevprevword, prevprevpos = tokens[index - 2]\n nextword, nextpos = tokens[index + 1]\n nextnextword, nextnextpos = tokens[index + 2]\n previob = history[index - 1]\n contains_dash = '-' in word\n contains_dot = '.' in word\n allascii = all([True for c in word if c in string.ascii_lowercase])\n\n allcaps = word == word.capitalize()\n capitalized = word[0] in string.ascii_uppercase\n\n prevallcaps = prevword == prevword.capitalize()\n prevcapitalized = prevword[0] in string.ascii_uppercase\n\n nextallcaps = prevword == prevword.capitalize()\n nextcapitalized = prevword[0] in string.ascii_uppercase\n\n return {\n 'word': word,\n 'lemma': stemmer.stem(word),\n 'pos': pos,\n 'all-ascii': allascii,\n 'all-num': word.isdigit(),\n\n 'next-word': nextword,\n 'next-lemma': stemmer.stem(nextword),\n 'next-pos': nextpos,\n\n 'next-next-word': nextnextword,\n 'nextnextpos': nextnextpos,\n\n\n 'prev-word': prevword,\n 'prev-lemma': stemmer.stem(prevword),\n 'prev-pos': prevpos,\n 'prev-pos-num': prevword.isdigit(),\n\n 'prev-prev-word': prevprevword,\n 'prev-prev-pos': prevprevpos,\n\n 'prev-iob': previob,\n\n 'contains-dash': contains_dash,\n 'contains-dot': contains_dot,\n\n 'all-caps': allcaps,\n 'capitalized': capitalized,\n\n 'prev-all-caps': prevallcaps,\n 'prev-capitalized': prevcapitalized,\n\n 'next-all-caps': nextallcaps,\n 'next-capitalized': nextcapitalized,\n }", "def MatchingPennies():\n from sage.matrix.constructor import matrix\n A = matrix([[1, -1], [-1, 1]])\n g = NormalFormGame([A])\n g.rename('Matching pennies - ' + repr(g))\n return g", "def solution(s):", "def short_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 0\n carriers[-25] = 0\n carriers[-24] = 1 + 1j\n carriers[-23] = 0\n carriers[-22] = 0\n carriers[-21] = 0\n carriers[-20] = -1 - 1j\n carriers[-19] = 0\n carriers[-18] = 0\n carriers[-17] = 0\n carriers[-16] = 1 + 1j\n carriers[-15] = 0\n carriers[-14] = 0\n carriers[-13] = 0\n carriers[-12] = -1 - 1j\n carriers[-11] = 0\n carriers[-10] = 0\n carriers[-9] = 0\n carriers[-8] = -1 - 1j\n carriers[-7] = 0\n carriers[-6] = 0\n carriers[-5] = 0\n carriers[-4] = 1 + 1j\n carriers[-3] = 0\n carriers[-2] = 0\n carriers[-1] = 0\n carriers[0] = 0\n carriers[1] = 0\n carriers[2] = 0\n carriers[3] = 0\n carriers[4] = -1 - 1j\n carriers[5] = 0\n carriers[6] = 0\n carriers[7] = 0\n carriers[8] = -1 - 1j\n carriers[9] = 0\n carriers[10] = 0\n carriers[11] = 0\n carriers[12] = 1 + 1j\n carriers[13] = 0\n carriers[14] = 0\n carriers[15] = 0\n carriers[16] = 1 
+ 1j\n carriers[17] = 0\n carriers[18] = 0\n carriers[19] = 0\n carriers[20] = 1 + 1j\n carriers[21] = 0\n carriers[22] = 0\n carriers[23] = 0\n carriers[24] = 1 + 1j\n carriers[25] = 0\n carriers[26] = 0\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers) * np.sqrt(13 / 6)", "def J (self, n):", "def bellman_operator(self, v):\n new_v = np.empty(v.shape)\n for i in range(self.N):\n for j in range(self.N):\n # stay put\n v1 = self.theta[i] + self.epsilon[j] + self.beta * v[i, j]\n\n # new job\n v2 = (self.theta[i] + self.G_mean + self.beta *\n np.dot(v[i, :], self.G_probs))\n\n # new life\n v3 = (self.G_mean + self.F_mean + self.beta *\n np.dot(self.F_probs, np.dot(v, self.G_probs)))\n new_v[i, j] = max(v1, v2, v3)\n return new_v", "def G(U):\n n = U.shape[0]\n G_U = np.zeros([n,1])\n DELTA_x = float(2*L)/(n-1)\n for i in range(n):\n G_U[i][0] = U[(i+1)%n][0]\n G_U[i][0] -= U[(i-1)%n][0]\n G_U[i][0] /= (2* DELTA_x)\n G_U[i][0] += (float(epsilon) * (U[(i+1)%n][0]- U[(i-1)%n][0]) * (U[(i-1)%n][0]+U[(i+1)%n][0]+ U[i][0])/ (4* DELTA_x))\n G_U[i][0] += (float(epsilon) * (U[(i+2)%n][0]- 2*U[(i+1)%n][0]+ 2*U[(i-1)%n][0]- U[(i-2)%n][0]) / (12*( DELTA_x**3)))\n return G_U", "def __neg__(self):\n # \n # TODO - your code here\n #\n result = [];\n for row in self.g:\n result.append([-1*n for n in row]);\n \n return Matrix(result);", "def regular(self):", "def g():", "def boundary_op_n(v):\r\n h = list(v.dic.keys())[0]\r\n p = len(h) - 1\r\n s = P_chains([],[])\r\n if (p != 0) and (isinstance(h, str) != True) and (isinstance(h, frozenset) != True) and (isinstance(h, ImmutableMatrix) != True):\r\n if (is_int(list(v.dic.keys())) == True):\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n return s\r\n else:\r\n aux = P_chains([],[])\r\n D = {}\r\n ct = 0\r\n st = []\r\n for u in v.dic.keys():\r\n for x in u:\r\n if x not in st:\r\n st.append(x)\r\n for i in st:\r\n D[tuple([ct])] = i\r\n ct = ct + 1\r\n for u in v.dic.keys():\r\n w2 = []\r\n for x in u:\r\n for y in list(D.keys()):\r\n if (x == D[y]):\r\n w2.append(y)\r\n aux = aux + P_chains([tuple(w2)],[v.dic[u]]) \r\n v = aux\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n s2 = P_chains([],[])\r\n for u in s.dic.keys():\r\n w2=[]\r\n for i in u:\r\n w2.append(D[i])\r\n s2 = s2 + P_chains([tuple(w2)],[s.dic[u]])\r\n \r\n return s2\r\n else:\r\n return s", "def partition_girvan_newman(graph, max_depth):\n ###TODO\n pass", "def bergman_input(self):\n return self.m_t", "def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n 
G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])", "def juego_nuevo():\n show_title(\"Crear sopa de NxN letras\")\n nxn = pedir_entero(\"Ingrese un numero entero de la cantidad de\\nfilas y columnas que desea (Entre 10 y 20):\\n\",10,20)\n n_palabras = pedir_entero(\"Ingrese un numero entero de la cantidad de\\npalabas que deasea agregar (Entre 0 y %d):\\n\"%(nxn/2),0,(nxn/2))\n palabras = []\n palabra_min_caracteres = 3\n palabra_repetida = False\n while len(palabras)<n_palabras:\n if palabra_repetida :\n show_msg(\"Ingreso una palabra repetida\")\n palabra_repetida = False\n # Pedir una palabra que cumpla con los requisitos\n palabra = pedir_palabra(\"[%d|%d]Ingrese una palabra entre %d y %d caracteres: \"%(len(palabras)+1,n_palabras,palabra_min_caracteres,(nxn/2)),palabra_min_caracteres,(nxn/2))\n if palabra in palabras:\n palabra_repetida = True\n else :\n palabras.append(palabra)\n matrix = crear_matrix(nxn)\n matrix,posiciones,salteadas = procesar_palabras(matrix, nxn, palabras)\n matrix = completar_matrix(matrix, nxn)\n return procesar_juego(matrix,nxn,n_palabras,salteadas,posiciones)", "def _regr_basic():", "def _init_from_newform_(self, g, PP, C, prec):\n k = g.weight()\n N = g.level()\n if(k % 4 == 0):\n ep = g.atkin_lehner_eigenvalue()\n else:\n ep = -g.atkin_lehner_eigenvalue()\n kappa = mpmath.mpf(3 - k) / mp2\n if(ep == -1):\n WR = WeilRepDiscriminantForm(N, False)\n elif ep == 1:\n WR = WeilRepDiscriminantForm(N, True)\n else:\n raise ValueError(\" Sign of functional equation must be 1 or -1! Got:{0}\".format(ep))\n # print \"kappa=\",kappa\n # print \"WR=\",WR\n M = VVHarmonicWeakMaassForms(WR, kappa, prec)\n # print \"M=\",M\n self._space = M\n self.prec = prec\n self.coeff = dict()\n # We want a Harmonic weak maass form corresponding to the form g\n # that means we need to avoid any other cuspforms as well as\n # theta series...\n # If there are no oldforms we are happy\n if dimension_new_cusp_forms(N, k) == dimension_cusp_forms(N, k):\n # and to avoid theta series we need to avoid square discriminants\n # in the principal part\n if M._is_dual_rep:\n nset = [0, -1]\n else:\n nset = [-1]\n try:\n for n in nset:\n for r in WR.D(): # )_as_integers:\n D = M.D_from_rn((r, n))\n if not is_square(D):\n PP = {(WR.D[r], n): 1}\n self._principal_part = PP\n # print \"PP=\",PP,\"is ok!\"\n raise StopIteration()\n except StopIteration:\n pass\n # if(C is not None and C>0):", "def make_mammalian_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _terminal = _neuraminic + \\\n \" + max(%s - %%d, 0)\" % FrozenMonosaccharideResidue.from_iupac_lite(\"Hex\")\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_terminal_groups = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if spec == 'hybrid':\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 
base_terminal_groups) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n (_neuraminic), 1, base_terminal_groups + i\n ) & CompositionExpressionRule(\n \"(Hex > %d) & (Hex < (%d - (NeuAc + NeuGc)))\" % (base_hexnac + i - 2, base_hexnac + (2 * i) + 3))\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + (2 * i) + 3)\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def needleman_wunsch(x, y, lodict={}, gop=-2.5, gep=-1.75, local=False, indel=''):\n n, m = len(x), len(y)\n dp = np.zeros((n + 1, m + 1))\n pointers = np.zeros((n + 1, m + 1), np.int32)\n if not local:\n for i1, c1 in enumerate(x):\n if gop is None:\n dp[i1 + 1, 0] = lodict.get((c1, indel), gep)\n else:\n dp[i1 + 1, 0] = dp[i1, 0]+(gep if i1 + 1 > 1 else gop)\n pointers[i1 + 1, 0] = 1\n for i2, c2 in enumerate(y):\n if gop is None:\n dp[0, i2 + 1] = lodict.get((indel, c2), gep)\n else:\n dp[0, i2 + 1] = dp[0, i2]+(gep if i2 + 1 > 1 else gop)\n pointers[0, i2 + 1] = 2\n for i1, c1 in enumerate(x):\n for i2, c2 in enumerate(y):\n match = dp[i1, i2] + lodict.get(\n (c1, c2),\n 1 if c1 == c2 else -1)\n insert = dp[i1, i2 + 1] + (\n lodict.get((c1, indel), gep) if gop is None else\n gep if pointers[i1, i2 + 1] == 1 else gop)\n delet = dp[i1 + 1, i2] + (\n lodict.get((indel, c2), gep) if gop is None else\n gep if pointers[i1 + 1, i2] == 2 else gop)\n pointers[i1 + 1, i2 + 1] = p = np.argmax([match, insert, delet])\n max_score = [match, insert, delet][p]\n if local and max_score < 0:\n max_score = 0\n dp[i1 + 1, i2 + 1] = max_score\n alg = []\n if local:\n i, j = np.unravel_index(dp.argmax(), dp.shape)\n else:\n i, j = n, m\n score = dp[i, j]\n while (i > 0 or j > 0):\n pt = pointers[i, j]\n if pt == 0:\n i -= 1\n j -= 1\n alg = [(x[i], y[j])] + alg\n if pt == 1:\n i -= 1\n alg = [(x[i], indel)] + alg\n if pt == 2:\n j -= 1\n alg = [(indel, y[j])] + alg\n if local and dp[i, j] == 0:\n break\n return score, alg", "def long_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 1\n carriers[-25] = 1\n carriers[-24] = -1\n carriers[-23] = -1\n carriers[-22] = 1\n carriers[-21] = 1\n carriers[-20] = -1\n carriers[-19] = 1\n carriers[-18] = -1\n carriers[-17] = 1\n carriers[-16] = 1\n carriers[-15] = 1\n carriers[-14] = 1\n carriers[-13] = 1\n carriers[-12] = 1\n carriers[-11] = -1\n carriers[-10] = -1\n carriers[-9] = 1\n carriers[-8] = 1\n carriers[-7] = -1\n carriers[-6] = 1\n carriers[-5] = -1\n carriers[-4] = 1\n carriers[-3] = 1\n carriers[-2] = 1\n carriers[-1] = 1\n carriers[0] = 0\n carriers[1] = 1\n carriers[2] = -1\n carriers[3] = -1\n carriers[4] = 1\n carriers[5] = 1\n carriers[6] = -1\n carriers[7] = 1\n carriers[8] = -1\n carriers[9] = 1\n carriers[10] = -1\n carriers[11] = -1\n carriers[12] = -1\n carriers[13] = -1\n carriers[14] = -1\n carriers[15] = 1\n carriers[16] = 1\n carriers[17] = -1\n carriers[18] = -1\n carriers[19] = 1\n carriers[20] = -1\n carriers[21] = 1\n carriers[22] = -1\n carriers[23] = 1\n 
carriers[24] = 1\n carriers[25] = 1\n carriers[26] = 1\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers)", "def fG(self):\n pass", "def glueEmH( Ja, Jf, truncNum = scipy.inf ):\n w, v = truncBasisH( Ja, truncNum )\n sPlus, sMinus, sZ = sPlusAndMinusAndZ( v )\n \n H1 = scipy.zeros( ( len(w)**4, len(w)**4 ) )\n \n for n in range( len(w)**4 ):\n # Diagonal previous generation contributions\n o = oct(n)[-4:].zfill(4)\n o = [int(char) for char in o]\n o_A, o_B, o_C, o_D = o\n \n H1[n, n] += scipy.sum( [ w[ i ] for i in o ] )\n \n # Edge terms\n for np in range( n, len(w)**4 ):\n op = oct(np)[-4:].zfill(4)\n op = [int(char) for char in op]\n op_A, op_B, op_C, op_D = op\n \n x = 0.\n if ( (o_B == op_B) and (o_C == op_C) ):\n x += -Jf * ( .5 * ( sPlus[0][o_A, op_A] * sMinus[0][o_D, op_D] + sMinus[0][o_A, op_A] * sPlus[0][o_D,op_D] ) + sZ[0][o_A, op_A] * sZ[0][o_D, op_D] )\n if ( (o_C == op_C) and (o_A == op_A) ):\n x += -Jf * ( .5 * ( sPlus[1][o_B, op_B] * sMinus[1][o_D, op_D] + sMinus[1][o_B, op_B] * sPlus[1][o_D,op_D] ) + sZ[1][o_B, op_B] * sZ[1][o_D, op_D] )\n if ( (o_A == op_A) and (o_B == op_B) ):\n x += -Jf * ( .5 * ( sPlus[2][o_C, op_C] * sMinus[2][o_D, op_D] + sMinus[2][o_C, op_C] * sPlus[1][o_D,op_D] ) + sZ[1][o_C, op_C] * sZ[2][o_D, op_D] )\n \n H1[n, np] = x\n H1[np, n] = x\n \n return H1", "def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n 
print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch", "def __init__(self, sents, n, corpus='', D=None):\n\n self.n = n\n self.D = D\n self.corpus = corpus\n self.smoothingtechnique = 'Kneser Ney Smoothing'\n # N1+(·w_<i+1>)\n self._N_dot_tokens_dict = N_dot_tokens = defaultdict(set)\n # N1+(w^<n-1> ·)\n self._N_tokens_dot_dict = N_tokens_dot = defaultdict(set)\n # N1+(· w^<i-1>_<i-n+1> ·)\n self._N_dot_tokens_dot_dict = N_dot_tokens_dot = defaultdict(set)\n self.counts = counts = defaultdict(int)\n vocabulary = []\n\n if D is None:\n total_sents = len(sents)\n k = int(total_sents*9/10)\n training_sents = sents[:k]\n held_out_sents = sents[k:]\n training_sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], training_sents))\n for sent in training_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n - 1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n D_candidates = [i*0.12 for i in range(1, 9)]\n xs = []\n for D in D_candidates:\n self.D = D\n aux_perplexity = self.perplexity(held_out_sents)\n xs.append((D, aux_perplexity))\n xs.sort(key=lambda x: x[1])\n self.D = xs[0][0]\n with open('old-stuff/kneserney_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('D: {}\\n'.format(self.D))\n f.write('Perplexity 
observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n # discount value D provided\n else:\n sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], sents))\n for sent in sents:\n for j in range(n+1):\n # all k-grams for 0 <= k <= n\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n # e.g., ngram = (1,2,3,4,5,6,7,8)\n # right_token = (8,)\n # left_token = (1,)\n # right_kgram = (2,3,4,5,6,7,8)\n # left_kgram = (1,2,3,4,5,6,7)\n # middle_kgram = (2,3,4,5,6,7)\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n-1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n\n xs = [k for k, v in counts.items() if v == 1 and n == len(k)]\n ys = [k for k, v in counts.items() if v == 2 and n == len(k)]\n n1 = len(xs)\n n2 = len(ys)\n self.D = n1 / (n1 + 2 * n2)", "def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()", "def solve(self):", "def petit_nettoyage(ligne, lem_v=True, lem_n=True, len_elt=2, stopw=[]):\n lemmatizer = WordNetLemmatizer()\n for elt in ligne:\n if elt in (string.punctuation + string.digits):\n ligne = ligne.replace(elt, \" \")\n if lem_v and 
lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if lemmatizer.lemmatize(elt, pos=\"v\") not in stopw\n ]\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in liste\n if len(lemmatizer.lemmatize(elt, pos=\"n\")) > len_elt\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"v\") not in stopw)\n and (len(elt) > len_elt)\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"n\") not in stopw)\n and (len(elt) > len_elt)\n ]\n else:\n liste = [\n elt\n for elt in ligne.split()\n if (elt not in stopw) and (len(elt) > len_elt)\n ]\n ligne = \" \".join(liste)\n return ligne", "def remindfa(n):\n if (n==14):\n re2nfa('@', 'at-nfa')\n elif (n==15):\n re2nfa('1', '1-nfa')\n elif (n==16):\n re2nfa('1*', '1-star-nfa') \n elif (n==17):\n re2nfa('0+1', '0-plus-1-nfa') \n elif (n==18):\n re2nfa('01', '0-1-nfa')\n elif (n==19):\n re2nfa('(0+1)*', '0-plus-1-star-nfa')\n elif (n==20):\n re2nfa('(0((1+2)*)+(3(4+5)*))*', '0-1-plus-2-star-3-4-plus-5-star-star-nfa')\n elif (n==21):\n re2nfa('((0101(0+1)*(0+1)*1010)+((0+1)*1010)*)*', 'Example-1-nfa')\n re2dfa('((0101(0+1)*(0+1)*1010)+((0+1)*1010)*)*', 'Example-1-dfa')\n re2mindfa('((0101(0+1)*(0+1)*1010)+((0+1)*1010)*)*', 'Example-1-mindfa')\n # \n elif (n==22):\n re2nfa('@ + (0+1) + (0+1)(0+1) + (0+1)(0+1)(0+1) + (0+1)(0+1)(0+1)(0+1) + ((00000 + 00001 + 00010 + 00100 + 01000 + 10000 + 00011 + 00101 + 00110 + 01001 + 01010 + 01100 + 10001 + 10010 + 10100 + 11000) + (00111 + 01011 + 01101 + 01110 + 10011 + 10101 + 10110 + 11001 + 11010 + 11100))*',\n 'Block5-30-31-nfa')\n re2dfa('@ + (0+1) + (0+1)(0+1) + (0+1)(0+1)(0+1) + (0+1)(0+1)(0+1)(0+1) + ((00000 + 00001 + 00010 + 00100 + 01000 + 10000 + 00011 + 00101 + 00110 + 01001 + 01010 + 01100 + 10001 + 10010 + 10100 + 11000) + (00111 + 01011 + 01101 + 01110 + 10011 + 10101 + 10110 + 11001 + 11010 + 11100))*',\n 'Block5-30-31-dfa')\n re2mindfa('@ + (0+1) + (0+1)(0+1) + (0+1)(0+1)(0+1) + (0+1)(0+1)(0+1)(0+1) + ((00000 + 00001 + 00010 + 00100 + 01000 + 10000 + 00011 + 00101 + 00110 + 01001 + 01010 + 01100 + 10001 + 10010 + 10100 + 11000) + (00111 + 01011 + 01101 + 01110 + 10011 + 10101 + 10110 + 11001 + 11010 + 11100))*',\n 'Block5-30-31-mindfa')\n #\n # \n elif (n==23):\n re2nfa('(0+1)* ( (0101) + ( (0+1)101 + 0(0+1)01 + 01(0+1)1 + 010(0+1) ) + ( (0+1)(0+1)01 + (0+1)1(0+1)1 + (0+1)10(0+1) + 0(0+1)(0+1)1 + 0(0+1)0(0+1) + 01(0+1)(0+1) ) )',\n 'ham2_nfa_via_re')\n re2dfa('(0+1)* ( (0101) + ( (0+1)101 + 0(0+1)01 + 01(0+1)1 + 010(0+1) ) + ( (0+1)(0+1)01 + (0+1)1(0+1)1 + (0+1)10(0+1) + 0(0+1)(0+1)1 + 0(0+1)0(0+1) + 01(0+1)(0+1) ) )',\n 'ham2_dfa_via_re')\n re2mindfa('(0+1)* ( (0101) + ( (0+1)101 + 0(0+1)01 + 01(0+1)1 + 010(0+1) ) + ( (0+1)(0+1)01 + (0+1)1(0+1)1 + (0+1)10(0+1) + 0(0+1)(0+1)1 + 0(0+1)0(0+1) + 01(0+1)(0+1) ) )',\n 'ham2_mindfa_via_re')\n #\n elif (n==0):\n re2nfa('(a+aaa)(@+aa+aaaa+aaaaaa)*', 'lasso0nfa')\n re2dfa('(a+aaa)(@+aa+aaaa+aaaaaa)*', 'lasso0dfa')\n re2mindfa('(a+aaa)(@+aa+aaaa+aaaaaa)*', 'lasso0mindfa')\n elif (n==1):\n re2nfa('(aa+aaa)(@+aaaaa+aaaaaaa)*', 'lasso1nfa')\n re2dfa('(aa+aaa)(@+aaaaa+aaaaaaa)*', 'lasso1dfa')\n re2mindfa('(aa+aaa)(@+aaaaa+aaaaaaa)*', 'lasso1mindfa')\n elif (n==2):\n re2nfa('(a+aaaaa)(aaaaa+aaaaaaa)*', 'lasso2nfa')\n re2dfa('(a+aaaaa)(aaaaa+aaaaaaa)*', 'lasso2dfa')\n re2mindfa('(a+aaaaa)(aaaaa+aaaaaaa)*', 'lasso2mindfa')\n elif (n==3):\n 
re2nfa('(aaaaa)(aaaaa+aaaaaaa)*', 'lasso3nfa')\n re2dfa('(aaaaa)(aaaaa+aaaaaaa)*', 'lasso3dfa')\n re2mindfa('(aaaaa)(aaaaa+aaaaaaa)*', 'lasso3mindfa')\n elif (n==4):\n re2nfa('(a+b)*babb', 'endsinbabbnfa')\n re2dfa('(a+b)*babb', 'endsinbabbdfa')\n re2mindfa('(a+b)*babb', 'endsinbabbmindfa')\n elif (n==5):\n re2nfa('(a+b)*b(a+b)', 'firstlastbnfa')\n re2dfa('(a+b)*b(a+b)', 'firstlastbdfa')\n re2mindfa('(a+b)*b(a+b)', 'firstlastbmindfa')\n elif (n==6):\n re2nfa('(a+b)*b(a+b)(a+b)', 'secondlastbnfa')\n re2dfa('(a+b)*b(a+b)(a+b)', 'secondlastbdfa')\n re2mindfa('(a+b)*b(a+b)(a+b)', 'secondlastbmindfa')\n elif (n==7):\n re2nfa('(a+b)*b(a+b)(a+b)(a+b)', 'thirdlastbnfa')\n re2dfa('(a+b)*b(a+b)(a+b)(a+b)', 'thirdlastbdfa')\n re2mindfa('(a+b)*b(a+b)(a+b)(a+b)', 'thirdlastbmindfa')\n elif (n==8):\n re2nfa('(a+b)*b(a+b)(a+b)(a+b)(a+b)', 'fourthlastbnfa')\n re2dfa('(a+b)*b(a+b)(a+b)(a+b)(a+b)', 'fourthlastbdfa')\n re2mindfa('(a+b)*b(a+b)(a+b)(a+b)(a+b)', 'fourthlastbmindfa')\n elif (n==9):\n re2nfa('(a+a)*a(a+a)(a+a)(a+a)(a+a)', 'fourthlastbContrastnfa')\n re2dfa('(a+a)*a(a+a)(a+a)(a+a)(a+a)', 'fourthlastbContrastdfa')\n re2mindfa('(a+a)*a(a+a)(a+a)(a+a)(a+a)', 'fourthlastbContrastmindfa')\n elif (n==10):\n re2nfa('(a+b)*babbab', 'endsinbabbabnfa')\n re2dfa('(a+b)*babbab', 'endsinbabbabdfa')\n re2mindfa('(a+b)*babbab', 'endsinbabbabmindfa')\n elif (n==11):\n re2nfa('(a+b)*babbababbaba', 'endsinbabbababbabanfa')\n re2dfa('(a+b)*babbababbaba', 'endsinbabbababbabadfa')\n re2mindfa('(a+b)*babbababbaba', 'endsinbabbababbabamindfa')\n elif (n==12):\n re2nfa( ' ( ( (1) (1) ) ) (1)* (@ + (0) ) + ( ( ( (1) (1) ) ) (1)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( (0) (0)* + (1 + (0) (0)* (1) ) + ( (1 + (0) (0)* (1) ) (1) ) + ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* (@ + (0) ) + ( (1 + (0) (0)* (1) ) (0) ) ) + ( ( ( (1) (1) ) ) (1)* ( (0) (1) ) + ( (0) ) (1) + ( ( ( (1) (1) ) ) (1)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (1) ) + ( (1 + (0) (0)* (1) ) (0) ) (1) ) ) ( ( (1) (1) ) (1)* ( (0) (1) ) + (0) (1) + ( ( (1) (1) ) (1)* ( (0) (0) ) + (0) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (1) ) + ( (1 + (0) (0)* (1) ) (0) ) (1) ) )* ( (1) + ( (1) (1) ) (1)* (@ + (0) ) + (0) + ( ( (1) (1) ) (1)* ( (0) (0) ) + (0) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( (0) (0)* + (1 + (0) (0)* (1) ) + ( (1 + (0) (0)* (1) ) (1) ) + ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* (@ + (0) ) + ( (1 + (0) (0)* (1) ) (0) ) ) ) ', 'bigrenfa')\n re2dfa( ' ( ( (1) (1) ) ) (1)* (@ + (0) ) + ( ( ( (1) (1) ) ) (1)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( (0) (0)* + (1 + (0) (0)* (1) ) + ( (1 + (0) (0)* (1) ) (1) ) + ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* (@ + (0) ) + ( (1 + (0) (0)* (1) ) (0) ) ) + ( ( ( (1) (1) ) ) (1)* ( (0) (1) ) + ( (0) ) (1) + ( ( ( (1) (1) ) ) (1)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (1) ) + ( (1 + (0) (0)* (1) ) (0) ) (1) ) ) ( ( (1) (1) ) (1)* ( (0) (1) ) + (0) (1) + ( ( (1) (1) ) (1)* ( (0) (0) ) + (0) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* 
( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (1) ) + ( (1 + (0) (0)* (1) ) (0) ) (1) ) )* ( (1) + ( (1) (1) ) (1)* (@ + (0) ) + (0) + ( ( (1) (1) ) (1)* ( (0) (0) ) + (0) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( (0) (0)* + (1 + (0) (0)* (1) ) + ( (1 + (0) (0)* (1) ) (1) ) + ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* (@ + (0) ) + ( (1 + (0) (0)* (1) ) (0) ) ) ) ', 'bigredfa')\n re2mindfa( ' ( ( (1) (1) ) ) (1)* (@ + (0) ) + ( ( ( (1) (1) ) ) (1)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( (0) (0)* + (1 + (0) (0)* (1) ) + ( (1 + (0) (0)* (1) ) (1) ) + ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* (@ + (0) ) + ( (1 + (0) (0)* (1) ) (0) ) ) + ( ( ( (1) (1) ) ) (1)* ( (0) (1) ) + ( (0) ) (1) + ( ( ( (1) (1) ) ) (1)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (1) ) + ( (1 + (0) (0)* (1) ) (0) ) (1) ) ) ( ( (1) (1) ) (1)* ( (0) (1) ) + (0) (1) + ( ( (1) (1) ) (1)* ( (0) (0) ) + (0) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (1) ) + ( (1 + (0) (0)* (1) ) (0) ) (1) ) )* ( (1) + ( (1) (1) ) (1)* (@ + (0) ) + (0) + ( ( (1) (1) ) (1)* ( (0) (0) ) + (0) (0) ) ( ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* ( (0) (0) ) + ( (1 + (0) (0)* (1) ) (0) ) (0) )* ( (0) (0)* + (1 + (0) (0)* (1) ) + ( (1 + (0) (0)* (1) ) (1) ) + ( ( (1 + (0) (0)* (1) ) (1) ) (1) ) (1)* (@ + (0) ) + ( (1 + (0) (0)* (1) ) (0) ) ) ) ', 'bigremindfa')\n#--\n elif (n==13):\n re2nfa( ' ( ( (0) (0) ) ) (0)* (@ + (0) ) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( (0) (0)* + (0 + (0) (0)* (0) ) + ( (0 + (0) (0)* (0) ) (0) ) + ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* (@ + (0) ) + ( (0 + (0) (0)* (0) ) (0) ) ) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) ) ) ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) + ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) ) )* ( (0) + ( (0) (0) ) (0)* (@ + (0) ) + (0) + ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( (0) (0)* + (0 + (0) (0)* (0) ) + ( (0 + (0) (0)* (0) ) (0) ) + ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* (@ + (0) ) + ( (0 + (0) (0)* (0) ) (0) ) ) ) ', 'bigrenfaS')\n re2dfa( ' ( ( (0) (0) ) ) (0)* (@ + (0) ) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( (0) (0)* + (0 + (0) (0)* (0) ) + ( (0 + (0) (0)* (0) ) (0) ) + ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* (@ + (0) ) + ( (0 + (0) (0)* (0) ) (0) ) ) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( ( ( (0 + (0) 
(0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) ) ) ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) + ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) ) )* ( (0) + ( (0) (0) ) (0)* (@ + (0) ) + (0) + ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( (0) (0)* + (0 + (0) (0)* (0) ) + ( (0 + (0) (0)* (0) ) (0) ) + ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* (@ + (0) ) + ( (0 + (0) (0)* (0) ) (0) ) ) ) ', 'bigredfaS')\n re2mindfa( ' ( ( (0) (0) ) ) (0)* (@ + (0) ) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( (0) (0)* + (0 + (0) (0)* (0) ) + ( (0 + (0) (0)* (0) ) (0) ) + ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* (@ + (0) ) + ( (0 + (0) (0)* (0) ) (0) ) ) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) + ( ( ( (0) (0) ) ) (0)* ( (0) (0) ) + ( (0) ) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) ) ) ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) + ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) ) )* ( (0) + ( (0) (0) ) (0)* (@ + (0) ) + (0) + ( ( (0) (0) ) (0)* ( (0) (0) ) + (0) (0) ) ( ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* ( (0) (0) ) + ( (0 + (0) (0)* (0) ) (0) ) (0) )* ( (0) (0)* + (0 + (0) (0)* (0) ) + ( (0 + (0) (0)* (0) ) (0) ) + ( ( (0 + (0) (0)* (0) ) (0) ) (0) ) (0)* (@ + (0) ) + ( (0 + (0) (0)* (0) ) (0) ) ) ) ', 'bigremindfaS')", "def classes(self):\n #print \"making classes again!\"\n l = []\n for p in self.marks:\n l.append(psi_class(self,p))\n for d in range(1, self.dimension + 1):\n l.append(kappa_class(self,d))\n for i in range(1, self.genus+1):\n l.append(chern_char(self, 2*i-1))\n if True:#self.genus != 0:\n l.append(irreducible_boundary(self))\n marks = set(self.marks)\n reducible_boundaries = []\n if self.n != 0:\n first_mark_list = [marks.pop()] \n for g1 in range(0, self.genus + 1):\n for p in subsets(marks):\n r_marks = set(first_mark_list + p)\n if 3*g1 - 3 + len(r_marks) + 1 >= 0 and 3*(self.genus-g1) - 3 + self.n - len(r_marks) + 1 >= 0:\n reducible_boundaries.append( reducible_boundary(self, Mgn(g1, r_marks)) )\n \n reducible_boundaries.sort(key = lambda b: sorted(list(b.component1.marks)))\n reducible_boundaries.sort(key = lambda b: len(b.component1.marks))\n reducible_boundaries.sort(key = lambda b: b.component1.genus)\n \n else: #self.n == 0\n for g1 in range(1, floor(self.genus/2.0)+1):\n reducible_boundaries.append(reducible_boundary(self, Mgn(g1, []))) \n \n \n l += reducible_boundaries \n \n for i in range(1,self.genus+1):\n l.append(lambda_class(self,i))\n return l", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n 
mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def vanilaScore(self,attended,state,W):", "def nom_struc_rebuilding(nom_struc):\n\n #init\n nominal_structure = ns = nn = []\n i = 0\n\n while i < len(nom_struc):\n\n if nom_struc[i]._quantifier == 'SOME' and (nom_struc[i].det == ['a'] or nom_struc[i].det == ['an']):\n nom_struc[i]._quantifier = 'ONE'\n\n #The first nominal group not preceded but 'and' if there is\n if nom_struc[i]._conjunction == 'AND' and i > 0:\n nominal_structure = nominal_structure + ['and']\n elif nom_struc[i]._conjunction == 'OR':\n nominal_structure = nominal_structure + ['or']\n elif nom_struc[i]._conjunction == 'BUT':\n nominal_structure = nominal_structure + ['but']\n\n #We recover the nominal group and his complement\n if nom_struc[i]._quantifier == 'SOME' or nom_struc[i]._quantifier == 'ALL' or nom_struc[\n i]._quantifier == 'ANY' or (nom_struc[i]._quantifier == 'DIGIT' and nom_struc[i].det != 'one'):\n #If there is a specific quantifier (plural)\n for n in ResourcePool().plural_nouns:\n if nom_struc[i].noun != [] and n[1] == nom_struc[i].noun[0]:\n nn = [n[0]]\n\n #If it is not a specific plural, we add 's'\n if nom_struc[i].noun != [] and nn == []:\n nn = [nom_struc[i].noun[0] + 's']\n\n #We reconver the other information \n nominal_structure = nominal_structure + nom_struc[i].det\n for z in nom_struc[i].adj:\n nominal_structure = nominal_structure + z[1] + [z[0]]\n nominal_structure = nominal_structure + nn\n\n #Re-init\n nn = []\n\n else:\n #if not plural\n nominal_structure = nominal_structure + nom_struc[i].det\n for z in nom_struc[i].adj:\n nominal_structure = nominal_structure + z[1] + [z[0]]\n nominal_structure = nominal_structure + nom_struc[i].noun\n\n #We recover noun complement\n if nom_struc[i].noun_cmpl:\n nominal_structure = nominal_structure + ['of']\n nominal_structure = nominal_structure + nom_struc_rebuilding(nom_struc[i].noun_cmpl)\n\n #We recover the relative\n for j in nom_struc[i].relative:\n if not j.sn:\n ns = [nom_struc[i]]\n\n nominal_structure = nominal_structure + [j.aim] + sentence_rebuilding.relative(j, ns)\n ns = []\n\n i += 1\n return nominal_structure", "def make_Gr(mlat, *J):\n\n if (len(J)!=3):\n print(\"Number of paramaters are not right, must be 5!\")\n \n NN = 2*mlat # # of sites in one super unitcell\n tau = -np.zeros((NN, NN),dtype=complex)\n h = np.zeros((NN,NN), dtype=complex)\n\n # translational cell's Hamiltonian\n for i in range(mlat-1):\n if (i%2==0):\n h[i,i+1] = 
J[0]\n h[mlat+i,mlat+i+1] = J[1]\n h[i,mlat+i] = J[2] # horizoltal connection\n elif (i%2==1):\n h[i,i+1] = J[1]\n h[mlat+i,mlat+i+1] = J[0]\n # longitudinal connection of the last sites\n if (mlat-1)%2 == 0:\n h[mlat-1,2*mlat-1] = J[2]\n \n h = h + h.conj().T # make it hermitian\n\n # Hopping matrix\n for i in range(1,mlat,2):\n tau[i+mlat,i] = J[2]\n\n return h, tau", "def w2f(sents,i,j,filename,freq):\n w = sents[i][j][0] #current word\n pos = sents[i][j][1] #POS of current word\n f = [ \n 'bias', #non-contextual feature \n 'w=' + w, #current word \n 'w.istitle=%s' % w.istitle(), #first letter - capitalized\n 'pos=' + pos, # POS tag\n 'w.intitle=%s' % contained_in_title(w, filename), # w matches title\n 'w.lowtitle=%s' % lower_in_title(w, filename), # w lower matches title\n 'w.freq=%s' % frequency(w, freq), # freq of w \n 'w.stopword=%s' % stop_word(w), # # stop word\n ]\n \n # previous word features\n if j>0:\n pw = sents[i][j-1][0] #previous word\n ppos = sents[i][j-1][1] #POS of previous word\n f.extend([ \n 'pw=' + pw, # previous word \n 'pw.istitle=%s' % pw.istitle(), #first letter - capitalized\n 'ppos=' + ppos, # POS tag\n 'pw.intitle=%s' % contained_in_title(pw, filename), # w matches title\n 'pw.lowtitle=%s' % lower_in_title(pw,filename), # w lower matches title\n 'pw.freq=%s' % frequency(pw, freq), # freq of w\n 'pw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('BOS') #first word of a sentence\n\n # next word features\n if j<len(sents[i])-1:\n nw = sents[i][j+1][0] #next word\n npos = sents[i][j+1][1] #POS of next word\n f.extend([ \n 'nw=' + nw, # previous word\n 'nw.istitle=%s' % nw.istitle(), #first letter - capitalized\n 'npos=' + npos, #POS tag\n 'nw.intitle=%s' % contained_in_title(nw, filename), # w matches title\n 'nw.lowtitle=%s' % lower_in_title(nw,filename), # w lower matches title\n 'nw.freq=%s' % frequency(nw, freq), # freq of w\n 'nw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('EOS') # last word of a sentence\n\n #if j>1: ...\n #if j<len(sents[i])-2: ...\n #if j>0 and j<len(sents[i])-1: ...\n return f", "def __init__(self, n, sents, corpus='', beta=None, addone=True):\n self.n = n\n self.beta = beta\n self.corpus = corpus\n self.beta_flag = True\n self.addone = addone\n self.smoothingtechnique = 'Back Off (Katz) with Discounting Smoothing'\n self.counts = counts = defaultdict(int)\n self.A_set = defaultdict(set)\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = set(voc)\n if beta is None:\n self.beta_flag = False\n\n # if no beta given, we compute it\n if not self.beta_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent por training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n for sent in train_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(train_sents)\n counts[('</s>',)] = len(train_sents)\n\n self.tocounts = counts\n # search for the beta that gives lower perplexity\n beta_candidates = [i*0.1 for i in range(1, 10)]\n # xs is a list with (beta, perplexity)\n xs = []\n self.sents = train_sents\n for aux_beta in beta_candidates:\n 
self.beta = aux_beta\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_beta, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.beta = xs[0][0]\n with open('old-stuff/backoff_'+str(n)+'_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Beta: {}\\n'.format(self.beta))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n else:\n sents = list(map((lambda x: x + ['</s>']), sents))\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n\n for sent in sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(sents)\n counts[('</s>',)] = len(sents)", "def V_lennard_jones(atoms):\n \n Vw = 0 # this is the variable we will store the sum of all the energies in\n N = len(atoms)\n for i in range(N):\n for j in range(i+1, N):\n r = norm(atoms.coords[i] - atoms.coords[j]) # distance from atom i to atom j\n \n Vw += DW*((R0/r)**12 -2*(R0/r)**6) # the Lennard-Jones interaction!\n \n return Vw", "def code() -> str:\n return \"\"\"\n G91 G17\n G0 Y10 X-10\n G0 Y0 X-5\n G0 Y5 X0\n G0 Y0 X5\n G0 Y0 X-5\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G0 Y-5 X0\n G0 Y-10 X10\n G0 Y0 X-5\n G0 Y-15 X-15\n G0 Y0 X5\n G0 Y5 X0\n G0 Y0 X-5\n G0 Y-5 X0\n G0 Y5 X0\n G2 Y5 X5 J0 I5\n G0 Y0 X5\n G0 Y-5 X0\n G2 Y-5 X-5 J0 I-5\n G0 Y5 X0\n G0 Y10 X10\n G0 Y0 X-30\n G3 Y0 X-10 J0 I-5\n G3 Y0 X10 J0 I5\n\n G0 Y0 X5\n G3 Y5 X5 J5 I0\n G3 Y10 X-10 J0 I-10\n G3 Y-5 X-5 J-5 I0\n G0 Y-5 X0\n\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G3 Y-10 X-10 J-10 I0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n\n G0 Y0 X-5\n G3 Y-5 X-5 J-5 I0\n G3 Y-10 X10 J0 I10\n G3 Y5 X5 J5 I0\n G0 Y5 X0\n\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G3 Y10 X10 J10 I0\n G3 Y5 X-5 J0 I-5\n G0 Y0 X-5\n \"\"\"", "def degibber(self):", "def encode(self, G, v, X, L, W0, W1, W2, W3, U1, U2, U3):\n h = [None] * (L+1)\n h[0] = np.zeros((L + 1, len(X)))\n for i in range(1, L+1):\n h[i] = np.zeros((L + 1, self.embedding_dimension))\n\n \"\"\"hN - embeddings do neighborhood dos nodes nas camadas 2..L (tem a mesma dimensão por uma questao de simplicidade,\n os 2 primeiros elementos vão ficar a 0) \"\"\"\n hN = np.zeros((L + 1, self.embedding_dimension))\n\n h[0] = np.transpose(X)\n\n self.H[0][v] = h[0]\n\n for node in range(self.nNodes):\n self.H[1][node] = ed.ReLU(np.matmul(W0, np.transpose(self.H[0][node])))\n if self.H[1][node].any(): # se nao for um vetor de zeros\n self.H[1][node] = self.H[1][node] / la.norm(self.H[1][node], 2)\n\n h[1] = self.H[1][v]\n\n for l in range(2, L + 1):\n for node in range(self.nNodes):\n \"\"\"AGGREGATE\"\"\"\n self.HN[l, node] = self.aggregateNeighborhood(G, node, G.get_neighbors(node), l)\n \"\"\"COMBINE\"\"\"\n self.H[l][node] = self.GRUCell(self.H[l - 1][node], self.HN[l, node], W1, W2, W3, U1, U2, U3)\n\n self.H[l][v] = self.H[l][v] / la.norm(self.H[l][v], 2)\n h[l] = self.H[l][v]\n\n \"\"\"z sera o embedding final, obtido atraves da funcao maxpool\"\"\"\n z = self.maxPool(h[1:], self.embedding_dimension)\n return [z]", "def gen_graph(self):", "def generate(self):", "def Group(self) -> _n_5_t_0:", "def Group(self) -> _n_5_t_0:", "def update_gol(arr):\n nxt = np.zeros(arr.shape)\n rows,cols = nxt.shape\n for i in range(rows):\n for j in range(cols):\n nn = 
sum_vonneuman_nn(arr,i,j)\n if arr[i][j]==1:\n if nn==2 or nn==3:\n nxt[i][j]=1\n else:\n if nn==3:\n nxt[i][j]=1\n return nxt", "def order_ideal(self, gens):", "def get_LDAU(self):\n\n # let's simply use the default as a first step\n LDAU_dict, poscar_need_hack, potcar_need_hack = super(U_Strategy_Yamada_Nitrogen, self).get_LDAU()\n\n Na_indices = self.structure.indices_from_symbol('Na')\n\n # hack MAGMOM\n list_reduced_site_indices, list_oxidized_site_indices = \\\n self.Find_Lowest_Energy_Structure_Electrostatics()\n\n MAGMOM = self.build_magmom(list_oxidized_site_indices, list_reduced_site_indices)\n\n LDAU_dict['MAGMOM'] = MAGMOM \n\n return LDAU_dict, poscar_need_hack, potcar_need_hack", "def main():\n\n rules, evolutions = [int(i) for i in input().strip().split()]\n\n rule = {}\n for _ in range(rules):\n start, finish = input().strip().split(' -> ')\n rule[start] = finish\n\n print(lindenmayor(rule, evolutions, input().strip()))", "def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs", "def IPTinput_gen(Nom,beta,ef,mutld):\n\tV,D =0.01,1\n\tf1 = open(\"Delta.inp\", 'w')\n\tf2 = open(\"G0.inp\", 'w')\n\tDelta=[]\n\tfor n in range(Nom):\n\t iom = (2.*n+1)*pi/beta\n\t Redelta=-V**2/(2.0*D)*log( ((D/2.-mutld)**2+iom**2)/((D/2.+mutld)**2+iom**2))\n\t Imdelta=-V**2/D*(arctan((D/2.-mutld)/iom)-arctan((-D/2.-mutld)/iom))\n\t print>>f1,iom,Redelta,Imdelta\n\t G0= 1./( 1.0j*iom-(ef-mutld)-(Redelta+1.0j*Imdelta) )\n\t print>>f2,iom,G0.real,G0.imag\n\tf1.close()\n\tf2.close()", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def __init__(self, n, sents, corpus='', gamma=None, addone=True):\n self.n = n\n self.smoothingtechnique = 'Interpolated (Jelinek Mercer) Smoothing'\n self.gamma = gamma\n self.addone = addone\n self.counts = counts = defaultdict(int)\n self.gamma_flag = True\n self.corpus = corpus\n # way more efficient than use set unions\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = list(set(voc))\n\n if gamma is None:\n self.gamma_flag = False\n\n # if not gamma given\n if not self.gamma_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent for training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n\n for sent in train_sents:\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(train_sents)\n # variable only for tests\n self.tocounts = counts\n # search the gamma that gives lower perplexity\n gamma_candidates = [i*50 for i in range(1, 15)]\n # xs is a list with (gamma, perplexity)\n xs = []\n sents = train_sents\n for aux_gamma in gamma_candidates:\n self.gamma = aux_gamma\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_gamma, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.gamma = xs[0][0]\n with open('old-stuff/interpolated_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Gamma: {}\\n'.format(self.gamma))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n else:\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n sents = list(map((lambda x: x + ['</s>']), sents))\n\n for sent in sents:\n # counts now holds all k-grams for 0 < k < n + 1\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in 
range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(sents)", "def grads(self, inputs):", "def giniIndex(p_m1):\n G = p_m1*(1-p_m1)*2 \n return G", "def modified_gram_schmidt_step_arnoldi(j, vals):\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]", "def g03tojmol(rawstd_geom, title='Untitled'):\n\tPERIODIC_TABLE = {'1':'H','2':'He',\n '3':'Li','4':'Be','5':'B','6':'C','7':'N','8':'O','9':'F','10':'Ne',\n '11':'Na','12':'Mg','13':'Al','14':'Si','15':'P','16':'S','17':'Cl','18':'Ar',\n '19':'K','20':'Ca','21':'Sc','22':'Ti','23':'V','24':'Cr','25':'Mn','26':'Fe','27':'Co','28':'Ni','29':'Cu','30':'Zn','31':'Ga','32':'Ge','33':'As','34':'Se','35':'Br','36':'Kr',\n '37':'Rb','38':'Sr','39':'Y','40':'Zr','41':'Nb','42':'Mo','43':'Tc','44':'Ru','45':'Rh','46':'Pd','47':'Ag','48':'Cd','49':'In','50':'Sn','51':'Sb','52':'Te','53':'I','54':'Xe',\n '55':'Cs','56':'Ba','57':'La','58':'Ce','59':'Pr','60':'Nd','61':'Pm','62':'Sm','63':'Eu','64':'Gd','65':'Tb','66':'Dy','67':'Ho','68':'Er','69':'Tm','70':'Yb','71':'Lu','72':'Hf','73':'Ta','74':'W','75':'Re','76':'Os','77':'Ir','78':'Pt','79':'Au','80':'Hg','81':'Tl','82':'Pb','83':'Bi','84':'Po','85':'At','86':'Rn',\n '87':'Fr','88':'Ra','89':'Ac','90':'Th','91':'Pa','92':'U','93':'Np','94':'Pu','95':'Am','96':'Cm','97':'Bk','98':'Cf','99':'Es','100':'Fm','101':'Md','102':'No','103':'Lr','104':'Rf','105':'Db','106':'Sg','107':'Bh','108':'Hs','109':'Mt','110':'Uun','111':'Uuu','112':'Uub','114':'Uuq','116':'Uuh' }\n\n try:\n raw_list = rawstd_geom.splitlines()\n numatom = len(raw_list)\n geom_jmol = '|'+str(numatom) + '|' + title\n for atom in raw_list:\n try:atom_seg = atom.strip().split() #['1', '13', '0', '0.000000', '0.000000', '0.000000']\n except:\n\t\t\t\traise \n\t\t\t\tcontinue\n else:\n try:\n atom_jmol = '|' + PERIODIC_TABLE[atom_seg[0]] + ' ' + atom_seg[3] + ' ' +atom_seg[4] + ' ' + atom_seg[5]\n except:\n\t\t\t\t\tcontinue\n else:\n geom_jmol += atom_jmol\n geom_jmol += '\\n'\n except:\n print 'Unexpect error occurs during geometry normalization..'\n raise #debug\n return None\n else:\n return geom_jmol", "def narration_self(self):\n pass", "def exercise_b2_53():\r\n pass", "def pgram(w, freqs, N):\n mle = 0\n fifo = [':'] * N\n for i in range(N-1):\n \tw += ':'\n for c in w:\n fifo.pop(0)\n fifo.append(c)\n n = N\n ngram = ''.join(fifo[:n])\n p = log(MLE(ngram, freqs))\n mle += p\n return mle", "def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()", "def ReferenceElement(N):\n\n # Legendre polynomials are Jacobi(alpha,beta) polynomials\n alpha=0.; beta=0;\n\n # Gauss-Lobatto 
quadrature points for Legendre polynomials\n # According to Hesthaven+Warburton (p. 448, JacobGL.m) these\n # are the order N-2 Gauss quadrature points of Jacobi polynomials\n # with different alpha, beta, enlarged by end-points -1, +1.\n #\n # note len(r)=N, i.e. matrices defined below (V, Vr, etc) are square\n if(N==1):\n r=np.array([-1.,1.])\n else:\n # (N-2)-th order quadrature points are roots of (N-1)-st Jacobi polynomial\n inner_roots, inner_weights = scipy.special.roots_jacobi(N-1, alpha+1, beta+1)\n r = np.concatenate([ [-1.], inner_roots, [1.] ])\n\n # Vandermonde matrix for Legendre polynomials\n # V[i,j] = P_j(r_i), j=0,...,N, i=0,...,len(r)-1\n V = np.zeros( (len(r), N+1))\n for j in range(N+1):\n # scipy normalization determined by trial and error.\n # For **LAGRANGE POLY** ONLY, not general alpha, beta.\n # This makes the returned polynomials orthonormal\n normalization = np.sqrt((1.+2.*j)/2.)\n V[:,j] = scipy.special.eval_jacobi(j, alpha, beta, r)*normalization\n # or V[:,j] = scipy.special.legendre(j)(r)\n\n # check normalization\n # tmp_r, tmp_w = scipy.special.roots_jacobi(j+1, alpha, beta)\n # tmp_L=scipy.special.eval_jacobi(j, alpha, beta, tmp_r)*normalization\n # L_dot_L = sum(tmp_w*tmp_L*tmp_L)\n # print(\"j={}, (L,L)={}\".format(j, L_dot_L))\n\n\n Vinv=np.linalg.inv(V)\n\n # derivatives of Legendre polynomials, evaluated at quadrature points\n # Vr[i,j] = dP_j/dr(r_i), j=0,...,N, i=0,...,len(r)-1\n # use dP_j/dr = sqrt(j(j+1)) J^{alpha+1,beta+1}_{j-1} (H+W, Eq A2)\n #\n Vr = np.zeros( (len(r), N+1))\n for j in range(1,N+1):\n # scipy normalization determined by trial and error.\n # For **LAGRANGE POLY** ONLY, not general alpha, beta.\n # This makes the returned polynomials orthonormal, conforming\n # to H+W conventions\n scipy_normalization=np.sqrt((1.+2.*j)*(j+1.)/(8.*j))\n normed_J = scipy.special.jacobi(j-1, alpha+1, beta+1)(r)*scipy_normalization\n Vr[:,j] = np.sqrt(j*(j+alpha+beta+1.))*normed_J # H+W Eq. 
A2\n\n # - check normalization\n # - integrate by Legendre quadrature, to explicitly show weight-function in orthogonality\n # tmp_r, tmp_w = scipy.special.roots_jacobi(j+4, alpha, beta)\n # tmp_L=scipy.special.eval_jacobi(j-1, alpha+1, beta+1, tmp_r)*scipy_normalization\n # - evaluate orthogonality; note weight function (1-r)(1+r)\n # L_dot_L = sum(tmp_w*tmp_L*tmp_L*(1-tmp_r)*(1+tmp_r))\n # print(\"j={}, (L,L)={}\".format(j, L_dot_L))\n\n\n # derivatives of Lagrange interpolating polynomials\n # Dr(i,j) = dl_j/dr(r=r_i),\n # where l_j(r_i) = delta_{ij}\n # compute using P_j(r) = V[i,j]*l_i(r) => V[i,j] dl_i/dr = dP_j/dr (*)\n # => V^{-T} V^T[j,i] dl_i/dr = V^{-T} dP_j/dr\n Dr = np.matmul(Vr,Vinv)\n\n # inverse of mass-matrix\n # Using (*), one can show M = (V V^T)^(-1)\n # Turns out that the inverse of M is used in the DG algorithm,\n # and so we can directly compute M-inverse, without computing\n # matrix-inverses:\n Minv = np.matmul(V, V.transpose())\n\n # finally, M^{-1}S = Dr, and since we need S only multiplied by M^{-1},\n # we can just return Dr\n MinvS=Dr\n\n return r, Minv, MinvS", "def nextGeneration(self):\n new_word = []\n for i in range(0,len(self.word)):\n mod = self.word[i]\n left_context = findLeftContext(self.word, i, self.ignore)\n right_context = findRightContext(self.word, i, self.ignore)\n foundOne = False\n for rule in self.productionRules: #find an applicable rule\n if rule.isApplicable(left_context, mod, right_context):\n new_word = new_word + rule.getReplacement(left_context, mod, right_context,self.definitions)\n foundOne = True\n break\n if not foundOne: #then no replacement will occur\n new_word = new_word + [mod]\n self.word = new_word \n return(self.word)", "def _compute_nmig(mus_train, ys_train, active):\n print(\"start nmig\")\n score_dict = {}\n discretized_mus = utils.make_discretizer(mus_train)\n m = utils.discrete_mutual_info(discretized_mus, ys_train)\n # m shape: (10, nr_ground_truth)\n print(\"finished discretizing\")\n assert m.shape[0] == mus_train.shape[0]\n assert m.shape[1] == ys_train.shape[0]\n entropy = utils.discrete_entropy(ys_train)\n if active is not None:\n assert len(active) <= ys_train.shape[0]\n m = m[:, active]\n entropy = entropy[active]\n nr_lt = m.shape[0]\n nr_gt = m.shape[1]\n # m is [num_latents, num_factors]\n\n sorted_m = np.sort(m, axis=0)[::-1]\n individual_mig = np.divide(sorted_m[0, :] - sorted_m[1, :], entropy[:])\n print(\"ind mig\", individual_mig)\n mig = np.mean(individual_mig)\n\n if nr_gt == 1:\n nmig = np.max(np.divide(m, entropy[:]))\n else:\n m = np.divide(m, entropy[:])\n partials = np.zeros((nr_gt))\n best_ids = np.argmax(m, axis=0)\n for i in range(nr_gt):\n mask = np.ones((nr_gt), dtype=np.bool)\n mask[i] = 0\n best_id = best_ids[i]\n partials[i] = m[best_id, i] - np.max(m[best_id, mask])\n nmig = np.mean(partials)\n print(\"ind nmig\", partials)\n score_dict[\"discrete_mig\"] = mig\n score_dict[\"discrete_nmig\"] = nmig\n\n return score_dict", "def makeNewickList(distancesWithNames):\n i = 0\n oldDistance = 0\n while len(distancesWithNames) > 1:\n smallestindex = findSmallest(distancesWithNames)\n distancesWithNames, oldDistance = newMatrixWithSmallest(distancesWithNames, smallestindex, beforeDistance=oldDistance)\n i+=1\n retString = \"(\" + distancesWithNames[0][0] + \",\" + distancesWithNames[0][1] + \");\"\n return retString", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def Forward(Fin, z, sizenew, Nnew ):\n if z <= 0:\n raise ValueError('Forward does not support z<=0')\n Fout = 
Field.begin(sizenew, Fin.lam, Nnew, Fin._dtype)\n \n field_in = Fin.field\n field_out = Fout.field\n \n field_out[:,:] = 0.0 #default is ones, clear\n \n old_size = Fin.siz\n old_n = Fin.N\n new_size = sizenew #renaming to match cpp code\n new_n = Nnew\n\n on2 = int(old_n/2)\n nn2 = int(new_n/2) #read \"new n over 2\"\n dx_new = new_size/(new_n-1)\n dx_old = old_size/(old_n-1)\n #TODO again, dx seems better defined without -1, check this\n \n R22 = _np.sqrt(1/(2*Fin.lam*z))\n\n X_new = _np.arange(-nn2, new_n-nn2) * dx_new\n Y_new = X_new #same\n X_old = _np.arange(-on2, old_n-on2) * dx_old\n Y_old = X_old #same\n for i_new in range(new_n):\n x_new = X_new[i_new]\n \n P1 = R22*(2*(X_old-x_new)+dx_old)\n P3 = R22*(2*(X_old-x_new)-dx_old)\n Fs1, Fc1 = _fresnel(P1)\n Fs3, Fc3 = _fresnel(P3)\n for j_new in range(new_n):\n y_new = Y_new[j_new]\n \n P2 = R22*(2*(Y_old-y_new)-dx_old)\n P4 = R22*(2*(Y_old-y_new)+dx_old)\n Fs2, Fc2 = _fresnel(P2)\n Fs4, Fc4 = _fresnel(P4)\n \n C4C1=_np.outer(Fc4, Fc1) #out[i, j] = a[i] * b[j] \n C2S3=_np.outer(Fc2, Fs3) #-> out[j,i] = a[j]*b[i] here\n C4S1=_np.outer(Fc4, Fs1)\n S4C1=_np.outer(Fs4, Fc1)\n S2C3=_np.outer(Fs2, Fc3)\n C2S1=_np.outer(Fc2, Fs1)\n S4C3=_np.outer(Fs4, Fc3)\n S2C1=_np.outer(Fs2, Fc1)\n C4S3=_np.outer(Fc4, Fs3)\n S2S3=_np.outer(Fs2, Fs3)\n S2S1=_np.outer(Fs2, Fs1)\n C2C3=_np.outer(Fc2, Fc3)\n S4S1=_np.outer(Fs4, Fs1)\n C4C3=_np.outer(Fc4, Fc3)\n C4C1=_np.outer(Fc4, Fc1)\n S4S3=_np.outer(Fs4, Fs3)\n C2C1=_np.outer(Fc2, Fc1)\n \n Fr = 0.5 * field_in.real\n Fi = 0.5 * field_in.imag\n Temp_c = (Fr * (C2S3 + C4S1 + S4C1 + S2C3\n - C2S1 - S4C3 - S2C1 - C4S3)\n + Fi * (-S2S3 + S2S1 + C2C3 - S4S1\n - C4C3 + C4C1 + S4S3 - C2C1)\n + 1j * Fr *(-C4C1 + S2S3 + C4C3 - S4S3\n + C2C1 - S2S1 + S4S1 - C2C3)\n + 1j * Fi*(C2S3 + S2C3 + C4S1 + S4C1\n - C4S3 - S4C3 - C2S1 - S2C1))\n field_out[j_new, i_new] = Temp_c.sum() #complex elementwise sum\n Fout._IsGauss=False\n return Fout", "def lap_mat(self):", "def letters_to_numbers():\n # Let's define our first FST\n f1 = FST('soundex-generate')\n # Indicate that '1' is the initial state\n f1.add_state('start')\n f1.add_state('1')\n f1.add_state('2')\n f1.add_state('3')\n f1.add_state('4')\n f1.add_state('5')\n f1.add_state('6')\n f1.add_state('7')\n f1.add_state('next')\n f1.initial_state = 'start'\n # Set all the final states\n f1.set_final('7')\n #setting the rules\n non_in = ['a', 'e', 'i', 'o', 'u','h','w','y','A','E','I','O','U','H','W','Y']\n rep1 =['b','f','p','v','B','F','P','V']\n rep2 =['c','g','j','k','q','s','x','z','C','G','J','K','Q','S','X','Z']\n rep3 =['d','t','D','T']\n rep4 =['l','L']\n rep5 =['m','n','M','N']\n rep6 =['r','R']\n \n # Add the rest of the arcs\n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('start','next',(letter),(letter))\n if letter in rep1:\n f1.add_arc('start','1',(letter),(letter))\n if letter in rep2 :\n f1.add_arc('start','2',(letter),(letter))\n if letter in rep3:\n f1.add_arc('start','3',(letter),(letter))\n if letter in rep4:\n f1.add_arc('start','4',(letter),(letter))\n if letter in rep5:\n f1.add_arc('start','5',(letter),(letter))\n if letter in rep6:\n f1.add_arc('start','6',(letter),(letter))\n \n \n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('next','next',(letter),())\n if letter in rep1:\n f1.add_arc('next','1',(letter),('1'))\n if letter in rep2 :\n f1.add_arc('next','2',(letter),('2'))\n if letter in rep3:\n f1.add_arc('next','3',(letter),('3'))\n if letter in rep4:\n f1.add_arc('next','4',(letter),('4'))\n if 
letter in rep5:\n f1.add_arc('next','5',(letter),('5'))\n if letter in rep6:\n f1.add_arc('next','6',(letter),('6'))\n\n f1.add_arc('next','7',(),())\n \n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('1','next',(letter),())\n if letter in rep1:\n f1.add_arc('1','1',(letter),())\n if letter in rep2 :\n f1.add_arc('1','2',(letter),('2'))\n if letter in rep3:\n f1.add_arc('1','3',(letter),('3'))\n if letter in rep4:\n f1.add_arc('1','4',(letter),('4'))\n if letter in rep5:\n f1.add_arc('1','5',(letter),('5'))\n if letter in rep6:\n f1.add_arc('1','6',(letter),('6'))\n f1.add_arc('1','7',(),())\n \n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('2','next',(letter),())\n if letter in rep1:\n f1.add_arc('2','1',(letter),('1'))\n if letter in rep2 :\n f1.add_arc('2','2',(letter),())\n if letter in rep3:\n f1.add_arc('2','3',(letter),('3'))\n if letter in rep4:\n f1.add_arc('2','4',(letter),('4'))\n if letter in rep5:\n f1.add_arc('2','5',(letter),('5'))\n if letter in rep6:\n f1.add_arc('2','6',(letter),('6'))\n\n f1.add_arc('2','7',(),())\n \n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('3','next',(letter),())\n if letter in rep1:\n f1.add_arc('3','1',(letter),('1'))\n if letter in rep2 :\n f1.add_arc('3','2',(letter),('2'))\n if letter in rep3:\n f1.add_arc('3','3',(letter),())\n if letter in rep4:\n f1.add_arc('3','4',(letter),('4'))\n if letter in rep5:\n f1.add_arc('3','5',(letter),('5'))\n if letter in rep6:\n f1.add_arc('3','6',(letter),('6')) \n f1.add_arc('3','7',(),())\n \n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('4','next',(letter),())\n if letter in rep1:\n f1.add_arc('4','1',(letter),('1'))\n if letter in rep2 :\n f1.add_arc('4','2',(letter),('2'))\n if letter in rep3:\n f1.add_arc('4','3',(letter),(''))\n if letter in rep4:\n f1.add_arc('4','4',(letter),())\n if letter in rep5:\n f1.add_arc('4','5',(letter),('5'))\n if letter in rep6:\n f1.add_arc('4','6',(letter),('6')) \n f1.add_arc('4','7',(),())\n \n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('5','next',(letter),())\n if letter in rep1:\n f1.add_arc('5','1',(letter),('1'))\n if letter in rep2 :\n f1.add_arc('5','2',(letter),('2'))\n if letter in rep3:\n f1.add_arc('5','3',(letter),(''))\n if letter in rep4:\n f1.add_arc('5','4',(letter),('4'))\n if letter in rep5:\n f1.add_arc('5','5',(letter),())\n if letter in rep6:\n f1.add_arc('5','6',(letter),('6')) \n f1.add_arc('5','7',(),())\n \n for letter in string.ascii_letters:\n if letter in non_in:\n f1.add_arc('6','next',(letter),())\n if letter in rep1:\n f1.add_arc('6','1',(letter),('1'))\n if letter in rep2 :\n f1.add_arc('6','2',(letter),('2'))\n if letter in rep3:\n f1.add_arc('6','3',(letter),(''))\n if letter in rep4:\n f1.add_arc('6','4',(letter),('4'))\n if letter in rep5:\n f1.add_arc('6','5',(letter),('5'))\n if letter in rep6:\n f1.add_arc('6','6',(letter),()) \n f1.add_arc('6','7',(),())\n \n return f1", "def short_vector(self, splitted_words, max_array_size=26):# iterate in another way\r\n self.coder=\"An_real_word_vector_algorith_that_uses_the_letter_position_in_a_dictionary\"\r\n storage=[]#create an array storage\r\n not_found=[]#store the unkown words\r\n array=self.create_new_array(shape=(max_array_size,1),replace=True)# build an new arry and fill it with -1s then 0s\r\n array_position=0#set the positon in the array equal to 0; used to move around the array\r\n if len(self.special_characters_dic)>0:# checks if there are 
items in these dictionary\r\n for i in splitted_words:# iterate thorugh the list of splitted words\r\n for word in i: # iterate through the list of letters\r\n if word in self.general_lower_word_list:# now figure out if the letter is in that dictionary\r\n array[array_position]=self.general_lower_word_list[word]# and replace these with its number at the given array positon\r\n array_position+=1#add one to the positon to move to the next row in the array\r\n elif word in self.general_upper_word_list:\r\n array[array_position]=self.upper_word_dic[word]\r\n array_position+=1\r\n elif word in self.special_letters_dic:\r\n array[array_position]=self.special_letters_dic[word]\r\n array_position+=1\r\n elif word in self.general_numbers_dic:\r\n array[array_position]=self.general_numbers_dic[word]\r\n array_position+=1\r\n elif word in self.special_characters_dic:\r\n array[array_position]=self.special_characters_dic[word]\r\n array_position+=1\r\n else:\r\n print(\"This word is not know by the list: \"+word)\r\n not_found.append(word)\r\n storage.append(array)\r\n array_position=0\r\n array=self.create_new_array(shape=(max_array_size,1),replace=True)\r\n else:\r\n for i in splitted_words:\r\n for word in i:\r\n if word in self.general_lower_word_list:\r\n array[array_position]=self.general_lower_word_list[word]\r\n array_position+=1\r\n #print(array)\r\n elif word in self.upper_word_dic:\r\n array[array_position]=self.upper_word_dic[word]\r\n array_position+=1\r\n #print(array)\r\n elif word in self.special_letters_dic:\r\n array[array_position]=self.special_letters_dic[word]\r\n array_position+=1\r\n #print(array)\r\n elif word in self.general_numbers_dic:\r\n array[array_position]=self.general_numbers_dic[word]\r\n array_position+=1\r\n #print(array)\r\n else:\r\n print(\"This word is not know by the list: \"+word)\r\n not_found.append(word)\r\n pass\r\n storage.append(array)\r\n array_position=0\r\n array=self.create_new_array(shape=(max_array_size,1),replace=True)\r\n return storage,not_found", "def beta_gen_mnt(p):\n return np.array([-1.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" 
\"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def Ngal(self, m):\n return self.Ncen(m) + self.Nsat(m)", "def lemma(self) -> str:", "def Extended_Lesk(word1,word2):\n\n #Creates a list of the word, and one layer of hyponyms\n list1 = [word1]\n for i in word1.hyponyms():\n list1.append(i)\n list2 = [word2]\n for i in word2.hyponyms():\n list2.append(i)\n\n #Creates a list of each of the above words' definitions, tokenized\n words1 = []\n words2 = []\n for i in list1:\n words1.append([l for l in word_tokenize(i.definition())])\n for i in list2:\n words2.append([l for l in word_tokenize(i.definition())])\n\n #Calculates the Maximum length of the Longest Definition\n lengths = []\n lengths.extend(len(l) for l in words1)\n lengths.extend(len(l) for l in words2)\n maxim = max(lengths)\n\n igramcount = []\n igram1 = []\n igram2 = []\n\n # Creates N-grams for each definition for each N, from 1:max(lengths)\n for i in range(int(maxim)):\n for g in words1:\n for l in ngrams(g, i+1):\n igram1.append(l)\n for f in words2:\n for m in ngrams(f, i+1):\n igram2.append(m)\n\n #For Each N-gram in the first set, which matches that of the Second set,\n # Denoting a form of \"Similarity\" between the two definitions,\n # Record the Value of N into a new List, igramcount.\n for x in set(igram1):\n if x in set(igram2):\n igramcount.append(i + 1)\n\n igram1 = []\n igram2 = []\n\n #Square the values of igramcount, and return the sum as the value of Extended Lesk.\n squared = [number**2 for number in igramcount]\n return sum(squared)", "def Gd():\n Pz=[8]\n Pp=[1,1]\n return Pz, Pp", "def nw(n):\n return 4*n*n + 1", "def part2a_0():\n xs = exampleInput\n phi = Counter({('-BEGIN-', '-FEAT-'): 1.0, ('-FEAT-', 'Beautiful'): 1.0, ('-FEAT-', 'PREV:-BEGIN-'): 1.0, ('-FEAT-', 'NEXT:2'): 1.0, ('-FEAT-', '-CAPITALIZED-'): 1.0, ('-FEAT-', '-POST-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(0, '-BEGIN-', '-FEAT-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n\n phi = Counter({('-FEAT-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:Beautiful'): 1.0, ('-SIZE-', 'NEXT:bedroom'): 1.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 1.0, ('-SIZE-', '2'): 1.0, ('-SIZE-', '-POST-CAPITALIZED-'): 0.0, ('-SIZE-', '-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(1, '-FEAT-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n \n phi = Counter({('-SIZE-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:2'): 1.0, ('-SIZE-', 'bedroom'): 1.0, ('-SIZE-', 'NEXT:-END-'): 1.0, ('-SIZE-', '-CAPITALIZED-'): 0.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(2, '-SIZE-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )", "def featureLikelihood():\r\n\r\n\t# Lists\r\n\twords = []\r\n\tfinalWords = []\r\n\tposWords = []\r\n\tnegWords = []\r\n\tfeatureListPos = []\r\n\tfeatureListNeg = []\r\n\r\n\t# Counters\r\n\tposCount = 0.0\r\n\tnegCount = 0.0\r\n\r\n\t# Temporary Lists for formating\r\n\tfeatureListPosFormat = []\r\n\tfeatureListNegFormat = []\r\n\r\n\t# Strings\r\n\ts = \" \"\r\n\tposString = \"\"\r\n\tnegString = \"\"\r\n\r\n\tseen = set()\r\n\r\n\t# Add all words to words list and count positive & negative occurences\r\n\tfor item in trainingData:\r\n\t\tfor word in item[2]:\r\n\t\t\twords.append(word)\r\n\t\tif item[1] == '0':\r\n\t\t\tfor word in 
item[2]:\r\n\t\t\t\tposWords.append(word)\r\n\t\t\t\tposCount += 1\r\n\t\tif item[1] == '1':\r\n\t\t\tfor word in item[2]:\r\n\t\t\t\tnegWords.append(word)\r\n\t\t\t\tnegCount +=1\r\n\r\n\t# Adds all values into finalWords, skipping duplicates\r\n\tfor values in words:\r\n\t\tif values not in seen:\r\n\t\t\tfinalWords.append(values)\r\n\t\t\tseen.add(values)\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t# Add positive and negative counts to feature list and dictionaries\r\n\tfor word in finalWords:\r\n\t\ts += '{:12s}'.format(word)\r\n\t\t\r\n\t\tpCount = 0\r\n\t\tnCount = 0\r\n\t\t\r\n\t\tfor row in trainingData:\r\n\t\t\tif row[1] == '0':\r\n\t\t\t\tif word in row[2]: pCount += 1\r\n\t\t\tif row[1] == '1':\r\n\t\t\t\tif word in row[2]: nCount += 1\r\n\t\t\t\t\r\n\t\tfeatureListPos.append((pCount + 1) / (posCount + 9))\r\n\t\tclass0Dict[word] = ((pCount + 1) / (posCount + 9))\r\n\t\t\r\n\t\tfeatureListNeg.append((nCount + 1) / (negCount + 9))\r\n\t\tclass1Dict[word] = ((nCount + 1) / (negCount + 9))\r\n\r\n\t\t\r\n\t\t\r\n\t# Formatting for the positive feature list\r\n\tfor item in featureListPos:\r\n\t\tfeatureListPosFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListPosFormat:\r\n\t\tposString += '{:12s}'.format(item)\r\n\r\n\t# Formatting for the negative feature list\r\n\tfor item in featureListNeg:\r\n\t\tfeatureListNegFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListNegFormat:\r\n\t\tnegString += '{:12s}'.format(item)\r\n\r\n\r\n\t\t\r\n\treturn(s, posString, negString)", "def solution(self) -> State:", "def solution(self):\n return [(\"the\", 1579644)] * 100", "def init_needleman_wunsch_matrix(self):\r\n empty_matrix = self.empty_matrix() # Building on the previous definition, this will give you an empty matrix\r\n for i in range(len(self.s2)+1):\r\n for j in range(len(self.s1)+1):\r\n empty_matrix[0][i] = -i\r\n empty_matrix[j][0] = -j\r\n return empty_matrix", "def rb_nfw(m200,c,z):\n\n #Setting up cosmology\n rho0=1.4876862e+11;\n omegam=0.238000;\n msun=1.98892e+33;\n delta_vir=200.;\n G=6.6730003e-08;\n kmpsToCmps = 1.0*10.**(5.);\n Rvir=200.;\n kpc2cm=3.086*10.**(21);\n \n deltac = (delta_vir/3.)*( (c**3.)/( np.log(1.+c) - (c / (1.+c))));\n rho_crit =rho0*omegam*(1.+z)**3.;\n r200 =(m200/delta_vir / rho_crit / (4.*np.pi/3.) )**0.33333 * 1000. ;\n v200 = ((6.67e-8 * m200 * msun / (r200* 3.086*10.**(21.)) )**0.5)/1e5 ;\n \n r =np.linspace(1.,3.*r200,500); # kpc\n rs = r200 / c; \n ss=(((r/rs)*(1.+(r/rs))**2.)*1000.**3);\n rho = (rho_crit * deltac)/(ss); \n M_r = 4.*np.pi* integrate.cumtrapz((r**2)*rho, r,initial=0.)\n \n x = r/r200 ;\n tab=1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c));\n vcirc = v200*(tab)**0.5 ;\n maxvcirc = np.max(vcirc) ;\n q=np.where((vcirc == np.max(vcirc)));\n maxvcircr = r[q];\n \n \n # Now compute V_Esc as per nfw.pro Binney & Tremaine equation 2.31\n Phi_new = r * 0.0;\n vesc = r * 0.0 ;\n for ir in range(2,len(r)-4):\n term1 = (np.trapz(rho[0:ir]*(r[0:ir]**2.),x=r[0:ir])/(r[ir]))* msun; \n term2 = np.trapz(rho[ir:len(r)]*r[ir:len(r)],x=r[ir:len(r)])*msun; \n Phi_new[ir] = -4. *np.pi*6.67e-8*(term1 + term2)/3.086e21 ;\n vesc[ir] = ((2. 
* np.abs(Phi_new[ir]))**0.5) / 1e5 ; # See Binney & Tremaine (2-22) \n \n\n # Chage Units to do velocity dispersion calculations\n rcm=r*kpc2cm;\n\n #M_r in gram\n M_r_gram=M_r*msun;\n\n Phi=G*integrate.cumtrapz((M_r_gram/rcm**(2)),rcm,initial=0);\n \n Phi=Phi*(1./((1e5)**2.));#%km^2/s^2\n Phi_out=np.max(Phi);\n\n k=0.41;\n a=0.29;\n\n sig = np.sqrt(a *(( Phi/Phi_out)**(k))*(Phi_out -Phi));\n \n nfw={}\n qqqt=np.where((vesc==0.))\n vesc[qqqt]=1e-99\n\n nfw[\"m200\"]=m200;\n nfw[\"c\"]=c;\n nfw[\"r200\"]=r200;\n nfw[\"v200\"]=v200;\n nfw[\"maxvcirc\"]=maxvcirc;\n nfw[\"maxvcircr\"]=maxvcircr;\n nfw[\"r\"]=r;\n nfw[\"rho\"]=rho;\n nfw[\"vcirc\"]=vcirc;\n nfw[\"M_r\"]=M_r;\n nfw[\"sig_v\"]=sig;\n nfw[\"vesc\"]=vesc;\n \n return nfw", "def pos_to_name(reg):\n l,b = position_region(reg).galactic() \n if numpy.sign(b) == 1:\n pm = \"+\"\n else:\n pm = \"-\"\n text = \"G%4.2f%1s%4.2f\" % (l,pm,abs(b))\n return text", "def pulp_smash():", "def get_sol(self):", "def regnUtVinner(runder, vinnendeSekvenser):\n x = False #False = Xena, True = Ophelia\n antallXenaVinner = 0\n antallOpheliaVinner = 0\n uavgjort = 0\n for runde in runder:\n personX, personO = delOppRunde(runde)\n if sjekkVinnerRunde(personX, vinnendeSekvenser):\n if not x:\n antallXenaVinner += 1 \n else:\n antallOpheliaVinner += 1\n x = not x\n uavgjort = 0 \n elif sjekkVinnerRunde(personO, vinnendeSekvenser):\n if not x:\n antallOpheliaVinner += 1\n else:\n antallXenaVinner += 1 \n uavgjort = 0 \n else:\n uavgjort += 1\n if uavgjort == 3:\n x = not x\n uavgjort = 0\n return max([antallXenaVinner, antallOpheliaVinner])", "def __init__(self):\n super(GELU, self).__init__()", "def silverman(n: int, ess: float) -> float:\n\n return (ess * (n + 2) / 4) ** (-1 / (n + 4))", "def retr_metr(gdat, indxvaluthis=None, strgvarbthis=None):\n\n metr = np.zeros((gdat.numbepoc, 2, 3 )) - 1\n\n loss = np.empty(gdat.numbepoc)\n numbepocchec = 5\n \n print gdat.modl.summary()\n for y in gdat.indxepoc:\n print 'Training epoch %d...' % y\n histinpt = gdat.inpttran[:, :, None]\n hist = gdat.modl.fit(histinpt, gdat.outptran, epochs=1, batch_size=gdat.numbdatabtch, verbose=1)\n loss[y] = hist.history['loss'][0]\n indxepocloww = max(0, y - numbepocchec)\n \n for layr in gdat.modl.layers:\n func = keras.backend.function([gdat.modl.input, keras.backend.learning_phase()], [layr.output])\n \n listweigbias = layr.get_weights()\n #assert len(listweigbias) == 2\n print 'listweigbias'\n for n in range(len(listweigbias)):\n print 'n'\n print n\n print 'listweigbias[n]'\n summgene(listweigbias[n])\n stat = func([histinpt, 1.])\n print 'type(stat)'\n print type(stat)\n print 'len(stat)'\n print len(stat)\n for n in range(len(stat)):\n print 'stat[n]'\n summgene(stat[n])\n print\n print\n\n\n if y == gdat.numbepoc - 1 and 100. * (loss[indxepocloww] - loss[y]):\n print 'Warning! 
The optimizer may not have converged.'\n print 'loss[indxepocloww]\\n', loss[indxepocloww], '\\nloss[y]\\n', loss[y], '\\nloss\\n', loss\n\n for r in gdat.indxrtyp:\n if r == 0:\n inpt = gdat.inpttran\n outp = gdat.outptran\n numdatatemp = gdat.numbdatatran\n else:\n inpt = gdat.inpttest\n outp = gdat.outptest\n numbdatatemp = gdat.numbdatatest\n inpt = inpt[:, :, None]\n \n outppredsigm = gdat.modl.predict(inpt)\n outppred = (outppredsigm > 0.5).astype(int)\n matrconf = confusion_matrix(outp, outppred)\n if matrconf.size == 1:\n matrconftemp = np.copy(matrconf)\n matrconf = np.empty((2, 2))\n matrconf[0, 0] = matrconftemp\n trne = matrconf[0, 0]\n flpo = matrconf[0, 1]\n flne = matrconf[1, 0]\n trpo = matrconf[1, 1]\n \n if float(trpo + flpo) > 0:\n metr[y, r, 0] = trpo / float(trpo + flpo) # precision\n else:\n pass\n #print ('No positive found...')\n #raise Exception('')\n metr[y, r, 1] = float(trpo + trne) / (trpo + flpo + trne + flne) # accuracy\n if float(trpo + flne) > 0:\n metr[y, r, 2] = trpo / float(trpo + flne) # recall\n else:\n print 'No relevant sample!'\n #raise Exception('')\n \n print 'metr[y, r, :]'\n print metr[y, r, :]\n print \n return metr" ]
[ "0.56081337", "0.5455168", "0.54125476", "0.538638", "0.53829026", "0.5340331", "0.5284942", "0.52760315", "0.5272306", "0.5256002", "0.5245553", "0.5241749", "0.52402973", "0.5214805", "0.5193872", "0.51484257", "0.5143233", "0.51325196", "0.5114381", "0.51098406", "0.5093134", "0.50657725", "0.50648636", "0.5058772", "0.5045587", "0.50416195", "0.5032225", "0.5013828", "0.5005608", "0.5001219", "0.5001205", "0.50003713", "0.49993694", "0.4998752", "0.49974033", "0.49947387", "0.49912947", "0.49870363", "0.49728408", "0.497115", "0.4939359", "0.49375466", "0.49311274", "0.4912828", "0.49015749", "0.489809", "0.4889731", "0.48836127", "0.48794174", "0.48655593", "0.48599005", "0.48520246", "0.48519546", "0.48504436", "0.48491213", "0.48491213", "0.48469216", "0.48466843", "0.484444", "0.4839921", "0.48393056", "0.48386928", "0.48333398", "0.48311898", "0.48201433", "0.48070908", "0.48044276", "0.48020107", "0.4801646", "0.48008484", "0.47936046", "0.47889018", "0.47833478", "0.47812116", "0.4772041", "0.4770697", "0.47677964", "0.47669354", "0.47668228", "0.47633603", "0.47605625", "0.4751773", "0.4749454", "0.47490326", "0.47466528", "0.47449872", "0.47441748", "0.474267", "0.47401485", "0.47400522", "0.47377825", "0.47317886", "0.47310802", "0.47305", "0.47270283", "0.47258452", "0.47248366", "0.47238156", "0.47229987", "0.47224024", "0.47213537" ]
0.0
-1
This is my implementation of modularity using the original Girvan-Newman formulation.
def get_modularity_other_b(A, cluster_indices): # define the number of nodes in the graph and the number of clusters n = len(cluster_indices) nclusters = max(cluster_indices) + 1 girvan_e = np.zeros((nclusters, nclusters)) volume = 0 for i in range(n): for j in range(n): if i < j: weight = A[i][j] volume += weight a = cluster_indices[i] b = cluster_indices[j] if a == b: girvan_e[a][a] += weight else: girvan_e[a][b] += weight girvan_e[b][a] += weight for a in range(nclusters): for b in range(nclusters): girvan_e[a][b] /= volume girvan_a = [sum(girvan_e[i]) for i in range(nclusters)] modularity = sum(girvan_e[i][i] - girvan_a[i]**2 for i in range(nclusters)) return modularity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n n = 34\n # create the adjacency matrix\n stripped_lines = Util.get_stripped_lines(g_karate_data.splitlines())\n string_rows = [line.split() for line in stripped_lines if line]\n assert len(string_rows) == n\n for row in string_rows:\n assert len(row) == n\n data_rows = [[float(x) for x in string_row] for string_row in string_rows]\n A = np.array(data_rows)\n # create the ordered module indices\n first_cluster_one_based_indices = [1, 3, 4, 14, 2, 8, 20, 18, 22, 13, 12, 6, 7, 17, 5, 11]\n second_cluster_one_based_indices = [25, 32, 26, 29, 24, 28, 9, 34, 33, 19, 16, 31, 15, 10, 23, 30, 21, 27]\n assert len(first_cluster_one_based_indices + second_cluster_one_based_indices) == n\n assert list(sorted(first_cluster_one_based_indices + second_cluster_one_based_indices)) == range(1, n+1)\n ordered_module_indices = []\n for i in range(n):\n if i+1 in first_cluster_one_based_indices:\n ordered_module_indices.append(0)\n else:\n ordered_module_indices.append(1)\n # print the modularity\n Q = get_modularity_other_b(A, ordered_module_indices)\n print 'modularity calculated using my interpretation of the method of the paper', Q\n Q = get_modularity_other_b2(A, ordered_module_indices)\n print 'modularity calculated using a modification of my interpretation of the method of the paper', Q\n Q = get_modularity_other_c(A, ordered_module_indices)\n print 'modularity calculated using the method on wikipedia', Q\n Q = get_eric_modularity(A, ordered_module_indices)\n print 'modularity calculated using the method eric used:', Q\n print 'expected modularity: .375 +/- .025'", "def neural_modularity_calculator(graph, embedding, means):\n assignments = {}\n for node in graph.nodes():\n positions = means-embedding[node, :]\n values = np.sum(np.square(positions), axis=1)\n index = np.argmin(values)\n assignments[int(node)] = int(index)\n modularity = community.modularity(assignments, graph)\n return modularity, assignments", "def compute_modularity(G):\n # convert to undirected graph if necessary\n if isinstance(G, nx.DiGraph):\n G = G.to_undirected(reciprocal=True)\n\n # extract communities\n community_detection = community.greedy_modularity_communities(G)\n # calculate modularity with those communities\n modularity = community.modularity(G, community_detection)\n return modularity", "def gen_mod(affinities, labels):\n\n for aff in affinities:\n yield modularity.get_modularity(aff, labels).sum()", "def classical_modularity_calculator(graph, embedding, cluster_number=20):\n kmeans = KMeans(n_clusters=cluster_number, random_state=0, n_init=1).fit(embedding)\n assignments = {i: int(kmeans.labels_[i]) for i in range(0, embedding.shape[0])}\n modularity = community.modularity(assignments, graph)\n return modularity, assignments", "def calculate_modularity(modules, degree_table, edges, num_of_edges):\n modularity = 0.0\n for module in modules:\n modularity += calculate_q(module, degree_table, edges, num_of_edges)\n\n return modularity / (2.0 * num_of_edges)", "def calc_pos_mod(nmodule):\n pass", "def modularity():\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q", "def modules():", "def modularity(G, partition):\n m = G.size(weight=\"weight\")\n degrees = dict(G.degree(weight=\"weight\"))\n Q = 0\n for community in partition:\n for u, v in product(community, repeat=2):\n try:\n w = G[u][v].get(\"weight\", 1)\n except KeyError:\n w = 0\n if u == v:\n # Double count self-loop weight.\n w *= 2\n Q += w - degrees[u] * degrees[v] 
/ (2 * m)\n return Q / (2 * m)", "def get_modularity2(adjacency, clusters):\n num_ids = adjacency.shape[0]\n id_to_cluster = get_id_to_cluster(clusters, num_ids)\n S = np.zeros(shape=(adjacency.shape[0], len(clusters))) # S[v,c]=1 iff v is in cluster c\n for id in range(adjacency.shape[0]):\n cluster_id = id_to_cluster[id]\n S[id, cluster_id] = 1\n total_weight = np.sum(adjacency)\n degrees = np.sum(adjacency, axis=1)\n\n C = np.outer(degrees, degrees)\n C = C / total_weight # C[v,w] = deg(v)*deg(w) / 2m\n B = adjacency - C\n M = np.dot(np.dot(S.T, B), S)\n return np.trace(M) / total_weight", "def test_mod():\r\n x, y = fscalars('xy')\r\n fn = gof.DualLinker().accept(\r\n gof.FunctionGraph([x, y], [x % y])).make_function()\r\n for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),\r\n (1, 2), (-1, 2), (1, -2), (-1, -2),\r\n (5, 3), (-5, 3), (5, -3), (-5, -3)\r\n ):\r\n assert fn(a, b) == a % b, (a,)", "def modularity_gain(n, c, dnodecomm):\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m", "def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod", "def fm_versions(fmagg, suffix = \"\"):\n dert = np.ones_like(z)[:,None]*truth['b1'] # dert === derivative true\n for k in range(2,model.K):\n dert = z[:, None] ** (k - 1) *truth['b'+str(k)]*k +dert\n adert = np.abs(dert)\n\n ederiv =deriv.mean(axis = 1 ) #expectation of derivative\n\n def process_figure_of_merit(kind='worst'):\n if kind=='worst': #looking for minimal true FoM\n xm = np.argmin(adert,axis=0)\n ym = np.argmin(np.abs(ederiv),axis = 0)\n else: #looking for the best FoM\n kind = ''\n xm = np.argmax(adert,axis=0)\n ym = np.argmax(np.abs(ederiv),axis = 0)\n M = len(xm)\n x = np.zeros(M)\n y = np.zeros((fmagg.shape[1],M))\n for m in range(M):\n x[m] = adert[xm[m],m]/truth['S'][m]\n y[:,m] = fmagg[ym[m],:,m]\n printout('Fm'+kind+''+suffix,y,f,x)\n plot_verus('Fm'+kind+''+suffix,y,x,suppress_aspect=True)\n plot_verus('Fm'+kind+''+suffix+' ',y,x,suppress_aspect=False)\n\n\n\n process_figure_of_merit('worst')\n process_figure_of_merit('')", "def find_module(V,m,adj,temperature):\r\n\r\n #compute the goal to approch a module as defined in the assigment\r\n f1 = objective(V, m, adj)\r\n \r\n #find a neighbor\r\n perm = new_permutation(V, m,adj)\r\n \r\n #compute the goal to approch a module as defined in the assigment\r\n f2 = objective(perm, m, adj)\r\n \r\n #compute the delta as describe in the annealing algorithm\r\n delta = f2-f1\r\n\r\n if delta < 0: #accept the new neihbor if we approch the goal\r\n \r\n return perm\r\n \r\n #to avoid to be block in a local minimum we accept wrong neighbours\r\n #with a certain probability\r\n \r\n else: \r\n #compute the probability of accepting a wrong neighbours\r\n p = exp(-delta/temperature)\r\n if uniform(0,100) < p*100:\r\n return perm\r\n \r\n return V", "def test_versioned_symbols(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n 
super(MyModule, self).__init__()\n\n def forward(self, a, b, alpha: float):\n no_alpha = torch._test_serialization_subcmul(a, b)\n with_alpha = torch._test_serialization_subcmul(a, b, alpha)\n return no_alpha, with_alpha\n\n def historic_subcmul(a, b, alpha=2):\n return b - alpha * a\n\n def current_subcmul(a, b, alpha=1):\n return a - alpha * b\n\n # Loads and verifies the historic behavior of the module\n # that was serialized with version 2\n module_v2 = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/_test_serialization_subcmul_v2.pt\")\n a = torch.randn((5,))\n b = torch.randn((5,))\n alpha = random.random()\n args = (a, b, alpha)\n no_alpha_v2, with_alpha_v2 = module_v2(*args)\n self.assertEqual(no_alpha_v2, historic_subcmul(a, b))\n self.assertEqual(with_alpha_v2, historic_subcmul(*args))\n\n # Scripts, saves, loads and verifies the current behavior of the module\n scripted_module = torch.jit.script(MyModule())\n buffer = io.BytesIO()\n torch.jit.save(scripted_module, buffer)\n buffer.seek(0)\n module_current = torch.jit.load(buffer)\n no_alpha_current, with_alpha_current = module_current(*args)\n self.assertEqual(no_alpha_current, current_subcmul(a, b))\n self.assertEqual(with_alpha_current, current_subcmul(*args))", "def _naive_greedy_modularity_communities(G):\r\n # First create one community for each node\r\n communities = list([frozenset([u]) for u in G.nodes()])\r\n # Track merges\r\n merges = []\r\n # Greedily merge communities until no improvement is possible\r\n old_modularity = None\r\n new_modularity = modularity(G, communities)\r\n while old_modularity is None or new_modularity > old_modularity:\r\n # Save modularity for comparison\r\n old_modularity = new_modularity\r\n # Find best pair to merge\r\n trial_communities = list(communities)\r\n to_merge = None\r\n for i, u in enumerate(communities):\r\n for j, v in enumerate(communities):\r\n # Skip i=j and empty communities\r\n if j <= i or len(u) == 0 or len(v) == 0:\r\n continue\r\n # Merge communities u and v\r\n trial_communities[j] = u | v\r\n trial_communities[i] = frozenset([])\r\n trial_modularity = modularity(G, trial_communities)\r\n if trial_modularity >= new_modularity:\r\n # Check if strictly better or tie\r\n if trial_modularity > new_modularity:\r\n # Found new best, save modularity and group indexes\r\n new_modularity = trial_modularity\r\n to_merge = (i, j, new_modularity - old_modularity)\r\n elif (\r\n to_merge and\r\n min(i, j) < min(to_merge[0], to_merge[1])\r\n ):\r\n # Break ties by choosing pair with lowest min id\r\n new_modularity = trial_modularity\r\n to_merge = (i, j, new_modularity - old_modularity)\r\n # Un-merge\r\n trial_communities[i] = u\r\n trial_communities[j] = v\r\n if to_merge is not None:\r\n # If the best merge improves modularity, use it\r\n merges.append(to_merge)\r\n i, j, dq = to_merge\r\n u, v = communities[i], communities[j]\r\n communities[j] = u | v\r\n communities[i] = frozenset([])\r\n # Remove empty communities and sort\r\n communities = [c for c in communities if len(c) > 0]\r\n for com in sorted(communities, key=lambda x: len(x), reverse=True):\r\n yield com", "def modinv(a, m):\n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m", "def mod(p):\n return (p[0]**2 + p[1]**2 + p[2]**2)**0.5", "def _himf(LATENTDIM, REG, EXPERIMENTNUM, gamma,\n nmfflag=None, lr=0.001, esflag=True):\n fn_hi = '../H3N2_HIdata/H3N2_integrated_/H3N2_HI_data_minority.csv'\n virusindex = readdata.readvirusindex(fn_hi)\n serumindex = 
readdata.readserumindex(fn_hi)\n ratings = np.load('ratings_minority.npy')\n\n\n \"\"\"\n Cache date check and get simtx from cache\n \"\"\"\n seq_date = os.stat(\"./realdata_minority.fa\").st_mtime\n simtx_date = os.stat(\"./simtx_minority.npy\").st_mtime\n if simtx_date <= seq_date:\n fsim = open(\"./realdata_minority.fa\")\n print(\"making simtx_minority.npy..\")\n simtx = simseq.simseq_parallel(virusindex, fsim)\n np.save(\"simtx_minority.npy\", simtx)\n else:\n simtx = np.load(\"simtx_minority.npy\")\n print(\"simtx_minority ready!\")\n\n\n # create train, validation and test sets.\n n = int(ratings.shape[0] * 0.8)\n train = ratings[:n]\n test = ratings[n:]\n v = int(train.shape[0] * 0.9)\n # split train to 1(validate) : 9(training)\n val = train[v:]\n train = train[:v]\n from rsvd import RSVD\n dims = (len(virusindex), len(serumindex))\n\n \"\"\"\n get the average score\n MF\n \"\"\"\n\n model = RSVD.train(LATENTDIM, train, dims, simtx,\n probeArray=val, esflag=esflag, maxEpochs=1000,\n learnRate=lr,\n regularization=REG,\n nmfflag=nmfflag,\n randomNoise=0.1,\n gamma=gamma)\n\n sqerr = 0.0\n\n reslist = []\n for strainID, serumID, rating in test:\n err = rating - model(strainID, serumID)\n reslist.append([rating, model(strainID, serumID)])\n sqerr += err * err\n sqerr /= test.shape[0]\n\n modelpath = \"./experiment{0}/model-ldim-{1}-reg-{2}\".format(\n EXPERIMENTNUM, LATENTDIM, REG)\n rmsepath = \"./experiment{0}/rmse-ldim-{1}-reg-{2}\".format(\n EXPERIMENTNUM, LATENTDIM, REG)\n if nmfflag:\n modelpath = modelpath + \"-nmf\"\n rmsepath = rmsepath + \"-nmf\"\n modelpath = modelpath + \"-gamma-{0}\".format(gamma)\n rmsepath = rmsepath + \"-gamma-{0}\".format(gamma)\n modelpath = modelpath + \"/\"\n\n if not os.path.exists(os.path.dirname(modelpath)):\n try:\n os.makedirs(os.path.dirname(modelpath))\n model.save(modelpath)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n f = open(rmsepath, 'a+')\n print \"Test RMSE: {0}\\n\".format(np.sqrt(sqerr))\n f.write(\"Test RMSE: {0}\\n\".format(np.sqrt(sqerr)))\n f.close()\n\n np.save(modelpath + 'true_vs_prediction.npy',\n np.array(reslist))\n\n return reslist", "def substantiate():", "def _naive_greedy_modularity_communities(G):\n # First create one community for each node\n communities = list([frozenset([u]) for u in G.nodes()])\n # Track merges\n merges = []\n # Greedily merge communities until no improvement is possible\n old_modularity = None\n new_modularity = modularity(G, communities)\n while old_modularity is None or new_modularity > old_modularity:\n # Save modularity for comparison\n old_modularity = new_modularity\n # Find best pair to merge\n trial_communities = list(communities)\n to_merge = None\n for i, u in enumerate(communities):\n for j, v in enumerate(communities):\n # Skip i=j and empty communities\n if j <= i or len(u) == 0 or len(v) == 0:\n continue\n # Merge communities u and v\n trial_communities[j] = u | v\n trial_communities[i] = frozenset([])\n trial_modularity = modularity(G, trial_communities)\n if trial_modularity >= new_modularity:\n # Check if strictly better or tie\n if trial_modularity > new_modularity:\n # Found new best, save modularity and group indexes\n new_modularity = trial_modularity\n to_merge = (i, j, new_modularity - old_modularity)\n elif (\n to_merge and\n min(i, j) < min(to_merge[0], to_merge[1])\n ):\n # Break ties by choosing pair with lowest min id\n new_modularity = trial_modularity\n to_merge = (i, j, new_modularity - old_modularity)\n # Un-merge\n trial_communities[i] = 
u\n trial_communities[j] = v\n if to_merge is not None:\n # If the best merge improves modularity, use it\n merges.append(to_merge)\n i, j, dq = to_merge\n u, v = communities[i], communities[j]\n communities[j] = u | v\n communities[i] = frozenset([])\n # Remove empty communities and sort\n communities = [c for c in communities if len(c) > 0]\n for com in sorted(communities, key=lambda x: len(x), reverse=True):\n yield com", "def exp_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n #binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #while len(binn)<len(bina):\n # binn = [0]+binn\n #print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*4+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n nanya = len(bina)+3*len(binn)+1 # debut de \"APOW\" (ce qui doit etre mesuré)\n q = QuantumRegister(n+2, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n expmod(circ, q, # X, a, A, APOW, Y, n, N, binn, lost, lost2)\n [q[i] for i in range(len(bina))],\n b%nbr,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nanya] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n if len(bina)%2:\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n else:\n circ_m = measure(circ, q, [i for i in range(nanya,n)])\n #circ_m = measure(circ, q, [i for i in range(n)])\n return circ_m", "def kegg_module_enrichment(genelist, dbpaths=dbpaths):\n size, kegglist = cbir_to_kegg(genelist)\n\n #print \"Creating Kegg module library\"\n ## create Kegg module dictionary:\n kmod_h = open(dbpaths['kegg'], 'rb')\n\n kmodlist = []\n kmodd = {}\n kmodcount = 0\n kmcount = {}\n kmod = 'none'\n\n kgroupb = {}\n kgroupbcount = {}\n kgroupblist = []\n kbcount = {}\n b_group = 'none'\n\n kgroupc = {}\n kgroupccount = {}\n kccount = {}\n kgroupclist = []\n c_group = 'none'\n\n kod = {} # this is to count how many KOs there are in the list\n\n for line in kmod_h:\n \"\"\"if line[0] == 'B': # higher level functional description (eg 'Energy Metabolism')\n kbcount[b_group] = len(kgroupbcount)# add old b_group count to dictionary\n kgroupbcount = {} # reset counter\n try:\n b_group = re.search(\"<b>(.*)</b>\", line).group(1)\n kgroupblist.append(b_group)\n except:\n b_group = 'none'\n \"\"\"\n if line[0] == 'C': # descriptive function (eg Carbon fixation)\n kccount[c_group] = len(kgroupccount)# add old c_group count to dictionary\n kgroupccount = {} # reset counter\n try:\n c_group = re.search(\"C *(.*)\", line.trim()).group(1)\n kgroupclist.append(c_group)\n except:\n c_group = 'none'\n\n elif line[0] == 'D': # Kegg module (eg M00165 Reductive pentose phosphate cycle)\n kmcount[kmod] = kmodcount\n kmodcount = 0\n try:\n ksearch = re.search(\"(M[0-9]*) *(.*)\\[PATH\", line)\n kmod = ksearch.group(1)\n kmoddef = ksearch.group(2)\n kmodlist.append(kmod)\n except:\n kmod = 'none'\n kmoddef = 'none'\n\n elif line[0] == 'E': # Kegg term\n try:\n ko = re.search(\"(K[0-9]*)\", line).group(1)\n\n kmodcount += 1\n kgroupbcount[ko] = True\n kgroupccount[ko] = True\n try:\n kmodd[ko].append(kmod)\n kgroupb[ko].append(b_group)\n kgroupc[ko].append(c_group)\n except:\n kmodd[ko] = [kmod]\n kgroupb[ko] = [b_group]\n kgroupc[ko] = [c_group]\n except:\n pass\n kccount[c_group] = len(kgroupccount)# add final old c_group count to 
dictionary\n kmcount[kmod] = kmodcount # add final kmmod group count to dictionary\n\n\n kmodtestsize = len(kmodlist) # for multiple testing correction\n kgrpbtestsize = len(kgroupblist)\n kgrpctestsize = len(kgroupclist)\n\n #print \"calculating Fisher's exact test\"\n # count number of kegglist KOs are in each kegg module, perform Fisher's exact test\n dip = 0\n dipcount = {} # possibly unnecessary\n kmodenrich = {}\n kmododds = {}\n\n for mod in kmodlist:\n for ko in kegglist:\n if ko not in kmodd: # some KOs do not exist in a module.\n pass\n elif mod in kmodd[ko]:\n dip += 1\n dipcount[mod] = dip # possibly unnecessary\n oddrat, pval = fisher_exact([\n [dip, len(kegglist) - dip],\n [kmcount[mod]-dip, len(keggcount) - len(kegglist) - kmcount[mod] + dip]\n ])\n if pval < 0.05:\n print \"%s\\n In Path Not in Path\\nDEG : %-7d %d\\nnon-DEG: %-7d %d\\n%.4f\\n\" % (mod, dip, len(kegglist) - dip,kmcount[mod]-dip, len(keggcount) - len(kegglist) - kmcount[mod] + dip, pval )\n kmododds[mod] = oddrat\n kmodenrich[mod] = pval\n dip = 0 # reset for next module\n #sys.stdout.write(\"%s\\n In Path Not in Path\\nDEG : %-7d %d\\nnon-DEG: %-7d %d\\n%.4f\\n\" % (mod, dip, len(kegglist) - dip,kmcount[mod]-dip, len(keggcount) - len(kegglist) - kmcount[mod] + dip, pval ) )\n #print type(pval), pval\n #print type(kmodtestsize), kmodtestsize\n\n dip = 0\n kcenrich = {}\n kcodds = {}\n\n for fn in kgroupclist:\n for ko in kegglist:\n if fn in kgroupc[ko]:\n dip += 1\n #sys.stdout.write(\" In Path Not in Path\\nDEG :%-7d %d\\nnonDEG:%-7d %d\" % (dip, len(kegglist) - dip, kccount[fn]-dip, len(keggcount) - len(kegglist) - kccount[fn] + dip ))\n oddrat, pval = fisher_exact([\n [dip, len(kegglist) - dip],\n [kccount[fn]-dip, size - len(kegglist) - kccount[fn] + dip]\n ])\n #sys.stdout.write(pval)\n #sys.stdout.flush()\n kcodds[fn] = oddrat\n kcenrich[fn] = pval / kgrpctestsize\n dip = 0 # reset for next module\n\n #print kmodenrich\n return kmodenrich, kcenrich\n ## Fisher's Exact Test:\n # In Pathway: Not in Pathway: SUM:\n # DEG : dip len(kegglist) - dip len(kegglist)\n # non-DEG : kmcount[mod]-dip len(keggcount) - len(kegglist) - kmcount[mod] + dip len(keggcount) - len(kegglist)\n # SUM : kmcount[mod] len(keggcount) - kmcount[mod] len(keggcount)\n #\n\n pass", "def order_ideal(self, gens):", "def calculate_modularity_difference(team_i, team_j):\n # I am calling a dedicated function (i.e. calculate_e_ij)\n e_ij = calculate_e_ij(team_i, team_j)\n # I am returning dq\n return 2 * (e_ij - team_i.a * team_j.a)", "def modular_inverse(a, m):\n\n def extended_gcd(_a, _b):\n \"\"\" Use the Extended Euclidean algorithm to calculate the \"extended greatest common divisor\".\n It takes as input two positive integers a and b, then calculates the following:\n 1. The greatest common divisor (gcd) between a and b -- that is, the integer number g which is the largest\n integer for which a/g and b/g both are integers (This can also be obtained using math.gcd)\n 2. 
The integer x and y so that a*x + b*y = gcd(x, y)\n :param _a: Positive integer\n :param _b: Positive integer\n :return: Tuple (gcd, x, y)\n \"\"\"\n previous_remainder, remainder = _a, _b\n current_x, previous_x, current_y, previous_y = 0, 1, 1, 0\n while remainder > 0:\n previous_remainder, (quotient, remainder) = remainder, divmod(\n previous_remainder, remainder)\n current_x, previous_x = previous_x - quotient * current_x, current_x\n current_y, previous_y = previous_y - quotient * current_y, current_y\n # The loop terminates with remainder == 0, x == b and y == -a. This is not what we want, and is because we have\n # walked it through one time \"too many\". Therefore, return the values\n # of the previous round:\n return previous_remainder, previous_x, previous_y\n\n gcd_value, x, y = extended_gcd(a, m)\n if gcd_value != 1:\n return False\n # print('No inverse. gcd (%d, %d) is %d. Decoding is not unique. Choose another key than %d'\n # % (a, m, math.gcd(a, m), a))\n return x % m", "def modinverse(a: int, m: int) -> int:\n if SHOW_WORKING: print(f\"modinverse(a, m) = modinverse({a}, {m})\")\n if SHOW_WORKING: print(f\"\\tWe want to find some x & y such that {a} * x + {m} * y = 1\")\n\n if a < 0 or m <= 0:\n raise ValueError(\"a must be non-negative and m must be positive\")\n\n if SHOW_WORKING: print(f\"Find gcd(a, b) = gcd({a}, {m})\")\n if m > a:\n if SHOW_WORKING: print(f\"\\tb > a. Set r1[0] := m = {m} and r2[0] := a = {a} so that r1[0] > r2[0\")\n r1s, r2s = [m], [a]\n else:\n if SHOW_WORKING: print(f\"\\ta >= b. Set r1[0] := a = {a} and r2[0] := m = {m} so that r1[0] >= r2[0]\")\n r1s, r2s = [a], [m] \n\n if SHOW_WORKING: print(f\"\\tProceeding with algorithm until r2 hits 0. gcd({a}, {m}) will be the ending r1 value:\")\n qs = []\n i = 0\n while r2s[-1] != 0:\n i += 1\n\n if SHOW_WORKING: print(f\"\\t\\tSet q[{i - 1}] := floor(r1[{i - 1}] / r2[{i - 1}]) = floor({r1s[i - 1]} / {r2s[i - 1]}) = floor({round(r1s[i - 1] / r2s[i - 1], 2)}) = {r1s[i - 1] // r2s[i - 1]}\")\n qs.append(r1s[i - 1] // r2s[i - 1])\n\n if SHOW_WORKING: print(f\"\\t\\tSet (r1[{i}], r2[{i}]) := (r2[{i - 1}], r1[{i - 1}] - r2[{i - 1}] * q[{i - 1}]) = ({r2s[i - 1]}, {r1s[i - 1]} - {r2s[i - 1]} * {qs[i - 1]}) = ({r2s[i - 1]}, {r1s[i - 1] - r2s[i - 1] * qs[i - 1]})\")\n r1, r2 = r2s[i - 1], r1s[i - 1] - r2s[i - 1] * qs[i - 1]\n r1s.append(r1)\n r2s.append(r2)\n\n if SHOW_WORKING: print(\"\\t\\t -\")\n \n if SHOW_WORKING: print(f\"\\tStopping condition hit (r2[{i}] = 0). 
Result of gcd({a}, {m}) is r1[{i}] = {r1s[-1]}\")\n\n if r1s[-1] != 1:\n if SHOW_WORKING: print(f\"\\t{a} has no inverse modulo {m} because gcd({a}, {m}) = {r1s[-1]} != 1 (they must be coprime)\")\n return None\n\n if SHOW_WORKING: print(f\"\\n\\tBegin working backwards:\")\n\n def getnestedexpressionstr(leftstr: str, nestedr1r2q: List[Union[int, List[int]]], rightstr: str) -> str:\n if SHOW_WORKING: print(f\"\\t\\tgetnestedexpressionstr('{leftstr}', {nestedr1r2q}, '{rightstr}')\")\n r1: int = nestedr1r2q[0]\n r2: Union[int, List[int]] = nestedr1r2q[1]\n q: int = nestedr1r2q[2]\n if SHOW_WORKING: print(f\"\\t\\t\\tr1 = {r1}, r2 = {r2}, q = {q}\")\n\n if isinstance(r2, int):\n return f\"{leftstr}{r1} - {r2} * {q}{rightstr}\"\n \n if leftstr == rightstr == '':\n return getnestedexpressionstr(f\"{r1} - (\", r2, f\") * {q}\")\n\n return getnestedexpressionstr(f\"{leftstr}{r1} - (\", r2, f\") * {q}{rightstr}\")\n\n def backtrack(index: int, nestedr1r2q: List[Union[int, List[int]]]) -> List[Union[int, List[int]]]:\n \"\"\"Provided an index and an ordered list representing the r1, r2, and q values of the equation\n r1 - r2 * q, this function returns another list where r2 has been broken down to the parts of \n its equation on the previous indexed equation, e.g. if the 3rd and 4th equations from the GCD \n algorithm are:\n (3): r1 - r2 * q2 = 4 - 4 * 1\n (4): r1 - r2 * q2 = 3 - 1 * 3\n then: \n backtrack(4, [3, 1, 3]) -> [3, [4, 3, 1], 3].\n \n This also works when the middle element of the list (the r2 element) is given as a list of parts,\n e.g., if we follow the previous example where additionally equation 2 is:\n (2): r1 - r2 * q2 = 11 - 4 * 2\n then:\n backtrack(3, [3, [4, 3, 1], 3]) -> [3, [4, [11, 4, 2], 1], 3].\"\"\"\n \n if SHOW_WORKING: print(f\"\\t\\tbacktrack({index}, {nestedr1r2q})\")\n\n if index <= 0:\n raise ValueError(\"Can't backtrack from here, please supply a positive index\")\n \n r1: int = nestedr1r2q[0]\n r2: Union[int, List[int]] = nestedr1r2q[1]\n q: int = nestedr1r2q[2]\n\n if index == 1:\n return [r1, [r1s[0], r2s[0], qs[0]], q]\n\n return [r1, backtrack(index - 1, [r1s[index - 1], r2s[index - 1], qs[index - 1]]), q]\n\n if i - 2 > 0:\n expression = backtrack(i - 2, [r1s[i - 2], r2s[i - 2], qs[i - 2]])\n\n nestedexpressionstr: str = getnestedexpressionstr('', expression, '')\n nestedexpressionstr = nestedexpressionstr.replace(str(a), 'a').replace(str(m), 'm')\n\n if SHOW_WORKING: print(f\"\\t\\t{nestedexpressionstr}\")\n if SHOW_WORKING: print(f\"\\t\\t{sympy.simplify(nestedexpressionstr)}\")\n\n x, y = sympy.core.numbers.igcdex(a, m)[:2]\n if SHOW_WORKING: print(f\"\\ta * x + m * y = 1 -> {a} * {x} + {m} * {y} = 1\")\n\n if SHOW_WORKING: print(f\"\\tmodinverse({a}, {m}) = {x}\\t(mod {m}) = {x % m}\")\n \n return x % m", "def __init__(self):\n self.name = \"GomokuAssignment3\"\n self.version = 1.0\n self.NN = 10", "def ModExp(n, k, m):\n a = list(bin(k))[2:]\n a.reverse()\n s = 1\n for i in a:\n if i == '1':\n s = (s * n) % m\n n = (n * n) % m\n return s", "def module_calc(level):\n global modules\n global rods\n modules = level*(level-1)/2\n rods = modules*6\n return modules, rods", "def _regr_basic():", "def find_mpe(fbn, sbn, compat, beta, e):\n evars = set(e)\n freevars = [v for v in fbn.V if v.name not in evars]\n\n # para instanaciar las variables splitted primero. 
Ver popsition 1\n # del paper\n freevars.sort(key=lambda x: x.name in compat) \n \n t = datetime.now()\n ac = dnnf.todnnf(sbn)\n print datetime.now() - t\n print \"dfs\", freevars\n def dfs(q, varsleft, z, k):\n \"\"\"\n q: cota actual\n varsleft: variables que faltan por instanciar. Se sacan del final.\n z: instanciacion parcial actual\n k: numero de variables splitted que falta por instanciar\n \"\"\"\n var = varsleft.pop()\n varname = var.name\n domain = var.Domain\n k -= 1\n clones = []\n if varname in compat:\n for clone in compat[varname]:\n clones.append(clone)\n\n # probar todos sus posibles valores\n for value in domain:\n # agregar ese valor a la instancia parcial\n z[varname] = value\n for clone in clones:\n z[clone] = value\n p = ac.mpe(z)\n\n if varsleft:\n # si todavia quedan variables por asignar\n # hacer prune si podemos\n \n if k<=0:\n # ya todas las variables splitted estan\n # asignadas. Ahora el MPE(sbn) = MPE(fbn), no hace\n # falta hacer mas asignaciones para obtener el\n # valor exacto (Proposicion 1 del paper)\n q = max(q, beta*p)\n else:\n if p*beta <= q:\n # la cota superior sobre sbc es menor que la\n # cota inferior q que llevamos. Por aqui no\n # hay nada mejor\n continue\n else:\n # todavia puede haber algo bueno por aqui\n q = max(q, dfs(q, varsleft, z, k))\n else:\n # si no queda ninguna variable por asignar.\n # por un teorema, el MPE(fbn, x) == beta*MPE(sbn, x)\n q = max(q, beta*p)\n\n # regresar todo al estado orignal\n varsleft.append(var)\n del z[varname]\n for clone in clones:\n del z[clone]\n return q\n\n return dfs(0.0, freevars, e, len(compat))", "def recommend_nmf():\n pass", "def nits(self):", "def get_modularity(adjacency, clusters):\n total_weight = np.sum(adjacency)\n e = get_clusters_adjacencies(adjacency, clusters)\n e = e / total_weight\n a = np.sum(e, axis=1)\n return np.sum(e.diagonal() - np.power(a, 2))", "def createMirrorModule(self):\n\n userSpecName = str(self.previewName.text())\n networkNode = self.returnNetworkNode\n parent = cmds.getAttr(networkNode + \".parentModuleBone\")\n className = cmds.getAttr(networkNode + \".moduleType\")\n\n # check to see if a module already has that name\n modules = utils.returnRigModules()\n mirrorModule = None\n moduleName = None\n\n for module in modules:\n name = cmds.getAttr(module + \".moduleName\")\n if name == userSpecName:\n cmds.confirmDialog(title=\"Name Exists\",\n message=\"A module with that name already exists. 
Please enter a unique name \\\n for the module\",\n icon=\"critical\")\n return\n\n # now check the modules that contain the parent bone\n for module in modules:\n bones = cmds.getAttr(module + \".Created_Bones\")\n splitJoints = bones.split(\"::\")\n createdJoints = []\n\n # create a list of the created bones\n for bone in splitJoints:\n if bone != \"\":\n createdJoints.append(bone)\n\n # see if the parent bone is in that list\n if parent in createdJoints:\n mirrorModule = cmds.getAttr(module + \".mirrorModule\")\n moduleName = cmds.getAttr(module + \".moduleName\")\n\n # if our parent bone's module, has a mirror module, we need to create this new mirror module under that\n # parent instead (if parent = l_thigh, mirror parent should be r_thigh)\n if mirrorModule is not None:\n for module in modules:\n modName = cmds.getAttr(module + \".moduleName\")\n if modName == mirrorModule:\n\n # find the parent's mover (if parent is l_thigh, mover would be l_leg_thigh_mover)\n networkNodes = utils.returnRigModules()\n mover = utils.findMoverNodeFromJointName(networkNodes, parent, False, True)\n\n # find mirror mover\n mirrorMover = mover.replace(moduleName, mirrorModule)\n baseName = cmds.getAttr(module + \".baseName\")\n boneList = cmds.getAttr(module + \".Created_Bones\")\n\n # now, we need to find the joint from the mirror mover, and once there is a match, the parent\\\n # var now becomes that joint\n if mirrorMover.find(\"_mover\") != -1:\n jointName = mirrorMover.partition(\"_mover\")[0]\n\n if jointName in boneList:\n parent = jointName\n\n else:\n # if removing _mover didn't yield a matching joint name, take out the baseName from\\\n # the mover name, and then remove the _mover\n jointName = jointName.replace(baseName + \"_\", \"\")\n\n if jointName in boneList:\n parent = jointName\n\n # arms and leg exception\n mirrorSide = None\n specialCaseModules = [\"ART_Leg_Standard\", \"ART_Arm_Standard\"]\n if className in specialCaseModules:\n side = cmds.getAttr(networkNode + \".side\")\n if side == \"Left\":\n mirrorSide = \"Right\"\n if side == \"Right\":\n mirrorSide = \"Left\"\n\n # create an instance of the module\n mod = __import__(\"RigModules.\" + className, {}, {}, [className])\n\n # get the class name from that module file (returns Modules.ART_Root.ART_Root for example)\n moduleClass = getattr(mod, mod.className)\n jmPath = mod.jointMover\n\n # call functions to create network node, skeleton settings UI\n moduleInst = moduleClass(self.rigUiInst, userSpecName)\n self.rigUiInst.moduleInstances.append(moduleInst)\n networkNodeInst = moduleInst.buildNetwork()\n\n # if mirrorSide exists\n if mirrorSide is not None:\n jmPath = jmPath.partition(\".ma\")[0] + \"_\" + mirrorSide + \".ma\"\n if mirrorSide == \"Left\":\n cmds.setAttr(networkNodeInst + \".side\", lock=False)\n cmds.setAttr(networkNodeInst + \".side\", \"Left\", type=\"string\", lock=True)\n if mirrorSide == \"Right\":\n cmds.setAttr(networkNodeInst + \".side\", lock=False)\n cmds.setAttr(networkNodeInst + \".side\", \"Right\", type=\"string\", lock=True)\n\n # build the settings UI/joint mover/add to outliner\n moduleInst.skeletonSettings_UI(userSpecName)\n moduleInst.jointMover_Build(jmPath)\n moduleInst.addJointMoverToOutliner()\n\n # update the created joints attribute on the network node with the new names\n prefix = str(self.prefix.text())\n suffix = str(self.suffix.text())\n\n if len(prefix) > 0:\n if prefix.find(\"_\") == -1:\n prefix = prefix + \"_\"\n if len(suffix) > 0:\n if suffix.find(\"_\") == -1:\n suffix = \"_\" + 
suffix\n\n createdBones = cmds.getAttr(networkNodeInst + \".Created_Bones\")\n createdBones = createdBones.split(\"::\")\n\n attrString = \"\"\n for bone in createdBones:\n if len(bone) > 1:\n attrString += prefix + bone + suffix + \"::\"\n\n cmds.setAttr(networkNodeInst + \".Created_Bones\", lock=False)\n cmds.setAttr(networkNodeInst + \".Created_Bones\", attrString, type=\"string\", lock=True)\n\n # update the self.currentParent label and the parentModuleBone attr on the network node\n moduleInst.currentParent.setText(parent)\n\n cmds.setAttr(networkNodeInst + \".parentModuleBone\", lock=False)\n cmds.setAttr(networkNodeInst + \".parentModuleBone\", parent, type=\"string\", lock=True)\n\n # find the current parent mover and its scale\n if parent == \"root\":\n mover = \"root_mover\"\n offsetMover = \"root_mover\"\n\n else:\n # find the parent mover name to parent to\n networkNodes = utils.returnRigModules()\n mover = utils.findMoverNodeFromJointName(networkNodes, parent, False, True)\n offsetMover = utils.findMoverNodeFromJointName(networkNodes, parent)\n\n if mover is not None:\n cmds.parentConstraint(mover, userSpecName + \"_mover_grp\", mo=True)\n cmds.scaleConstraint(mover, userSpecName + \"_mover_grp\")\n\n # create the connection geo between the two\n childMover = utils.findOffsetMoverFromName(userSpecName)\n riggingUtils.createBoneConnection(offsetMover, childMover, userSpecName)\n\n globalMover = utils.findGlobalMoverFromName(userSpecName)\n cmds.select(globalMover)\n cmds.setToolTo(\"moveSuperContext\")\n\n utils.fitViewAndShade()\n cmds.refresh(force=True)\n moduleInst.pasteSettings()\n moduleInst.aimMode(True)\n\n # delete UI\n cmds.deleteUI(\"ART_createMirrorModuleUI\", wnd=True)\n\n # update the mirrorModule setting\n self.mirrorMod.setText(userSpecName)\n name = cmds.getAttr(networkNode + \".moduleName\")\n moduleInst.mirrorMod.setText(name)\n\n cmds.setAttr(networkNode + \".mirrorModule\", lock=False)\n cmds.setAttr(networkNode + \".mirrorModule\", userSpecName, type=\"string\", lock=True)\n\n cmds.setAttr(networkNodeInst + \".mirrorModule\", lock=False)\n cmds.setAttr(networkNodeInst + \".mirrorModule\", name, type=\"string\", lock=True)\n\n # mirror transformations\n self.mirrorTransformations()\n\n self.rigUiInst.populateNetworkList()", "def expMod(b,n,m):\n \"\"\"returns (b^n mod m)\"\"\"\n if n==0:\n return 1\n elif n%2==0:\n return expMod((b*b)%m, n/2, m)\n else:\n return(b*expMod(b,n-1,m))%m", "def apply_mod(num):\n return num % MODULO", "def test_shared_members_N(self):\r\n def populate_module(m,x):\r\n m.x=x\r\n m.lx=[x]\r\n m.llx=[[x],[x]]\r\n m.ltx=[(x,)]\r\n m.ldx=[{'x':x}]\r\n m.tx=(x,)\r\n m.tlx=([x],)\r\n m.ttx=((x,),)\r\n m.tdx=({'x':x},)\r\n m.dx={'x':x}\r\n m.dlx={'x':[x]}\r\n m.dtx={'x':(x,)}\r\n m.ddx={'x':{'x':x}}\r\n\r\n def get_element(i):\r\n return [i.x,i.lx[0],i.tx[0],i.dx['x'],i.llx[0][0], i.llx[1][0], i.ltx[0][0], i.ldx[0]['x'], i.tlx[0][0], i.tlx[0][0], i.tdx[0]['x'], i.dlx['x'][0], i.dtx['x'][0], i.ddx['x']['x']]\r\n m1=Module()\r\n m2=Module()\r\n m3=Module()\r\n m4=Module()\r\n x=T.dscalar()\r\n populate_module(m1,x)\r\n populate_module(m2,(x))\r\n populate_module(m4,(x))\r\n #m1.x and m2.x should not be shared as their is no hierarchi link between them.\r\n inst1=m1.make()\r\n inst2=m2.make()\r\n m1.m2=m2\r\n m2.m3=m3\r\n m3.m4=m4\r\n #m1.x and m2.x should be shared as their is a hierarchi link between them.\r\n inst3=m1.make()\r\n inst1.x=1\r\n inst2.x=2\r\n inst3.x=3\r\n for f in get_element(inst1):\r\n assert f==1\r\n for f in 
get_element(inst2):\r\n assert f==2\r\n for f in get_element(inst3)+get_element(inst3.m2)+get_element(inst3.m2.m3.m4):\r\n assert f==3\r\n\r\n inst3.m2.x=4\r\n for f in get_element(inst3)+get_element(inst3.m2)+get_element(inst3.m2.m3.m4):\r\n assert f==4", "def modify(nets, probs, ranks, desc, hypers, seed=0, seed2=0):\n\n name = str(seed)\n\n np.random.seed(seed2)\n tf.random.set_random_seed(seed2)\n random.seed(seed2)\n\n if not rnd: # If randomness is not applied\n print(ranks.sum(axis=1))\n if (ranks.sum(axis=1) == 0).any(): # If there are any network in the bottom three in importance in all objectives\n probs = (ranks.sum(axis=1) == 0) * probs # Only accept a network as modifiable if they rank between 3 least important networks in all three objectives\n probs = probs / np.sum(probs) # Update probabilities once the networks more important than bottom three have been taken away\n trainables, res, mutation, comp, reaching_outs = reducing_mutations(nets, probs, desc)\n else:\n trainables, res, mutation, comp, reaching_outs = increasing_mutations(nets, probs, desc)\n else: # Random application\n comp = np.random.choice(nets)\n _, in_conns, out_conns, _ = desc.get_net_context(comp)\n conns = in_conns + out_conns # Checka si esto da error\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n mutations = [con for con in conns if is_deletable(desc, con)]\n\n mutations += [\"add_con\", \"divide_con\", \"reinit\"]\n\n if is_bypassable(desc, comp):\n mutations += [\"bypass\"]\n\n mutation = np.random.choice(mutations)\n res, trainables = mutate(mutation, desc, comp, conns)\n print(mutation)\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, load=None, init=False, random_seed=seed2, lr=0.001)\n\n model.initialize(load=True, load_path=\"\", variables=trainables)\n\n model.convergence_train(hypers[\"btch_sz\"], iter_lim//100, conv_param, proportion, iter_lim//20, display_step=-1)\n\n results = evaluate_model(model)\n\n del model\n\n if rnd == 1:\n n = \"resultsrandom\"\n else:\n n = \"results\"\n\n np.save(n + str(seed) + \"_\" + str(seed2) + \".npy\", np.concatenate((results, [res, mutation, comp], reaching_outs)))", "def main(project_path='../benchmark_projects/JSON/JSON.und'):\n # project_path = '../benchmark_projects/ganttproject/biz.ganttproject.core/biz.ganttproject.core.und'\n db = understand.open(project_path)\n # entities = db.ents('Java Class')\n cmd_ = 'und export -dependencies class csv {0} {1}'.format('mdg/MDG.csv', project_path)\n os.system('cmd /c \"{0}\"'.format(cmd_))\n\n modulo = Modularity(graph_path=r'mdg/MDG.csv', db=db)\n q = modulo.compute_modularity_newman_leicht()\n print(q)\n return q", "def run_test0():\r\n \r\n ndia, nadi, nnucl, ntraj = 1, 1, 2, 500\r\n\r\n # ======= Hierarchy of Hamiltonians =======\r\n ham = nHamiltonian(ndia, nadi, nnucl)\r\n ham.init_all(2)\r\n print \"id=\", ham.id, \" level=\", ham.level\r\n\r\n ham1 = [] \r\n for tr in xrange(ntraj):\r\n ham1.append( nHamiltonian(ndia, nadi, nnucl) ) \r\n print ham1[tr].id, ham1[tr].level\r\n ham1[tr].init_all(2)\r\n ham.add_child(ham1[tr])\r\n print Cpp2Py(ham1[tr].get_full_id())\r\n\r\n # Set up the models and compute internal variables\r\n # Initialization\r\n # Model parameters \r\n params = { \"model\":1 }\r\n\r\n # Simulation parameters\r\n dt = 1.0\r\n\r\n # Dynamical variables and system-specific 
properties\r\n mean_q = MATRIX(nnucl,1); \r\n sigma_q = MATRIX(nnucl,1); \r\n mean_p = MATRIX(nnucl,1); \r\n sigma_p = MATRIX(nnucl,1); \r\n iM = MATRIX(nnucl,1);\r\n\r\n for i in xrange(nnucl):\r\n mean_q.set(i,0, -1.0) \r\n sigma_q.set(i,0, 0.05) \r\n mean_p.set(i,0, 0.0) \r\n sigma_p.set(i,0, 0.0)\r\n iM.set(i,0, 1.0/2000.0)\r\n\r\n rnd = Random()\r\n q = MATRIX(nnucl,ntraj); aux_functs.sample(q, mean_q, sigma_q, rnd)\r\n p = MATRIX(nnucl,ntraj); aux_functs.sample(p, mean_p, sigma_p, rnd) \r\n\r\n # Initial calculations\r\n q.show_matrix()\r\n\r\n # Compute Initial trajectory probability distributions for all dof\r\n #bin(q, -2.0, 2.0, 0.01)\r\n\r\n ham.compute_diabatic(compute_model, q, params, 1)\r\n ham.compute_adiabatic(1, 1);\r\n ham.add_ethd_adi(q, iM, 1)\r\n\r\n os.system(\"mkdir _2D_dist\")\r\n out1 = open(\"_output.txt\", \"w\"); out1.close() \r\n\r\n # Do the propagation\r\n for i in xrange(100):\r\n\r\n aux_functs.bin2(q, -2.0, 2.0, 0.1, -2.0, 2.0, 0.1, \"_2D_dist/_2D_distrib_\"+str(i)+\"_.txt\")\r\n\r\n Verlet1(dt, q, p, iM, ham, compute_model, params, 1)\r\n\r\n #=========== Properties ==========\r\n\r\n Ekin, Epot, Etot = aux_functs.compute_etot(ham, p, iM)\r\n\r\n # Print the ensemble average - kinetic, potential, and total energies\r\n # Print the tunneling information. Here, we count each trajectory across the barrier.\r\n out1 = open(\"_output.txt\", \"a\")\r\n out1.write( \" %8.5f %8.5f %8.5f %8.5f\\n\" % ( i*dt, Ekin, Epot, Etot ) )\r\n out1.close()", "def get_modularity_other_b2(A, cluster_indices):\n # define the number of nodes in the graph and the number of clusters\n n = len(cluster_indices)\n nclusters = max(cluster_indices) + 1\n girvan_e = np.zeros((nclusters, nclusters))\n volume = 0\n for i in range(n):\n for j in range(n):\n if i < j:\n weight = A[i][j]\n volume += weight\n a = cluster_indices[i]\n b = cluster_indices[j]\n if a == b:\n girvan_e[a][a] += weight\n else:\n girvan_e[a][b] += weight/2\n girvan_e[b][a] += weight/2\n for a in range(nclusters):\n for b in range(nclusters):\n girvan_e[a][b] /= volume\n girvan_a = [sum(girvan_e[i]) for i in range(nclusters)]\n modularity = sum(girvan_e[i][i] - girvan_a[i]**2 for i in range(nclusters))\n return modularity", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n 
opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def bgll(self, graph, node_count, min_mod, max_pass):\n\n #the belonging of the node\n bl = [i for i in range(node_count)]\n #the node's weight in community\n _in = [0.0] * node_count\n #the node's weight in graph\n _tot = []\n #total weight of a node, just a copy of _tot\n k = []\n #the total weight of the graph\n m = []\n\n #inital the in-param\n network = [[0.0] * node_count for n in range(node_count)]\n for node, tag, weight in graph:\n network[node][tag] = weight\n for node in network:\n k.append(sum(node))\n _tot = k[:]\n m = sum(k)\n #inital the in-param\n\n def modularity():\n \"\"\"\n This function mainly computes the modularity of the network\n Return:\n mod->the modularity value\n \"\"\"\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q\n\n def modularity_gain(n, c, dnodecomm):\n \"\"\"\n This function mainly computes the modularity gain of a node moving\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n Return:\n gain->modularity gain\n \"\"\"\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m\n\n def neigh_comm(n):\n \"\"\"\n This function mainly computes the weight between the node and it's neighbour community\n Param:\n n->node id\n Return:\n nc->the map of the weight between the node and it's neighbour community\n nc=>{cid,weight}\n \"\"\"\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc\n\n def insert(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of insert the node into community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] += k[n]\n _in[c] += 2 * dnodecomm + network[n][n]\n bl[n] = c\n\n def remove(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of remove the node off community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] -= k[n]\n _in[c] -= 2 * dnodecomm + network[n][n]\n bl[n] = -1\n\n def detect():\n \"\"\"\n This function mainly detect the community of the graph.\n \"\"\"\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod\n\n detect()\n return bl", "def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if 
MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))", "def create_soft_mod_deformer(wave=False, *args):\n bpf = cmds.intFieldGrp(widgets[\"bpFrameIFG\"], q=True, v1=True)\n currentTime = cmds.currentTime(q=True)\n cmds.currentTime(bpf)\n\n check = cmds.checkBoxGrp(widgets[\"checkCBG\"], q=True, v1=True)\n defName = cmds.textFieldGrp(widgets[\"smdTFG\"], tx=True, q=True)\n scaleFactor = cmds.floatFieldGrp(widgets[\"scaleFFG\"], q=True, v1=True)\n front = cmds.checkBoxGrp(widgets[\"frontCBG\"], q=True, v1=True)\n auto = cmds.checkBoxGrp(widgets[\"autoscaleCBG\"], q=True, v1=True)\n\n if (cmds.objExists(defName)):\n cmds.warning(\"An object of this name, {0}, already exists! Choose a new name!\".format(defName))\n return()\n\n# TODO - check that we've got only verts (or cv's?) selected - always do average?\n vertex = cmds.ls(sl=True, fl=True)\n if not vertex:\n cmds.warning(\"Must select at least one vertex\")\n return()\n\n # get objects - in no parent object (to parent rig to) then parent to the object itself\n obj = vertex[0].partition(\".\")[0]\n\n# TODO below is if we have a mesh . . . broaden to nurbs? or curve?\n if cmds.objectType(obj) == \"mesh\":\n obj = cmds.listRelatives(obj, p=True)[0]\n parentObject = cmds.textFieldButtonGrp(widgets[\"mainCtrlTFBG\"], q=True, tx=True)\n if not parentObject:\n parentObject = obj\n\n vertPos = rig.average_point_positions(vertex)\n\n if check and (not front):\n deformer_check(obj)\n\n cmds.select(obj)\n softMod = defName\n softModAll = cmds.softMod(relative=False, falloffCenter=vertPos, falloffRadius=5.0, n=softMod,\n frontOfChain=front)\n cmds.rename(softModAll[0], softMod)\n softModXform = cmds.listConnections(softModAll[0], type=\"transform\")[0]\n\n ctrlName = defName + \"_zeroed_GRP\"\n control = cmds.group(name=ctrlName, em=True)\n\n# TODO - make a little swatch to set the color of the control\n controlGrp = cmds.group(control, n=\"{0}_static_GRP\".format(control.rpartition(\"_\")[0]))\n cmds.xform(controlGrp, ws=True, t=vertPos)\n if wave:\n ctrlType = \"arrow\"\n else:\n ctrlType = \"cube\"\n topCtrl, topGrp = add_top_level_ctrl(control, ctrlType, cmds.listRelatives(obj, s=True)[0])\n\n# TODO figure out how to transpose the space for just the scale\n rig.connect_transforms(control, softModXform)\n\n cmds.addAttr(topCtrl, ln=\"__XTRA__\", at=\"enum\", k=True)\n cmds.setAttr(\"{0}.__XTRA__\".format(topCtrl), l=True)\n cmds.addAttr(topCtrl, ln=\"envelope\", at=\"float\", min=0, max=1, k=True, dv=1)\n cmds.addAttr(topCtrl, ln=\"falloff\", at=\"float\", min=0, max=100, k=True, dv=5)\n cmds.addAttr(topCtrl, ln=\"mode\", at=\"enum\", enumName= \"volume=0:surface=1\", k=True)\n\n # connect that attr to the softmod falloff radius\n cmds.connectAttr(\"{0}.envelope\".format(topCtrl), \"{0}.envelope\".format(softMod))\n cmds.connectAttr(\"{0}.falloff\".format(topCtrl), \"{0}.falloffRadius\".format(softMod))\n cmds.connectAttr(\"{0}.mode\".format(topCtrl), \"{0}.falloffMode\".format(softMod))\n cmds.setAttr(\"{0}.inheritsTransform\".format(softModXform), 0)\n cmds.setAttr(\"{0}.visibility\".format(softModXform), 0)\n\n if auto:\n 
calsz = rig.calibrate_size(obj, .15)\n if calsz:\n rig.scale_nurbs_control(topCtrl, calsz, calsz, calsz)\n cmds.setAttr(\"{0}.falloff\".format(topCtrl), 2*calsz)\n else:\n cmds.warning(\"I had an issue getting the calibrated scale of {0}\".format(obj))\n rig.scale_nurbs_control(topCtrl, scaleFactor, scaleFactor, scaleFactor)\n\n defGroup = cmds.group(empty=True, n=(defName + \"_deform_GRP\"))\n cmds.xform(defGroup, ws=True, t=vertPos)\n cmds.parent(softModXform, controlGrp, defGroup)\n\n# TODO - use the name of the deformer instead. . .\n masterGrp = cmds.group(name=\"{0}_mstr_GRP\".format(obj), em=True)\n cmds.parent(topGrp, defGroup, masterGrp)\n\n if wave:\n softWave(softMod, topCtrl, control)\n\n cmds.parent(masterGrp, parentObject)\n\n newName = rig.increment_name(defName)\n cmds.textFieldGrp(widgets[\"smdTFG\"], tx=newName, e=True)\n\n cmds.select(topCtrl)\n cmds.currentTime(currentTime)\n\n return (softMod, control, obj, defGroup)", "def J (self, n):", "def _calc_young_modulus(dataset: np.ndarray) -> YoungModulus:\n segment_x_length: int = len(dataset) // 10\n max_derivative_index = np.argmax([\n dataset[i + segment_x_length][1] - dataset[i][1]\n for i in range(len(dataset) - segment_x_length)\n ])\n first_point = dataset[max_derivative_index]\n second_point = dataset[max_derivative_index + segment_x_length]\n modulus = (second_point[1] - first_point[1]) / (second_point[0] - first_point[0])\n return YoungModulus(modulus, first_point, second_point)", "def aks( n ):\n\n def aks_mod( polynomial , r ):\n \"\"\"\n This function is used in aks.\n polynomial modulo ( x^r - 1 )\n \"\"\"\n aks_mod = polynomial.coefficients\n total = aks_mod[ : r ]\n aks_mod = aks_mod[ r : ]\n while len(aks_mod) - 1 >= r :\n for i in range(r):\n total[i] += aks_mod[i]\n aks_mod = aks_mod[ r : ]\n for i in range(len(aks_mod)):\n total[i] += aks_mod[i]\n return array_poly_mod( total , polynomial.mod )\n\n lg = math.log( n , 2 )\n k = int( lg * lg )\n\n if arith1.powerDetection( n )[ 1 ] != 1: #Power Detection\n print(\" n is not prime \")\n return False\n\n start = 3\n while 1:\n d = arith1.gcd.gcd( start , n )\n if 1 < d < n:\n print(\"n is not prime\")\n return False\n x = n % start\n N = x\n for i in range( 1 , k + 1 ):\n if N == 1:\n break\n N = ( N * x ) % start\n if i == k:\n r = start\n break\n start += 1\n d = arith1.gcd.gcd( r , n )\n if 1 < d < n:\n print(\" n is not prime \")\n return False\n if n <= r:\n print(\" n is prime \")\n return True\n\n e = multiplicative.euler( r ) #Cyclotomic Conguence\n e = math.sqrt( e )\n e = int( e * lg )\n for b in range( 1 , e+1 ):\n f = array_poly_mod( [ b , 1 ] , n )\n total = array_poly_mod( [ 1 ] , n )\n count = n\n while count > 0:\n if count & 1:\n total = total * f\n total = aks_mod( total , r )\n f = f.power()\n f = aks_mod( f , r )\n count = count >> 1\n total_poly = total.coefficients_to_dict()\n if total_poly != { 0 : b , n % r : 1 }:\n print(\" n is not prime \")\n return False\n print(\" n is prime \")\n return True", "def PolyMod(f, g):\n return f % g", "def YoungModulus(material):\n if material == \"mild\":\n return 200e9\n else:\n if material == \"al\":\n return 69e9\n else:\n raise ValueError(\"Invalid material `\"+material+\"'\")", "def simulate_modules(self):\n for discrete_mod in list(self.modcells.keys()):\n # print(discrete_mod)\n # print('in simulate_modules, iterating to ', discrete_mod)\n self.simulate_module(discrete_mod)", "def mortality(self):\n pass", "def inverse_mod(a, m):\r\n g, x, y = extended_greatest_common_denominator(a, m)\r\n if 
g != 1:\r\n raise Exception('modular inverse does not exist')\r\n else:\r\n return x % m", "def expMod(b,n,m):\r\n \"\"\"returns (b^n mod m)\"\"\"\r\n if n==0:\r\n return 1\r\n elif n%2==0:\r\n return expMod((b*b)%m, n/2, m)\r\n else:\r\n return(b*expMod(b,n-1,m))%m", "def pohlig_hellman(g: int, h: int, n: int):\n\n def group_of_prime_power_order(g, h, n=tuple):\n # n = (p, e) prime factor exponent times he appears\n p, e = n\n n = square_and_multiply(p, e)\n\n x = 0\n # By Lagrange's theorem, this element has order p.\n y = square_and_multiply(g, square_and_multiply(p, e - 1, n), n)\n\n for k in range(e):\n hk = square_and_multiply(\n square_and_multiply(g, -x, n) * h,\n square_and_multiply(p, e - 1 - k, n),\n n,\n )\n dk = pollard_rho(y, hk, n)\n x += dk * square_and_multiply(p, k, n)\n\n return x\n\n pFactors = findPrimeFactors(n, True)\n integers, modulis = [], []\n\n for p, e in pFactors.items():\n ni = square_and_multiply(p, e)\n gi = square_and_multiply(g, (n // ni), n)\n hi = square_and_multiply(h, (n // ni), n)\n\n xi = group_of_prime_power_order(gi, hi, (p, e))\n\n integers.append(xi)\n modulis.append(ni)\n\n return ChineseRemainder(integers, modulis)", "def test_relevance_with_itself():\n state = gen_state_cgpm(get_data_separated)\n assert np.allclose(state.relevance_probability(2, [2], 1), 1.0)", "def initial_shear_modulus(self):\n pass", "def test_get_modifier_distribution():\n counter = {}\n for i in range(999):\n modifier = mockdata.get_modifier(i)\n counter[modifier] = counter.get(modifier, 0) + 1\n assert counter[\"Modifier 1\"] == counter[\"Modifier 2\"]\n assert counter[\"Modifier 1\"] == counter[\"Modifier 3\"]", "def preProcess(self):\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n votes = 0\n winner = ''\n for voter in self.module[moduleName].keys():\n if self.module[moduleName][voter] > votes:\n votes = self.module[moduleName][voter]\n winner = voter\n self.module[moduleName] = winner\n\n # quick and dirty algorithm O(n^2). Can be done in O(n*lg(n))\n moduleLength = {}\n # find module lengths first\n for moduleName in self.module.keys():\n parts = moduleName.split('/')\n if len(parts) not in moduleLength:\n moduleLength[len(parts)] = []\n moduleLength[len(parts)].append(moduleName)\n lengths = moduleLength.keys()\n lengths.sort(reverse=True)\n\n for length in lengths:\n # FIXME: needs to be configurable.\n if length > 2:\n parents = {}\n for moduleName in self.module.keys():\n parts = moduleName.split('/')\n # group all parts of same length.\n if len(parts) == length:\n parent = moduleName.rsplit('/', 1)[0]\n if parent not in parents:\n parents[parent] = []\n parents[parent].append([moduleName, self.module[moduleName]])\n # check if all the children have the same developer as parent. 
If so remove the children.\n for parent in parents.keys():\n same = True\n parentDeveloper = self.module[parent]\n for moduleName, developer in parents[parent]:\n if developer != parentDeveloper:\n same = False\n if same:\n for moduleName, developer in parents[parent]:\n del self.module[moduleName]", "def mutate(self, module, operators, output):\n _, mutant_asts = self.generate_mutant_asts(module, operators)\n assert self.original_ast is not None\n\n mutated_modules = []\n mut_num = 0\n with timeblock('Time for generating modules'):\n for (mutant_ast, operator) in mutant_asts:\n module_name = \"mutant_\" + operator[1].name()+'_'+operator[0].__name__+'_'+ str(mut_num)\n mut_num += 1\n mutated_module = self.generate_mutant_module(mutant_ast, module_name)\n mutated_modules.append((mutated_module, mutant_ast, operator))\n\n if output:\n MuUtilities.output(self.original_ast, mutant_ast, module_name)\n return mutated_modules", "def __init__(self,\n numberAnts,\n numberFeatures,\n dictionaryName,\n dictionaryFolderHier='',\n numberCycles=50,\n decayRate=0.2,\n beta=1,\n initialPheromone=0.2,\n exploreExploitCoeff=0.7\n ):\n # Initialize posting tokens\n self.postingTokens = set()\n\n # Attempt to load dictionary\n self.dictionary = Dictionary(dictionaryName=dictionaryName, folderHierarchy=dictionaryFolderHier)\n self.dictExists = self.dictionary.loadFromDisk()\n\n # Load dictionary similarities\n if self.dictExists is True:\n # Load similarities\n self.dictionary.loadSimilarities()\n\n # Keep dictionary postings as a set\n self.postingTokens = set(self.dictionary.postings)\n\n # Set parameters for algorithm\n self.numberCycles = numberCycles\n self.numberAnts = numberAnts\n self.beta = beta\n self.initialPheromone = initialPheromone\n\n # Verify decay rate between 0 and 1\n if 0 <= decayRate <= 1:\n self.decayRate = decayRate\n else:\n self.decayRate = 0.2\n\n # Verify exploration/exploitation coefficient between 0 and 1\n if 0 <= exploreExploitCoeff <= 1:\n self.exploreExploitCoefficient = exploreExploitCoeff\n else:\n self.exploreExploitCoefficient = 0.7\n\n # Verify number of terms in dictionary to set selected number of features\n if self.dictionary.termCount > numberFeatures:\n self.numberFeatures = numberFeatures\n else:\n self.numberFeatures = self.dictionary.termCount\n\n # List of feature selection counter for each iteration\n self.featureCounterIteration = {}\n\n # Initialize feature counter variable for ants\n self.featureCounter = {}\n self.totalFeatureCounter = 0", "def g_n():\n for gname in os.listdir(sroot):\n if gname != 's1-league1-game1':\n continue\n if gname.startswith('s1'):\n p0 = os.path.join(sroot, gname)\n p1 = os.path.join(p0, 'commitment', 'jperret')\n p2 = os.path.join(p0, 'commitment', 'sa')\n if os.path.isdir(p1) and os.path.isdir(p2):\n for fname in os.listdir(p1):\n if fname.endswith('.aa'):\n bname = fname[:-3]\n #~ if bname == 's1-league1-game2_07':\n #~ continue\n a = ad.Annotations(os.path.join(p1, fname))\n a.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a.gen_full_struct()\n a.commitments = list(u for u in a.units if u.type == 'Commitment')\n a2 = ad.Annotations(os.path.join(p2, fname))\n a2.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a2.gen_full_struct()\n a2.commitments = list(u for u in a2.units if u.type == 'Commitment')\n yield bname, (a, a2)", "def __init__(self):\n self.modulo = Bn.from_decimal(\n 
\"104274339861599109435228713715012587636997755949475388588516377743858594829526246207815488124753620113654378182611410869843692693515483841382145633329409600605358434237971173658402530546783352648106247803514459454270482848535758539851532076708790494943517894654046363923325714750480680188239471613308156143136830981518627799499285672172738874571644891075726999700275877298890101149587792836886648258733566308895110719770960720300899066897289080371563621668124216187770149740826973622700315037066876583866156345639276386510201006397141393775575135928749962477326783336184434815042335057049432193006499521591281357491659\")\n self.generator = FFElement(Bn.from_decimal(\n \"81099144573950922883933823309397903831307729923277144841334749422315595743437219371821139976270089085817737914449263008752457618988770955139245864971428025146021819160336876692205993068777078938240475549226164124952577975303221660397947822711916352061614341728562734417872584743294922245761212731150483802964283263230741041446988298186702952974697967148198190463075071628059974486966250538161512056563568090071474143434146441589514816635339916481756264419884177841781745530245175458079612447970067897693825433138760936325168807521204548329680909932742314536162869895548442852131478295912996232046258690790851591666552\"),\n self.modulo, self.order())", "def modularity_spectrum(G):\n import scipy as sp\n\n if G.is_directed():\n return sp.linalg.eigvals(nx.directed_modularity_matrix(G))\n else:\n return sp.linalg.eigvals(nx.modularity_matrix(G))", "def bsgs(g: int, res: int, modulo: int):\n\n assert millerRabin(modulo)\n\n # https://en.wikipedia.org/wiki/Baby-step_giant-step\n\n from ressources.multGroup import inv\n\n m = integer_sqrt(modulo) + 1\n\n hashTable = {square_and_multiply(g, j, modulo): j for j in range(m)} # Baby-Step\n\n gm = square_and_multiply(g, m, modulo)\n invGm = inv(gm, modulo)\n\n # Initialization\n y = res\n\n # Search for an equivalence in the table - Giant-Step\n for i in range(m):\n\n if y in hashTable:\n return i * m + hashTable[y]\n\n y = (y * invGm) % modulo\n\n return -1", "def mmn(modul_number, modul_size, kmin, kmax, g, c, offset):\n check_unique = 0 # Checks if inter mod. con. 
are unique\n check_con = 0 # Checks if network is connected\n while check_unique != modul_number*c/2 or check_con != 1:\n inter_nodes = np.zeros((modul_number, c))\n network = gt.Graph(directed=False)\n # Constructs disconnected modules and combines them in a network\n # in the graph tool format.\n for i in range(modul_number):\n module_network, inter_nodes[i] = configuration_model(\n g, kmin, kmax,\n modul_size, c, offset)\n # Assigns the nodes to the corresponding module.\n inter_nodes[i] += i*modul_size\n network = gt.generation.graph_union(network, module_network)\n\n inter_nodes = np.transpose(inter_nodes)\n for row in inter_nodes:\n np.random.shuffle(row)\n\n inter_links = inter_nodes.ravel().reshape((int(modul_number*c/2), 2))\n check_unique = len(np.unique(inter_links, axis=0))\n network.add_edge_list(inter_links)\n\n _, check_con = gt.topology.label_components(network)\n check_con = len(check_con)\n \n return network", "def update_module_indexes(self, generation):\n self.species_module_index_map = {}\n\n if Config.blueprint_nodes_use_representatives:\n # For representatives species_module_index_map becomes: representative -> (species index, member index)\n for rep, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n for species_index, species in enumerate(generation.module_population.species):\n if module in species:\n self.species_module_index_map[rep] = \\\n (species_index, generation.module_population.species[species_index].members.index(module))\n break\n else:\n for spc_index, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n if spc_index < len(generation.module_population.species) and \\\n module in generation.module_population.species[spc_index]:\n\n self.species_module_index_map[spc_index] = \\\n generation.module_population.species[spc_index].members.index(module)\n\n elif Config.allow_cross_species_mappings:\n for new_species_index, species in enumerate(generation.module_population.species):\n if module in species:\n \"\"\"found module in new species\"\"\"\n self.species_module_index_map[spc_index] = \\\n (new_species_index,\n generation.module_population.species[new_species_index].members.index(module))\n break", "def bulk_modulus():\n\n return 10000.0", "def define_seq_modifications():\n modifications = {\n '0': {\n 'colour': 'k',\n 'name': 'unmodified',\n },\n '1': {\n 'colour': 'firebrick',\n 'name': 'succinylated',\n 'target_res': 'LYS',\n 'replace_res': 'GLU',\n }\n }\n\n return modifications", "def derive_mod_name(self):\n\n # a) if we're lucky, this is a Fomod install w/ a modname attr\n # TODO: some non-Fomod mods still include an \"info.xml\" file\n if self.has_fomod:\n fname = self.fomod.modname.name\n # fix: the fomod name often includes a version number on the end (like \"Soul Gem Things v1.4.5\")\n vmatch = _version_format.search(fname)\n if vmatch:\n fname = fname[:vmatch.start()].strip()\n\n print(\"fomod found:\")\n print(\" orig:\", self.fomod.modname.name)\n print(\" name:\", fname)\n\n # return self.fomod.modname.name\n return fname\n\n # if not, we'll have to get clever\n\n # b) if the mod includes esp/bsa/etc. 
files, they're often\n # labeled with the mod's \"real\" name\n bname = os.path.basename\n split = os.path.splitext\n\n # check top 2 levels\n # accumulate names\n _names = []\n ext_re = re.compile(r\".*\\.(es[pm]|bsa)$\")\n for f in filter(lambda s: ext_re.search(s.lower()),\n self.archive_files):\n # if re.search(r\".*\\.(es[pm]|bsa)$\", f.lower()):\n _names.append(split(bname(f))[0])\n\n print(f\"names from esp/bsa ({len(_names)}):\")\n for n in _names:\n print(f\" {n}\")\n\n # c) see if we can figure it out from the archive name;\n # try to ignore the version numbers\n archive_name = self.arc_path.stem\n\n # archives downloaded from the nexus generally have\n # the mod name, then a hyphen followed by the modid, then\n # (optionally) another hyphen and version info\n m = _nexus_archive_name_format.match(archive_name)\n\n if m:\n name = m['name']\n\n # TODO: if we can get the modid, we should be able to look up the mod info on the nexus...though that would of course require writing an async web-request module...\n modid = m['modid']\n ver = m['version']\n\n if name:\n # ==> eventually, this should pull the name from the nexus\n\n # sometimes there's some extra stuff like (redundant)\n # version info on the end of the name\n exm = _extra_stuff.search(name)\n if exm:\n name = name[:exm.start()]\n\n if ver:\n ver = ver.replace(\"-\", \".\")\n\n print(\"Derived from archive name:\")\n print(\" name:\", name)\n print(\" modid:\", modid)\n print(\" version:\", ver)\n return name\n\n return \"\"", "def advancedStats():", "def getLoadOrder(self,modNames,asTuple=True):\n data = self.data\n modNames = list(modNames) #--Don't do an in-place sort.\n modNames.sort()\n modNames.sort(key=lambda a: (a in data) and data[a].mtime) #--Sort on modified\n modNames.sort(key=lambda a: a[-1].lower()) #--Sort on esm/esp\n #--Match Bethesda's esm sort order\n # - Start with masters in chronological order.\n # - For each master, if it's masters (mm's) are not already in list, \n # then place them ahead of master... but in REVERSE order. E.g., last\n # grandmaster will be first to be added.\n def preMaster(modName,modDex):\n \"\"\"If necessary, move grandmasters in front of master -- but in \n reverse order.\"\"\"\n if self.data.has_key(modName):\n mmNames = list(self.data[modName].masterNames[:])\n mmNames.reverse()\n for mmName in mmNames:\n if mmName in modNames:\n mmDex = modNames.index(mmName)\n #--Move master in front and pre-master it too.\n if mmDex > modDex:\n del modNames[mmDex]\n modNames.insert(modDex,mmName)\n modDex = 1 + preMaster(mmName,modDex)\n return modDex\n #--Read through modNames.\n modDex = 1\n while modDex < len(modNames):\n modName = modNames[modDex]\n if modName[-1].lower() != 'm': break\n if self.circularMasters([modName]):\n modDex += 1\n else:\n modDex = 1 + preMaster(modName,modDex)\n #--Convert? 
and return\n if asTuple:\n return tuple(modNames)\n else:\n return modNames", "def g():", "def test_reiterative_leiden(self):\n edges = _get_edges()\n single_modularity, single_partitions = gpn.leiden(edges, seed=seed)\n\n repetitive_modularity, repetitive_partitions = gpn.leiden(edges, seed=seed, trials=10)\n self.assertTrue(single_modularity < repetitive_modularity)", "def greedy_modularity_communities(G, weight=None):\n\n # Count nodes and edges\n N = len(G.nodes())\n m = sum([d.get('weight', 1) for u, v, d in G.edges(data=True)])\n q0 = 1.0 / (2.0*m)\n\n # Map node labels to contiguous integers\n label_for_node = dict((i, v) for i, v in enumerate(G.nodes()))\n node_for_label = dict((label_for_node[i], i) for i in range(N))\n\n # Calculate degrees\n k_for_label = G.degree(G.nodes(), weight=weight)\n k = [k_for_label[label_for_node[i]] for i in range(N)]\n\n # Initialize community and merge lists\n communities = dict((i, frozenset([i])) for i in range(N))\n merges = []\n\n # Initial modularity\n partition = [[label_for_node[x] for x in c] for c in communities.values()]\n q_cnm = modularity(G, partition)\n\n # Initialize data structures\n # CNM Eq 8-9 (Eq 8 was missing a factor of 2 (from A_ij + A_ji)\n # a[i]: fraction of edges within community i\n # dq_dict[i][j]: dQ for merging community i, j\n # dq_heap[i][n] : (-dq, i, j) for communitiy i nth largest dQ\n # H[n]: (-dq, i, j) for community with nth largest max_j(dQ_ij)\n a = [k[i]*q0 for i in range(N)]\n dq_dict = dict(\n (i, dict(\n (j, 2*q0 - 2*k[i]*k[j]*q0*q0)\n for j in [\n node_for_label[u]\n for u in G.neighbors(label_for_node[i])]\n if j != i))\n for i in range(N))\n dq_heap = [\n MappedQueue([\n (-dq, i, j)\n for j, dq in dq_dict[i].items()])\n for i in range(N)]\n H = MappedQueue([\n dq_heap[i].h[0]\n for i in range(N)\n if len(dq_heap[i]) > 0])\n\n # Merge communities until we can't improve modularity\n while len(H) > 1:\n # Find best merge\n # Remove from heap of row maxes\n # Ties will be broken by choosing the pair with lowest min community id\n try:\n dq, i, j = H.pop()\n except IndexError:\n break\n dq = -dq\n # Remove best merge from row i heap\n dq_heap[i].pop()\n # Push new row max onto H\n if len(dq_heap[i]) > 0:\n H.push(dq_heap[i].h[0])\n # If this element was also at the root of row j, we need to remove the\n # duplicate entry from H\n if dq_heap[j].h[0] == (-dq, j, i):\n H.remove((-dq, j, i))\n # Remove best merge from row j heap\n dq_heap[j].remove((-dq, j, i))\n # Push new row max onto H\n if len(dq_heap[j]) > 0:\n H.push(dq_heap[j].h[0])\n else:\n # Duplicate wasn't in H, just remove from row j heap\n dq_heap[j].remove((-dq, j, i))\n # Stop when change is non-positive\n if dq <= 0:\n break\n\n # Perform merge\n communities[j] = frozenset(communities[i] | communities[j])\n del communities[i]\n merges.append((i, j, dq))\n # New modularity\n q_cnm += dq\n # Get list of communities connected to merged communities\n i_set = set(dq_dict[i].keys())\n j_set = set(dq_dict[j].keys())\n all_set = (i_set | j_set) - set([i, j])\n both_set = i_set & j_set\n # Merge i into j and update dQ\n for k in all_set:\n # Calculate new dq value\n if k in both_set:\n dq_jk = dq_dict[j][k] + dq_dict[i][k]\n elif k in j_set:\n dq_jk = dq_dict[j][k] - 2.0*a[i]*a[k]\n else:\n # k in i_set\n dq_jk = dq_dict[i][k] - 2.0*a[j]*a[k]\n # Update rows j and k\n for row, col in [(j, k), (k, j)]:\n # Save old value for finding heap index\n if k in j_set:\n d_old = (-dq_dict[row][col], row, col)\n else:\n d_old = None\n # Update dict for j,k only (i 
is removed below)\n dq_dict[row][col] = dq_jk\n # Save old max of per-row heap\n if len(dq_heap[row]) > 0:\n d_oldmax = dq_heap[row].h[0]\n else:\n d_oldmax = None\n # Add/update heaps\n d = (-dq_jk, row, col)\n if d_old is None:\n # We're creating a new nonzero element, add to heap\n dq_heap[row].push(d)\n else:\n # Update existing element in per-row heap\n dq_heap[row].update(d_old, d)\n # Update heap of row maxes if necessary\n if d_oldmax is None:\n # No entries previously in this row, push new max\n H.push(d)\n else:\n # We've updated an entry in this row, has the max changed?\n if dq_heap[row].h[0] != d_oldmax:\n H.update(d_oldmax, dq_heap[row].h[0])\n\n # Remove row/col i from matrix\n i_neighbors = dq_dict[i].keys()\n for k in i_neighbors:\n # Remove from dict\n dq_old = dq_dict[k][i]\n del dq_dict[k][i]\n # Remove from heaps if we haven't already\n if k != j:\n # Remove both row and column\n for row, col in [(k, i), (i, k)]:\n # Check if replaced dq is row max\n d_old = (-dq_old, row, col)\n if dq_heap[row].h[0] == d_old:\n # Update per-row heap and heap of row maxes\n dq_heap[row].remove(d_old)\n H.remove(d_old)\n # Update row max\n if len(dq_heap[row]) > 0:\n H.push(dq_heap[row].h[0])\n else:\n # Only update per-row heap\n dq_heap[row].remove(d_old)\n\n del dq_dict[i]\n # Mark row i as deleted, but keep placeholder\n dq_heap[i] = MappedQueue()\n # Merge i into j and update a\n a[j] += a[i]\n a[i] = 0\n\n communities = [\n frozenset([label_for_node[i] for i in c])\n for c in communities.values()]\n return sorted(communities, key=len, reverse=True)", "def test_shared_members(self):\r\n\r\n def populate_module(m,x):\r\n m.x=x\r\n m.lx=[x]\r\n m.llx=[[x],[x]]\r\n m.ltx=[(x,)]\r\n m.ldx=[{'x':x}]\r\n m.tx=(x,)\r\n m.tlx=([x],)\r\n m.ttx=((x,),)\r\n m.tdx=({'x':x},)\r\n m.dx={'x':x}\r\n m.dlx={'x':[x]}\r\n m.dtx={'x':(x,)}\r\n m.ddx={'x':{'x':x}}\r\n\r\n def get_element(i):\r\n return [i.x,i.lx[0],i.tx[0],i.dx['x'],i.llx[0][0], i.llx[1][0], i.ltx[0][0], i.ldx[0]['x'], i.tlx[0][0], i.tlx[0][0], i.tdx[0]['x'], i.dlx['x'][0], i.dtx['x'][0], i.ddx['x']['x']]\r\n\r\n m1=Module()\r\n m2=Module()\r\n x=T.dscalar()\r\n populate_module(m1,x)\r\n populate_module(m2,x)\r\n #m1.x and m2.x should not be shared as their is no hierarchi link between them.\r\n inst1=m1.make()\r\n inst2=m2.make()\r\n m1.m2=m2\r\n #m1.x and m2.x should be shared as their is a hierarchi link between them.\r\n inst3=m1.make()\r\n inst1.x=1\r\n inst2.x=2\r\n inst3.x=3\r\n for f in get_element(inst1):\r\n assert f==1\r\n for f in get_element(inst2):\r\n assert f==2\r\n for f in get_element(inst3)+get_element(inst3.m2):\r\n assert f==3\r\n\r\n inst3.m2.x=4\r\n for f in get_element(inst3)+get_element(inst3.m2):\r\n assert f==4", "def _mult_inverse(self, a, m):\n g, x, y = self._egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m", "def _compute_nmig(mus_train, ys_train, active):\n print(\"start nmig\")\n score_dict = {}\n discretized_mus = utils.make_discretizer(mus_train)\n m = utils.discrete_mutual_info(discretized_mus, ys_train)\n # m shape: (10, nr_ground_truth)\n print(\"finished discretizing\")\n assert m.shape[0] == mus_train.shape[0]\n assert m.shape[1] == ys_train.shape[0]\n entropy = utils.discrete_entropy(ys_train)\n if active is not None:\n assert len(active) <= ys_train.shape[0]\n m = m[:, active]\n entropy = entropy[active]\n nr_lt = m.shape[0]\n nr_gt = m.shape[1]\n # m is [num_latents, num_factors]\n\n sorted_m = np.sort(m, axis=0)[::-1]\n individual_mig = 
np.divide(sorted_m[0, :] - sorted_m[1, :], entropy[:])\n print(\"ind mig\", individual_mig)\n mig = np.mean(individual_mig)\n\n if nr_gt == 1:\n nmig = np.max(np.divide(m, entropy[:]))\n else:\n m = np.divide(m, entropy[:])\n partials = np.zeros((nr_gt))\n best_ids = np.argmax(m, axis=0)\n for i in range(nr_gt):\n mask = np.ones((nr_gt), dtype=np.bool)\n mask[i] = 0\n best_id = best_ids[i]\n partials[i] = m[best_id, i] - np.max(m[best_id, mask])\n nmig = np.mean(partials)\n print(\"ind nmig\", partials)\n score_dict[\"discrete_mig\"] = mig\n score_dict[\"discrete_nmig\"] = nmig\n\n return score_dict", "def inverse_mod( a, m ):\r\n\r\n if a < 0 or m <= a: a = a % m\r\n\r\n # From Ferguson and Schneier, roughly:\r\n\r\n c, d = a, m\r\n uc, vc, ud, vd = 1, 0, 0, 1\r\n while c != 0:\r\n q, c, d = divmod( d, c ) + ( c, )\r\n uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc\r\n\r\n # At this point, d is the GCD, and ud*a+vd*m = d.\r\n # If d == 1, this means that ud is a inverse.\r\n\r\n assert d == 1\r\n if ud > 0: return ud\r\n else: return ud + m", "def DM(self):", "def generate_transitional_modifications(self, word: str = \"\") -> list: # noqa: C901\n possible_modifications = []\n\n # Create a list of possible initial words\n word_list = [word]\n\n # Replace Umlaute to handle things like \"H_ä_userschlucht\"\n # If the first letter is an Umlaut, it is not going to be changed\n if 'ä' in word and not word[0].lower() == 'ä':\n tmp_word = word.replace(u'ä', 'a')\n word_list.append(tmp_word)\n\n if 'ö' in word and not word[0].lower() == 'ö':\n tmp_word = word.replace(u'ö', 'o')\n word_list.append(tmp_word)\n\n if 'ü' in word and not word[0].lower() == 'ü':\n tmp_word = word.replace(u'ü', 'u')\n word_list.append(tmp_word)\n\n for word in word_list:\n # Consider the unmodified word lowered and capitalized as possible modifications\n possible_modifications.append(word.lower())\n possible_modifications.append(word.capitalize())\n\n \"\"\"\n M O D I F Y W O R D S\n Noun Rules\n \"\"\"\n # If not last letter is 's'\n # Remove s\n # Remove s, add e\n if word[-1:] == \"s\": #\n # action = [\"-s\"]\n # action2 = [\"-s\", \"+e\"]\n possible_modifications.append(word[:-1].lower())\n possible_modifications.append(word[:-1].lower() + \"e\")\n\n possible_modifications.append(word[:-1].capitalize())\n possible_modifications.append(word[:-1].capitalize() + \"e\")\n\n # If not last letter is 'e'\n # Add e\n if not word[-1:] == \"e\": # Kirch|turm (Kirch) -> (Kirche)\n # action = [\"+e\"]\n possible_modifications.append(word.lower() + \"e\")\n possible_modifications.append(word.capitalize() + \"e\")\n\n # If not last letter is 'n'\n # Add n\n if word[-1:] == \"n\": # Hasen|braten (Hasen) -> (Hase)\n # action = [\"-n\"]\n possible_modifications.append(word[:-1].lower())\n possible_modifications.append(word[:-1].capitalize())\n\n # If last letter IS 'e'\n # Remove e\n if word[-1:] == \"e\": # Hunde|hütte (Hunde) -> (Hund)\n # action = [\"-e\"]\n possible_modifications.append(word[:-1].lower())\n possible_modifications.append(word[:-1].capitalize())\n\n # If word ends on \"en\"\n # Remove \"en\"\n if word[-2:] == \"en\": # Taten|drang (Taten) -> (Tag)\n # action = [\"-en\"]\n possible_modifications.append(word[:-2].lower())\n possible_modifications.append(word[:-2].capitalize())\n\n # If word ends on \"er\"\n # Remove \"er\"\n if word[-2:] == \"er\": # Bücher|Regal (Bücher/Bucher) -> (Büch/Buch)\n # action = [\"-er\"]\n possible_modifications.append(word[:-2].lower())\n 
possible_modifications.append(word[:-2].capitalize())\n\n # If word ends on \"ns\"\n # Remove \"ns\"\n if word[-2:] == \"ns\": # Glaubens|frage (Glaubens) -> (Glaube)\n # action = [\"-ns\"]\n possible_modifications.append(word[:-2].lower())\n possible_modifications.append(word[:-2].capitalize())\n\n # If word ends on \"ens\"\n # Remove \"ens\"\n if word[-3:] == \"ens\": # Herzens|güte (Herzens) -> (Herz)\n # action = [\"-ens\"]\n possible_modifications.append(word[:-3].lower())\n possible_modifications.append(word[:-3].capitalize())\n\n # If ends on \"es\"\n # Remove \"es\"\n if word[-2:] == \"es\": # Kindes|wohl (Kindes) -> (Kind)\n # action = [\"-es\"]\n possible_modifications.append(word[:-2].lower())\n possible_modifications.append(word[:-2].capitalize())\n\n \"\"\"\n Verb Rules\n \"\"\"\n # If word does not end on \"en\" and not on \"e\"\n # Add -en\n if not word[-2:] == \"en\" and not word[-1:] == \"e\":\n # action = [\"+en\"]\n possible_modifications.append(word.lower() + \"en\")\n possible_modifications.append(word.capitalize() + \"en\")\n\n # If word ends on \"en\" PR word ends on \"em\"\n # Add -en, remove -e- in context of n, m)\n # This is totally different to the NOUN rule above\n if word[-2:] == \"en\" or word[-2:] == \"em\":\n # action = [\"+n\", \"+en\"]\n possible_modifications.append(word[:-2].lower() + word[-1:] + \"en\")\n possible_modifications.append(word[:-2].capitalize() + word[-1:] + \"en\")\n\n # If word does not end on \"n\"\n # Add -n\n if not word[-1:] == \"n\":\n # action = [\"+n\"]\n possible_modifications.append(word.lower() + \"n\")\n possible_modifications.append(word.capitalize() + \"n\")\n\n # modification is valid if:\n # - not in stopwords\n # - len > 2\n # - not in forbidden modifier list\n # - in lemma list\n\n possible_modifications = [w for w in possible_modifications if w.lower() not in self.stop_words\n and len(w) > 2\n and str(w) in self.lemma_data]\n\n return possible_modifications", "def tes_mod(self):\r\n x, y = ints('xy')\r\n fn = gof.DualLinker().accept(FunctionGraph([x,y], [x%y])).make_function()\r\n for a,b in ((0,1), (1,1), (0,-1), (1,-1), (-1,-1),\r\n (1,2), (-1,2), (1,-2), (-1,-2),\r\n (5,3), (-5,3), (5,-3), (-5,-3)\r\n ):\r\n self.assertTrue(fn(a,b) == a%b, (a,))", "def mutate_opacity(mutated_genome):\n seed = random.randint(0,2)\n if seed == 0:\n new_opacity(mutated_genome)\n elif seed == 1:\n change_opacity(mutated_genome)\n else: #seed == 2:\n switch_opacities(mutated_genome)\n #else: #seed == 3: # depricated\n # shuffle_opacities(mutated_genome)", "def regular(self):", "def mod_inverse(base, m):\n\n g, x, y = mod_inverse_iterative(base, m)\n if (g != 1):\n return None\n else:\n return (x % m)", "def mod_map(mods, plinkmap):\n modmap = {}\n for chrom in plinkmap:\n if chrom not in modmap:\n modmap[chrom] = []\n markers = plinkmap[chrom]\n modif = mods[chrom]\n for i, m in enumerate(modif):\n if m == 'I':\n p2 = float(markers[i+1][3])\n p1 = float(markers[i-1][3])\n pk = float(markers[i][3])\n g2 = float(markers[i+1][2])\n g1 = float(markers[i-1][2])\n d = (p2 - pk) / (p2 - p1)\n gu = g2 - d*(g2 - g1)\n if g2 == gu:\n gi = str(round((g2 + g1)/2, ndigits=2))\n else:\n gi = str(round(gu, ndigits=2))\n modmar = [markers[i][0], markers[i][1], gi, markers[i][3]]\n elif m == 'J':\n jgpos = marker[i][2] + '1'\n modmar = [markers[i][0], markers[i][1], jgpos, markers[i][3]]\n else:\n modmar = markers[i]\n modmap[chrom].append(modmar)\n return modmap", "def split_nomig_growth(params, ns):\n #7 parameters\t\n nu1, nuA0, nuA, nu2, nu3, T1, T2 = 
params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function for T1\n nuA_func = lambda t: nuA0 * (nuA/nuA0)**(t/T1) \n nu_T1_func = lambda t: [nu1, nuA_func(t)]\n fs.integrate(nu_T1_func, T1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2) \n return fs", "def createInstanceHeader(pcol, path, originalFilename, nr_robots):\n\n needsWildcardExpansion = False\n\n with open(path, \"w\") as fout:\n fout.write(\"\"\"// vim:filetype=c\n/**\n * @file lulu_instance.h\n * @brief Lulu P colony simulator internal structure corresponding to the P colony defined in '%s'.\n * In this header we define the structure of the Pcolony that will power the simulated robot\n * This file was generated automatically by lulu_c.py on %s\n * @author Andrei G. Florea\n * @author Catalin Buiu\n * @date 2016-02-29\n */\n#ifndef LULU_INSTANCE_H\n#define LULU_INSTANCE_H\n\n#include \"lulu.h\" \"\"\" % (originalFilename, time.strftime(\"%d %h %Y at %H:%M\")))\n\n fout.write(\"\\nenum objects {\")\n # extend wildcard objects to _0, _1, ... _n where n = nr_robots\n for a in pcol.A[:]:\n # both $ and $id wildcards need extended objects\n if (\"_W_ALL\" in a or \"_W_ID\" in a):\n needsWildcardExpansion = True\n logging.debug(\"Extending %s wildcarded object\" % a)\n # construct extended object list\n extension = [a.replace(\"W_ID\", \"%d\" % i).replace(\"W_ALL\", \"%d\" % i) for i in range(nr_robots)]\n # if this extension has not been previously added\n if (not set(extension).issubset(set(pcol.A))):\n #add the extetendet object list to the alphabet\n pcol.A.extend(extension)\n\n # sort objects naturally\n pcol.A = natsort.natsorted(pcol.A, key=lambda x: x.replace('_W_ID', '/').replace('_W_ALL', '.'))\n for i, obj in enumerate(pcol.A):\n if (obj in ['e', 'f']):\n continue; # they are already defined in lulu.h\n if (i == 0):\n # NO_OBJECT = 0, OBJECT_ID_E = 1, OBJECT_ID_F = 2\n fout.write(\"\\n OBJECT_ID_%s = 3,\" % obj.upper());\n else:\n fout.write(\"\\n OBJECT_ID_%s,\" % obj.upper());\n\n fout.write(\"\\n};\")\n\n fout.write(\"\\n\\nenum agents {\")\n for i, agent_name in enumerate(pcol.B):\n fout.write(\"\\n AGENT_%s,\" % agent_name.upper());\n\n fout.write(\"\\n};\")\n\n if (needsWildcardExpansion):\n fout.write(\"\"\"\\n#define NEEDING_WILDCARD_EXPANSION //this ensures that the wildcard expansion code is included\"\"\")\n\n if (\"motion\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_MOTION //this ensures that the code associated with the MOTION agent is included in Lulu_kilobot\")\n if (\"led_rgb\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_LED_RGB //this ensures that the code associated with the LED_RGB agent is included in Lulu_kilobot\")\n if (\"msg_distance\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_MSG_DISTANCE //this ensures that the code associated with the MSG_DISTANCE agent is included in Lulu_kilobot\")\n if (\"timer\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_TIMER //this ensures that the code associated with the TIMER agent is included in Lulu_kilobot\")\n\n fout.write(\"\\n\")\n if (\"d_all\" in pcol.A):\n fout.write(\"\"\"\\n#define USING_OBJECT_D_ALL //this ensures that the code associated with processing D_ALL objects is included in Lulu_kilobot\"\"\")\n if (\"d_next\" in pcol.A):\n fout.write(\"\"\"\\n#define USING_OBJECT_D_NEXT //this ensures that the 
code associated with processing D_NEXT objects is included in Lulu_kilobot\"\"\")\n\n # check if using {IN,OUT}_EXTEROCEPTIVE rules (<I=> or <=O>)\n using_in_out_exteroceptive_rules = False\n for agent in pcol.agents.values():\n for program in agent.programs:\n for rule in program:\n if (rule.type == sim.RuleType.in_exteroceptive or rule.type == sim.RuleType.out_exteroceptive or\n rule.alt_type == sim.RuleType.in_exteroceptive or rule.alt_type == sim.RuleType.out_exteroceptive):\n using_in_out_exteroceptive_rules = True\n break;\n if (using_in_out_exteroceptive_rules):\n fout.write(\"\"\"\\n#define USING_IN_OUT_EXTEROCEPTIVE_RULES //this ensures that the code associated with processing IN_EXTEROCEPTIVE (<I=>) or OUT_EXTEROCEPTIVE (<=O>) rules is included in Lulu_kilobot\"\"\")\n\n fout.write(\"\"\"\\n\\n//if building Pcolony simulator for PC\n#ifdef PCOL_SIM\n //define array of names for objects and agents for debug\n extern char* objectNames[];\n extern char* agentNames[];\n#endif\n\n/**\n * @brief The smallest kilo_uid from the swarm (is set in instance.c by lulu_c.py)\n */\nextern const uint16_t smallest_robot_uid;\n\n/**\n * @brief The number of robots that make up the swarm (is set in instance.c by lulu_c.py)\n */\nextern const uint16_t nr_swarm_robots;\"\"\");\n\n fout.write(\"\"\"\\n\\n/**\n * @brief Initialises the pcol object and all of it's components\n *\n * @param pcol The P colony that will be initialized\n */\nvoid lulu_init(Pcolony_t *pcol);\n\n/**\n * @brief Destroys the pcol objects and all of it's components\n *\n * @param pcol The P colony that will be destroyed\n */\nvoid lulu_destroy(Pcolony_t *pcol);\n\n#ifdef NEEDING_WILDCARD_EXPANSION\n /**\n * @brief Expands and replaces wildcarded objects with the appropriate objects\n * Objects that end with _W_ID are replaced with _i where i is the the id of the robot, provided with my_id parameter\n *\n * @param pcol The pcolony where the replacements will take place\n * @param my_id The kilo_uid of the robot\n * @return The symbolic id that corresponds to this robot (my_id - smallest_robot_uid)\n */\n uint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id);\n#endif\n#endif\"\"\")", "def do_one_mod(self, names: List[str], infer: bool, exec_: bool, conf: dict):\n\n p = lambda: Progress(\n TextColumn(\"[progress.description]{task.description}\", justify=\"right\"),\n BarColumn(bar_width=None),\n \"[progress.percentage]{task.percentage:>3.1f}%\",\n \"[progress.completed]{task.completed} / {task.total}\",\n TimeElapsedColumn(),\n )\n # step one collect all the modules instances we want to analyse.\n\n modules = []\n for name in names:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # step 2 try to guess the version number from the top module.\n version = getattr(modules[0], \"__version__\", \"???\")\n\n root = names[0].split(\".\")[0]\n module_conf = conf.get(root, {})\n examples_folder = module_conf.get(\"examples_folder\", None)\n print(\"EF\", examples_folder)\n if examples_folder is not None:\n examples_folder = Path(examples_folder).expanduser()\n examples_data = self.collect_examples(examples_folder)\n for edoc, figs in examples_data:\n self.examples.update(\n {k: json.dumps(v.to_json()) for k, v in edoc.items()}\n )\n for name, data in figs:\n print(\"put one fig\", name)\n self.put_raw(name, data)\n print(\"Configuration:\", json.dumps(module_conf, indent=2))\n self.root = root\n self.version = version\n subs = module_conf.get(\"submodules\", [])\n 
extra_from_conf = [root + \".\" + s for s in subs]\n for name in extra_from_conf:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # print(modules)\n\n collector = DFSCollector(modules[0], modules[1:])\n collected: Dict[str, Any] = collector.items()\n\n # collect all items we want to document.\n for qa, item in collected.items():\n if (nqa := full_qual(item)) != qa:\n print(\"after import qa differs : {qa} -> {nqa}\")\n if collected[nqa] == item:\n print(\"present twice\")\n del collected[nqa]\n else:\n print(\"differs: {item} != {other}\")\n\n for target in module_conf.get(\"exclude\", []):\n print(\"exclude tgt:\", target)\n del collected[target]\n # p = nullcontext\n with p() as p2:\n\n # just nice display of progression.\n taskp = p2.add_task(description=\"parsing\", total=len(collected))\n\n for qa, target_item in collected.items():\n short_description = (qa[:19] + \"..\") if len(qa) > 21 else qa\n p2.update(taskp, description=short_description.ljust(17))\n p2.advance(taskp)\n item_docstring = target_item.__doc__\n\n # TODO: we may not want tosip items as they may have children\n # right now keep modules, but we may want to keep classes if\n # they have documented descendants.\n\n if item_docstring is None and not isinstance(target_item, ModuleType):\n continue\n elif item_docstring is None and isinstance(target_item, ModuleType):\n item_docstring = \"\"\"This module has no documentation\"\"\"\n\n # progress.console.print(qa)\n try:\n if tsparse is None:\n print(\n \"please see how to install Tree-sitter in the readme to parse complex RST documents\"\n )\n arbitrary = tsparse(dedent_but_first(item_docstring).encode())\n except Exception as e:\n print(f\"TS could not parse: {qa}\")\n raise ValueError(f\"from {qa}\") from e\n arbitrary = []\n # raise\n try:\n ndoc = NumpyDocString(dedent_but_first(item_docstring))\n except Exception:\n if not isinstance(target_item, ModuleType):\n p2.console.print(\n \"Unexpected error parsing\",\n target_item,\n target_item.__name__,\n )\n if isinstance(target_item, ModuleType):\n # from .take2 import main\n # main(item_docstring)\n ndoc = NumpyDocString(\n f\"Was not able to parse docstring for {qa}\"\n )\n else:\n continue\n if not isinstance(target_item, ModuleType):\n arbitrary = []\n execute_exclude_patterns = module_conf.get(\n \"execute_exclude_patterns\", None\n )\n ex = exec_\n if execute_exclude_patterns and exec_:\n for pat in execute_exclude_patterns:\n if qa.startswith(pat):\n ex = False\n break\n # else:\n # print(\"will run\", qa)\n\n try:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, ex, qa, config=module_conf\n )\n doc_blob.arbitrary = arbitrary\n except Exception:\n raise\n if module_conf.get(\"exec_failure\", None) == \"fallback\":\n print(\"Re-analysing \", qa, \"without execution\")\n # debug:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, False, qa, config=module_conf\n )\n doc_blob.aliases = collector.aliases[qa]\n\n # processing....\n doc_blob.signature = doc_blob.content.pop(\"Signature\")\n try:\n for section in [\"Extended Summary\", \"Summary\", \"Notes\", \"Warnings\"]:\n if section in doc_blob.content:\n if data := doc_blob.content[section]:\n PX = P2(data)\n doc_blob.content[section] = Section(PX)\n else:\n doc_blob.content[section] = Section()\n except Exception as e:\n raise type(e)(f\"during {qa}\")\n\n doc_blob.references = doc_blob.content.pop(\"References\")\n if isinstance(doc_blob.references, str):\n if 
doc_blob.references == \"\":\n doc_blob.references = None\n else:\n assert False\n doc_blob.references = list(doc_blob.references)\n assert (\n isinstance(doc_blob.references, list) or doc_blob.references is None\n )\n del doc_blob.content[\"Examples\"]\n del doc_blob.content[\"index\"]\n sections_ = [\n \"Parameters\",\n \"Returns\",\n \"Raises\",\n \"Yields\",\n \"Attributes\",\n \"Other Parameters\",\n \"Warns\",\n ##\"Warnings\",\n \"Methods\",\n # \"Summary\",\n \"Receives\",\n ]\n from .take2 import Param\n\n # new_doc_blob._content[\"Parameters\"] = [\n # Parameter(a, b, c)\n # for (a, b, c) in new_doc_blob._content.get(\"Parameters\", [])\n # ]\n\n for s in sections_:\n if s in doc_blob.content:\n assert isinstance(\n doc_blob.content[s], list\n ), f\"{s}, {doc_blob.content[s]} \"\n new_content = Section()\n for param, type_, desc in doc_blob.content[s]:\n assert isinstance(desc, list)\n items = []\n if desc:\n items = P2(desc)\n new_content.append(Param(param, type_, items))\n doc_blob.content[s] = new_content\n\n doc_blob.see_also = []\n if see_also := doc_blob.content.get(\"See Also\", None):\n for nts, d0 in see_also:\n try:\n d = d0\n for (name, type_or_description) in nts:\n if type_or_description and not d:\n desc = type_or_description\n if isinstance(desc, str):\n desc = [desc]\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n type_ = None\n else:\n desc = d0\n type_ = type_or_description\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n\n sai = SeeAlsoItem(Ref(name, None, None), desc, type_)\n doc_blob.see_also.append(sai)\n del desc\n del type_\n except Exception as e:\n raise ValueError(\n f\"Error {qa}: {see_also=} | {nts=} | {d0=}\"\n ) from e\n del doc_blob.content[\"See Also\"]\n\n for k, v in doc_blob.content.items():\n assert isinstance(v, Section), f\"{k} is not a section {v}\"\n # end processing\n\n self.put(qa, json.dumps(doc_blob.to_json(), indent=2))\n for name, data in figs:\n self.put_raw(name, data)\n\n found = {}\n not_found = []\n for k, v in collector.aliases.items():\n if [item for item in v if item != k]:\n if shorter := find_cannonical(k, v):\n found[k] = shorter\n else:\n not_found.append((k, v))\n\n if logo := module_conf.get(\"logo\", None):\n self.put_raw(\"logo.png\", Path(logo).read_bytes())\n self.metadata = {\n \"version\": version,\n \"logo\": \"logo.png\",\n \"aliases\": found,\n \"module\": root,\n }", "def modality(v):\n \n s = st.skew(vel, bias=False)\n k = st.kurtosis(vel, bias=False)\n m = (1+s**2)/(3+k**2)\n return s, k, m", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def pulp_smash():", "def mezclar_bolsa(self):", "def mutate_monster(generikmon):\r\n childmon = generikmon.copy()\r\n for ikey, ival in 
childmon.items():\r\n childmon[ikey] = childmon[ikey] * (1 + randint(-10, 10)/100)\r\n # ival not used in this case\r\n return childmon", "def __init__(self, n): # this is equivalent to starting a random one\n self.n = n\n # From table S1 in the supplemental materials\n # each c parameters is [body,limb]\n self.cv0 = [0.3, 0.0]\n self.cv1 = [0.2, 0.2]\n self.cR0 = [0.196,0.131]\n self.cR1 = [0.065,0.131]\n #[[dbodylow,dbodyhigh],[dlimblow,dlimbhigh]]\n self.d_params = [[1,5],[1,3]]\n # which oscillators are limb oscillators and which ones are body oscillators is pretty constant\n n_body = n - 4\n self.osc_class = [0 if i < n_body else 1 for i in range(self.n)] # 0 for body oscillator, 1 for limb oscillator\n # list of keys that can be mutated during evolution\n self.evolvables = ['w', 'phi', 'a', 'gsl', 'gsh', 'gb1', 'gb2', 'theta', 'ampl', 'ampl_dot']\n self.scalars = set(['gsl', 'gsh', 'gb1', 'gb2'])\n self.nonzeros = set([int(i) for i in \"8 160 29 181 50 202 71 223 92 244 113 265 134 286 155 307 1 20 22 41 43 62 64 83 85 104 106 125 127 146 169 188 190 209 211 230 232 251 253 272 274 293 295 314 320 321 322 323 364 365 366 367 348 349 350 351 392 393 394 395 338 376 337 356 359 397 379 398\".split(\" \")])\n self.shapes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 'theta':n,\n 'ampl':n,\n 'ampl_dot':n}\n self.sizes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 'theta':n,\n 'ampl':n,\n 'ampl_dot':n}" ]
[ "0.6405175", "0.58857876", "0.58554703", "0.5850018", "0.58353317", "0.5766119", "0.5741885", "0.54117924", "0.53281623", "0.5319941", "0.5288762", "0.5186005", "0.516735", "0.515262", "0.5149235", "0.51382715", "0.51001173", "0.50766313", "0.50506675", "0.50342005", "0.50274074", "0.5018293", "0.5011716", "0.49999815", "0.49743754", "0.49681878", "0.49676415", "0.49631327", "0.49228647", "0.49207047", "0.49055332", "0.4899006", "0.4884149", "0.48721015", "0.48545527", "0.4851883", "0.48508888", "0.48455113", "0.48184037", "0.48182175", "0.48165268", "0.4811617", "0.4809831", "0.48050812", "0.48042902", "0.4801807", "0.47870138", "0.47829577", "0.47817725", "0.47739866", "0.47739428", "0.47737706", "0.47710088", "0.47692427", "0.47584313", "0.47468156", "0.474391", "0.47360566", "0.47356096", "0.4731327", "0.4726984", "0.4716338", "0.47097778", "0.4707593", "0.47048342", "0.47021344", "0.47017106", "0.4693963", "0.4688144", "0.46871534", "0.46831745", "0.4678884", "0.46775237", "0.4677234", "0.4674338", "0.46667165", "0.466459", "0.46581733", "0.465754", "0.465445", "0.46529323", "0.4652776", "0.46508378", "0.46411034", "0.46400824", "0.46394533", "0.46344835", "0.46321532", "0.4626335", "0.46224022", "0.4622157", "0.4621759", "0.46193546", "0.46185014", "0.46167684", "0.461506", "0.46005508", "0.45911857", "0.45909896", "0.45823961" ]
0.4608002
96
This was my first implementation of modularity using Eric's definition in his paper.
def get_modularity_other_a(A, cluster_indices):
    # define the number of nodes in the graph and the number of clusters
    n = len(cluster_indices)
    nclusters = max(cluster_indices) + 1
    # initialize some intermediate variables
    within_cluster = [0] * nclusters
    between_cluster = [0] * nclusters
    volume = 0
    # calculate the intermediate variables
    # i and j are node indices
    # a and b are cluster indices
    for i in range(n-1):
        a = cluster_indices[i]
        for j in range(i+1, n):
            b = cluster_indices[j]
            weight = A[i][j]
            volume += weight
            if a == b:
                within_cluster[a] += weight
            else:
                between_cluster[a] += weight
                between_cluster[b] += weight
    # get the modularity from the intermediate variables
    modularity = 0
    for within, between in zip(within_cluster, between_cluster):
        modularity += within/volume - ((within+between) / volume)**2
    return modularity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modules():", "def test_versioned_symbols(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, a, b, alpha: float):\n no_alpha = torch._test_serialization_subcmul(a, b)\n with_alpha = torch._test_serialization_subcmul(a, b, alpha)\n return no_alpha, with_alpha\n\n def historic_subcmul(a, b, alpha=2):\n return b - alpha * a\n\n def current_subcmul(a, b, alpha=1):\n return a - alpha * b\n\n # Loads and verifies the historic behavior of the module\n # that was serialized with version 2\n module_v2 = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/_test_serialization_subcmul_v2.pt\")\n a = torch.randn((5,))\n b = torch.randn((5,))\n alpha = random.random()\n args = (a, b, alpha)\n no_alpha_v2, with_alpha_v2 = module_v2(*args)\n self.assertEqual(no_alpha_v2, historic_subcmul(a, b))\n self.assertEqual(with_alpha_v2, historic_subcmul(*args))\n\n # Scripts, saves, loads and verifies the current behavior of the module\n scripted_module = torch.jit.script(MyModule())\n buffer = io.BytesIO()\n torch.jit.save(scripted_module, buffer)\n buffer.seek(0)\n module_current = torch.jit.load(buffer)\n no_alpha_current, with_alpha_current = module_current(*args)\n self.assertEqual(no_alpha_current, current_subcmul(a, b))\n self.assertEqual(with_alpha_current, current_subcmul(*args))", "def main():\n modtype = input( \"test iterative (i) or recursive (r) module: \" )\n if modtype.lower().strip() == \"i\":\n import myListIter \n listmodule = myListIter\n print( 'iter' )\n elif modtype.lower().strip() == \"r\":\n import myListRec \n listmodule = myListRec\n print( 'rec' )\n else:\n print( \"Please enter 'i' or 'r' to test iterative/recursive library.\" )\n return\n testAppendAndToString( listmodule )\n testClear( listmodule )\n testInsert( listmodule )\n testGet( listmodule )\n testSet( listmodule )\n testPop( listmodule )\n testIndex( listmodule )\n testCursor( listmodule ) \n\n #testClone( listmodule )\n #testExtend( listmodule )\n testRemove( listmodule )\n testCount( listmodule )\n testPyListToMyList( listmodule )\n testMyListToPyList( listmodule )\n print()", "def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))", "def findModule(name):", "def require():", "def __init__(self, name):\r\n super(Module, self).__init__()\r\n self.name = name", "def calculate_modularity(modules, degree_table, edges, num_of_edges):\n modularity = 0.0\n for module in modules:\n modularity += calculate_q(module, degree_table, edges, num_of_edges)\n\n return modularity / (2.0 * num_of_edges)", "def require(name):", "def factory(module, name):\n\n class DummyClass(object):\n \"\"\"\n _DummyClass_\n Dummy class to return when a cms class cannot be imported \n\n \"\"\"\n def __init__(self, module, name='', *args, **kwargs):\n self.__module = module\n self.__name = name\n self.__d = dict()\n\n def __setitem__(self, key, value):\n self.__d[key] = value\n\n def __getitem__(self, item):\n return self.__d[item]\n\n def __call__(self, *args, **kwargs):\n pass\n\n def __repr__(self):\n return \"{module}.{name}\".format(module=self.__module, name=self.__name)\n\n return DummyClass", "def test_mod():\r\n x, y = fscalars('xy')\r\n fn = gof.DualLinker().accept(\r\n gof.FunctionGraph([x, y], [x % y])).make_function()\r\n for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),\r\n (1, 2), (-1, 2), (1, -2), (-1, -2),\r\n (5, 3), (-5, 3), (5, -3), (-5, -3)\r\n ):\r\n assert fn(a, b) == a % b, (a,)", "def public(f):\n all = 
sys.modules[f.__module__].__dict__.setdefault('__all__', [])\n if f.__name__ not in all: # Prevent duplicates if run from an IDE.\n all.append(f.__name__)\n return f", "def test_get_module_name_alternate(self):\n target = Mock(['__name__'])\n target.__name__ = 'hello'\n self.assertEqual('hello', reloading.get_module_name(target))", "def test_shared_method(self):\r\n\r\n m1=Module()\r\n m1.x=T.dscalar()\r\n x=T.dscalar()\r\n fy=Method(x,x*2)\r\n fz=Method([],m1.x*2)\r\n m1.y=fy\r\n m1.z=fz\r\n m1.ly=[fy]\r\n m1.lz=[fz]\r\n m1.lly=[[fy]]\r\n m1.llz=[[fz]]\r\n m1.ty=(fy,)\r\n m1.tz=(fz,)\r\n m1.tty=((fy,),)\r\n m1.ttz=((fz,),)\r\n m1.dy={'y':fy}\r\n m1.dz={'z':fz}\r\n\r\n inst=m1.make()\r\n inst.x=1\r\n assert inst.y(2)==4\r\n assert inst.z()==2\r\n assert inst.ly[0](2)==4\r\n assert inst.lz[0]()==2\r\n assert inst.ty[0](2)==4\r\n assert inst.tz[0]()==2\r\n assert inst.dy['y'](2)==4\r\n assert inst.dz['z']()==2\r\n assert inst.lly[0][0](2)==4\r\n assert inst.llz[0][0]()==2\r\n assert inst.tty[0][0](2)==4\r\n assert inst.ttz[0][0]()==2\r\n assert isinstance(inst.z,theano.compile.function_module.Function)\r\n assert isinstance(inst.lz[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.llz[0][0],theano.compile.function_module.Function)\r\n assert isinstance(inst.tz[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.dz['z'],theano.compile.function_module.Function)\r\n assert isinstance(inst.ttz[0][0],theano.compile.function_module.Function)\r\n assert isinstance(inst.y,theano.compile.function_module.Function)\r\n assert isinstance(inst.ly[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.lly[0][0],theano.compile.function_module.Function)\r\n assert isinstance(inst.ty[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.dy['y'],theano.compile.function_module.Function)\r\n assert isinstance(inst.tty[0][0],theano.compile.function_module.Function)\r\n\r\n\r\n assert m1.y is m1.ly[0]\r\n assert inst.y is inst.ly[0]\r\n assert inst.y is inst.lly[0][0]\r\n assert inst.y is inst.ty[0]\r\n assert inst.y is inst.tty[0][0]\r\n assert inst.y is inst.dy['y']", "def derive_mod_name(self):\n\n # a) if we're lucky, this is a Fomod install w/ a modname attr\n # TODO: some non-Fomod mods still include an \"info.xml\" file\n if self.has_fomod:\n fname = self.fomod.modname.name\n # fix: the fomod name often includes a version number on the end (like \"Soul Gem Things v1.4.5\")\n vmatch = _version_format.search(fname)\n if vmatch:\n fname = fname[:vmatch.start()].strip()\n\n print(\"fomod found:\")\n print(\" orig:\", self.fomod.modname.name)\n print(\" name:\", fname)\n\n # return self.fomod.modname.name\n return fname\n\n # if not, we'll have to get clever\n\n # b) if the mod includes esp/bsa/etc. 
files, they're often\n # labeled with the mod's \"real\" name\n bname = os.path.basename\n split = os.path.splitext\n\n # check top 2 levels\n # accumulate names\n _names = []\n ext_re = re.compile(r\".*\\.(es[pm]|bsa)$\")\n for f in filter(lambda s: ext_re.search(s.lower()),\n self.archive_files):\n # if re.search(r\".*\\.(es[pm]|bsa)$\", f.lower()):\n _names.append(split(bname(f))[0])\n\n print(f\"names from esp/bsa ({len(_names)}):\")\n for n in _names:\n print(f\" {n}\")\n\n # c) see if we can figure it out from the archive name;\n # try to ignore the version numbers\n archive_name = self.arc_path.stem\n\n # archives downloaded from the nexus generally have\n # the mod name, then a hyphen followed by the modid, then\n # (optionally) another hyphen and version info\n m = _nexus_archive_name_format.match(archive_name)\n\n if m:\n name = m['name']\n\n # TODO: if we can get the modid, we should be able to look up the mod info on the nexus...though that would of course require writing an async web-request module...\n modid = m['modid']\n ver = m['version']\n\n if name:\n # ==> eventually, this should pull the name from the nexus\n\n # sometimes there's some extra stuff like (redundant)\n # version info on the end of the name\n exm = _extra_stuff.search(name)\n if exm:\n name = name[:exm.start()]\n\n if ver:\n ver = ver.replace(\"-\", \".\")\n\n print(\"Derived from archive name:\")\n print(\" name:\", name)\n print(\" modid:\", modid)\n print(\" version:\", ver)\n return name\n\n return \"\"", "def _process_modulename(self,modulename):\n\t\tif modulename:\n\t\t\tif modulename in modules.keys():\n\t\t\t\traise NameError(\"Module name has already been used in this instance of Python.\")\n\t\t\tself._modulename = modulename\n\t\telse:\n\t\t\twhile self._modulename in modules.keys():\n\t\t\t\tself._modulename = count_up(self._modulename)\n\t\t\n\t\tmodulefile = self._tmpfile(self._modulename + \".so\")\n\t\tif path.isfile(modulefile):\n\t\t\traise OSError(\"Module file already exists.\")", "def MODULES(self):\n pass", "def calc_pos_mod(nmodule):\n pass", "def test_shared_members(self):\r\n\r\n def populate_module(m,x):\r\n m.x=x\r\n m.lx=[x]\r\n m.llx=[[x],[x]]\r\n m.ltx=[(x,)]\r\n m.ldx=[{'x':x}]\r\n m.tx=(x,)\r\n m.tlx=([x],)\r\n m.ttx=((x,),)\r\n m.tdx=({'x':x},)\r\n m.dx={'x':x}\r\n m.dlx={'x':[x]}\r\n m.dtx={'x':(x,)}\r\n m.ddx={'x':{'x':x}}\r\n\r\n def get_element(i):\r\n return [i.x,i.lx[0],i.tx[0],i.dx['x'],i.llx[0][0], i.llx[1][0], i.ltx[0][0], i.ldx[0]['x'], i.tlx[0][0], i.tlx[0][0], i.tdx[0]['x'], i.dlx['x'][0], i.dtx['x'][0], i.ddx['x']['x']]\r\n\r\n m1=Module()\r\n m2=Module()\r\n x=T.dscalar()\r\n populate_module(m1,x)\r\n populate_module(m2,x)\r\n #m1.x and m2.x should not be shared as their is no hierarchi link between them.\r\n inst1=m1.make()\r\n inst2=m2.make()\r\n m1.m2=m2\r\n #m1.x and m2.x should be shared as their is a hierarchi link between them.\r\n inst3=m1.make()\r\n inst1.x=1\r\n inst2.x=2\r\n inst3.x=3\r\n for f in get_element(inst1):\r\n assert f==1\r\n for f in get_element(inst2):\r\n assert f==2\r\n for f in get_element(inst3)+get_element(inst3.m2):\r\n assert f==3\r\n\r\n inst3.m2.x=4\r\n for f in get_element(inst3)+get_element(inst3.m2):\r\n assert f==4", "def __init__(self, name):\n super(Module, self).__init__()\n self.name = name", "def test_shared_members_N(self):\r\n def populate_module(m,x):\r\n m.x=x\r\n m.lx=[x]\r\n m.llx=[[x],[x]]\r\n m.ltx=[(x,)]\r\n m.ldx=[{'x':x}]\r\n m.tx=(x,)\r\n m.tlx=([x],)\r\n m.ttx=((x,),)\r\n m.tdx=({'x':x},)\r\n 
m.dx={'x':x}\r\n m.dlx={'x':[x]}\r\n m.dtx={'x':(x,)}\r\n m.ddx={'x':{'x':x}}\r\n\r\n def get_element(i):\r\n return [i.x,i.lx[0],i.tx[0],i.dx['x'],i.llx[0][0], i.llx[1][0], i.ltx[0][0], i.ldx[0]['x'], i.tlx[0][0], i.tlx[0][0], i.tdx[0]['x'], i.dlx['x'][0], i.dtx['x'][0], i.ddx['x']['x']]\r\n m1=Module()\r\n m2=Module()\r\n m3=Module()\r\n m4=Module()\r\n x=T.dscalar()\r\n populate_module(m1,x)\r\n populate_module(m2,(x))\r\n populate_module(m4,(x))\r\n #m1.x and m2.x should not be shared as their is no hierarchi link between them.\r\n inst1=m1.make()\r\n inst2=m2.make()\r\n m1.m2=m2\r\n m2.m3=m3\r\n m3.m4=m4\r\n #m1.x and m2.x should be shared as their is a hierarchi link between them.\r\n inst3=m1.make()\r\n inst1.x=1\r\n inst2.x=2\r\n inst3.x=3\r\n for f in get_element(inst1):\r\n assert f==1\r\n for f in get_element(inst2):\r\n assert f==2\r\n for f in get_element(inst3)+get_element(inst3.m2)+get_element(inst3.m2.m3.m4):\r\n assert f==3\r\n\r\n inst3.m2.x=4\r\n for f in get_element(inst3)+get_element(inst3.m2)+get_element(inst3.m2.m3.m4):\r\n assert f==4", "def _createModuleObj(self):\n ModuleTimeWeakening.__init__(self)\n return", "def __init__(self):\n self.modules = {}", "def getObjectsDefinedIn(modulename, directory=None):\n\n if directory:\n searchpath = [directory]\n else:\n searchpath = sys.path # searches usual Python path\n\n #might be a package. If so, check the top level\n #package is there, then recalculate the path needed\n words = modulename.split('.')\n if len(words) > 1:\n packagename = words[0]\n packagefound = imp.find_module(packagename, searchpath)\n assert packagefound, \"Package %s not found\" % packagename\n (file, packagepath, description) = packagefound\n #now the full path should be known, if it is in the\n #package\n\n directory = os.path.join(*([packagepath] + words[1:-1]))\n modulename = words[-1]\n searchpath = [directory]\n\n\n\n #find and import the module.\n found = imp.find_module(modulename, searchpath)\n assert found, \"Module %s not found\" % modulename\n (file, pathname, description) = found\n mod = imp.load_module(modulename, file, pathname, description)\n\n #grab the code too, minus trailing newlines\n lines = open(pathname, 'r').readlines()\n lines = list(map(str.rstrip, lines))\n\n result = Struct()\n result.functions = []\n result.classes = []\n result.doc = mod.__doc__\n for name in dir(mod):\n value = getattr(mod, name)\n if type(value) is types.FunctionType:\n path, file = os.path.split(value.__code__.co_filename)\n root, ext = os.path.splitext(file)\n #we're possibly interested in it\n if root == modulename:\n #it was defined here\n funcObj = value\n fn = Struct()\n fn.name = name\n fn.proto = getFunctionPrototype(funcObj, lines)\n if funcObj.__doc__:\n fn.doc = dedent(funcObj.__doc__)\n else:\n fn.doc = '(no documentation string)'\n #is it official?\n if name[0:1] == '_':\n fn.status = 'private'\n elif name[-1] in '0123456789':\n fn.status = 'experimental'\n else:\n fn.status = 'official'\n\n result.functions.append(fn)\n elif type(value) == type:\n if value.__module__ == modulename:\n cl = Struct()\n cl.name = name\n if value.__doc__:\n cl.doc = dedent(value.__doc__)\n else:\n cl.doc = \"(no documentation string)\"\n\n cl.bases = []\n for base in value.__bases__:\n cl.bases.append(base.__name__)\n if name[0:1] == '_':\n cl.status = 'private'\n elif name[-1] in '0123456789':\n cl.status = 'experimental'\n else:\n cl.status = 'official'\n\n cl.methods = []\n #loop over dict finding methods defined here\n # Q - should we show all methods?\n 
# loop over dict finding methods defined here\n items = list(value.__dict__.items())\n items.sort()\n for (key2, value2) in items:\n if type(value2) != types.FunctionType:\n continue # not a method\n elif os.path.splitext(value2.__code__.co_filename)[0] == modulename:\n continue # defined in base class\n else:\n #we want it\n meth = Struct()\n meth.name = key2\n name2 = value2.__code__.co_name\n meth.proto = getFunctionPrototype(value2, lines)\n if name2!=key2:\n meth.doc = 'pointer to '+name2\n meth.proto = meth.proto.replace(name2,key2)\n else:\n if value2.__doc__:\n meth.doc = dedent(value2.__doc__)\n else:\n meth.doc = \"(no documentation string)\"\n #is it official?\n if key2[0:1] == '_':\n meth.status = 'private'\n elif key2[-1] in '0123456789':\n meth.status = 'experimental'\n else:\n meth.status = 'official'\n cl.methods.append(meth)\n result.classes.append(cl)\n return result", "def avail(self,pattern=str):\t\n import re\n\n availmods = []\n avail_out = self._modulecmd(\"\"\"%s python avail %s\"\"\" % (self.modulecmd, pattern)).decode('utf-8')\n if avail_out.strip() == '':\n return availmods\n alines = [str(x) for x in avail_out.strip().splitlines()]\n repo = None\n top_insert = 0 # keep track of the head based on each time repo changes\n for aline in alines:\n if aline.strip() == '':\n repo = None\n continue\n try:\n repo = re.match(r'^-+\\s*([^-]+)\\s*-+\\s*$', aline).group(1)\n top_insert = len(availmods)\n continue\n except AttributeError:\n pass \n if repo:\n for tmpmod in aline.split():\n fullpath = os.path.join(repo, tmpmod)\n if tmpmod.lower().endswith(\"(default)\"):\n tmpmod = re.sub(r'(?i)\\(default\\)$', '', tmpmod)\n availmods.insert(\n top_insert, (\n tmpmod,\n fullpath\n )\n )\n else:\n availmods.append((tmpmod, fullpath))\n return availmods", "def module() -> str:\n return \"summarize\"", "def do_one_mod(self, names: List[str], infer: bool, exec_: bool, conf: dict):\n\n p = lambda: Progress(\n TextColumn(\"[progress.description]{task.description}\", justify=\"right\"),\n BarColumn(bar_width=None),\n \"[progress.percentage]{task.percentage:>3.1f}%\",\n \"[progress.completed]{task.completed} / {task.total}\",\n TimeElapsedColumn(),\n )\n # step one collect all the modules instances we want to analyse.\n\n modules = []\n for name in names:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # step 2 try to guess the version number from the top module.\n version = getattr(modules[0], \"__version__\", \"???\")\n\n root = names[0].split(\".\")[0]\n module_conf = conf.get(root, {})\n examples_folder = module_conf.get(\"examples_folder\", None)\n print(\"EF\", examples_folder)\n if examples_folder is not None:\n examples_folder = Path(examples_folder).expanduser()\n examples_data = self.collect_examples(examples_folder)\n for edoc, figs in examples_data:\n self.examples.update(\n {k: json.dumps(v.to_json()) for k, v in edoc.items()}\n )\n for name, data in figs:\n print(\"put one fig\", name)\n self.put_raw(name, data)\n print(\"Configuration:\", json.dumps(module_conf, indent=2))\n self.root = root\n self.version = version\n subs = module_conf.get(\"submodules\", [])\n extra_from_conf = [root + \".\" + s for s in subs]\n for name in extra_from_conf:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # print(modules)\n\n collector = DFSCollector(modules[0], modules[1:])\n collected: Dict[str, Any] = collector.items()\n\n # collect all items we 
want to document.\n for qa, item in collected.items():\n if (nqa := full_qual(item)) != qa:\n print(\"after import qa differs : {qa} -> {nqa}\")\n if collected[nqa] == item:\n print(\"present twice\")\n del collected[nqa]\n else:\n print(\"differs: {item} != {other}\")\n\n for target in module_conf.get(\"exclude\", []):\n print(\"exclude tgt:\", target)\n del collected[target]\n # p = nullcontext\n with p() as p2:\n\n # just nice display of progression.\n taskp = p2.add_task(description=\"parsing\", total=len(collected))\n\n for qa, target_item in collected.items():\n short_description = (qa[:19] + \"..\") if len(qa) > 21 else qa\n p2.update(taskp, description=short_description.ljust(17))\n p2.advance(taskp)\n item_docstring = target_item.__doc__\n\n # TODO: we may not want tosip items as they may have children\n # right now keep modules, but we may want to keep classes if\n # they have documented descendants.\n\n if item_docstring is None and not isinstance(target_item, ModuleType):\n continue\n elif item_docstring is None and isinstance(target_item, ModuleType):\n item_docstring = \"\"\"This module has no documentation\"\"\"\n\n # progress.console.print(qa)\n try:\n if tsparse is None:\n print(\n \"please see how to install Tree-sitter in the readme to parse complex RST documents\"\n )\n arbitrary = tsparse(dedent_but_first(item_docstring).encode())\n except Exception as e:\n print(f\"TS could not parse: {qa}\")\n raise ValueError(f\"from {qa}\") from e\n arbitrary = []\n # raise\n try:\n ndoc = NumpyDocString(dedent_but_first(item_docstring))\n except Exception:\n if not isinstance(target_item, ModuleType):\n p2.console.print(\n \"Unexpected error parsing\",\n target_item,\n target_item.__name__,\n )\n if isinstance(target_item, ModuleType):\n # from .take2 import main\n # main(item_docstring)\n ndoc = NumpyDocString(\n f\"Was not able to parse docstring for {qa}\"\n )\n else:\n continue\n if not isinstance(target_item, ModuleType):\n arbitrary = []\n execute_exclude_patterns = module_conf.get(\n \"execute_exclude_patterns\", None\n )\n ex = exec_\n if execute_exclude_patterns and exec_:\n for pat in execute_exclude_patterns:\n if qa.startswith(pat):\n ex = False\n break\n # else:\n # print(\"will run\", qa)\n\n try:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, ex, qa, config=module_conf\n )\n doc_blob.arbitrary = arbitrary\n except Exception:\n raise\n if module_conf.get(\"exec_failure\", None) == \"fallback\":\n print(\"Re-analysing \", qa, \"without execution\")\n # debug:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, False, qa, config=module_conf\n )\n doc_blob.aliases = collector.aliases[qa]\n\n # processing....\n doc_blob.signature = doc_blob.content.pop(\"Signature\")\n try:\n for section in [\"Extended Summary\", \"Summary\", \"Notes\", \"Warnings\"]:\n if section in doc_blob.content:\n if data := doc_blob.content[section]:\n PX = P2(data)\n doc_blob.content[section] = Section(PX)\n else:\n doc_blob.content[section] = Section()\n except Exception as e:\n raise type(e)(f\"during {qa}\")\n\n doc_blob.references = doc_blob.content.pop(\"References\")\n if isinstance(doc_blob.references, str):\n if doc_blob.references == \"\":\n doc_blob.references = None\n else:\n assert False\n doc_blob.references = list(doc_blob.references)\n assert (\n isinstance(doc_blob.references, list) or doc_blob.references is None\n )\n del doc_blob.content[\"Examples\"]\n del doc_blob.content[\"index\"]\n sections_ = [\n \"Parameters\",\n \"Returns\",\n \"Raises\",\n 
\"Yields\",\n \"Attributes\",\n \"Other Parameters\",\n \"Warns\",\n ##\"Warnings\",\n \"Methods\",\n # \"Summary\",\n \"Receives\",\n ]\n from .take2 import Param\n\n # new_doc_blob._content[\"Parameters\"] = [\n # Parameter(a, b, c)\n # for (a, b, c) in new_doc_blob._content.get(\"Parameters\", [])\n # ]\n\n for s in sections_:\n if s in doc_blob.content:\n assert isinstance(\n doc_blob.content[s], list\n ), f\"{s}, {doc_blob.content[s]} \"\n new_content = Section()\n for param, type_, desc in doc_blob.content[s]:\n assert isinstance(desc, list)\n items = []\n if desc:\n items = P2(desc)\n new_content.append(Param(param, type_, items))\n doc_blob.content[s] = new_content\n\n doc_blob.see_also = []\n if see_also := doc_blob.content.get(\"See Also\", None):\n for nts, d0 in see_also:\n try:\n d = d0\n for (name, type_or_description) in nts:\n if type_or_description and not d:\n desc = type_or_description\n if isinstance(desc, str):\n desc = [desc]\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n type_ = None\n else:\n desc = d0\n type_ = type_or_description\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n\n sai = SeeAlsoItem(Ref(name, None, None), desc, type_)\n doc_blob.see_also.append(sai)\n del desc\n del type_\n except Exception as e:\n raise ValueError(\n f\"Error {qa}: {see_also=} | {nts=} | {d0=}\"\n ) from e\n del doc_blob.content[\"See Also\"]\n\n for k, v in doc_blob.content.items():\n assert isinstance(v, Section), f\"{k} is not a section {v}\"\n # end processing\n\n self.put(qa, json.dumps(doc_blob.to_json(), indent=2))\n for name, data in figs:\n self.put_raw(name, data)\n\n found = {}\n not_found = []\n for k, v in collector.aliases.items():\n if [item for item in v if item != k]:\n if shorter := find_cannonical(k, v):\n found[k] = shorter\n else:\n not_found.append((k, v))\n\n if logo := module_conf.get(\"logo\", None):\n self.put_raw(\"logo.png\", Path(logo).read_bytes())\n self.metadata = {\n \"version\": version,\n \"logo\": \"logo.png\",\n \"aliases\": found,\n \"module\": root,\n }", "def test_module_initialization(self):\n m = Module('foo')\n assert str(m) == 'foo'\n\n m = Module('foo.bar')\n assert str(m) == 'foo.bar'\n\n m = Module('foo.bar.qux')\n assert str(m) == 'foo.bar.qux'", "def test_module(self):\n pass", "def add_if_new(self, new_name):\n # self.logger.debug(\"add_if_new({0})\".format(new_name))\n\n if new_name in self.COMMON_STD_MODULES:\n # scan through existing STD LIB like \"self.logger\", \"sys\", \"time\"\n # self.logger.debug(\"Mod({}) is in std lib.\".format(new_name))\n return 0\n\n # if self.ignore_pip and new_name in self.COMMON_PIP:\n if new_name in self.COMMON_PIP:\n # scan through existing STD LIB like \"requests\"\n self.logger.debug(\"Mod({}) is in PIP lib.\".format(new_name))\n return 0\n\n # handle importing sub modules, like os.path or self.logger.handlers\n if new_name.find('.') >= 0:\n # then we have a x.y\n name = new_name.split('.')\n if name[0] in self.COMMON_STD_MODULES:\n # self.logger.debug(\"Mod({}) is in std lib.\".format(new_name))\n return 0\n\n if new_name in self.dep_list:\n # scan through existing names\n self.logger.debug(\"Mod({}) already known.\".format(new_name))\n return 0\n\n # if still here, then is a new name\n self.logger.debug(\"Mod({}) is NEW!\".format(new_name))\n\n # convert from network.tcp_echo.ftplib to network/tcp_echo/ftplib\n path_name = new_name.replace('.', os.sep)\n\n added_count = 0\n if not os.path.isdir(path_name):\n # only ADD is not a subdirectory\n 
self.dep_list.append(new_name)\n added_count = 1\n\n # handle is file or sub-directory\n self.logger.info(\"_add_recurse:{} {}\".format(path_name, new_name))\n added_count += self._add_recurse(path_name, new_name)\n\n return added_count", "def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")", "def rimport(self, modulename):\r\n if modulename not in self.module_cache:\r\n module = self.sync_request(\"handle_import\", modulename)\r\n self.module_cache[modulename] = module\r\n return self.module_cache[modulename]", "def base():", "def process_module_list(self, modules):", "def module_info():\n pass", "def test_unique_naming(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='GreekHero', branch='draft'),\r\n 'problem', block_id='problem1'\r\n )\r\n original = modulestore().get_item(locator)\r\n\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='guestx', offering='contender', branch='draft'), 'course', 'head345679'\r\n )\r\n category = 'problem'\r\n new_payload = \"<problem>empty</problem>\"\r\n new_module = modulestore().create_item(\r\n locator, category, 'anotheruser',\r\n fields={'display_name': 'problem 1', 'data': new_payload},\r\n )\r\n another_payload = \"<problem>not empty</problem>\"\r\n another_module = modulestore().create_item(\r\n locator, category, 'anotheruser',\r\n fields={'display_name': 'problem 2', 'data': another_payload},\r\n definition_locator=original.definition_locator,\r\n )\r\n # check that course version changed and course's previous is the other one\r\n parent = modulestore().get_item(locator)\r\n self.assertNotEqual(new_module.location.block_id, another_module.location.block_id)\r\n self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertIn(another_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertEqual(new_module.data, new_payload)\r\n self.assertEqual(another_module.data, another_payload)\r\n # check definition histories\r\n new_history = modulestore().get_definition_history_info(new_module.definition_locator)\r\n self.assertIsNone(new_history['previous_version'])\r\n self.assertEqual(new_history['original_version'], new_module.definition_locator.definition_id)\r\n self.assertEqual(new_history['edited_by'], \"anotheruser\")\r\n another_history = modulestore().get_definition_history_info(another_module.definition_locator)\r\n self.assertEqual(another_history['previous_version'], original.definition_locator.definition_id)", "def exercise_b2_53():\r\n pass", "def __init__(self):\n ScriptedLoadableModuleLogic.__init__(self)", "def find_module(cls, *args, **kwargs): # real signature unknown\n pass", "def find_module(cls, *args, **kwargs): # real signature unknown\n pass", "def find_module(cls, *args, **kwargs): # real signature unknown\n pass", "def plothub1():\r\n pass", "def main():\n compare_versions(Reactome())", "def hookInfo() :\n return ModuleInfo(\n name=\"My Module\",\n version=\"1.2\",\n author=\"Some Author\",\n description=\"A module that does something.\"\n )", "def smarter():\r\n pass", "def example_function_in_example_module():\n pass", "def cache_modules(*modnames) -> None:\n inspector = Inspector()\n modnames = { n for m in modnames for n in find_submodules(m) }\n\n for modname in modnames:\n logging.debug(f\"inspecting: {modname}\")\n objdoc = inspector.inspect_module(modname)\n logging.debug(f\"writing cache: {modname}\")\n try:\n PYCACHE[modname] = objdoc\n except CannotCache as exc:\n 
logging.warning(f\"cannot cache: {exc}\")", "def gen_mod(affinities, labels):\n\n for aff in affinities:\n yield modularity.get_modularity(aff, labels).sum()", "def package():\n pass", "def VER(self):", "def main():\n n = 34\n # create the adjacency matrix\n stripped_lines = Util.get_stripped_lines(g_karate_data.splitlines())\n string_rows = [line.split() for line in stripped_lines if line]\n assert len(string_rows) == n\n for row in string_rows:\n assert len(row) == n\n data_rows = [[float(x) for x in string_row] for string_row in string_rows]\n A = np.array(data_rows)\n # create the ordered module indices\n first_cluster_one_based_indices = [1, 3, 4, 14, 2, 8, 20, 18, 22, 13, 12, 6, 7, 17, 5, 11]\n second_cluster_one_based_indices = [25, 32, 26, 29, 24, 28, 9, 34, 33, 19, 16, 31, 15, 10, 23, 30, 21, 27]\n assert len(first_cluster_one_based_indices + second_cluster_one_based_indices) == n\n assert list(sorted(first_cluster_one_based_indices + second_cluster_one_based_indices)) == range(1, n+1)\n ordered_module_indices = []\n for i in range(n):\n if i+1 in first_cluster_one_based_indices:\n ordered_module_indices.append(0)\n else:\n ordered_module_indices.append(1)\n # print the modularity\n Q = get_modularity_other_b(A, ordered_module_indices)\n print 'modularity calculated using my interpretation of the method of the paper', Q\n Q = get_modularity_other_b2(A, ordered_module_indices)\n print 'modularity calculated using a modification of my interpretation of the method of the paper', Q\n Q = get_modularity_other_c(A, ordered_module_indices)\n print 'modularity calculated using the method on wikipedia', Q\n Q = get_eric_modularity(A, ordered_module_indices)\n print 'modularity calculated using the method eric used:', Q\n print 'expected modularity: .375 +/- .025'", "def test_handle_collisions_with_base_module_rpms(mock_grft, mock_get_session):\n mmd = load_mmd(read_staged_data(\"formatted_testmodule.yaml\"))\n xmd = mmd.get_xmd()\n xmd[\"mbs\"][\"buildrequires\"][\"platform\"][\"koji_tag\"] = \"module-el-build\"\n xmd[\"mbs\"][\"buildrequires\"][\"python\"] = {\"koji_tag\": \"module-python27\"}\n xmd[\"mbs\"][\"buildrequires\"][\"bash\"] = {\"koji_tag\": \"module-bash\"}\n mmd.set_xmd(xmd)\n\n bm_rpms = {\n \"bash-completion-1:2.7-5.el8.noarch\",\n \"bash-0:4.4.19-7.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.x86_64\",\n \"python3-ldap-0:3.1.0-4.el8.aarch64\",\n \"python3-ldap-0:3.1.0-4.el8.x86_64\",\n }\n non_bm_rpms = {\n \"bash-0:4.4.20-1.el8.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64\",\n }\n mock_grft.side_effect = [bm_rpms, non_bm_rpms]\n\n default_modules.handle_collisions_with_base_module_rpms(mmd, [\"aarch64\", \"x86_64\"])\n\n mock_get_session.assert_called_once()\n xmd_mbs = mmd.get_xmd()[\"mbs\"]\n assert set(xmd_mbs[\"ursine_rpms\"]) == {\n \"bash-0:4.4.19-7.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.x86_64\",\n }\n assert mock_grft.call_count == 2\n # We can't check the calls directly because the second argument is a set converted to a list,\n # so the order can't be determined ahead of time.\n first_call = mock_grft.mock_calls[0][1]\n assert first_call[0] == mock_get_session.return_value\n assert first_call[1] == [\"module-el-build\"]\n assert first_call[2] == [\"aarch64\", \"x86_64\"]\n\n second_call = mock_grft.mock_calls[1][1]\n assert second_call[0] == 
mock_get_session.return_value\n assert set(second_call[1]) == {\"module-bash\", \"module-python27\"}\n assert second_call[2] == [\"aarch64\", \"x86_64\"]", "def module_file(module):\n ...", "def exercise_b2_106():\r\n pass", "def pymod_cache():\n pymod.cache.cache = Singleton(pymod.cache.factory)", "def test_fold_module(source_files) -> None:\n # Given\n conf_graph = {\"fold_modules\": [\"amodule\"]}\n drawer = Mock()\n use_case = DrawGraphUC(drawer, PARSER, source_files, conf_graph)\n\n # When\n use_case.run()\n\n # Then\n drawer.write.assert_called() # type: ignore\n global_dep = drawer.write.call_args[0][0]\n\n assert global_dep == {\n \"simple_module\": OrderedSet(\n (\n Dependency(Module(\"module\")),\n Dependency(Module(\"module.inside.module\")),\n Dependency(Module(\"amodule\")),\n )\n ),\n \"amodule\": OrderedSet(\n (Dependency(Module(\"module\")), Dependency(Module(\"module.inside.module\")))\n ),\n }", "def test_same_function_name_from_module_level(self):\n Vegetable.foo(1, 2)\n vegetable_static_method_key = get_function_cache_key('function', 'tests.Vegetable.foo', (1, 2), {})\n self.assertExpectedKeyInCache(vegetable_static_method_key)\n\n foo(1, 2)\n module_function_key = get_function_cache_key('function', 'tests.foo', (1, 2), {})\n self.assertExpectedKeyInCache(module_function_key)\n\n self.assertNotEqual(vegetable_static_method_key, module_function_key)", "def exercise_b2_69():\r\n pass", "def make_dotted(name):\n sofar, parts = [], name.split('.')\n oldmod = None\n for part in parts:\n if not part:\n raise ImportError(\"Invalid module name %r\" % (name,))\n partname = \".\".join(sofar + [part])\n try:\n fresh, mod = False, load_dotted(partname)\n except ImportError:\n mod = _imp.new_module(partname)\n mod.__path__ = []\n fresh = mod == _sys.modules.setdefault(partname, mod)\n if oldmod is not None:\n setattr(oldmod, part, mod)\n oldmod = mod\n sofar.append(part)\n\n return mod, fresh", "def util():\n pass", "def util():\n pass", "def simple():", "def simple():", "def getlastmod():\n raise NotImplementedError()", "def sort_by_dependencies(\n mod_infos: List[Loader.ModInfo]) -> List[Loader.ModInfo]:\n # First compile a list of module infos with no dependencies and a mapping\n # from modules with at least one dependency to their respective\n # dependencies.\n no_deps: List[Loader.ModInfo] = []\n with_deps: Dict[str, Set[str]] = {}\n by_name: Dict[str, Loader.ModInfo] = {}\n\n for mod_info in mod_infos:\n assert mod_info.mod_def.name # Set, or inferred during loading\n by_name[mod_info.mod_def.name] = mod_info\n\n for mod_info in mod_infos:\n assert mod_info.mod_def.name # Set, or inferred during loading\n name = mod_info.mod_def.name\n opt_deps = {opt for opt in mod_info.mod_def.optional if opt in by_name}\n if not mod_info.mod_def.required and not opt_deps:\n no_deps.append(mod_info)\n continue\n with_deps[name] = set(mod_info.mod_def.required).union(opt_deps)\n\n sorted_infos = []\n while no_deps:\n # Remove the first element without dependencies from no_deps and\n # add it to the list of sorted infos.\n info = no_deps.pop()\n sorted_infos.append(info)\n\n # Then remove the info from all module infos that list it as a\n # dependency. 
If an entry in with_deps points to an empty set, remove\n # it from with_deps and add the module info to no_deps.\n\n # Copy with_deps.keys into a list to allow modifying with_deps during\n # iteration.\n for name in list(with_deps.keys()):\n # load_modules set this name if it was None\n assert info.mod_def.name is not None\n if info.mod_def.name not in with_deps[name]:\n continue\n with_deps[name].remove(info.mod_def.name)\n if not with_deps[name]:\n no_deps.append(by_name[name])\n del with_deps[name]\n\n # If with_deps is not empty by now, the contained modules have cyclic\n # dependencies.\n if with_deps:\n unmet = [f\"{n}: {' -> '.join(vs)}\" for n, vs in with_deps.items()]\n msg = \"\\n\\t\".join(unmet)\n msg = f\"Unmet or cyclic module dependencies:\\n\\n{msg}\"\n raise DependencyError(msg)\n\n return sorted_infos", "def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def exercise_b2_52():\r\n pass", "def exercise_b2_107():\r\n pass", "def test_get_module_name(self):\n target = MagicMock()\n target.__spec__ = MagicMock()\n target.__spec__.name = 'hello'\n self.assertEqual('hello', reloading.get_module_name(target))", "def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]", "def mod_load(self):\n raise NotImplementedError(\"Mod load isn't overriden\")", "def getModules(runName=\"run\", ofClass=None):\n # Container dict for all modules found with a runName function\n modules = {}\n \n # Cycle through all python files, excluding any starting with '_' in this\n # package dir\n for f in os.listdir(os.path.dirname(__file__)):\n # Split into module name and extension\n mod_name, ext = os.path.splitext(f)\n # Must be a .py file and not start with '_'\n if ext != '.py' or mod_name.startswith('_'):\n continue\n # Import the module relative to the current package\n mod = importlib.import_module(\".\"+mod_name, __package__)\n\n # Cycle through all members in the module, looking for the entry point\n # function and subclasses if needed\n members = {'runName': None, 'subClass': []}\n for obj_name, obj in inspect.getmembers(mod):\n # The .getmembers() method returns a tuple with the first element\n # the full member name , and the second the member definition.\n \n # Check for our entry function if we have not found it yet\n if members['runName'] is None and \\\n inspect.isfunction(obj) and \\\n obj.__name__ == runName:\n members['runName'] = obj\n continue\n\n # Check for any subclasses\n if ofClass is not None and \\\n 
inspect.isclass(obj) and \\\n issubclass(obj, ofClass) and \\\n obj != ofClass:\n members['subClass'].append(obj)\n continue\n\n # Only add this module if we found a runName\n if members['runName'] is not None:\n modules[mod_name] = members\n\n return modules", "def _mod_only(func):\n func._mods_only = True\n return func", "def lassh():", "def _create_module(name):\n module = new.module(name)\n sys.modules[name] = module\n return module", "def exercise_b2_56():\r\n pass", "def version(self):", "def get_docs( mysource , basename ):\n import parser\n ast = parser.suite(mysource)\n return ModuleInfo(ast.totuple(1), basename)", "def __init__(self, name, loader):\n _, packagename = dotpath_split(name)\n super(ModuleSpec, self).__init__(name, loader,\n origin=packagename)", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def main():\n\tpass", "def compute_modularity(G):\n # convert to undirected graph if necessary\n if isinstance(G, nx.DiGraph):\n G = G.to_undirected(reciprocal=True)\n\n # extract communities\n community_detection = community.greedy_modularity_communities(G)\n # calculate modularity with those communities\n modularity = community.modularity(G, community_detection)\n return modularity", "def test_handle_collisions_with_same_rpms(mock_grft, mock_get_session):\n mmd = load_mmd(read_staged_data(\"formatted_testmodule.yaml\"))\n xmd = mmd.get_xmd()\n xmd[\"mbs\"][\"buildrequires\"][\"platform\"][\"koji_tag\"] = \"module-el-build\"\n xmd[\"mbs\"][\"buildrequires\"][\"python\"] = {\"koji_tag\": \"module-python27\"}\n xmd[\"mbs\"][\"buildrequires\"][\"bash\"] = {\"koji_tag\": \"module-bash\"}\n mmd.set_xmd(xmd)\n\n bm_rpms = {\n \"bash-completion-1:2.7-5.el8.noarch\",\n \"bash-0:4.4.19-7.el8.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64\",\n }\n non_bm_rpms = {\n \"bash-0:4.4.20-1.el8.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64\",\n }\n mock_grft.side_effect = [bm_rpms, non_bm_rpms]\n\n default_modules.handle_collisions_with_base_module_rpms(mmd, [\"aarch64\", \"x86_64\"])\n\n mock_get_session.assert_called_once()\n xmd_mbs = mmd.get_xmd()[\"mbs\"]\n assert set(xmd_mbs[\"ursine_rpms\"]) == {\n \"bash-0:4.4.19-7.el8.aarch64\",\n }\n assert mock_grft.call_count == 2\n # We can't check the calls directly because the second argument is a set converted to a list,\n # so the order can't be determined ahead of time.\n first_call = mock_grft.mock_calls[0][1]\n assert first_call[0] == mock_get_session.return_value\n assert first_call[1] == [\"module-el-build\"]\n assert first_call[2] == [\"aarch64\", \"x86_64\"]\n\n second_call = mock_grft.mock_calls[1][1]\n assert second_call[0] == mock_get_session.return_value\n assert set(second_call[1]) == {\"module-bash\", \"module-python27\"}\n assert second_call[2] == [\"aarch64\", \"x86_64\"]", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def preProcess(self):\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n votes = 0\n winner = ''\n for voter in self.module[moduleName].keys():\n if self.module[moduleName][voter] > votes:\n votes = self.module[moduleName][voter]\n winner = voter\n self.module[moduleName] = 
winner\n\n # quick and dirty algorithm O(n^2). Can be done in O(n*lg(n))\n moduleLength = {}\n # find module lengths first\n for moduleName in self.module.keys():\n parts = moduleName.split('/')\n if len(parts) not in moduleLength:\n moduleLength[len(parts)] = []\n moduleLength[len(parts)].append(moduleName)\n lengths = moduleLength.keys()\n lengths.sort(reverse=True)\n\n for length in lengths:\n # FIXME: needs to be configurable.\n if length > 2:\n parents = {}\n for moduleName in self.module.keys():\n parts = moduleName.split('/')\n # group all parts of same length.\n if len(parts) == length:\n parent = moduleName.rsplit('/', 1)[0]\n if parent not in parents:\n parents[parent] = []\n parents[parent].append([moduleName, self.module[moduleName]])\n # check if all the children have the same developer as parent. If so remove the children.\n for parent in parents.keys():\n same = True\n parentDeveloper = self.module[parent]\n for moduleName, developer in parents[parent]:\n if developer != parentDeveloper:\n same = False\n if same:\n for moduleName, developer in parents[parent]:\n del self.module[moduleName]", "def myfunction1(x):\n print(\"module name :\",__name__)\n x=x+10\n print(\"x:\",x)\n return x", "def requires(self):", "def _build_impl(self):", "def import_phantom_module(xml_file):\r\n import lxml.etree as etree\r\n\r\n object_cache = {}\r\n\r\n tree = etree.parse(xml_file)\r\n root = tree.getroot()\r\n\r\n # Sort items so that\r\n # - Base classes come before classes inherited from them\r\n # - Modules come before their contents\r\n all_nodes = dict([(n.attrib['id'], n) for n in root])\r\n\r\n def _get_bases(node, recurse=False):\r\n bases = [x.attrib['ref'] for x in node.findall('base')]\r\n if recurse:\r\n j = 0\r\n while True:\r\n try:\r\n b = bases[j]\r\n except IndexError:\r\n break\r\n if b in all_nodes:\r\n bases.extend(_get_bases(all_nodes[b]))\r\n j += 1\r\n return bases\r\n\r\n type_index = ['module', 'class', 'callable', 'object']\r\n\r\n def base_cmp(a, b):\r\n x = cmp(type_index.index(a.tag), type_index.index(b.tag))\r\n if x != 0:\r\n return x\r\n\r\n if a.tag == 'class' and b.tag == 'class':\r\n a_bases = _get_bases(a, recurse=True)\r\n b_bases = _get_bases(b, recurse=True)\r\n x = cmp(len(a_bases), len(b_bases))\r\n if x != 0:\r\n return x\r\n if a.attrib['id'] in b_bases:\r\n return -1\r\n if b.attrib['id'] in a_bases:\r\n return 1\r\n\r\n return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))\r\n\r\n nodes = root.getchildren()\r\n nodes.sort(base_cmp)\r\n\r\n # Create phantom items\r\n for node in nodes:\r\n name = node.attrib['id']\r\n doc = (node.text or '').decode('string-escape') + \"\\n\"\r\n if doc == \"\\n\":\r\n doc = \"\"\r\n\r\n # create parent, if missing\r\n parent = name\r\n while True:\r\n parent = '.'.join(parent.split('.')[:-1])\r\n if not parent:\r\n break\r\n if parent in object_cache:\r\n break\r\n obj = imp.new_module(parent)\r\n object_cache[parent] = obj\r\n sys.modules[parent] = obj\r\n\r\n # create object\r\n if node.tag == 'module':\r\n obj = imp.new_module(name)\r\n obj.__doc__ = doc\r\n sys.modules[name] = obj\r\n elif node.tag == 'class':\r\n bases = [object_cache[b] for b in _get_bases(node)\r\n if b in object_cache]\r\n bases.append(object)\r\n init = lambda self: None\r\n init.__doc__ = doc\r\n obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})\r\n obj.__name__ = name.split('.')[-1]\r\n elif node.tag == 'callable':\r\n funcname = node.attrib['id'].split('.')[-1]\r\n argspec = node.attrib.get('argspec')\r\n if 
argspec:\r\n argspec = re.sub('^[^(]*', '', argspec)\r\n doc = \"%s%s\\n\\n%s\" % (funcname, argspec, doc)\r\n obj = lambda: 0\r\n obj.__argspec_is_invalid_ = True\r\n obj.func_name = funcname\r\n obj.__name__ = name\r\n obj.__doc__ = doc\r\n if inspect.isclass(object_cache[parent]):\r\n obj.__objclass__ = object_cache[parent]\r\n else:\r\n class Dummy(object):\r\n pass\r\n obj = Dummy()\r\n obj.__name__ = name\r\n obj.__doc__ = doc\r\n if inspect.isclass(object_cache[parent]):\r\n obj.__get__ = lambda: None\r\n object_cache[name] = obj\r\n\r\n if parent:\r\n if inspect.ismodule(object_cache[parent]):\r\n obj.__module__ = parent\r\n setattr(object_cache[parent], name.split('.')[-1], obj)\r\n\r\n # Populate items\r\n for node in root:\r\n obj = object_cache.get(node.attrib['id'])\r\n if obj is None:\r\n continue\r\n for ref in node.findall('ref'):\r\n if node.tag == 'class':\r\n if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):\r\n setattr(obj, ref.attrib['name'],\r\n object_cache.get(ref.attrib['ref']))\r\n else:\r\n setattr(obj, ref.attrib['name'],\r\n object_cache.get(ref.attrib['ref']))", "def __init__(self, name, dynloadmodule = None):\n\n\t\tif dynloadmodule is not None:\n\t\t\tobject.__setattr__(self, 'module', dynloadmodule)\n\t\t\tobject.__setattr__(self, 'name', name)\n\t\telse:\n\t\t\tindex = name.rfind('.')\n\t\t\tif index == -1:\n\t\t\t\traise ValueError('Need a module attribut as NAME.')\n\n\t\t\tmodulename = name[:index]\n\t\t\tattributename = name[index+1:]\n\n\t\t\tdynloadmodule = Dynload(modulename)\n\t\t\tobject.__setattr__(self, 'module', dynloadmodule)\n\t\t\tobject.__setattr__(self, 'name', attributename)", "def test_default_instance_initialize():\r\n\r\n class M1(Module):\r\n def __init__(self):\r\n super(M1, self).__init__()\r\n self.a = T.dscalar()\r\n self.b = T.lscalar()\r\n self.c = T.lvector()\r\n\r\n class M2(Module):\r\n def __init__(self):\r\n super(M2, self).__init__()\r\n self.a = T.lscalar()\r\n self.x = M1()\r\n self.y = self.x\r\n self.z = M1()\r\n\r\n m = M2().make(a = 13,\r\n x = dict(a = 1, b = 2, c = [3, 4]),\r\n z = dict(a = 5, b = 6, c = [7, 8]))\r\n\r\n assert m.a == 13\r\n assert m.x.a == 1\r\n assert m.x.b == 2\r\n assert all(m.x.c == [3, 4])\r\n assert m.y.a == 1\r\n assert m.y.b == 2\r\n assert all(m.y.c == [3, 4])\r\n assert m.z.a == 5\r\n assert m.z.b == 6\r\n assert all(m.z.c == [7, 8])", "def test_vendored_libjuju(self):\n for name in sys.modules:\n if name.startswith(\"juju\"):\n module = sys.modules[name]\n if getattr(module, \"__file__\"):\n print(getattr(module, \"__file__\"))\n assert re.search('n2vc', module.__file__, re.IGNORECASE)\n\n # assert module.__file__.find(\"N2VC\")\n # assert False\n return", "def num_41(func=None):\n def predicates(func):\n \"\"\" \"\"\"\n predicate = [['isfunction', ['__doc__', '__name__', '__code__', '__defaults__', '__globals__', '__kwdefaults__']],\n ['ismodule',[]], ['isroutine', []],\n ['ismethod'], []\n ]\n def demo_def():\n \"\"\"dummy...\n : Demonstrates retrieving and documenting module and function info.\n :\n \"\"\"\n def sub():\n \"\"\"sub in dummy\"\"\"\n pass\n return None\n import inspect\n if func is None:\n func = demo_def\n script = sys.argv[0] # a useful way to get a file's name\n lines, line_num = inspect.getsourcelines(func)\n code = \"\".join([\"{:4d} {}\".format(idx, line)\n for idx, line in enumerate(lines)])\n defs = [key for key, value in globals().items()\n if callable(value) and value.__module__ == __name__]\n args = [line_num, code,\n inspect.getcomments(func), 
inspect.isfunction(func),\n inspect.ismethod(func), inspect.getmoduleinfo(script),\n defs\n ]\n members = []\n funcs = []\n if inspect.ismodule(func): #ismodule, isfunction\n m_s = inspect.getmembers(func)\n for m in m_s:\n members.append(m[0])\n if inspect.isfunction(func):\n f_s = inspect.getmembers(func)\n for f in f_s:\n funcs.append(f[0])\n # **** work on this\n mem = [i[0] for i in inspect.getmembers(art)]\n frmt = \"\"\"\n :----------------------------------------------------------------------\n :Code for a function on line...{}...\n {}\n :Comments preceeding function\n {}\n :function?... {} ... or method? {}\n :Module info...\n {}\n :\n :Module functions...\n {} \n :----------------------------------------------------------------------\n \"\"\"\n print(dedent(frmt).format(*args))\n print(\"function member names\\n{}\".format(members))\n return None", "def proxyModule(original, **replacements):\n class _ModuleProxy(object):\n def __getattribute__(self, name):\n if name in replacements:\n return replacements[name]\n else:\n return getattr(original, name)\n\n def __repr__(self):\n return \"<Proxy for %r: %s replaced>\" % (\n original, ', '.join(replacements.keys()))\n return _ModuleProxy()", "def exercise_b2_43():\r\n pass", "def exercise_b2_93():\r\n pass", "def get_module(name) -> Module:\n if isinstance(name, str):\n obj = get_object(name)\n else:\n obj = name\n\n name = obj.__name__\n if name in modules:\n return modules[name]\n else:\n module = Module(obj)\n modules[name] = module\n return module", "def common(self):", "def emptyModule():\n return(Module(\"\",[]))" ]
[ "0.6275559", "0.5581727", "0.53932595", "0.53165233", "0.52899075", "0.52877235", "0.52483445", "0.5226746", "0.5212492", "0.51966196", "0.5192233", "0.5163047", "0.5147938", "0.5141009", "0.51342773", "0.51297754", "0.51278263", "0.51182884", "0.51020133", "0.51010454", "0.5098081", "0.5064919", "0.50605595", "0.50496733", "0.50340617", "0.5013887", "0.49936232", "0.4990406", "0.49861813", "0.49857292", "0.4966192", "0.4954318", "0.49518302", "0.49483287", "0.49462783", "0.4941234", "0.49302757", "0.49210936", "0.49209812", "0.49209812", "0.49209812", "0.4920862", "0.49190977", "0.48996928", "0.48934042", "0.4891384", "0.48830345", "0.48736843", "0.48571992", "0.4850839", "0.48468", "0.48466045", "0.481783", "0.48106223", "0.48096755", "0.48093402", "0.48062235", "0.48008525", "0.47953647", "0.47898224", "0.47898224", "0.47875836", "0.47875836", "0.47853336", "0.47751805", "0.476717", "0.4760241", "0.475614", "0.4747679", "0.47465467", "0.47415763", "0.47412553", "0.47299156", "0.47229782", "0.4722705", "0.47223774", "0.47095442", "0.47080114", "0.47053972", "0.4702483", "0.4702483", "0.47021154", "0.47011584", "0.469734", "0.4695874", "0.4695874", "0.4694598", "0.46934962", "0.4687381", "0.4687112", "0.468244", "0.46813238", "0.46810836", "0.46796533", "0.46756238", "0.4674586", "0.46743232", "0.46679723", "0.46635294", "0.46618262", "0.46607712" ]
0.0
-1
Analyze the Zachary karate club data.
def main(): n = 34 # create the adjacency matrix stripped_lines = Util.get_stripped_lines(g_karate_data.splitlines()) string_rows = [line.split() for line in stripped_lines if line] assert len(string_rows) == n for row in string_rows: assert len(row) == n data_rows = [[float(x) for x in string_row] for string_row in string_rows] A = np.array(data_rows) # create the ordered module indices first_cluster_one_based_indices = [1, 3, 4, 14, 2, 8, 20, 18, 22, 13, 12, 6, 7, 17, 5, 11] second_cluster_one_based_indices = [25, 32, 26, 29, 24, 28, 9, 34, 33, 19, 16, 31, 15, 10, 23, 30, 21, 27] assert len(first_cluster_one_based_indices + second_cluster_one_based_indices) == n assert list(sorted(first_cluster_one_based_indices + second_cluster_one_based_indices)) == range(1, n+1) ordered_module_indices = [] for i in range(n): if i+1 in first_cluster_one_based_indices: ordered_module_indices.append(0) else: ordered_module_indices.append(1) # print the modularity Q = get_modularity_other_b(A, ordered_module_indices) print 'modularity calculated using my interpretation of the method of the paper', Q Q = get_modularity_other_b2(A, ordered_module_indices) print 'modularity calculated using a modification of my interpretation of the method of the paper', Q Q = get_modularity_other_c(A, ordered_module_indices) print 'modularity calculated using the method on wikipedia', Q Q = get_eric_modularity(A, ordered_module_indices) print 'modularity calculated using the method eric used:', Q print 'expected modularity: .375 +/- .025'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_karate_club_data():\n\n # Edge list of Zachary's karate club.\n edge_list = [\n (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),\n (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (0, 31),\n (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30),\n (2, 3), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (2, 32),\n (3, 7), (3, 12), (3, 13), (4, 6), (4, 10), (5, 6), (5, 10), (5, 16),\n (6, 16), (8, 30), (8, 32), (8, 33), (9, 33), (13, 33), (14, 32), (14, 33),\n (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33),\n (22, 32), (22, 33), (23, 25), (23, 27), (23, 29), (23, 32), (23, 33),\n (24, 25), (24, 27), (24, 31), (25, 31), (26, 29), (26, 33), (27, 33),\n (28, 31), (28, 33), (29, 32), (29, 33), (30, 32), (30, 33), (31, 32),\n (31, 33), (32, 33)\n ]\n\n # Student-teacher assignment (before split) as in Zachary (1977).\n # Part-time karate instructor: Mr. Hi, node 0 (labeled as 0).\n # President: John A., node 33 (labeled as 1).\n node_labels = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n \n return create_graph_data(edge_list=edge_list, node_labels=node_labels)", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def analyze(self):\n\n self.makeSessions()\n self.collectPlayers()\n self.__analyze()", "def get_graph_karateclub():\n all_members = set(range(34))\n club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}\n # club2 = all_members - club1\n\n G = eg.Graph(name=\"Zachary's Karate Club\")\n for node in all_members:\n G.add_node(node+1)\n\n zacharydat = \"\"\"\\\n0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0\n1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0\n1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0\n1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1\n0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1\n0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1\n0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0\"\"\"\n\n for row, line in enumerate(zacharydat.split('\\n')):\n thisrow = [int(b) for b in line.split()]\n for col, entry in enumerate(thisrow):\n if entry == 1:\n G.add_edge(row+1, col+1)\n\n # Add the name of each member's club as a node attribute.\n for v in G:\n G.nodes[v]['club'] = 'Mr. Hi' if v in club1 else 'Officer'\n return G", "def analyse(self):\n pass", "def analysis(self, game_info):\n pass", "def get_jaccard_info(self, gtf_version):\n if gtf_version == \"A\":\n isoseqid2besttransid = self.isoseqid2besttransidA\n transid2line = self.transidA2line\n elif gtf_version == \"B\":\n isoseqid2besttransid = self.isoseqid2besttransidB\n transid2line = self.transidB2line\n isoseqid2line = self.isoseqid2line\n\n isoseqid2jaccardinfo = dict()\n count = 0\n for isoseqid in isoseqid2besttransid.keys():\n if count % 1000 == 0:\n print(count)\n count += 1\n transid = isoseqid2besttransid[isoseqid]\n #print(\"#################################\")\n #print(isoseqid, transid, line)\n (chrom1, chromStart1, chromEnd1, isoseqid, score1, strand1, thickStart1, thickEnd1, itemRgb1, blockCount1, blockSizes1, blockStarts1) = isoseqid2line[isoseqid].rstrip().split(\"\\t\")\n\n (chrom2, chromStart2, chromEnd2, transid, score2, strand2, thickStart2, thickEnd2, itemRgb2, blockCount2, blockSizes2, blockStarts2) = transid2line[transid].rstrip().split(\"\\t\")\n chromStart1 = int(chromStart1)\n chromEnd1 = int(chromEnd1)\n chromStart2 = int(chromStart2)\n chromEnd2 = int(chromEnd2)\n exons1 = get_exons(chromStart1, chromEnd1, blockSizes1, blockStarts1)\n exons2 = get_exons(chromStart2, chromEnd2, blockSizes2, blockStarts2)\n minStart = min(chromStart1, chromStart2)\n maxEnd = max(chromEnd1, chromEnd2)\n\n union_sum = 0\n intersection_sum = 0\n for i in range1(minStart, maxEnd):\n first = inside_exons(i, exons1)\n second = inside_exons(i, exons2)\n if first + second > 0:\n union_sum += 1\n if first + second == 2:\n intersection_sum += 1\n jaccard = intersection_sum / union_sum\n isoseqid2jaccardinfo[isoseqid] = [intersection_sum, union_sum, jaccard]\n return(isoseqid2jaccardinfo)", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def karate_club(metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:\n row = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,\n 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18,\n 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26,\n 27, 28, 28, 29, 29, 30, 30, 31, 
31, 32])\n col = np.array(\n [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2,\n 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12,\n 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32,\n 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33,\n 33, 31, 33, 32, 33, 32, 33, 32, 33, 33])\n adjacency = sparse.csr_matrix((np.ones(len(row), dtype=bool), (row, col)), shape=(34, 34))\n adjacency = sparse.csr_matrix(adjacency + adjacency.T, dtype=bool)\n\n if metadata:\n labels = np.array(\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n x = np.array(\n [0.04, 0.24, 0.01, 0.13, 0.02, -0.08, 0.04, 0.21, 0.08, -0.11, -0.13, -0.28, 0.2, 0.08,\n 0.23, 0.06, -0.06, 0.32, 0.15, 0.19, 0.27, 0.39, -0.04, -0.26, -0.51, -0.49, -0.19, -0.28,\n -0.11, -0.17, 0.22, -0.21, 0.03, 0])\n y = np.array(\n [-0.33, -0.15, -0.01, -0.28, -0.64, -0.75, -0.76, -0.25, 0.09, 0.23, -0.62, -0.4, -0.53, -0.07,\n 0.55, 0.64, -1., -0.42, 0.6, -0.01, 0.45, -0.34, 0.61, 0.41, 0.14, 0.28, 0.68, 0.21,\n 0.12, 0.54, 0.19, 0.09, 0.38, 0.33])\n graph = Bunch()\n graph.adjacency = adjacency\n graph.labels = labels\n graph.position = np.vstack((x, y)).T\n graph.name = 'karate_club'\n return graph\n else:\n return adjacency", "def analyze_data(self):\n\n self.truth = self.analyze_folder(\"Truth\")\n self.truth.to_csv(self.folder + \"/truth.csv\")\n self.false = self.analyze_folder(\"False\")\n self.flase.to_csv(self.folder + \"/false.csv\")", "def run(self, verbose=True):\n\n # Initialise\n dataset_dir = self.dataset_dir\n sensor_data = self.sensor_data\n output_dir = self.output_dir\n plots_dir = self.plots_dir\n\n ################################################################################################################\n # 1.\tRead Match-up Data and Harmonisation Result Data\n ################################################################################################################\n\n print\"Match-up Dataset:\"\n print \">\", dataset_dir\n\n print\"\\nSensor Data:\"\n print \">\", sensor_data\n\n print \"\\nHarmonisation Output:\"\n print \">\", output_dir\n\n print(\"\\nOpening Files...\")\n MatchUpData = open_matchup(dataset_dir, open_uncertainty=False)\n MatchUpData.setSensorData(sensor_data)\n HarmResult = HarmonisationResult(output_dir, open_residuals=False)\n\n if verbose:\n print HarmResult.parameter_sensors\n print HarmResult.parameter\n print HarmResult.parameter_covariance_matrix\n print \"\\n\"\n\n ################################################################################################################\n # 2.\tMake plots\n ################################################################################################################\n\n print(\"Generating Plots...\")\n\n HarmVisOp = HarmonisationVis(MatchUpData, HarmResult)\n HarmVisOp.plot_compare_calibration(plots_dir, HarmResult.parameter,\n HarmResult.parameter_covariance_matrix, verbose=verbose)\n\n print \"\\nPlots written to:\", plots_dir", "def summarize_jaccard(sample):\n (species, sex, tissue, replicate) = sample.split(\"_\")\n ins = FocalIntersect(species, sex, tissue, replicate)\n with open(\"../data/output/\" + ins.name + \".A.txt\", 'w') as f:\n for isoseqid in ins.jaccard_infoA.keys():\n intersection_sum, union_sum, jaccard = ins.jaccard_infoA[isoseqid]\n transid = ins.isoseqid2besttransidA[isoseqid]\n f.write(isoseqid + \"\\t\" + transid + \"\\t\" + str(intersection_sum) + \"\\t\" + str(union_sum) + \"\\t\" + str(jaccard) + \"\\n\")\n\n 
with open(\"../data/output/\" + ins.name + \".B.txt\", 'w') as f:\n for isoseqid in ins.jaccard_infoB.keys():\n intersection_sum, union_sum, jaccard = ins.jaccard_infoB[isoseqid]\n transid = ins.isoseqid2besttransidB[isoseqid]\n f.write(isoseqid + \"\\t\" + transid + \"\\t\" + str(intersection_sum) + \"\\t\" + str(union_sum) + \"\\t\" + str(jaccard) + \"\\n\")", "def main():\n # start_time = time.time()\n\n # city: (latitude, longtitude) +/- 0.5 degree\n # geocenter for EU: 50 9\n cities = {\n 'Vienna': (48, 16),\n 'Brussels': (51, 4),\n 'Sofia': (43, 23),\n 'Zagreb': (46, 16),\n 'Nicosia': (35, 33),\n 'Prague': (50, 14),\n 'Copenhagen': (55, 13),\n 'Tallinn': (59, 25),\n 'Helsinki': (60, 25),\n 'Paris': (49, 2),\n 'Berlin': (53, 13),\n 'Athens': (38, 24),\n 'Budapest': (48, 19),\n 'Dublin': (53, -6),\n 'Rome': (42, 13),\n 'Riga': (57, 24),\n 'Vilnius': (55, 25),\n 'Luxembourg': (50, 6),\n 'Valletta': (36, 15),\n 'Amsterdam': (52, 5),\n 'Warsaw': (52, 21),\n 'Lisbon': (39, -9),\n 'Bucharest': (44, 26),\n 'Bratislava': (48, 17),\n 'Ljubljana': (46, 15),\n 'Madrid': (40, -4),\n 'Stockholm': (59, 18),\n 'London': (52, 0)\n }\n\n cities = OrderedDict(sorted(cities.items(), key=lambda t: t[0]))\n cities_indices = [x for x in range(len(cities))]\n cities_names = [key for key in cities.keys()]\n\n for key, value in cities.items():\n nu_v = hf.equirectangular_projection(\n value[0], value[1], phi_0=50, l_0=9)\n cities[key] = nu_v\n\n decoder = {value: key for (key, value) in cities.items()}\n\n ga.cities = cities\n # ga.cities_names = cities_names\n # ga.cities_indices = cities_indices\n param_names = ['v1', 'v2', 't', 'n', 'pm', 'pc', 'tournsize', 'size']\n f = open('params.txt', 'r')\n param_values = [float(l) if '.' in l else int(l) for l in f]\n f.close()\n params = dict(zip(param_names, param_values))\n\n ga.Salesman.diploid = True\n starters = ga.mfp(params['size'])\n v1 = params['v1'] # velocity 1 in Poland\n v2 = params['v2'] # velocity 2 in Poland\n t = params['t'] # period of change of velocity in Poland\n n = params['n'] # number of generations\n pm = params['pm'] # probabilty of mutation (per gene)\n pc = params['pc'] # probability of crossover\n tournsize = params['tournsize']\n\n start_time = time.time()\n salesmen = starters\n ga.Salesman.velocity_pol = v1\n path_s = ga.findbest(salesmen).fitness\n print('first population best: ' + str(round(1 / path_s, 2)) + ' hours')\n\n results = [[0, path_s]]\n counter = 0\n for i in range(n):\n if counter == t // 2 - 1:\n ga.Salesman.velocity_pol = v1 if ga.Salesman.velocity_pol == v2 \\\n else v2\n counter = 0\n counter += 1\n salesmen = ga.evolution(salesmen, pm, pc, tournsize)\n path = ga.findbest(salesmen).fitness\n results.append([i + 1, path])\n\n path_d = ga.findbest(salesmen).fitness\n path_d_seq = ga.findbest(salesmen).best_seq\n print(str(n) + '-th population best (diploidal): ' +\n str(round(1 / path_d, 2)) + ' hours')\n print([decoder[x] for x in path_d_seq])\n print(\"Time elapsed: \" + str(time.time() - start_time) + 's')\n\n start_time = time.time()\n salesmen = starters\n ga.Salesman.diploid = False\n ga.Salesman.velocity_pol = v1\n\n results2 = [[0, path_s]]\n counter = 0\n for i in range(n):\n if counter == t // 2 - 1:\n ga.Salesman.velocity_pol = v1 if ga.Salesman.velocity_pol == v2 \\\n else v2\n counter = 0\n counter += 1\n salesmen = ga.evolution(salesmen, pm, pc, tournsize)\n path = ga.findbest(salesmen).fitness\n results2.append([i + 1, path])\n\n path_h = ga.findbest(salesmen).fitness\n path_h_seq = 
ga.findbest(salesmen).city_seq\n print(str(n) + '-th population best (haploidal): ' +\n str(round(1 / path_h, 2)) + ' hours')\n print([decoder[x] for x in path_h_seq])\n print(\"Time elapsed: \" + str(time.time() - start_time) + 's')\n\n # plot fitnesses:\n results = np.asarray(results)\n results2 = np.asarray(results2)\n plt.plot(results[:, 0], results[:, 1], 'b-', label='diploidal')\n plt.plot(results2[:, 0], results2[:, 1], 'g-', label='haploidal')\n plt.legend(loc=4)\n plt.show()\n\n # plot paths:\n fig, ax = plt.subplots(1)\n\n starters_best_seq = ga.findbest(starters).city_seq\n starters_best_seq += [starters_best_seq[0]] # close the loop\n starters_best_seq = np.asarray(starters_best_seq)\n plt.plot(starters_best_seq[:, 0], starters_best_seq[:, 1], 'r-', alpha=0.2)\n\n labels = cities_indices\n cities = np.asarray(list(ga.cities.values()))\n\n plt.scatter(cities[:, 0], cities[:, 1], color='r')\n for label, x, y in zip(labels, cities[:, 0], cities[:, 1]):\n plt.annotate(label, xy=(x, y), xytext=(-6, -12),\n textcoords='offset points')\n poland_c = hf.equirectangular_projection(52, 19, 50, 9)\n poland = plt.Circle(poland_c, .047, color='r', alpha=0.3)\n ax.add_artist(poland)\n\n path_d_seq = path_d_seq + [path_d_seq[0]]\n path_d_seq = np.asarray(path_d_seq)\n\n path_h_seq = path_h_seq + [path_h_seq[0]]\n path_h_seq = np.asarray(path_h_seq)\n\n plt.plot(path_h_seq[:, 0],\n path_h_seq[:, 1], 'g-', label='haploidal')\n plt.plot(path_d_seq[:, 0],\n path_d_seq[:, 1], 'b-', label='diploidal')\n\n legend = \"Legend:\\n\"\n legend += \"\\n\".join([str(ii) + ': ' + name\n for ii, name in enumerate(cities_names)])\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(-0.15, 0.95, legend,\n transform=ax.transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n\n plt.axis('off')\n plt.legend(loc=4)\n plt.show()", "def main():\n\takpPoints,chpPoints = extractSupporterCities(\"Data/PreprocessedAkpTweets.csv\",\n\t\t\t\t\t\t\t\t\t\t\t \"Data/PreprocessedChpTweets.csv\")\n\tgenerateMapPoints(akpPoints,chpPoints)\n\tgenerateCitySentimentData(akpPoints,chpPoints)\n\tgenerateChoroplethMap(\"Data/tr_cities_modified.json\",\"Data/city_ratio.csv\")", "def do_analyse(args):\n\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n\n score = 'score'\n\n # Read in the results, and add a boolean target column.\n df = pd.read_csv(args.results, index_col=0)\n df['target'] = df['verify_speaker'] == df['enrol_speaker']\n\n # Calculate ideal 0.01% threshold over the multi-session data.\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n nontarget_count = nontarget_df[score].count()\n th_calc = nontarget_df.iloc[int(nontarget_count * (1 / 10000))][score]\n\n # Now filter the data so that we only consider mono-session enrolment and verification.\n df = df.loc[df['verify_room'] == df['enrol_room']]\n target_df = df.loc[df['target'] == True].sort_values(score, ascending=False)\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n target_count = target_df[score].count()\n nontarget_count = nontarget_df[score].count()\n\n # Calculate FA/FR for the user-defined threshold.\n th_user = args.th_user\n fr_user = target_df.loc[target_df[score] < th_user][score].count()\n fa_user = nontarget_df.loc[nontarget_df[score] > th_user][score].count()\n frr_user = fr_user / target_count\n far_user = fa_user / nontarget_count\n label_user = 'User Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_user, 
fr_user, frr_user * 100,\n fa_user, far_user * 100)\n\n # Calculate the FA/FR for the ideal threshold calculated from the multi-session data.\n fr_calc = target_df.loc[target_df[score] < th_calc][score].count()\n fa_calc = nontarget_df.loc[nontarget_df[score] > th_calc][score].count()\n frr_calc = fr_calc / target_count\n far_calc = fa_calc / nontarget_count\n label_calc = 'Calc Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_calc, fr_calc, frr_calc * 100,\n fa_calc, far_calc * 100)\n\n # Print the stats.\n print('\\nTarget Stats:')\n print(target_df[score].describe())\n print('\\nNon-Target Stats:')\n print(nontarget_df[score].describe())\n print('\\nThresholds:')\n print(label_user)\n print(label_calc)\n\n # Paint the graphs.\n paint_graph(score, 'verify_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'verify_speaker', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_speaker', df, th_user, label_user, th_calc, label_calc)", "def analyze(self):\n self.grayscale = (input(\"[G]rayscale or [C]olor? \").lower()[0] == \"g\")\n for i in range(1, 6):\n for j in range(1, 10):\n network_name = \"acas_%d_%d\" % (i, j)\n try:\n distance_classified = self.read_artifact(\n \"%s/distance\" % network_name)\n theta_classified = self.read_artifact(\n \"%s/theta\" % network_name)\n sample_pre, sample_post = self.read_artifact(\n \"%s/sample\" % network_name)\n single_line_data = self.read_artifact(\n \"%s/single_lines\" % network_name)\n except KeyError:\n # Skip due to missing data.\n continue\n print(\"Analyzing network:\", network_name)\n self.distance_plot(distance_classified)\n self.finalize_plot(\"%s/distance\" % network_name)\n self.theta_plot(theta_classified)\n self.finalize_plot(\"%s/theta\" % network_name)\n self.overlapping_plot(distance_classified, theta_classified)\n self.finalize_plot(\"%s/overlapping\" % network_name)\n self.sample_plot(sample_pre, sample_post)\n self.finalize_plot(\"%s/sample\" % network_name)\n\n self.single_line_plots(network_name, single_line_data)\n return True", "def cingAnalyseResults(self): \n\n pass", "def test_assemble_stats(self):\n lar_data = loan_originations_as_json(request)\n tracts = Geo.objects.filter(geo_type=Geo.TRACT_TYPE, cbsa=request.GET.get('metro'))\n lender_stats = assemble_stats(lar_data, tracts)\n self.assertEqual(lender_stats['hma_pct'], 0)\n self.assertEqual(lender_stats['lma_pct'], 1)\n self.assertEqual(lender_stats['mma_pct'], 0)\n self.assertEqual(lender_stats['lma'], 7)\n self.assertEqual(lender_stats['mma'], 0)\n self.assertEqual(lender_stats['hma'], 0)\n self.assertEqual(lender_stats['lar_total'], 7)", "def help_analyze(self):\n print(ANALYZE)", "def analyse(self, data=None):\n pass", "def main():\n tl = TwoLocus(in_path='/csbiodata/public/www.csbio.unc.edu/htdocs/sgreens/pairwise_origins/')\n # tl = TwoLocus()\n # tl.preprocess(glob.glob('OR_ss_origins/*.hap'))\n print len(tl.list_available_strains())\n exit()\n # print len(tl.list_available_strains())\n # tl.preprocess(['cc_origins.csv'])\n # tl.preprocess(['ccv_origins.csv'])\n classical = [s for s in\n [\"129P1/ReJ\", # \"129P3/J\", \"129S1SvlmJ\", \"129S6\", \"129T2/SvEmsJ\", \"129X1/SvJ\", \"A/J\", \"A/WySnJ\",\n \"AEJ/GnLeJ\", \"AEJ/GnRk\", \"AKR/J\", \"ALR/LtJ\", \"ALS/LtJ\", \"BALB/cByJ\", \"BALB/cJ\", \"BDP/J\", \"BPH/2J\",\n # \"BPL/1J\", \"BPN/3J\", \"BTBR T<+>tf/J\", \"BUB/BnJ\", \"BXSB/MpJ\", \"C3H/HeJ\", \"C3HeB/FeJ\", 
\"C57BL/10J\",\n # \"C57BL/10ScNJ\", \"C57BL/10SAAAJ\", \"C57BL/6CR\", \"C57BL/6J\", \"C57BL/6NCI\", \"C57BL/6Tc\", \"C57BLKS/J\",\n # \"C57BR/cdJ\", \"C57L/J\", \"C58/J\", \"CBA/CaJ\", \"CBA/J\", \"CE/J\", \"CHMU/LeJ\", \"DBA/1J\", \"DBA/1LacJ\",\n # \"DBA/2DeJ\", \"DBA/2HaSmnJ\", \"DBA/2J\", \"DDK/Pas\", \"DDY/JclSidSeyFrkJ\", \"DLS/LeJ\", \"EL/SuzSeyFrkJ\",\n # \"FVB/NJ\", \"HPG/BmJ\", \"I/LnJ\", \"IBWSP2\", \"IBWSR2\", \"ICOLD2\", \"IHOT1\", \"IHOT2\", \"ILS\", \"ISS\", \"JE/LeJ\",\n # \"KK/HlJ\", \"LG/J\", \"LP/J\", \"LT/SvEiJ\", \"MRL/MpJ\", \"NOD/ShiLtJ\", \"NON/ShiLtJ\", \"NONcNZO10/LtJ\",\n # \"NONcNZO5/LtJ\", \"NOR/LtJ\", \"NU/J\", \"NZB/BlNJ\", \"NZL/LtJ\", \"NZM2410/J\", \"NZO/HlLtJ\", \"NZW/LacJ\", \"P/J\",\n # \"PL/J\", \"PN/nBSwUmabJ\", \"RF/J\", \"RHJ/LeJ\", \"RIIIS/J\", \"RSV/LeJ\", \"SB/LeJ\", \"SEA/GnJ\", \"SEC/1GnLeJ\",\n # \"SEC/1ReJ\", \"SH1/LeJ\", \"SI/Col Tyrp1 Dnahc11/J\", \"SJL/Bm\", \"SJL/J\", \"SM/J\", \"SSL/LeJ\", \"ST/bJ\",\n \"STX/Le\", ] # \"SWR/J\", \"TALLYHO/JngJ\", \"TKDU/DnJ\", \"TSJ/LeJ\", \"YBR/EiJ\", \"ZRDCT Rax<+>ChUmdJ\"]\n if tl.is_available(s)]\n wild_derived = [s for s in\n ['22MO',\n # 'BIK/g', 'BULS', 'BUSNA', 'BZO', 'CALB/RkJ', 'CASA/RkJ', 'CAST/EiJ', 'CIM', 'CKN', 'CKS',\n 'CZECHI/EiJ', 'CZECHII/EiJ', 'DCA', 'DCP', 'DDO', 'DEB', 'DGA', 'DIK', 'DJO', 'DKN', 'DMZ', 'DOT',\n # 'IS/CamRkJ', 'JF1/Ms', 'LEWES/EiJ', 'MBK', 'MBS', 'MCZ', 'MDG', 'MDGI', 'MDH', 'MGA', 'MH',\n # 'MOLD/RkJ', 'MOLF/EiJ', 'MOLG/DnJ', 'MOR/RkJ', 'MPB', 'MSM/Ms', 'PERA/EiJ', 'PERC/EiJ', 'POHN/Deh',\n # 'PWD/PhJ', 'PWK/PhJ', 'RBA/DnJ', 'RBB/DnJ', 'RBF/DnJ', 'SF/CamEiJ', 'SKIVE/EiJ', 'SOD1/EiJ',\n # 'STLT', 'STRA', 'STRB', 'STUF', 'STUP', 'STUS', 'TIRANO/EiJ', 'WLA', 'WMP', 'WSB/EiJ',\n 'ZALENDE/EiJ'] if tl.is_available(s)]\n tl.contingency_table(classical, wild_derived, '/csbiohome01/sgreens/Projects/intervals/contingency.csv')\n exit()\n x = TwoLocus(chrom_sizes=[20e6, 20e6])\n x.preprocess([\"test2.csv\"])\n x.unique_combos(['A', 'B', 'D'], ['C', 'E'])\n x.sources_at_point_pair('1', 1, '1', 10000000, ['A'])\n # x.interlocus_dependence([chr(c) for c in xrange(ord('A'), ord('J')+1)])\n # exit()\n\n x = TwoLocus(chrom_sizes=[20 * 10 ** 6, 20 * 10 ** 6])\n x.preprocess([\"test.csv\"])\n rez = x.pairwise_frequencies([\"A\"])\n\n areas = x.calculate_genomic_area(rez[0], rez[1])\n total = 0.0\n\n for combo in subspecies.iter_combos():\n print \"\\t{:15s}({:4d}):{:1.5f}\".format(subspecies.to_string(combo), combo,\n areas[str(subspecies.to_string(combo))])\n total += areas[str(subspecies.to_string(combo))]\n print \"\\t{:21s}:{:1.5f}\".format(\"Total\", total)\n\n sys.exit(1)\n # for code, combo in combos.iteritems():\n # print \"\\n\", rez[1]\n # print \"\\t{} ({}):\\n{}\".format(combo, code, rez[0][code])", "def show_results():\n print 'Distancia total: ', total_distance\n print 'Ruta: ', visited_cities", "def main():\n # openfile allows for CSV files with stored data of two columns\n # data = openfile(\"filename\")\n data = get_data()\n abtest = AB_test(data)\n abtest.stats()\n abtest.print_stats()", "def get_jhu(data_path: str, filter_: Union[dict, bool] = True) -> None:\n # Where JHU stores their data\n url_template = (\"https://raw.githubusercontent.com/CSSEGISandData/\"\n \"COVID-19/master/csse_covid_19_data/\"\n \"csse_covid_19_time_series/time_series_covid19_%s_%s.csv\")\n\n # Scrape the data\n dfs = {}\n for region in ['global', 'US']:\n dfs[region] = {}\n for kind in ['confirmed', 'deaths', 'recovered']:\n url = url_template % (kind, region) # Create the full data URL\n 
try:\n df = pd.read_csv(url) # Download the data into a dataframe\n except HTTPError:\n print(\"Could not download data for %s, %s\" % (kind, region))\n else:\n if region == 'global':\n has_no_province = df['Province/State'].isnull()\n # Whole countries only; use country name as index\n df1 = df[has_no_province].set_index('Country/Region')\n more_dfs = []\n for country in ['China', 'Canada', 'Australia']:\n if country == 'Canada' and kind in 'recovered':\n continue\n is_c = df['Country/Region'] == country\n df2 = df[is_c].sum(axis=0, skipna=False).to_frame().T\n df2['Country/Region'] = country\n df2 = df2.set_index('Country/Region')\n more_dfs.append(df2)\n df = pd.concat([df1] + more_dfs)\n elif region == 'US':\n # Use state name as index\n for k, v in us_state_abbrev.items(): # get US state abbrev\n if not us_state_abbrev[k].startswith('US_'):\n us_state_abbrev[k] = 'US_' + v # Add 'US_' to abbrev\n df.replace(us_state_abbrev, inplace=True)\n df = df.set_index('Province_State')\n df = df.groupby('Province_State').sum() # combine counties to create state level data\n\n df = df[[x for x in df if any(year in x for year in ['20', '21'])]] # Use only data columns\n # 20 or 21 signifies 2020 or 2021\n dfs[region][kind] = df # Add to dictionary of dataframes\n\n # Generate a list of countries that have \"good\" data,\n # according to these criteria:\n good_countries = get_countries(dfs['global'], filter_=filter_)\n\n # For each \"good\" country,\n # reformat and save that data in its own .csv file.\n source = dfs['global']\n for country in tqdm(good_countries, desc='Countries'): # For each country\n if country in ['Diamond Princess', 'MS Zaandam', 'Samoa',\n 'Vanuatu', 'Marshall Islands', 'US', 'Micronesia']:\n print(\"Skipping {}\".format(country))\n continue\n # If we have data in the downloaded JHU files for that country\n if country in source['confirmed'].index:\n df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',\n 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover',\n 'new_uninfected'])\n df['dates2'] = source['confirmed'].columns\n df['dates2'] = df['dates2'].apply(fix_jhu_dates)\n df['cum_cases'] = source['confirmed'].loc[country].values\n df['cum_deaths'] = source['deaths'].loc[country].values\n df['cum_recover'] = source['recovered'].loc[country].values\n df[['new_cases', 'new_deaths', 'new_recover']] = \\\n df[['cum_cases', 'cum_deaths', 'cum_recover']].diff()\n df['new_uninfected'] = df['new_recover'] + df['new_deaths']\n\n\n try:\n population = get_population_count(data_path, country)\n df['population'] = population\n except:\n pass\n\n # Fill NaN with 0 and convert to int\n dfs[country] = df.set_index('dates2').fillna(0).astype(int)\n dfs[country].to_csv(data_path / ('covidtimeseries_%s.csv' % country))\n\n else:\n print(\"No data for %s\" % country)\n\n source = dfs['US']\n states = source['confirmed'].index.tolist()\n us_recovery_data = covid_tracking_recovery(data_path)\n for state in tqdm(states, desc='US States'): # For each country\n if state in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'US_AS']:\n print(\"Skipping {}\".format(state))\n continue\n # If we have data in the downloaded JHU files for that country\n if state in source['confirmed'].index:\n df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',\n 'new_cases','new_deaths','new_uninfected'])\n df['dates2'] = source['confirmed'].columns\n df['dates2'] = df['dates2'].apply(fix_jhu_dates)\n df['cum_cases'] = source['confirmed'].loc[state].values\n df['cum_deaths'] = 
source['deaths'].loc[state].values\n\n df[['new_cases', 'new_deaths']] = df[['cum_cases', 'cum_deaths']].diff()\n\n # add recovery data\n df.set_index('dates2', inplace=True)\n df = df.merge(us_recovery_data[state], on='dates2', how='left')\n df['new_uninfected'] = df['new_recover'] + df['new_deaths']\n\n try:\n population = get_population_count(data_path, state)\n df['population'] = population\n except:\n pass\n # Fill NaN with 0 and convert to int\n dfs[state] = df.fillna(0).astype(int)\n dfs[state].to_csv(data_path /\n ('covidtimeseries_%s.csv' % state))\n else:\n print(\"No data for %s\" % state)", "def north_america_countries():\r\n north_america_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in north_america:\r\n north_america_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in north_america_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def main(verbose=True):\n if verbose: \n print(\"\\n---------------\")\n printCommonSNPCounts()\n print(\"---------------\")\n \n print(\"Charles River\")\n print(\"---------------\") \n getCommonSNPIndices(\"C\", save=True)\n print(\"---------------\")\n \n print(\"Harlan River\")\n getCommonSNPIndices(\"H\", save=True)\n print(\"---------------\")\n else:\n getCommonSNPIndices(\"C\", save=True)\n getCommonSNPIndices(\"H\", save=True)", "def run(input_loc, output_loc):\n\n df_activity, df_population, df_vacc_global, df_vacc_usa_doses, df_vacc_usa_people = read_sources(spark, input_loc)\n\n # calculate dimentions\n _run_dim_continent(df_activity, df_population)\n _run_dim_data_sources(df_activity, df_population, output_loc)\n _run_dim_vaccinations_types(df_vacc_usa_doses, output_loc)\n df_countries = _run_dim_countries(df_activity, df_population, output_loc)\n\n # calculate fact table\n _run_fact_activity(\n df_activity, df_population,\n df_countries, \n df_vacc_global,\n df_vacc_usa_doses,\n df_vacc_usa_people,\n output_loc)\n\n # test\n test_outputs(\n df_activity,\n df_countries,\n )", "def train():\n rng = random.PRNGKey(0)\n\n # Get Zachary's karate club graph dataset.\n node_feats, node_labels, sources, targets = get_karate_club_data()\n\n # Create model and optimizer.\n _, initial_params = GNN.init(\n rng, node_x=node_feats, edge_x=None, sources=sources, targets=targets)\n model = nn.Model(GNN, initial_params)\n optimizer = optim.Adam(learning_rate=0.01).create(model)\n\n # Train for 20 iterations.\n for iteration in range(20):\n optimizer, loss = train_step(optimizer, node_feats, sources, targets)\n\n accuracy = eval_step( # Model is stored in `optimizer.target`.\n optimizer.target, node_feats, sources, targets, node_labels)\n\n print('iteration: %d, loss: %.4f, accuracy: %.2f'\n % (iteration+1, loss, accuracy * 100))", "def geocube():", "def test_with_bunch(filename) :\n\n\tif not os.path.exists(filename) :\n\t\tprint('File not exists: ' + filename)\n\t\tsys.exit(-1)\n\n\n\t# Read CSV file\n\tprint('Load CSV file')\n\n\tcsv.field_size_limit(sys.maxsize) # Set CSV limit to sys.maxsize\n\tfiledata = []\n\twith open(filename) as csvfile :\n\t\treader = csv.reader(csvfile, delimiter=',')\n\t\tfor row in reader :\n\t\t\tfiledata.append(row)\n\n\n\tdetector = shaman.Shaman.default()\n\n\tcorrect = 0\n\ttotals = len(filedata)\n\n\tresults = {}\n\tprint('Start testing')\n\n\tfor index, (language, 
code) in enumerate(filedata) :\n\t\tprint ('Testing %s/%s ' % (index, len(filedata)), end=\"\\r\")\n\n\t\tif language not in shaman.SUPPORTING_LANGUAGES:\n\t\t\ttotals -= 1\n\t\t\tcontinue\n\n\t\ttry :\n\t\t\tglang = detector.detect( code )[0][0]\n\t\texcept IndexError :\n\t\t\tglang = None\n\n\t\tif language not in results :\n\t\t\tresults[ language ] = [0, 0, 0]\n\n\t\tif glang == language :\n\t\t\tcorrect += 1\n\t\t\tresults[ language ][0] += 1\n\n\t\t\n\t\tresults[ language ][1] += 1\n\t\tresults[ language ][2] = results[ language ][0] / results[ language ][1]\n\n\n\t\n\tprint(\"------------------------------------------------\")\n\tprint(\"Accuracy: %.2lf%% (Correct: %d / Valid Data: %d)\" % (correct/totals*100, correct, totals))\n\tprint(\"------------------------------------------------\")\n\t\n\tresults = sorted(results.items(), key=lambda x: x[1][0], reverse=True)\n\tfor lang, l in results :\n\t\tprint(\"%s: %.2lf%% (%s/%s)\" % (lang, l[2] * 100, l[0], l[1]))", "def Analyze(self, data):\n self._AddResult()", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n 
accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def main():\n # Return needed Data Frames to analyze\n data_frame, seasons, col, labels, stats, kaggle = load_frames()\n\n # Create the maps now\n create_shot_maps(data_frame,seasons)\n create_scenario_map()\n \n # Create the Plots\n plot_season_graphs(stats)\n plot_pie_charts(kaggle)\n plot_shot_timings(kaggle)\n plot_radar(stats, col, labels)", "def sub_saharan_africa_countries():\r\n sub_saharan_africa_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in sub_saharan_africa:\r\n sub_saharan_africa_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in sub_saharan_africa_data:\r\n if idx != None:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table", "def main():\n analyze_perturbations()", "def test_compute_jaccard(self):\n pass", "def latin_america_countries():\r\n latin_america_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in latin_america:\r\n latin_america_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in latin_america_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def cluster(approach, datapath):\n report_path = test(datapath, approach, params[approach])\n c.echo('Report compiled at {0}.'.format(report_path))", "def main():\n # create a Spark session\n spark = create_spark_session()\n\n # set input & output data locations\n input_data = \"data/\"\n output_data = \"results/\"\n\n # Gather/read the datasets\n df_visits = spark.read.parquet(\"data/immigration_data\")\n df_demo = spark.read.csv(\"data/us-cities-demographics.csv\", sep=\";\", header=True)\n df_airports = spark.read.csv(\"data/airport-codes_csv.csv\", header=True)\n df_airport_codes = get_airport_codes(spark)\n df_countries = get_countries(spark)\n df_states = get_states(spark)\n df_visa = get_visa(spark)\n\n # clean the datasets\n df_airports_clean = clean_airport_codes(spark,df_airports)\n df_demo_clean= clean_demographics(spark,df_demo)\n df_visits_clean = clean_immigration_data(spark, df_visits, df_airport_codes, df_countries, df_states, df_visa)\n\n # load the fact and dimensions in parquet files\n load_dimensions(output_data, df_countries, df_states, df_visa, df_demo_clean, df_airports_clean)\n load_fact(spark,output_data, df_visits_clean)\n\n # run validation checks\n validate_dimensions(spark,['dim_visa','dim_state','dim_country','dim_us_demo','dim_airports'],output_data)\n validate_fact(spark,'fact_visits',output_data)", "def main():\n test_cases = ast.literal_eval(sys.argv[1])\n 
results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'", "def test_northamerica_population(self):\n\n self.driver.get(self.url_ +\n '/ranking/Count_Person/Country/northamerica')\n\n subtitle_present = EC.text_to_be_present_in_element(\n (By.TAG_NAME, 'h3'), 'All Countries in North America')\n WebDriverWait(self.driver, self.TIMEOUT_SEC).until(subtitle_present)\n\n self.assertEqual(\n self.driver.find_element_by_tag_name('h1').text,\n 'Ranking by Population')\n\n table = self.driver.find_element_by_xpath(\n '//*[@id=\"main-pane\"]/div/table')\n headers = table.find_elements_by_xpath('.//thead/tr/th')\n self.assertEqual(headers[0].text, 'Rank')\n self.assertEqual(headers[1].text, 'Country')\n self.assertEqual(headers[2].text, 'Value')\n row = table.find_elements_by_xpath('.//tbody/tr[1]/td')\n self.assertEqual(row[0].text, '1')\n self.assertEqual(row[1].text, 'United States of America')\n\n chart = self.driver.find_element_by_id('ranking-chart')\n y_text = chart.find_elements_by_class_name(\n 'y')[0].find_elements_by_tag_name('text')\n self.assertEqual(y_text[0].text, '0')\n self.assertEqual(y_text[-1].text, '300M')\n\n x_text = chart.find_elements_by_class_name(\n 'x')[0].find_elements_by_tag_name('text')\n self.assertEqual(x_text[0].text, 'United States of America')\n self.assertEqual(x_text[-1].text, 'Montserrat')", "def algorithm(df, params):\n\n output = {}\n\n # PUT YOUR OWN IMPLEMENTATION HERE\n # STORE YOUR ANALYSIS OUTPUT IN OUTPUT\n\n return output", "def main():\n data = pd.read_csv('countries.csv')\n # import_data_pandas(data)\n # continent_data(data)\n # continent_data_le(data)\n continent_data_gdp_growth(data)", "def full_analysis(self):\n print('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n #print('Basic Statistics') # Remove this and run 'basic_stats'\n results.append('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n print('Basic Information\\n' +\n '----------------------------')\n results.append('Basic Information\\n' +\n '----------------------------')\n self.info_density()\n self.calc_total_rows()\n self.show_empty()\n self.calc_null()\n self.calc_col_len()\n self.calc_row_len()\n self.calc_col_info()\n self.regex_info()", "def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = 
v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df", "def main():\n\n file_name_base = \"./lab-record/result/fairness/\"\n scenarios = ['lan', 'wan1', 'wan2']\n scenario = scenarios[2]\n\n algorithms = [\"bbr\", \"scalable\", \"bic\", \"highspeed\", \"htcp\", \"hybla\",\n \"illinois\", \"vegas\", \"yeah\"]\n names = [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\", \"YeAH\"]\n\n test_types = [\"vs_reno\", \"vs_cubic\", \"vs_itself\"]\n\n fsize = 36\n \n index_reno = []\n index_cubic = []\n index_itself = []\n\n data = []\n \n print 'Loadint statistics for ' + file_name_base + '/' + scenario\n\n for algorithm in algorithms:\n for test in test_types:\n path_base = file_name_base + \"/\" + scenario + \"/\" + test + \"/\" + \\\n algorithm + \"/\"\n if test == \"vs_itself\":\n exp_name = names[algorithms.index(algorithm)] + \"_1\"\n con_name = names[algorithms.index(algorithm)] + \"_2\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \"_1.log\"\n con_filename = \"/\" + algorithm + \"_2.log\"\n process(path_base, exp_filename, con_filename, index_itself)\n if test == \"vs_reno\":\n exp_name = names[algorithms.index(algorithm)]\n con_name = \"Reno\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/reno.log\"\n process(path_base, exp_filename, con_filename, index_reno)\n if test == \"vs_cubic\":\n con_name = \"CUBIC\"\n exp_name = names[algorithms.index(algorithm)]\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/cubic.log\"\n process(path_base, exp_filename, con_filename, index_cubic)\n\n size = 9\n x = numpy.arange(size)\n\n total_width, n = 1.2, 2.5\n width = 1.0 / n\n x = x - (total_width - width) / 2\n\n for i in range(0, len(x)):\n x[i] += 0.5 * i\n\n # Exp\n fig = plt.figure()\n\n # Con\n con_reno = plt.bar(x + 0 * width - 1.2,\n index_reno,\n width=width,\n label='Against Reno',\n alpha=0.5,\n color=\"darkorange\")\n\n con_cubic = plt.bar(x + 1 * width - 1.2,\n index_cubic,\n width=width,\n label='Against CUBIC',\n alpha=0.5,\n color=\"lawngreen\")\n\n con_itself = plt.bar(x + 2 * width - 1.2,\n index_itself,\n width=width,\n label='Against Another Same CCA',\n alpha=0.5,\n color=\"dodgerblue\")\n\n # Index\n plt.xticks(x + 1.5 * width - 1.2, [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\",\n \"YeAH\"],\n fontsize=fsize,\n rotation=\"45\")\n plt.ylabel(\"Jain`s Fairness Index\", fontsize=fsize)\n plt.yticks(fontsize=fsize)\n plt.ylim(0.5, 1.1)\n\n ax = plt.subplot(111)\n ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0., fontsize=fsize)\n\n plt.subplots_adjust(left=0.07, right=0.98, top=0.9, bottom=0.2)\n\n 
plt.show()", "def compute_angams(self, compute_lagnams=True):\n\n # INITIALISE VARIABLES\n self.jd_sunrise = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.jd_sunset = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.jd_moonrise = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.jd_moonset = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.solar_month = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.solar_month_day = [None] * jyotisha.panchangam.temporal.MAX_SZ\n\n solar_month_sunrise = [None] * jyotisha.panchangam.temporal.MAX_SZ\n\n self.lunar_month = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.month_data = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.tithi_data = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.tithi_sunrise = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.nakshatram_data = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.nakshatram_sunrise = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.yogam_data = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.yogam_sunrise = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.karanam_data = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.rashi_data = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.lagna_data = [None] * jyotisha.panchangam.temporal.MAX_SZ\n\n self.weekday = [None] * jyotisha.panchangam.temporal.MAX_SZ\n self.kaalas = [dict() for _x in range(jyotisha.panchangam.temporal.MAX_SZ)]\n daily_panchaangas = [None] * jyotisha.panchangam.temporal.MAX_SZ\n\n self.fest_days = {}\n self.festivals = [[] for _x in range(jyotisha.panchangam.temporal.MAX_SZ)]\n\n # Computing solar month details for Dec 31\n # rather than Jan 1, since we have an always increment\n # solar_month_day at the start of the loop across every day in\n # year\n daily_panchangam_start = daily.Panchangam(city=self.city, julian_day=self.jd_start - 1,\n ayanamsha_id=self.ayanamsha_id)\n daily_panchangam_start.compute_solar_day()\n self.solar_month[1] = daily_panchangam_start.solar_month\n solar_month_day = daily_panchangam_start.solar_month_day\n\n if self.solar_month[1] != 9:\n logging.error(self.solar_month[1])\n raise (ValueError('Dec 31 does not appear to be Dhanurmasa!'))\n\n month_start_after_sunset = False\n\n #############################################################\n # Compute all parameters -- sun/moon latitude/longitude etc #\n #############################################################\n\n for d in range(jyotisha.panchangam.temporal.MAX_SZ):\n self.weekday[d] = (self.weekday_start + d - 1) % 7\n\n for d in range(-1, jyotisha.panchangam.temporal.MAX_DAYS_PER_YEAR + 2):\n [y, m, dt, t] = swe.revjul(self.jd_start + d - 1)\n\n # checking @ 6am local - can we do any better?\n local_time = tz(self.city.timezone).localize(datetime(y, m, dt, 6, 0, 0))\n # compute offset from UTC in hours\n tz_off = (datetime.utcoffset(local_time).days * 86400 +\n datetime.utcoffset(local_time).seconds) / 3600.0\n\n # What is the jd at 00:00 local time today?\n jd = self.jd_start - (tz_off / 24.0) + d - 1\n\n # TODO: Eventually, we are shifting to an array of daily panchangas. 
Reason: Better modularity.\n # The below block is temporary code to make the transition seamless.\n daily_panchaangas[d + 1] = daily.Panchangam(city=self.city, julian_day=jd + 1, ayanamsha_id=self.ayanamsha_id)\n daily_panchaangas[d + 1].compute_sun_moon_transitions()\n daily_panchaangas[d + 1].compute_solar_month()\n self.jd_sunrise[d + 1] = daily_panchaangas[d + 1].jd_sunrise\n self.jd_sunset[d + 1] = daily_panchaangas[d + 1].jd_sunset\n self.jd_moonrise[d + 1] = daily_panchaangas[d + 1].jd_moonrise\n self.jd_moonset[d + 1] = daily_panchaangas[d + 1].jd_moonset\n self.solar_month[d + 1] = daily_panchaangas[d + 1].solar_month_sunset\n\n solar_month_sunrise[d + 1] = daily_panchaangas[d + 1].solar_month_sunrise\n\n if (d <= 0):\n continue\n # This is just to initialise, since for a lot of calculations,\n # we require comparing with tomorrow's data. This computes the\n # data for day 0, -1.\n\n # Solar month calculations\n if month_start_after_sunset is True:\n solar_month_day = 0\n month_start_after_sunset = False\n\n solar_month_end_jd = None\n if self.solar_month[d] != self.solar_month[d + 1]:\n solar_month_day = solar_month_day + 1\n if self.solar_month[d] != solar_month_sunrise[d + 1]:\n month_start_after_sunset = True\n [_m, solar_month_end_jd] = jyotisha.panchangam.temporal.get_angam_data(\n self.jd_sunrise[d], self.jd_sunrise[d + 1], jyotisha.panchangam.temporal.SOLAR_MONTH,\n ayanamsha_id=self.ayanamsha_id)[0]\n elif solar_month_sunrise[d] != self.solar_month[d]:\n # sankrAnti!\n # sun moves into next rAshi before sunset\n solar_month_day = 1\n [_m, solar_month_end_jd] = jyotisha.panchangam.temporal.get_angam_data(\n self.jd_sunrise[d], self.jd_sunrise[d + 1], jyotisha.panchangam.temporal.SOLAR_MONTH,\n ayanamsha_id=self.ayanamsha_id)[0]\n else:\n solar_month_day = solar_month_day + 1\n solar_month_end_jd = None\n\n # if self.solar_month[d-1] != self.solar_month[d]:\n # # We have a sUrya sankrAnti between yest. 
and today's sunsets\n # solar_month_day = 1\n # if solar_month_sunrise[d] == self.solar_month[d]:\n # #the sankrAnti happened before today's sunrise\n # #so search for the end time between yesterday and\n # #today's sunrises\n # [_m, solar_month_end_jd] = helper_functions.get_angam_data(self.jd_sunrise[d-1],\n # self.jd_sunrise[d],SOLAR_MONTH)[0]\n # else:\n # #the sankrAnti happens after today's sunrise\n # #so search for the end time between today and\n # #tomorrow's sunrises\n # [_m, solar_month_end_jd] = helper_functions.get_angam_data(self.jd_sunrise[d],\n # self.jd_sunrise[d + 1],SOLAR_MONTH)[0]\n # #print ('-----',revjul(jd = solar_month_end_jd, tz_off = tz_off))\n # else:\n # solar_month_day += 1\n # solar_month_end_jd = None\n\n if solar_month_end_jd is None:\n solar_month_end_time = ''\n else:\n solar_month_end_time = '\\\\mbox{%s{\\\\tiny\\\\RIGHTarrow}\\\\textsf{%s}}' % (\n jyotisha.panchangam.temporal.NAMES['RASHI_NAMES'][self.script][_m], jyotisha.panchangam.temporal.Time(\n 24 * (solar_month_end_jd - jd)).toString(format=self.fmt))\n\n # logging.debug(jyotisha.panchangam.temporal.NAMES)\n\n self.month_data[d] = '\\\\sunmonth{%s}{%d}{%s}' % (\n jyotisha.panchangam.temporal.NAMES['RASHI_NAMES'][self.script][self.solar_month[d]],\n solar_month_day, solar_month_end_time)\n self.solar_month_day[d] = solar_month_day\n\n # KARADAYAN NOMBU -- easy to check here\n if solar_month_end_jd is not None: # month ends today\n if (self.solar_month[d] == 12 and solar_month_day == 1) or \\\n (self.solar_month[d] == 11 and solar_month_day != 1):\n self.fest_days['ta:kAraDaiyAn2 nOn2bu'] = [d]\n\n # Compute the various kaalas\n # Sunrise/sunset and related stuff (like rahu, yama)\n YAMAGANDA_OCTETS = [4, 3, 2, 1, 0, 6, 5]\n RAHUKALA_OCTETS = [7, 1, 6, 4, 5, 3, 2]\n GULIKAKALA_OCTETS = [6, 5, 4, 3, 2, 1, 0]\n\n self.kaalas[d] = {\n 'prAtaH sandhyA': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunset[d - 1], self.jd_sunrise[d], 14, 15),\n 'prAtaH sandhyA end': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 4, 15),\n 'prAtah': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 0, 5),\n 'saGgava': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 1, 5),\n 'madhyAhna': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 2, 5),\n 'mAdhyAhnika sandhyA': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 5, 15),\n 'mAdhyAhnika sandhyA end': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 13, 15),\n 'aparAhna': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 3, 5),\n 'sAyAhna': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 4, 5),\n 'sAyaM sandhyA': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d], 14, 15),\n 'sAyaM sandhyA end': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunset[d], self.jd_sunrise[d + 1], 1, 15),\n 'rAtri yAma 1': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunset[d], self.jd_sunrise[d + 1], 1, 4),\n 'zayana': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunset[d], self.jd_sunrise[d + 1], 3, 8),\n 'dinAnta': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunset[d], self.jd_sunrise[d + 1], 18.25, 30),\n 'rahu': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d],\n RAHUKALA_OCTETS[self.weekday[d]], 8),\n 'yama': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], 
self.jd_sunset[d],\n YAMAGANDA_OCTETS[self.weekday[d]], 8),\n 'gulika': jyotisha.panchangam.temporal.get_kaalas(self.jd_sunrise[d], self.jd_sunset[d],\n GULIKAKALA_OCTETS[self.weekday[d]], 8)\n }\n\n # Compute all the anga datas\n self.tithi_data[d] = jyotisha.panchangam.temporal.get_angam_data(self.jd_sunrise[d], self.jd_sunrise[d + 1],\n jyotisha.panchangam.temporal.TITHI,\n ayanamsha_id=self.ayanamsha_id)\n self.tithi_sunrise[d] = self.tithi_data[d][0][0]\n self.nakshatram_data[d] = jyotisha.panchangam.temporal.get_angam_data(self.jd_sunrise[d],\n self.jd_sunrise[d + 1],\n jyotisha.panchangam.temporal.NAKSHATRAM,\n ayanamsha_id=self.ayanamsha_id)\n self.nakshatram_sunrise[d] = self.nakshatram_data[d][0][0]\n self.yogam_data[d] = jyotisha.panchangam.temporal.get_angam_data(self.jd_sunrise[d], self.jd_sunrise[d + 1],\n jyotisha.panchangam.temporal.YOGAM,\n ayanamsha_id=self.ayanamsha_id)\n self.yogam_sunrise[d] = self.yogam_data[d][0][0]\n self.karanam_data[d] = jyotisha.panchangam.temporal.get_angam_data(self.jd_sunrise[d],\n self.jd_sunrise[d + 1],\n jyotisha.panchangam.temporal.KARANAM,\n ayanamsha_id=self.ayanamsha_id)\n self.rashi_data[d] = jyotisha.panchangam.temporal.get_angam_data(self.jd_sunrise[d], self.jd_sunrise[d + 1],\n jyotisha.panchangam.temporal.RASHI,\n ayanamsha_id=self.ayanamsha_id)\n if compute_lagnams:\n self.lagna_data[d] = get_lagna_data(self.jd_sunrise[d], self.city.latitude,\n self.city.longitude, tz_off, ayanamsha_id=self.ayanamsha_id)", "def main(options, (gender, races)):\n results = aggregate_races(gender, races)\n results.score()\n if options.exclude_scoreless:\n results.purge_scoreless_runners()\n if options.show_races:\n for row in RaceDumper(results.races):\n print row\n print\n for row in SeasonsBestDumper(results.results, options.distance):\n print row\n else:\n for row in ResultsDumper(results.results, options.distance):\n print row\n print\n for row in ScoreDumper(results.scores):\n print row", "def main():\n global options\n parser = OptionParser(\n usage = '%prog [OPTIONS] RESULTPATH',\n version='%prog 0.99', #\n description='Calculate results on acl2018 datasets',\n epilog='Contact [email protected]'\n )\n parser.add_option('-l', '--logfile', dest='logfilename',\n help='write log to FILE', metavar='FILE')\n parser.add_option('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_option('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n parser.add_option('-g', '--glob',\n action='store', dest='glob', default='{RESULTPATH}/x*x/*/s*/*eval',\n help='change file globbing for accessing evaluation results (%default)')\n parser.add_option('-f', '--fold_filter',\n action='store', dest='fold_filter', default=None,\n help='only use folds matching (re.search) the specified regular expression on the fold name (e.g. 
\"^english\" for all folds starting with the string english) (Default \"%default\")')\n parser.add_option('-D', '--decoder_filter',\n action='store', dest='decoder_filter', default=\"greedy|beam4\",\n help='''used on decoding mode label; matches (re.search) with the specified regular expression (Default \"%default\")''')\n parser.add_option('-m', '--mode',\n action='store', dest='mode', default='ms',\n help='''compatibel characters can be combined\n s: individual seed results;\n S: only individual seed results;\n m: mean/sd values (on seeds and folds);\n M: mean/sd (on folds only);\n e: include ensembles;\n E: only ensembles;\n T: only test results;\n D: only dev results\n q: sort the results by accuracy\n L: evaluate on edit distance, not on Accuracy\n ''')\n\n (options, args) = parser.parse_args()\n if options.debug:\n print(\"options=\",options,file=sys.stderr)\n\n if len(args) < 1:\n print('# RESULTPATH needed')\n parser.print_help()\n exit(1)\n options.mode = set(options.mode)\n process(options=options,args=args)", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def print_data(year):\n locale.setlocale(locale.LC_ALL, '') # Use locale to pretty-print the combined distance run.\n\n # parse our data...\n runners = parsers.parse(year)\n if runners:\n\n print(\"\\n\\n{} Marathon Runners\".format(len(runners)))\n print(\"From {} different states\".format(len(set(runner.state for runner in runners))))\n print(\"Total distance combined ==> {:n}+ miles\".format(int(len(runners) * 26.2)))\n\n print(\"Mostly from (top-10 cities):\")\n cities = Counter(runner.city for runner in runners)\n for city, count in cities.most_common(10):\n print(\"- {} ({})\".format(city, count))\n\n # Average age.\n ages = [int(runner.age) for runner in runners if is_numeric(runner.age)]\n try:\n mode_age = mode(ages)\n except StatisticsError:\n mode_age = 'No unique'\n\n print(\"Average age: {} mean / {} median / {} mode\".format(\n int(mean(ages)), int(median(ages)), mode_age))\n\n # Count Female / Male participants.\n females = len([runner.sex for runner in runners if runner.sex == \"F\"])\n males = len([runner.sex for runner in runners if runner.sex == \"M\"])\n print(\"Females: {}!\\nMales: {}!\".format(females, males))\n\n # Calculate Average paces.\n paces = []\n for runner in runners:\n minutes, seconds = runner.pace.split(\":\")\n paces.append(int(seconds) + (int(minutes) * 60))\n\n mean_pace = int(mean(paces))\n mean_pace_minutes, mean_pace_seconds = divmod(mean_pace, 60)\n median_pace = int(median(paces))\n median_pace_minutes, median_pace_seconds = divmod(median_pace, 60)\n\n try:\n mode_pace = mode(paces)\n mode_pace_minutes, mode_pace_seconds = divmod(mode_pace, 60)\n mode_pace = \"{}:{}\".format(mode_pace_minutes, mode_pace_seconds)\n except StatisticsError:\n mode_pace = 'No unique'\n\n print(\"Average Pace: {}:{} mean / {}:{} median / {} mode\".format(\n mean_pace_minutes, mean_pace_seconds,\n median_pace_minutes, median_pace_seconds,\n mode_pace\n ))\n\n # Average finish times.\n times = []\n for runner in runners:\n hours, minutes, seconds = runner.time.split(\":\")\n times.append(int(seconds) + (int(minutes) * 60) + (int(hours) * 3600))\n\n mean_time = int(mean(times))\n minutes, seconds = 
divmod(mean_time, 60)\n hours, minutes = divmod(minutes, 60)\n mean_time = \"{}:{}:{}\".format(hours, minutes, seconds)\n\n median_time = int(median(times))\n minutes, seconds = divmod(median_time, 60)\n hours, minutes = divmod(minutes, 60)\n median_time = \"{}:{}:{}\".format(hours, minutes, seconds)\n\n try:\n mode_time = mode(times)\n minutes, seconds = divmod(mode_time, 60)\n hours, minutes = divmod(minutes, 60)\n mode_time = \"{}:{}:{}\".format(hours, minutes, seconds)\n except StatisticsError:\n mode_time = 'No unique'\n print(\"Average Finish Time: {} mean / {} median / {} mode.\".format(\n mean_time, median_time, mode_time))\n else:\n print(\"Sorry, either no data or parser for {}.\".format(year))", "def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)", "def main():\n parser = argparse.ArgumentParser(description=\"Analyze requirement coverage\")\n parser.add_argument(\n \"project_info_path\",\n help=\"JSON file containing project information\",\n type=Path,\n )\n parser.add_argument(\n \"test_result_path\",\n help=\"XML file containing test result\",\n type=Path,\n )\n parser.add_argument(\n \"requirements_path\",\n help=\"CSV file containing requirements\",\n type=Path,\n )\n\n args = parser.parse_args()\n\n ok = analyze(args.project_info_path, args.test_result_path, args.requirements_path)\n if not ok:\n exit(1)\n else:\n exit(0)", "def _cluster_matching(self, diarization_time=None, interactive=False,\n quiet=False, thrd_n=1, start_t=0):\n basename = self.get_file_basename()\n self._extract_clusters()\n self._match_clusters(interactive, quiet)\n# if not interactive:\n# #merging\n# self.automerge_clusters()\n self._status = 4\n sec = fm.wave_duration(basename + '.wav')\n total_time = time.time() - start_t\n self._set_time(total_time)\n if not quiet:\n print self.get_working_status()\n if interactive:\n print \"Updating db\"\n self.update_db(thrd_n, automerge=True)\n if not interactive:\n if not quiet:\n for clu in self._clusters:\n #print \"**********************************\"\n #print clu\n #for speaker in self[clu].speakers:\n # print \"\\t %s %s\" % (speaker, self[clu].speakers[speaker])\n #print '\\t ------------------------'\n distance = self[clu].get_distance()\n try:\n mean = self[clu].get_mean()\n m_distance = self[clu].get_m_distance()\n except (KeyError, ValueError):\n mean = 0\n m_distance = 0\n #print \"\"\"\\t best speaker: %s (distance from 2nd %f - mean %f - distance from mean %f ) \"\"\" % (self[clu],\n # distance, mean, m_distance)\n speakers_in_db = self.get_db().get_speakers()\n tot_voices = len(speakers_in_db['F']) + \\\n len(speakers_in_db['M']) + len(speakers_in_db['U'])\n\n #if diarization_time != None:\n # voice_time = float(total_time - diarization_time)\n # t_f_s = voice_time / len(speakers_in_db)\n #print \"\"\"\\nwav duration: %s\\nall done in %dsec (%s) (diarization %dsec time:%s ) with %s threads and %d voices in db (%f) \"\"\" % (utils.humanize_time(sec),\n #total_time,\n #utils.humanize_time(total_time),\n #diarization_time,\n #utils.humanize_time(diarization_time),\n #thrd_n,\n #tot_voices,\n #t_f_s)", "def search(self, passions, number_of_results):\n\n # Sanitize spaces with -\n passions = [p.lower().replace(' ', '_') for p in passions]\n cities_with_scores = {}\n\n for 
d in self.city_passion_matrix:\n # Initialize score of cities for the passions\n cities_with_scores[d] = { 'score': 0.0, 'total': self.total_endorsement_per_city[d]};\n # Score of d for given passions is the multiplication of individual values\n for p in passions:\n # If passion does not exist then endorsement_count is 1 to avoid zeroth bias,\n # otherwise stored value\n if p in self.city_passion_matrix[d]:\n endorsement_count = self.city_passion_matrix[d][p]\n else:\n endorsement_count = 1\n cities_with_scores[d]['score'] += math.log((endorsement_count / (self.total_endorsement_per_city[d] + 1.0)) )\n\n # Sort the cities in descending order of relavency score; use total endorsement count to break tie\n sorted_cities_with_scores = sorted(cities_with_scores.items(), key= lambda x: (x[1]['score'], x[1]['total']), reverse=True)\n\n return sorted_cities_with_scores[:number_of_results]", "def alltests(opts):\n \n print \"API Root: %s\" % options.apiroot\n print \"Token: %s\" % options.token\n print \"Output dir: %s\" % options.output\n print \"Running %d%% of tests\" % options.percent\n print\n \n # need to use DEPT-001, not ID#\n coursehistory_tests = [\n # basic tests:\n \"cis-120\", \"math-114\", \"engl-101\", \"econ-001\",\n # miscellaneously somewhat problematic:\n \"engl-016\", \"law-205\", \"hpr-612\", \"rels-414\", \"nurs-322\",\n \"writ-030\", \"be-310\", \"psci-010\", \"psci-136\",\n # crosslistings:\n \"engl-135\", \"writ-135\", \"fnar-264\", \"cogs-001\", \"russ-048\", \"hist-048\",\n # no reviews?:\n \"afam-271\", \"ames-071\", \"slav-532\", \"afam-285\", \"prtg-213\", \"slav-533\",\n # errors:\n \"99999\", \"moo\",\n ]\n\n instructor_tests = [\n # basic tests:\n \"403\", \"631\", \"1883\", \"2217-FERNANDO-C--PEREIRA\", \"1602-BENJAMIN-PIERCE\",\n # crosslistings:\n \"1034-LYLE-H-UNGAR\", \"2709-DAVID-P--COMBERG\",\n # miscellaneously somewhat problematic:\n \"1040-DAVID-FOX\", \"4268-BART-GERARD-C-DE-JONGHE\",\n # the instructors w/ the most sections\n \"1883\", \"1619\", \"2869\", \"942\", \"1644\", \"541\", \"767\", \"434\",\n # concerned citizens:\n \"1759-MAX-C--CAVITCH\", \"2824-TIMOTHY-CORRIGAN\",\n \"1763-EMILY-R-STEINER\", \"1624-VALERIE-ROSS\",\n # no reviews?:\n \"416-LUDO-ROCHER\", \"715-ELIZABETH-ANN-POLLARD\", \"1094-MARIA-A-COWLES\",\n \"1500-ANDREW-GALLIA\", \"1888-RUSSELL-DILEO\",\n \"1450-SORMANE-PEREIRA-GOMES\", \"2188-HUI-YI-CHEN\", \"1165-DOMENIC-VITIELLO\",\n \"2359-CLAUDIA-CANCINO\", \"2737-SHEN-WANG\", \"3229-BERLE-WHITBY\",\n # errors:\n \"99999\", \"moo\",\n ]\n\n dept_tests = [\n #fast\n \"CSE\", \"LAW\", \"ANAT\", \"KORN\", \"LATN\", \"COGS\", \"MSCI\", \"GAS\",\n #medium\n \"CIS\", \"MATH\", \"FNAR\", \"ACCT\", \"FNCE\", \"BE\", \"MUSC\", \"OPIM\",\n #slow\n #\"SPAN\", \"NURS\", \"ENGL\",\n #error\n \"EROR\"]\n\n index_tests = [\"\", \"instructors\", \"coursehistories\", \"depts\",\n \"semesters\", \"semesters/2010c\"]\n\n course_tests = [] # filled in by coursehistory_tests\n\n for t in fraclist(index_tests, options.percent):\n test(opts, t)\n \n for t in fraclist(coursehistory_tests, options.percent):\n obj = test(opts, \"coursehistories/%s\" % t)\n test(opts, \"coursehistories/%s/reviews\" % t)\n\n # now \"find\" some courses\n course_tests.append(\"2010c-%s\" % t)\n try:\n courseid = sorted(obj[\"result\"][\"courses\"])[0][\"id\"]\n course_tests.append(courseid)\n except (TypeError, KeyError, IndexError):\n pass\n \n for t in course_tests: # don't fraclist an autogenerated list\n # Some of the autogenerated courses don't exist, so ignore errors.\n 
root_success = test(opts, \"courses/%s\" % t, lderror_ok=True)\n if root_success:\n # Course exists, don't expect errors.\n test(opts, \"courses/%s/reviews\" % t)\n test(opts, \"courses/%s/coursehistories/\" % t)\n test(opts, \"courses/%s/sections\" % t)\n \n if test(opts, \"courses/%s/sections/001\" % t, lderror_ok=True):\n test(opts, \"courses/%s/sections/001/reviews\" % t) \n if '-' in str(t): # if we have a yyyys-dept-num test\n test(opts, \"sections/%s-001\" % t)\n # not tested: sections/001/reviews/instructor-id\n test(opts, \"courses/%s/sections/401\" % t, lderror_ok=True)\n \n for t in fraclist(instructor_tests, options.percent):\n test(opts, \"instructors/%s\" % t)\n test(opts, \"instructors/%s/sections\" % t)\n test(opts, \"instructors/%s/reviews\" % t)\n \n for t in fraclist(dept_tests, options.percent):\n test(opts, \"depts/%s\" % t)\n test(opts, \"depts/%s/reviews\" % t)\n test(opts, \"semesters/2010c/%s\" % t)", "def analyze_data():\n attack_free_1 = load_messages(\"data/csv/Attack_free_dataset.csv\", verbose=True)\n\n impersonation_1 = load_messages(\"data/csv/170907_impersonation.csv\", verbose=True)\n impersonation_2 = load_messages(\"data/csv/170907_impersonation_2.csv\", verbose=True)\n impersonation_3 = load_messages(\"data/csv/Impersonation_attack_dataset.csv\", verbose=True)\n\n information = {\n \"Mean time between normal messages\":\n get_mean_time_between_normal_messages(attack_free_1),\n \"Mean time between split messages\":\n get_mean_time_between_split_messages(attack_free_1),\n \"Sum of removed intervals in '170907_impersonation.csv'\":\n get_sum_of_removed_intervals(impersonation_1, 250),\n \"Sum of removed intervals in '170907_impersonation_2.csv'\":\n get_sum_of_removed_intervals(impersonation_2, 250),\n \"Sum of removed intervals in 'Impersonation_attack_dataset.csv'\":\n get_sum_of_removed_intervals(impersonation_3, 250),\n \"Index of split in '170907_impersonation.csv'\":\n get_index_before_time(impersonation_1, 250 - 23.434627056121826),\n \"Index of split in '170907_impersonation_2.csv'\":\n get_index_before_time(impersonation_2, 250 - 20.980855226516724),\n \"Index of split in 'Impersonation_attack_dataset.csv'\":\n get_index_before_time(impersonation_3, 250 - 2.1056361198425293)\n }\n\n return information", "def main(cls, args):\n #cls.trainOfflineAndTest(100, 0.1, 0.1, 0.9);\n #cls.trainOfflineAndTest(500, 0.1, 0.1, 1.0);\n\n cls.trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10, 0.8, 1.0 ,1.0, 0.0, 0.3, True, True,True);\n cls.trainer.teachActiveAndSaveStatistics(\"path\", 10, 0.0, 0.0, 0.0, 0.0, 0.0, True, False, False)\n\n #trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,true, true, true);\n # \t\ttrainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,\n # \t\t\t\tfalse, true, true);\n # \t\t\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, true);\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10000, true);\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, False)\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10, False)\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1.net\", 10000, false);", "def main():\n directors = get_movies_by_director()\n directors = get_average_scores(directors)\n print_results(directors)", "def main():\n regexham = 
r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def main():\n # First grab data from db\n engine = create_engine(\"sqlite:///cities.db\")\n session = Session(bind=engine)\n # Grab all data from flat stats table\n query = session.query(FlatStat).all()\n\n X = transform_to_np_array(query)\n\n # Run dbscan now\n results = DBSCAN(eps=2, min_samples=10).fit(X)\n\n # TODO: get matplotlib in here, but for now, just print all properties of dbscan\n print(results.__dict__)", "def qa_test():\r\n # Reads Code and Runs Code Metrics\r\n with open(\"BrainDataVisualiser.py\",\"r\") as file:\r\n code = file.read()\r\n with open(\"QA_LOGS.txt\",\"a\") as file:\r\n # Timestamp and append metric results to log\r\n file.write(datetime.date.today().strftime(\"%b-%d-%Y\")+\"\\n\\t\")\r\n file.write(\"General Analysis\\n\\t\\t\")\r\n file.write(str(analyze(code))+\"\\n\\t\")\r\n file.write(\"Cyclomatic Complexity\\n\")\r\n for i in cc_visit(code):\r\n file.write(\"\\t\\t\"+cc_rank(i.complexity)+\" \"+str(i)+\"\\n\")", "def analyse ( self ) :\n odin = self.get( self.RootInTES + 'DAQ/ODIN' )\n \n ## Check for PVs\n PVs = self.get( self.RootInTES + self.InputPrimaryVertices )\n if not PVs or PVs.size() == 0:\n self.setFilterPassed( False )\n return SUCCESS\n\n ## get recontructed B+ mesons\n Bs = self.select ( 'B' , eval( self._cut % self._selection ) )\n \n if not Bs or Bs.size() == 0:\n self.setFilterPassed( False )\n return SUCCESS \n\n ## Select random candidate\n r = self.random( odin )\n n = Bs.size()\n for i in xrange( n ):\n if r <= ( float( i ) / float( n ) ): break\n B = Bs[ i ]\n \n tisTos = self.tisTosSignal( B, \"Hlt1Track(AllL0|Muon)Decision\" )\n if tisTos.tos():\n ## This has to be a clone, otherwise it doesn't work...\n self.markParticle( B.clone() )\n self.setFilterPassed( True )\n else:\n self.setFilterPassed( False )\n\n return SUCCESS", "def main(args):\n \n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 
1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. 
Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score", "def analyze(self, event):\n jets = self.inputCollection(event)\n \n predictionsPerCtauAndClass = {ctau: {className: [] for className in self.predictionLabels} for ctau in self.logctauValues}\n for ijet,jet in enumerate(jets):\n if not hasattr(jet,self.taggerName):\n print \"WARNING - jet \",jet,\" has no \",self.taggerName,\" result stored for \",self.outputName,\" -> skip\"\n continue\n predictions = getattr(jet,self.taggerName)\n for ctau in self.logctauValues:\n for label in self.predictionLabels:\n predictionsPerCtauAndClass[ctau][label].append(predictions[ctau][label])\n \n for ctau in self.logctauValues:\n for label in self.predictionLabels:\n predictionsPerCtauAndClass[ctau][label] = sorted(predictionsPerCtauAndClass[ctau][label],reverse=True)\n\n for m in self.multiplicities:\n if m<len(predictionsPerCtauAndClass[ctau][label]):\n self.out.fillBranch(self.outputName+\"_\"+getCtauLabel(ctau)+\"_\"+label+\"_min\"+str(m),predictionsPerCtauAndClass[ctau][label][m])\n else:\n self.out.fillBranch(self.outputName+\"_\"+getCtauLabel(ctau)+\"_\"+label+\"_min\"+str(m),0)\n \n \n return True", "def main():\n \n Y1, Y2 = 2005, 2017 ### range with coordinates supplied in pre-2018 generated archive\n\n if len(sys.argv) > 1 and int(sys.argv[1]) > 0:\n Y1 = int(sys.argv[1])\n \n if len(sys.argv) > 2 and int(sys.argv[2]) > Y1:\n Y2 = int(sys.argv[2])\n \n with open('data/audit.log','w') as output:\n for Y in range(Y1, Y2):\n df = pd.read_csv('data/{}.csv'.format(Y), low_memory = False)\n output.write('\\n--- {} --------------------\\n'.format(Y))\n\n # remove `deleted` records\n df['deleted'] = df['deleted'].apply(yes_no)\n df = df[df['deleted'] == 0]\n\n # remove misc misdemeanors\n df = df[~df['category'].isin(drop)]\n\n # validate date and expand into Y,N,D,W,H\n df['dt'] = df['incident_date'].apply(extract)\n df = df[~df['dt'].isnull()]\n\n # convert from plane state to longitude-latitude\n df['ll'] = df.apply(to_lnglat, axis = 1)\n\n # init features\n features = df.loc[:,['category','stat','address','city','zip']]\n features['id'] = df['incident_id']\n dt = ['year','month','day','weekday','hour']\n for i in range(len(dt)):\n features[dt[i]] = df['dt'].apply(lambda x: x[i] )\n\n features['lng'] = df['ll'].apply(lambda x: x[0])\n features['lat'] = df['ll'].apply(lambda x: x[1])\n\n features['gang'] = df['gang_related'].apply(yes_no)\n features['category'] = df['category'].apply(collapse)\n cat = set(features.groupby(['category']).size().reset_index(name='count')['category'].tolist())\n output.write('Categories: {}\\n'.format(len(cat)))\n\n output.write('Date miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['year'] > 2000) & (~features['weekday'].isnull())])/len(features))))\n output.write('Location miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['zip'] > 0) | (features['lat'] > 0)])/len(features))))\n\n # keep records with valid date\n features['date'] = df['dt'].apply(lambda x: datetime.date(x[0], x[1], x[2]))\n features = features[(features['year'] > 2000) & (~features['weekday'].isnull())]\n output.write('Time miss: {:.4f}%\\n'.format(100 * len(features[features['hour'] == -1])/len(features)))\n\n # potential `time-unknown` issue\n output.write('Hour ZERO: {:.4f}%\\n'.format(100 * 
len(features[features['hour'] == 0])/len(features)))\n output.write('Hour NOON: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 12])/len(features)))\n\n features = features[(features['zip'] > 0) | (features['lat'] > 0)]\n\n # get the best possible coordinates + zipcode assessment\n features[['zip','lng','lat']] = features[['zip','lng','lat']].apply(fix_location, axis = 1)\n output.write('Failed location: {:.4f}%\\n'.format(100 * len(features[features['zip'].isnull()])/len(features)))\n features = features[~features['zip'].isnull()]\n features['zip'] = df['zip'].apply(lambda x: str(x)[:5])\n \n # normalize city attr\n features = features.join(zipcodes[['zip','city']].set_index('zip'), on = 'zip', lsuffix = '_orig', rsuffix = '')\n features.loc[features['city'].isnull(), 'city'] = features.loc[features['city'].isnull(), 'city_orig']\\\n .apply(lambda x: x if type(x) == float else ' '.join([l[0].upper() + l[1:] for l in x.split()]))\n\n # reduce to LA bounding-box\n features = features[(features['lng'] > -119) & (features['lng'] < -116)]\n features = features[(features['lat'] > 32) & (features['lat'] < 35)]\n\n # save csv\n features[fields].to_csv('data/F{}.csv'.format(Y), index = False)\n features[fields].to_json('data/F{}.json'.format(Y), orient = 'records')\n output.close()", "def test_output(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.h_jac\"]:\n params = tuple(finput.values())\n\n self_1, eta_1 = deepcopy(params)\n\n self_2, eta_2 = deepcopy(params)\n\n H_1 = EKFSLAM.EKFSLAM.h_jac(self_1, eta_1)\n\n H_2 = solution.EKFSLAM.EKFSLAM.h_jac(self_2, eta_2)\n \n assert compare(H_1, H_2)\n \n assert compare(self_1, self_2)\n assert compare(eta_1, eta_2)", "def test(indices_to_visit = None):\n ##0 Chicago\n ##1 New York City\n ##2 Los Angeles\n ##3 Minneapolis\n ##4 Denver\n ##5 Dallas\n ##6 Seattle\n ##7 Boston\n ##8 San Francisco\n ##9 St. 
Louis\n ##10 Houston\n ##11 Phoenix\n ##12 Salt Lake City\n ##13 Miami\n ##14 Atlanta\n ##15 Kansas City\n home_index = 15 # Kansas city\n # 15x15 matrix with main diagonal consisting of 0s and to which data is mirrored along\n # (values are derived from external resource and multiplied by 1000 for higher accuracy)\n matrix = np.array([[0.0, 1148413.3550047704, 2813453.6297408855, 572861.4368351421, 1483440.7452179305, 1296355.2188721865, 2801269.1215845253, 1370943.3069385102, 2996683.256068982, 422589.4697157836, 1515737.0196676727, 2343639.7107855356, 2031500.319603397, 1913900.3015914203, 946854.1020487415, 665894.0336505901],\n [1148413.3550047704, 0.0, 3949451.153672887, 1642119.4792808082, 2628946.6435325537, 2212019.1209020815, 3882177.952930788, 306997.0343229422, 4144977.810718553, 1408454.3261387087, 2286054.8575902223, 3455343.3108375454, 3179102.5335818897, 1754834.3710577146, 1202616.154562711, 1766599.1336905772],\n [2813453.6297408855, 3949451.153672887, 0.0, 2455296.3791196346, 1339227.410707824, 1998182.1420783552, 1545364.434045008, 4184394.186016967, 559978.4273194656, 2560790.9591738936, 2212581.51715849, 575975.8749662543, 933602.6426595236, 3767490.41517038, 3120118.850020503, 2186473.1552241463],\n [572861.4368351421, 1642119.4792808082, 2455296.3791196346, 0.0, 1127312.7583590776, 1390159.7734006236, 2249169.1308160927, 1811513.5290266906, 2554165.8167895717, 750916.7305340832, 1701189.1538312144, 2062079.2399570548, 1590460.9488364782, 2434801.332310659, 1462408.5353501518, 662752.1291133759],\n [1483440.7452179305, 2628946.6435325537, 1339227.410707824, 1127312.7583590776, 0.0, 1067257.7993323756, 1646308.7967673023, 2852307.4164419994, 1530510.2790658756, 1283707.511393525, 1414308.8805983758, 943721.1931707633, 598728.757362067, 2779561.192116527, 1952618.0544916363, 899656.1020173575],\n [1296355.2188721865, 2212019.1209020815, 1998182.1420783552, 1390159.7734006236, 1067257.7993323756, 0.0, 2709804.112590561, 2500314.4507069485, 2390841.4329337194, 882457.80942383, 361482.7025425731, 1427995.4150203674, 1610768.421819668, 1788903.6065106322, 1161480.3557326929, 730446.8613086065],\n [2801269.1215845253, 3882177.952930788, 1545364.434045008, 2249169.1308160927, 1646308.7967673023, 2709804.112590561, 0.0, 4018059.834330202, 1093104.7332788548, 2778905.575804111, 3046648.362755992, 1794989.6453295103, 1129464.5539648102, 4404737.747850686, 3516794.375197078, 2427457.036285458],\n [1370943.3069385102, 306997.0343229422, 4184394.186016967, 1811513.5290266906, 2852307.4164419994, 2500314.4507069485, 4018059.834330202, 0.0, 4350710.853063807, 1673216.4080939887, 2586942.3262796295, 3706392.097841614, 3382851.415271485, 2022974.6418062754, 1509585.60107986, 2015770.1390589625],\n [2996683.256068982, 4144977.810718553, 559978.4273194656, 2554165.8167895717, 1530510.2790658756, 2390841.4329337194, 1093104.7332788548, 4350710.853063807, 0.0, 2812916.3098878833, 2650547.941880299, 1053620.7288649315, 967859.8344376946, 4179636.203479384, 3448359.745690545, 2428862.4239271535],\n [422589.4697157836, 1408454.3261387087, 2560790.9591738936, 750916.7305340832, 1283707.511393525, 882457.80942383, 2778905.575804111, 1673216.4080939887, 2812916.3098878833, 0.0, 1093601.4408876144, 2050115.5214378452, 1872971.1741522516, 1708236.6189296674, 752855.8488125347, 384122.2000072272],\n [1515737.0196676727, 2286054.8575902223, 2212581.51715849, 1701189.1538312144, 1414308.8805983758, 361482.7025425731, 3046648.362755992, 2586942.3262796295, 2650547.941880299, 1093601.4408876144, 0.0, 
1636770.4499809493, 1932616.2801687205, 1559260.024532222, 1130480.278513877, 1039856.4844335921],\n [2343639.7107855356, 3455343.3108375454, 575975.8749662543, 2062079.2399570548, 943721.1931707633, 1427995.4150203674, 1794989.6453295103, 3706392.097841614, 1053620.7288649315, 2050115.5214378452, 1636770.4499809493, 0.0, 812548.5062332726, 3191662.5092484164, 2564665.4531581327, 1690942.142157212],\n [2031500.319603397, 3179102.5335818897, 933602.6426595236, 1590460.9488364782, 598728.757362067, 1610768.421819668, 1129464.5539648102, 3382851.415271485, 967859.8344376946, 1872971.1741522516, 1932616.2801687205, 812548.5062332726, 0.0, 3364908.7076308434, 2551338.215149899, 1490589.7393085626],\n [1913900.3015914203, 1754834.3710577146, 3767490.41517038, 2434801.332310659, 2779561.192116527, 1788903.6065106322, 4404737.747850686, 2022974.6418062754, 4179636.203479384, 1708236.6189296674, 1559260.024532222, 3191662.5092484164, 3364908.7076308434, 0.0, 973244.7750437199, 2000112.4162614697],\n [946854.1020487415, 1202616.154562711, 3120118.850020503, 1462408.5353501518, 1952618.0544916363, 1161480.3557326929, 3516794.375197078, 1509585.60107986, 3448359.745690545, 752855.8488125347, 1130480.278513877, 2564665.4531581327, 2551338.215149899, 973244.7750437199, 0.0, 1089830.6426635552],\n [665894.0336505901, 1766599.1336905772, 2186473.1552241463, 662752.1291133759, 899656.1020173575, 730446.8613086065, 2427457.036285458, 2015770.1390589625, 2428862.4239271535, 384122.2000072272, 1039856.4844335921, 1690942.142157212, 1490589.7393085626, 2000112.4162614697, 1089830.6426635552, 0.0]])\n\n solver = FacilityOrderSolver(matrix, home_index)\n \n return solver.solve(indices_to_visit)", "def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")", "def build_all_analysis(self, matrix_handler, trajectory_handler):\n distance_matrix = matrix_handler.distance_matrix\n\n self.all_possible_analysis = {}\n\n # Pure queries\n self.all_possible_analysis[\"Details\"] = Analysis(\"Details\", self.analysis_function_details)\n self.all_possible_analysis[\"NumClusters\"] = Analysis(\"Number of clusters\", self.analysis_function_num_clusters)\n self.all_possible_analysis[\"NumClusteredElems\"] = Analysis(\"Number of clustered elements\", self.analysis_function_total_elements)\n self.all_possible_analysis[\"MeanClusterSize\"] = Analysis(\"Mean cluster size\", self.analysis_function_mean_cluster_size)\n self.all_possible_analysis[\"PercentInTop4\"] = Analysis(\"Percent in top 4 clusters\", self.analysis_function_top_4)\n self.all_possible_analysis[\"PercentInTop\"] = Analysis(\"Percent in top cluster\", self.analysis_function_top_percent)\n self.all_possible_analysis[\"ClustersTo90\"] = Analysis(\"Clusters to 90\", self.analysis_function_num_clusters_to_percent, 90)\n self.all_possible_analysis[\"NoiseLevel\"] = Analysis(\"Noise level\", self.analysis_function_noise_level, distance_matrix.row_length)\n\n # Evaluators\n self.all_possible_analysis[\"MirrorCohesion\"] = Analysis(\"MirrorCohesion\", self.evaluate_with_calculator,\n {\"class\":MirrorCohesionCalculator,\"matrix\":distance_matrix})\n\n self.all_possible_analysis[\"Cohesion\"] = Analysis(\"Cohesion\", self.evaluate_with_calculator,\n {\"class\":CohesionCalculator,\"matrix\":distance_matrix})\n\n self.all_possible_analysis[\"Separation\"] = Analysis(\"Separation\", self.evaluate_with_calculator,\n {\"class\":SeparationCalculator,\"matrix\":distance_matrix})\n 
self.all_possible_analysis[\"MinimumMeanSeparation\"] = Analysis(\"MinimumMeanSeparation\", self.evaluate_with_calculator,\n {\"class\":MeanMinimumDistanceCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Silhouette\"] = Analysis(\"Silhouette\", self.evaluate_with_calculator,\n {\"class\":SilhouetteCoefficientCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Calinski-Harabasz\"] = Analysis(\"Calinski-Harabasz\", self.evaluate_with_calculator,\n {\"class\":CalinskiHarabaszCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Dunn\"] = Analysis(\"Dunn\", self.evaluate_with_calculator,\n {\"class\":DunnCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Davies-Bouldin\"] = Analysis(\"Davies-Bouldin\", self.evaluate_with_calculator,\n {\"class\":DaviesBouldinCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"GaussianSeparation\"] = Analysis(\"GaussianSeparation\", self.evaluate_with_calculator,\n {\"class\":GaussianSeparationCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Compactness\"] = Analysis(\"Compactness\", self.evaluate_with_calculator,\n {\"class\":CompactnessCalculator,\"matrix\":distance_matrix})\n\n # Cython\n self.all_possible_analysis[\"CythonMirrorCohesion\"] = Analysis(\"CythonMirrorCohesion\", self.evaluate_with_calculator,\n {\"class\":CythonMirrorCohesionCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"CythonMinimumMeanSeparation\"] = Analysis(\"CythonMinimumMeanSeparation\", self.evaluate_with_calculator,\n {\"class\":CythonMeanMinimumDistanceCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"CythonSilhouette\"] = Analysis(\"CythonSilhouette\", self.evaluate_with_calculator,\n {\"class\":CythonSilhouetteCoefficientCalculator,\"matrix\":distance_matrix})\n\n # Graph\n self.all_possible_analysis[\"RatioCut\"] = Analysis(\"RatioCut\", self.evaluate_with_calculator,\n {\"class\":RatioCut,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"NCut\"] = Analysis(\"NCut\", self.evaluate_with_calculator,\n {\"class\":NCut,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"NormNCut\"] = Analysis(\"NormNCut\", self.analysis_function_norm_n_cut,distance_matrix)\n self.all_possible_analysis[\"MinMaxCut\"] = Analysis(\"MinMaxCut\", self.evaluate_with_calculator,\n {\"class\":MinMaxCut,\"matrix\":distance_matrix})\n\n # Cython & Graph\n self.all_possible_analysis[\"CythonNormNCut\"] = Analysis(\"CythonNormNCut\", self.analysis_function_cython_norm_n_cut,distance_matrix)\n\n # PCA\n self.all_possible_analysis[\"PCAanalysis\"] = Analysis(\"PCAanalysis\", self.analysis_function_pca, trajectory_handler)", "def test_scrape(self):\n simfiles = scrape_category.get_category_from_ziv(\"category_test\", self.CATEGORY_URL)\n compare_simfile_records(simfiles, EXPECTED_SIMFILES)", "def get_filters():\n print(\"Welcome to bikeshare data! 
\\n\")\n\n # Taksk0.1: Users choose a city, create a list with length=CITY_DATA , \n #in future, it other cities are added codes will updated automatically\n all_cities=list(CITY_DATA)\n for i in range(len(all_cities)):\n message = (\"For the city of {} ,The code is {}\\n\".format(all_cities[i],i+1))\n print(message)\n ##output of the for loop will code each city with a number, according to it's (position+1) in the dictionary \n ## position is from 0 to 2, city codes are from 1 to 3, no manual recoding for city names\n ## user chooses a city \"user input\" by entering 1,2,3\n \n print(\"***choose a city to start\\n***\")\n city_code=input()\n \n ## check if user choice is valid, if enter a number out of range code restarts until a correct city chosen\n ## decode the user input number into city name\n ## print a confirmation message to the user with his choice\n while int(city_code) > (len(all_cities)):\n print(\"Sorry, City choice invalid\")\n city_code = input (\"Please choose a number from 1 to 3\\n\")\n city_code=int(city_code)\n else:\n city= all_cities[int(city_code)-1] \n print(\"Nice! you will get statistics for**\",city.title())\n\n # Task0.2: get user input for month (all, january, february, ... , june)\n ##user enter month number from 1 to 6 representing january to June or all for all month\n print(\"Choose a month from January to June\\n\") \n month = input (\"Please type (all) to see all months \\nor select a number from 1 to 6 : January is 1 \\n---------------------------\\n\\n\")\n ## if user chooses all month, no filtering, if month choice greater than 6 print warning and loop, until a correct choice\n if month != \"all\":\n month=int(month)\n while month > 6:\n print('**Warning** OUT OF RANGE!only months from 1 to 6 available')\n month = input (\"Please choose a number from 1 to 6\\n\\n\")\n month=int(month)\n else:\n month==month\n print(\"\\n Month chosen is ***\" ,month, \"***\")\n else:\n print(\"No Month filter applied\")\n \n print(\"-\"*40)\n\n\n #task0.3: TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n ##create a list matching week days short names to their full name, write the list in title for \"first letter capital\"\n days_dict = {\"Su\": \"Sunday\",\n \"Mo\": \"Monday\",\n \"Tu\": \"Tuesday\",\n \"We\": \"Wednesday\", \n \"Th\": \"Thursday\",\n \"Fr\": \"Friday\",\n \"Sa\": \"Saturday\"}\n ## ask user to choose a day name by giving at least the first 2 letters of the day\n ## conver user input to title format NOTE!! dont trim now as user may choose \"all\" days\n ## if user choose all days, no warnings\n ## if user enters a day name, trim only the first 2 letters to avoid Typos, look up if the letters correspond to day name\n ## by consulting the dictionay, then return the day full name\n print(\"Choose a day? \") \n day= input (\"Please type (all) for no filtering \\nor choose a weeek day \\n \")\n day=day.title()\n \n if day != \"All\":\n day=(day[0:2]).title() \n while day not in days_dict:\n print('**Warning** not a valid day')\n day = input (\"please enter at least first 2 characters of the day \\n\\n\")\n day=(day[0:2]).title()\n else:\n day=days_dict[day]\n print(\"\\n Day chosen is ***\" ,day, \"***\")\n \n else:\n print(\"No day filter applied\") \n \n print('-'*40)\n ## end of first function to gather user choices\n ## return values for city name, month and day selction or all\n ## saved as 3 inputs city,month,day\n ##print a confirmation method to the user with his choices! 
city,month and day\n print(\"you will see data for ** {}** city, for month *{}* and day * {}*\".format(city,month,day))\n print(\"-\"*40)\n return city, month, day", "def analyze(self):\n self.__data = Ranking.ranks()\n self.__motion, self.__blur, self.__text, self.__audio = self.__data\n self.__rank_length = len(self.__motion)\n\n self.__ranks = [self.__motion[i] + self.__blur[i] +\n self.__text[i] + self.__audio[i] for i in range(self.__rank_length)]\n\n try:\n self.__timestamps = Ranking.get_timestamps()\n except RankingOfFeatureMissing:\n Log.e(RankingOfFeatureMissing.cause)\n return\n\n self.__output_length = Ranking.get_video_length()\n self.__actual_length = abs(self.__cache.read_data(CACHE_FRAME_COUNT) /\n self.__cache.read_data(CACHE_FPS))\n\n self.__plot_rank_line()\n self.__analytics()", "def read_data(self):\n print 'Getting team stats...'\n self.team_stats = get_team_stats(self.recent_years)\n\n print 'Getting matches...'\n self.matches = get_matches(\n with_team_stats=True,\n duplicate_with_reversed=self.duplicate_with_reversed,\n exclude_ties=self.exclude_ties,\n recent_years=self.recent_years,\n use_these_team_stats=self.team_stats,\n )", "def analysis():\n global prediction\n\n json_path = os.path.join(basedir, 'static', 'data', 'tmp_json')\n # csv_path = os.path.join(basedir, 'static', 'data', 'csv')\n # if not os.path.exists(csv_path):\n # os.mkdir(csv_path)\n\n if os.name == 'nt':\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf.dir'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf.dir'))\n else:\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf'))\n\n # Files exists\n if audio_file.is_file() and video_file.is_file():\n with shelve.open(os.path.join(json_path, 'facial_data.shlf')) as shelf:\n emotion_data = shelf['emotion_data']\n microexpression_data = shelf['micro_expression_data']\n blink_data = shelf['blink_data']\n\n with shelve.open(os.path.join(json_path, 'audio_data.shlf')) as shelf:\n mean_energy = shelf['mean_energy']\n max_pitch_amp = shelf['max_pitch_amp']\n vowel_duration = shelf['vowel_duration']\n pitch_contour = shelf['pitch_contour']\n\n else:\n emotion_data = None\n microexpression_data = None\n blink_data = None\n mean_energy = None\n max_pitch_amp = None\n vowel_duration = None\n pitch_contour = None\n\n # Training Files (choose one)\n # soc_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_soc.txt')\n # niko_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_niko.txt')\n # vero_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_vero.txt')\n\n # txt_file = soc_file\n\n # train_data = []\n\n # for cases where one parameter has more elements\n # for i in range(min(len(blink_data), len(microexpression_data), len(mean_energy))):\n # train_data.append(0)\n\n # train_file = open(txt_file)\n\n # for line in train_file:\n # index1 = int((int(line[4]) * 600) + ((int(line[5]) * 60) + (int(line[7]) * 10) + int(line[8])) / 2)\n # index2 = int((int(line[10]) * 600) + ((int(line[11]) * 60) + (int(line[13]) * 10) + int(line[14])) / 2)\n # if line[0] == 'F':\n # train_data[index1] = 1\n # train_data[index2] = 1\n\n # with open(os.path.join(csv_path, 'train.csv'), 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency',\n # 'False/True'])\n\n # # for 
cases where one parameter has more elements than another\n # for index in range(min(len(mean_energy), len(blink_data), len(microexpression_data))):\n # writer.writerow([index, microexpression_data[index], blink_data[index],\n # mean_energy[index], max_pitch_amp[index], vowel_duration[index], pitch_contour[index],\n # train_data[index]])\n\n # finalresults = [['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency' ]]\n final_results = []\n\n for index in range((min(len(mean_energy), len(blink_data), len(microexpression_data)))):\n final_results.append([microexpression_data[index], blink_data[index],\n mean_energy[index], max_pitch_amp[index], vowel_duration[index],\n pitch_contour[index]])\n\n prediction[0] = predict(final_results)\n\n return render_template('analysis.html', mean_energy=mean_energy, max_pitch_amp=max_pitch_amp,\n vowel_duration=vowel_duration, pitch_contour=pitch_contour, blink_data=blink_data,\n microexpression_data=microexpression_data, emotion_data=emotion_data)", "def compute_statistics(self):", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"data/analytics\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def analyse():\n identity_list = create_identity_list()\n frequency = create_identity_list()\n\n for sim in range(SIMULATIONS):\n # Generate the hands and board\n cards_used = []\n hands_array = main.generate_hands(cards_used, CARDS, HANDS)\n ftr = main.generate_ftr(cards_used, CARDS)\n\n # Find strengths of each hand and determine winner list, and amount of winning hands\n strengths = main.get_strengths(hands_array, ftr, HANDS)\n winner = main.determine_winner(hands_array, ftr, strengths)\n winning_hands = len(main.get_winning_hands(winner))\n\n # Get the 'identities' of every hand\n hand_identities = get_identities(hands_array)\n\n # Add 1 to corresponding identity value if the hand won\n for hand in range(HANDS):\n row_col = get_row_col_index(hand_identities[hand])\n if winner[hand] == 1: # The corresponding hand won\n identity_list[row_col[0]][row_col[1]] += 1.0/winning_hands\n frequency[row_col[0]][row_col[1]] += 1\n\n # Turn the identity list into percentages using frequency list\n percentage_list = convert_to_percentages(identity_list, frequency)\n\n # Nicely print the results\n print_results(percentage_list)", "def main():\n if len(sys.argv) != 4:\n sys.exit('Please run with : python data-eng.py donors_file.txt zipcode_output_filename date_output_filename')\n compute_stats(sys.argv[1], sys.argv[2], sys.argv[3])", "def main():\n # Constants\n groundstation_name = 'Wallops Antenna'\n groundstation_address = 'Radar Road, Temperanceville, VA 23442'\n satnum = 25544 # ISS = 25544\n saturl=\"http://www.celestrak.com/NORAD/elements/stations.txt\"\n gs_minimum_elevation_angle = 10.0\n\n # Alternate constants\n gs_alt_lat = 37.854886 # Only needed if address not found\n gs_alt_lon = -75.512936 # Ditto\n gs_alt_el_meters = 3.8 # Ditto\n gs_alt_tz_offset_seconds = -18000.0 # Ditto\n gs_tzname = 'US/Eastern'\n\n # Construct the ground station info\n try:\n # Try to use the address...\n gs = GroundStation.from_address(groundstation_address, \\\n groundstation_name, \\\n gs_minimum_elevation_angle)\n except:\n # Otherwise, use explicit location data...\n gs = GroundStation.from_location(gs_alt_lat, gs_alt_lon, \\\n gs_alt_el_meters, \\\n gs_tzname, \\\n groundstation_name, \\\n 
gs_minimum_elevation_angle)\n\n # Times we need\n now = datetime.now()\n gs_today = gs.get_tz().localize(datetime(now.year, now.month, now.day))\n gs_today_start = gs.get_tz().localize(datetime(now.year, now.month, now.day, \\\n 0, 0, 0)) \n gs_today_end = gs.get_tz().localize(datetime(now.year, now.month, now.day, \\\n 23, 59, 59))\n\n # Get the InviewCalculator and compute the inviews\n st = SatelliteTle(satnum, tle_url=saturl)\n ic = InviewCalculator(gs, st)\n inviews = ic.compute_inviews(gs_today_start, gs_today_end)\n\n # Print the results\n print_satellite_header(st)\n print_inview_header(gs.get_minimum_elevation_angle(), gs_today, gs)\n print_inviews(gs, inviews)\n print_azeltables(inviews, ic)", "def analyze_run():\n file_datas_dict = load_datas(Args.data_files)\n plotables_dict = dict()\n for file_name, datas in file_datas_dict.viewitems():\n analized_datas = analyze_datas(datas,Args.analysis_attributes)\n plotables = ana_results_to_plotables(\n analized_datas,\n Args.analysis_attributes\n )\n if Args.dm_file_out:\n analysis_save_dm(\n analized_datas,\n plotables,\n Args.analysis_attributes,\n Args.dm_file_out\n )\n if Args.mat_file_out:\n analysis_save(\n plotables,\n Args.analysis_attributes,\n Args.mat_file_out\n )\n if Args.verbose:\n plotables_dict[file_name] = plotables\n if Args.verbose:\n ana_plot_figures(plotables_dict,Args.analysis_attributes)", "def compute(self, download_data=None):\n if self.extractor is None:\n # If download_data is None, decide based on whether eid or session path was provided\n ensure_data = self.download_data if download_data is None else download_data\n self.load_data(download_data=ensure_data)\n self.log.info(f\"Session {self.session_path}: Running QC on habituation data...\")\n\n # Initialize checks\n prefix = '_task_'\n data = self.extractor.data\n metrics = {}\n passed = {}\n\n # Check all reward volumes == 3.0ul\n check = prefix + 'reward_volumes'\n metrics[check] = data['rewardVolume']\n passed[check] = metrics[check] == 3.0\n\n # Check session durations are increasing in steps >= 12 minutes\n check = prefix + 'habituation_time'\n if not self.one or not self.session_path:\n self.log.warning('unable to determine session trials without ONE')\n metrics[check] = passed[check] = None\n else:\n subject, session_date = self.session_path.parts[-3:-1]\n # compute from the date specified\n date_minus_week = (\n datetime.strptime(session_date, '%Y-%m-%d') - timedelta(days=7)\n ).strftime('%Y-%m-%d')\n sessions = self.one.alyx.rest('sessions', 'list', subject=subject,\n date_range=[date_minus_week, session_date],\n task_protocol='habituation')\n # Remove the current session if already registered\n if sessions and sessions[0]['start_time'].startswith(session_date):\n sessions = sessions[1:]\n metric = ([0, data['intervals'][-1, 1] - data['intervals'][0, 0]] +\n [(datetime.fromisoformat(x['end_time']) -\n datetime.fromisoformat(x['start_time'])).total_seconds() / 60\n for x in [self.one.alyx.get(s['url']) for s in sessions]])\n\n # The duration from raw trial data\n # duration = map(float, self.extractor.raw_data[-1]['elapsed_time'].split(':'))\n # duration = timedelta(**dict(zip(('hours', 'minutes', 'seconds'),\n # duration))).total_seconds() / 60\n metrics[check] = np.array(metric)\n passed[check] = np.diff(metric) >= 12\n\n # Check event orders: trial_start < stim on < stim center < feedback < stim off\n check = prefix + 'trial_event_sequence'\n nans = (\n np.isnan(data[\"intervals\"][:, 0]) | # noqa\n np.isnan(data[\"stimOn_times\"]) | # noqa\n 
np.isnan(data[\"stimCenter_times\"]) |\n np.isnan(data[\"valveOpen_times\"]) | # noqa\n np.isnan(data[\"stimOff_times\"])\n )\n a = np.less(data[\"intervals\"][:, 0], data[\"stimOn_times\"], where=~nans)\n b = np.less(data[\"stimOn_times\"], data[\"stimCenter_times\"], where=~nans)\n c = np.less(data[\"stimCenter_times\"], data[\"valveOpen_times\"], where=~nans)\n d = np.less(data[\"valveOpen_times\"], data[\"stimOff_times\"], where=~nans)\n\n metrics[check] = a & b & c & d & ~nans\n passed[check] = metrics[check].astype(float)\n\n # Check that the time difference between the visual stimulus center-command being\n # triggered and the stimulus effectively appearing in the center is smaller than 150 ms.\n check = prefix + 'stimCenter_delays'\n metric = np.nan_to_num(data[\"stimCenter_times\"] - data[\"stimCenterTrigger_times\"],\n nan=np.inf)\n passed[check] = (metric <= 0.15) & (metric > 0)\n metrics[check] = metric\n\n # Phase check\n check = prefix + 'phase'\n metric = data['phase']\n passed[check] = (metric <= 2 * np.pi) & (metric >= 0)\n metrics[check] = metric\n\n check = prefix + 'phase_distribution'\n metric, _ = np.histogram(data['phase'])\n _, p = chisquare(metric)\n passed[check] = p < 0.05\n metrics[check] = metric\n\n # Checks common to training QC\n checks = [check_goCue_delays, check_stimOn_goCue_delays,\n check_stimOn_delays, check_stimOff_delays]\n for fcn in checks:\n check = prefix + fcn.__name__[6:]\n metrics[check], passed[check] = fcn(data)\n\n self.metrics, self.passed = (metrics, passed)", "def __init__(self, langName):\n self.langName = langName\n self.readDataSets(langName)\n # self.getVMWEReport()\n self.analyzeSents()\n self.orderParentVMWEs()\n self.getTrainAndTest()\n self.cleanSents()\n self.extractDictionaries()\n self.deleteNonRecognizableMWE()\n printStats(self.trainingSents, 'Train', mweDic=self.mweDictionary, langName=langName, test=False)\n printStats(self.testingSents, 'Test', mweDic=self.mweDictionary, test=True)", "def jaccard_sim(news_data_dict: dict, song_data_dict: dict):\n jaccard_dict = defaultdict(list)\n\n for news_yr, news_txt in news_data_dict.items():\n news_txt_flat = []\n for nt in news_txt:\n news_txt_flat += nt\n news_txt_flat = set(news_txt_flat)\n if news_yr <= 2011:\n song_txt_flat = set()\n for i in range(5):\n song_yr = news_yr+i\n for st in song_data_dict[song_yr]:\n for stw in st:\n song_txt_flat.add(stw)\n shared_words = news_txt_flat.intersection(song_txt_flat)\n jaccard = len(shared_words) / (len(song_txt_flat) + len(news_txt_flat) - len(shared_words))\n jaccard = round(jaccard, 3)\n jaccard_dict[news_yr].append(jaccard)\n\n return jaccard_dict", "def test_parse_hit_details(self):\n for query in self.result:\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"SUBJECT_ID\"], \"gi|148670104|gb|EDL02051.1|\")\n self.assertEqual(\n first_hsp[\"HIT_DEF\"],\n \"insulin-like growth factor 2 receptor, isoform CRA_c [Mus musculus]\",\n )\n self.assertEqual(first_hsp[\"HIT_ACCESSION\"], \"2001\")\n self.assertEqual(first_hsp[\"HIT_LENGTH\"], 707)", "def main():\n\n # confusion matrix model ensemble\n df = pd.read_csv('pred_test_ensemble.csv')\n print('Real test accuracy:', accuracy_score(df.labels.values, df.class_preds.values))\n conf_matrix = confusion_matrix(df.labels.values, df.class_preds.values, labels=[0, 1, 2, 3])\n\n dct = {'': [0, 90, 180, 270]}\n for i in range(4):\n dct[str(i*90)] = conf_matrix[:, i]\n \n conf_matrix = pd.DataFrame(dct)\n print(conf_matrix)\n 
conf_matrix.to_csv('confusion_matrix_ensemble.csv', index=False)\n\n\n\n # # Statistical gama\n # df = pd.read_csv('pred_test.csv')\n # print('Statistical... ')\n # statistical = gama_statistic(df)\n # statistical.to_csv('gama_statistic.csv', index=False)\n # print(statistical)", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def main():\n # Load in original data\n origin_data = pd.read_csv('/Users/apple/Desktop/CSE_163/cse163_project/'\n + 'Admission_Predict_Ver1.1.csv',\n sep=r'\\s*,\\s*', header=0, encoding='ascii',\n engine='python')\n\n # Research question 1\n lasso_regression(origin_data)\n\n # Research question 2\n # We drop the 'Serial No.' column because it is unrelated to our analysis.\n df = origin_data.drop(columns=['Serial No.'])\n find_correlation(df)\n boxplots_testscores_vs_admission(df)\n\n # Research question 3\n university_rating_analysis(origin_data)", "def harmony(self):\n\n self.config.logger.info(\"Harmonizing grid area...\")\n\n # reset start time\n t0 = time.time()\n\n # reconcile GCAM land use area with base layer land use data\n recon_data = rec.reconcile(self.allreg, self.allaez, self.allregnumber, self.allregaez, self.spat_aez,\n self.spat_region, self.spat_ludata, self.user_years, self.gcam_ludata, self.gcam_aez,\n self.gcam_regionnumber)\n\n # unpack variables\n self.spat_regaezarea, self.gcam_regaezarea, self.areacoef, self.gcam_regaezareaharm, self.ixr_idm, \\\n self.ixy_ixr_ixm, self.gcam_ludata = recon_data\n\n # write harmonization coefficient array as a diagnostics file\n if self.config.diagnostic == 1:\n wdr.save_array(self.areacoef, self.config.harm_coeff_file)\n\n self.config.logger.info('PERFORMANCE: Harmonization completed in {0} seconds'.format(time.time() - t0))", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': 
'3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def main():\n region = 'Kanto'\n year = 2000\n # callParallelGA(region)\n callParallelReducedGA(region)\n \n\n region = 'EastJapan'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n\n region = 'Tohoku'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n \n region = 'Kansai'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)", "def run():\n\n data = parse_data()\n\n wide = 25\n tall = 6\n\n layers = []\n for index in range(0, len(data), wide * tall):\n item = data[index : index + wide * tall]\n item = [item[x : x + wide] for x in range(0, wide * tall, wide)]\n layers.append(item)\n\n lowest, layer = get_layer_containing_fewest_zeroes(layers)\n\n ones = sum([Counter(l).get(\"1\", 0) for l in layer])\n twos = sum([Counter(l).get(\"2\", 0) for l in layer])\n assert (ones * twos) == 1820\n\n display_layers(layers, wide, tall) # ckuj", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def func(self):\n account = self.account\n city_name = 'Phoenix' if not self.args else self.args\n a = Astral()\n a.solar_depression = 'civil'\n city = a[city_name]\n if not city:\n return\n timezone = city.timezone\n sun = city.sun(date=datetime.date.today(), local=True)\n\n account.msg('Information for %s/%s\\n' % (city_name, city.region))\n account.msg('Timezone: %s' % timezone)\n account.msg('Latitude: %.02f; Longitude: %.02f' % (city.latitude, city.longitude))\n account.msg('Dawn: %s' % str(sun['dawn']))\n account.msg('Sunrise: %s' % str(sun['sunrise']))\n account.msg('Noon: %s' % str(sun['noon']))\n account.msg('Sunset: %s' % str(sun['sunset']))\n account.msg('Dusk: %s' % str(sun['dusk']))", "def test_2():\n table = pandas.read_csv('data/matches.csv')\n query_result = 
show.show(table,\n dimensions=['player_of_match'],\n metric='win_by_runs' ,\n slices=[('season', Filters.EQUAL_TO, 2017)],\n \t date_range=('2017-05-09', '2017-05-12'),\n \t date_column_name='date', day_first=False,\n \t summary_operator=SummaryOperators.MEAN)\n print(query_result)\n expected_result = \"\"\" player_of_match MEAN of win_by_runs\n0 KK Nair 7\n1 MM Sharma 14\n2 SS Iyer 0\n3 WP Saha 7\"\"\"\n\n expected_suggestions = \"[]\"\n\n assert(expected_result == query_result[0].to_string())\n assert(expected_suggestions == str(query_result[1]))", "def get_filters():\r\n print('Welcome to Bikeshare data Analysis...')\r\n try:\r\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\r\n while True:\r\n try:\r\n cit_num = int(input('Enter city you would like to analyze \\n1 : Chicago\\n2 : Newyork\\n3 : Washington :\\n'))\r\n except ValueError:\r\n print(\"Please enter a number value\\n\")\r\n continue\r\n if cit_num not in (1,2,3):\r\n print(\"Please enter a valid number that refers to cities\\n\")\r\n continue\r\n else:\r\n break\r\n\r\n city = LST_CITY[cit_num - 1].lower()\r\n # TO DO: get user input for month (all, january, february, ... , june)\r\n while True:\r\n try:\r\n month_num = int(input('Enter month of the year you would like to analyze \\n1 : JAN\\n2 : FEB\\n3 : MAR\\n4 : APR\\n5 : MAY\\n6 : JUN\\n7 : WHOLE WEEK\\n'))\r\n except ValueError:\r\n print(\"Please enter a num value\\n\")\r\n continue\r\n if month_num not in range(1,8):\r\n print(\"Please enter a valid month...\\n\")\r\n continue\r\n else:\r\n break\r\n month = VALID_MONTHS[month_num-1]\r\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\r\n while True:\r\n try:\r\n day_num = int(input('Enter day of the week you would like to analyze \\n1 : MON\\n2 : TUE\\n3 : WED\\n4 : THU\\n5 : FRI\\n6 : SAT\\n7 : SUN\\n8 : ALL MONTHS\\n'))\r\n except ValueError:\r\n print(\"Please enter a num value\\n\")\r\n continue\r\n if day_num not in range(1, 9):\r\n print(\"Please enter a valid month number...\\n\")\r\n continue\r\n else:\r\n break\r\n day = VALID_DAYS[day_num-1]\r\n\r\n print('\\n'+'*'*20)\r\n print('Your selections are stated below\\nCity : {} , Month : {} , Day : {} '.format(city.title(),month.title(),day.title()))\r\n print('*'*20)\r\n \r\n return city, month, day ,cit_num , month_num , day_num\r\n \r\n except Exception as e:\r\n print('An exception has been occurred : {}'.format(e))", "def test_search(self):\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)", "def test_jaccard(self):\n self.region_sets([['chr1',50,58],['chr1',70,80],['chr1',90,94]],\n [['chr1',45,55],['chr1',76,86]])\n result = self.setA.jaccard(self.setB)\n self.assertEqual(result, 9/33)", "def main():\n while True:\n city, month, day, filters = get_filters()\n dataframe = load_data(city, month, day, filters)\n\n print('\\n\\n************DISPLAYING STATISTICS*************')\n time_stats(dataframe, filters)\n station_stats(dataframe, filters)\n trip_duration_stats(dataframe, filters)\n user_stats(dataframe, filters)\n visualize_data(dataframe, filters, city)\n show_data(dataframe, filters, city)\n\n # To restart or quit program\n restart_program()", "def sky_orbits(test=True):\n \n t = Table.read('/home/ana/data/baumgardt_positions.fits')\n \n ind_disterr = 
~np.isfinite(t['e_Rsun'])\n t['e_Rsun'][ind_disterr] = 0.1 * t['Rsun'][ind_disterr]\n e_max = np.nanmax(t['e_Rsun'][~ind_disterr])\n ind_cap = t['e_Rsun']>e_max\n t['e_Rsun'][ind_cap] = e_max\n \n clusters = ['NGC 3201', 'NGC 4590', 'NGC 5824', 'NGC 5272', 'NGC 5139', 'NGC 5024']\n #clusters = ['NGC 5824', 'NGC 5024']\n N = len(clusters)\n \n match = dict()\n match['NGC 3201'] = dict(streams=['gjoll'], direction=[-1], nstep=[35], gc_label='NGC\\n3201', gcra_off=0*u.deg, gcdec_off=-13*u.deg, gcl_off=0*u.deg, gcb_off=-13*u.deg, stream_label=['$Gj\\\\\\\" oll$'], stream_ra=[-156*u.deg], stream_dec=[-4.5*u.deg], eq_angle=[-45*u.deg], stream_l=[-148*u.deg], stream_b=[-33*u.deg], gal_angle=[22*u.deg])\n \n match['NGC 4590'] = dict(streams=['fjorm'], direction=[1], nstep=[100], gc_label='NGC\\n4590', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=-13*u.deg, gcb_off=-10*u.deg, stream_label=['$Fj\\\\\\\" orm$'], stream_ra=[-22*u.deg], stream_dec=[66*u.deg], eq_angle=[35*u.deg], stream_l=[110*u.deg], stream_b=[50*u.deg], gal_angle=[-50*u.deg])\n \n match['NGC 5024'] = dict(streams=['sylgr', 'ravi'], direction=[-1, 1], nstep=[300,500], gc_label='NGC\\n5024', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=10*u.deg, gcb_off=-20*u.deg, stream_label=['Sylgr', 'Ravi'], stream_ra=[-70*u.deg, 83*u.deg], stream_dec=[2*u.deg, -47*u.deg], eq_angle=[25*u.deg, 65*u.deg], stream_l=[-110*u.deg, -18.5*u.deg], stream_b=[62*u.deg, -47*u.deg], gal_angle=[30*u.deg, -10*u.deg])\n \n match['NGC 5139'] = dict(streams=['fimbulthul'], direction=[-1], nstep=[70], gc_label='NGC\\n5139', gcra_off=-5*u.deg, gcdec_off=-15*u.deg, gcl_off=0*u.deg, gcb_off=-12*u.deg, stream_label=['Fimbulthul'], stream_ra=[-20*u.deg], stream_dec=[-15*u.deg], eq_angle=[0*u.deg], stream_l=[-20*u.deg], stream_b=[45*u.deg], gal_angle=[0*u.deg])\n \n match['NGC 5272'] = dict(streams=['svol'], direction=[1], nstep=[70], gc_label='NGC\\n5272', gcra_off=-15*u.deg, gcdec_off=10*u.deg, gcl_off=-23*u.deg, gcb_off=-17*u.deg, stream_label=['$Sv\\\\\\\" ol$'], stream_ra=[-2*u.deg], stream_dec=[34*u.deg], eq_angle=[-10*u.deg], stream_l=[55*u.deg], stream_b=[55*u.deg], gal_angle=[-65*u.deg])\n \n match['NGC 5824'] = dict(streams=['triangulum', 'turbio'], direction=[1,1], nstep=[700,1], gc_label='NGC\\n5824', gcra_off=15*u.deg, gcdec_off=-5*u.deg, gcl_off=15*u.deg, gcb_off=-5*u.deg, stream_label=['Triangulum', 'Turbio'], stream_ra=[152*u.deg, 130*u.deg], stream_dec=[32*u.deg, -51*u.deg], eq_angle=[-48*u.deg, 30*u.deg], stream_l=[120*u.deg, -82*u.deg], stream_b=[-31*u.deg, -57*u.deg], gal_angle=[70*u.deg, 105*u.deg])\n \n dt = 0.5*u.Myr\n wangle = 180*u.deg\n ra_off = 120*u.deg\n l_off = 0*u.deg\n \n colors = [mpl.cm.plasma(0.95*x/N) for x in range(N)]\n \n np.random.seed(27529)\n if test:\n Nsample = 1\n else:\n Nsample = 100\n \n plt.close()\n fig = plt.figure(figsize=(12,12))\n \n ax0 = fig.add_subplot(211, projection='mollweide')\n ax1 = fig.add_subplot(212, projection='mollweide')\n ax = [ax0, ax1]\n \n for i in range(N):\n #ind = t['Name']== clusters[i]\n ind = t['Name']==clusters[i]\n t_ = t[ind]\n \n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n cgal = c.transform_to(coord.Galactic)\n #w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n color = colors[i]\n alpha_text = 0.8\n \n plt.sca(ax[0])\n plt.plot((c.ra + ra_off).wrap_at(wangle).rad, c.dec.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((c.ra + 
ra_off + match[clusters[i]]['gcra_off']).wrap_at(wangle).rad, (c.dec + match[clusters[i]]['gcdec_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n plt.sca(ax[1])\n plt.plot((cgal.l + l_off).wrap_at(wangle).rad, cgal.b.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((cgal.l + l_off + match[clusters[i]]['gcl_off']).wrap_at(wangle).rad, (cgal.b + match[clusters[i]]['gcb_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n\n for j in range(len(match[clusters[i]]['direction'])):\n # sample gc positional uncertainties\n for k in range(-1, Nsample):\n if k==-1:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1.5\n alpha = 1\n else:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'] + np.random.randn()*t_['e_Rsun'], pm_ra_cosdec=t_['pmRA_'] + np.random.randn()*t_['e_pmRA_'], pm_dec=t_['pmDE'] + np.random.randn()*t_['e_pmDE'], radial_velocity=t_['RV'] + np.random.randn()*t_['e_RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1\n alpha = 0.1\n \n orbit = ham.integrate_orbit(w0, dt=dt*match[clusters[i]]['direction'][j], n_steps=match[clusters[i]]['nstep'][j])\n orbit_eq = orbit.to_coord_frame(coord.ICRS, galactocentric_frame=gc_frame)\n orbit_gal = orbit.to_coord_frame(coord.Galactic, galactocentric_frame=gc_frame)\n \n \n plt.sca(ax[0])\n dra = (orbit_eq.ra+ra_off).wrap_at(wangle)[1:] - (orbit_eq.ra+ra_off).wrap_at(wangle)[:-1]\n if np.any(np.abs(dra)>180*u.deg):\n pos_break = dra>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_eq.dec.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_eq.dec.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad, orbit_eq.dec.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n plt.sca(ax[1])\n dl = orbit_gal.l.wrap_at(wangle)[1:] - orbit_gal.l.wrap_at(wangle)[:-1]\n if np.any(np.abs(dl)>180*u.deg):\n pos_break = dl>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_gal.b.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_gal.b.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad, orbit_gal.b.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n # add streams\n pkl = pickle.load(open('../data/streams/data_{:s}.pkl'.format(match[clusters[i]]['streams'][j]), 'rb'))\n cs = coord.SkyCoord(ra=pkl['dec'][0], dec=pkl['dec'][1], frame='icrs')\n cs_gal = cs.transform_to(coord.Galactic)\n \n plt.sca(ax[0])\n plt.plot((cs.ra+ra_off).wrap_at(wangle).rad, cs.dec.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_ra'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_dec'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, 
rotation=match[clusters[i]]['eq_angle'][j].value, ha='center', va='center')\n \n plt.sca(ax[1])\n plt.plot((cs_gal.l+l_off).wrap_at(wangle).rad, cs_gal.b.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_l'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_b'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['gal_angle'][j].value, ha='center', va='center')\n \n \n plt.sca(ax[0])\n plt.grid(ls=':')\n plt.xlabel('R.A. [deg]')\n plt.ylabel('Dec [deg]')\n\n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]-ra_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n plt.sca(ax[1])\n plt.grid(ls=':')\n plt.xlabel('Galactic longitude [deg]')\n plt.ylabel('Galactic latitude [deg]')\n \n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [2,3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]+l_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n \n plt.tight_layout(h_pad=2)\n plt.savefig('../paper/sky_orbits.pdf')" ]
[ "0.66694874", "0.57432646", "0.5723401", "0.5522852", "0.55180657", "0.5504678", "0.5437675", "0.54102576", "0.53872174", "0.53547543", "0.53277946", "0.53205276", "0.53092575", "0.5265327", "0.5183534", "0.5149629", "0.5142744", "0.51422846", "0.51374143", "0.51307535", "0.51246846", "0.5111326", "0.5106823", "0.50977486", "0.5078957", "0.5071514", "0.5066866", "0.5049063", "0.502671", "0.5012002", "0.49999216", "0.49736392", "0.49687007", "0.4966849", "0.49654552", "0.4960341", "0.49488854", "0.49487597", "0.49459764", "0.49377188", "0.4931297", "0.49193537", "0.49175823", "0.49143606", "0.49073637", "0.49037388", "0.49020556", "0.48998183", "0.48910233", "0.48891777", "0.48838216", "0.4879003", "0.48724163", "0.48652583", "0.4865132", "0.48533365", "0.48488814", "0.48408285", "0.48324284", "0.48274356", "0.4823161", "0.4822759", "0.48198643", "0.48191303", "0.48177466", "0.4817187", "0.48090997", "0.48070917", "0.48062077", "0.48010913", "0.48003522", "0.479872", "0.47873342", "0.47846764", "0.47828007", "0.47808087", "0.4779133", "0.47774383", "0.4774203", "0.4767178", "0.47660217", "0.4764641", "0.4762371", "0.475913", "0.47568718", "0.47547925", "0.47526044", "0.47461602", "0.47422677", "0.4735532", "0.4732296", "0.472427", "0.4717124", "0.47163177", "0.4716019", "0.4715762", "0.47137326", "0.4711089", "0.47096556", "0.4702856", "0.4702148" ]
0.0
-1
Send an email message.
def send_message(user_id, message):
    try:
        service = get_service('token.pickle')
        message = (service.users().messages().send(userId=user_id, body=message).execute())
        print('Message Id: %s' % message['id'])
        return message
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_email(self, message):\n pass", "def send_mail(email):\n return email.send()", "def send_email(msg):\n\tprint(\"sendEmail: \" + msg)", "def send_email(message):\n mail_server = smtplib.SMTP('localhost')\n mail_server.send_message(message)\n mail_server.quit()", "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def send_message(self, message):\n try:\n msg = sg.client.mail.send.post(request_body=message)\n app.logger.info(\"{error} with {response}\".format(error=msg.status_code, response=msg.body))\n app.logger.info(\"Successfully sent message: {msg}\".format(msg=msg))\n except Exception as e:\n app.logger.exception(\"Error While sending emails: {msg}\".format(msg=message))\n app.logger.exception(e)", "def send_email(self, email_from, email_to, message):\n logging.info(\"Attempting to send email from \" + email_from + \" to \" + email_to)\n self.conn.sendmail(email_from, email_to, message)\n logging.info(\"Email sent\")", "def send_message():\n # @todo validation & error handling.\n sg = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n log(\"Message generated and sent at {}\".format(strftime('%x %H:%M:%S')))\n sg.client.mail.send.post(request_body=build_message())", "def send(message):\n mail_server = smtplib.SMTP('localhost')\n mail_server.send_message(message)\n mail_server.quit()", "def send_email(my_email, password, message):\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(my_email, password)\n # send from my_email to my_email (from, to, message)\n server.sendmail(my_email, my_email, message)\n server.quit()", "def send_mail(self, address, title, message):\n pass", "def send_email(self):\n message = MIMEText(self.email_body, 'plain', 'utf-8')\n\n message['Subject'] = self.email_subject\n message['From'] = gmail_user\n message['To'] = ', '.join(self.recipients)\n\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n\n server.login(gmail_user, gmail_password)\n\n server.sendmail(message['From'], self.recipients, message.as_string())\n\n server.close()\n\n print('Email sent!')\n except Exception as err:\n # TODO Write error to log file\n raise err", "def send(self, email):\r\n smtp = smtplib.SMTP(self.server, self.port)\r\n smtp.ehlo()\r\n \r\n if self.tls:\r\n smtp.starttls()\r\n smtp.ehlo()\r\n\r\n if self.user and self.passwd:\r\n smtp.login(self.user, self.passwd)\r\n\r\n smtp.sendmail(email.from_address, email.to + email.ccs, str(email))\r\n if email.bccs:\r\n email.root['X-antroy-sent'] = \"True\"\r\n smtp.sendmail(email.from_address, email.bccs, str(email))\r\n del email.root['X-antroy-sent']\r\n smtp.quit()", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def send_email(subject, sender, recipients, text_body, html_body):\n\t\tmsg = Message(subject, sender=sender, recipients=recipients)\n\t\tmsg.body = text_body\n\t\tmsg.html = html_body\n\t\tmail.send(msg)", "def send_email():\n send_mail(\"You've got some problem.\", 'REPAIR IT', '[email protected]',\n ['[email protected]'], fail_silently=False,)", "def send_mail(subject):\r\n obj = EmailNotification().emailobj()\r\n obj.send_mail(subject)", "def send_email(subject, sender, recipients, text_body, html_body):\n msg = Message(subject=subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n mail.send(msg)", "def send_email(self, to, subject, message):\n\n email_to = \"[email protected]\"\n try:\n 
mx_alarm = AlertEmail(email_to, self.subject, self.message)\n mx_alarm.send()\n print(\"\\t{} |{}| Successfully sent email.\".format(Timer.OK, self.tinfo['name']))\n return True\n except Exception as e:\n print(\"\\t{} Exception in send_email! {}\".format(Timer.FAIL, e))", "def send_message(self, subject, body):\n headers = [\n \"From: \" + self.email,\n \"Subject: \" + subject,\n \"To: \" + self.email,\n \"MIME-Version: 1.0\",\n \"Content-Type: text/html\"]\n headers = \"\\r\\n\".join(headers)\n self.session.sendmail(\n self.email,\n self.email,\n headers + \"\\r\\n\\r\\n\" + body)", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)", "def send_email(self, email):\n\n if not isinstance(email, str):\n raise TypeError('type of email must be str not %s' % type(email))\n\n message = self.get_message(email)\n self.server.send_message(message)", "def send():\n try:\n data = request.get_json()\n if data['authkey'] != os.environ.get('MAIL_AUTHKEY'): \n return \"Ooops. Wrong `authkey`.\"\n msg = Message(data['subject'],\n sender=os.environ.get('MAIL_USERNAME'),\n recipients=[data['recipient']])\n msg.body = data['body'] \n mail.send(msg)\n return 'Mail sent!'\n except Exception as e:\n print('We got an error at ' + httpdate(datetime.datetime.now()))\n print(str(e)) \n return 'There was an error with that request.'", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def send_email(recipient, subject, message):\n from_email = os.getenv(\"EMAIL_SENDER\")\n status = send_mail(subject, message, from_email, [recipient])\n return status", "def send_email(to_address, from_address, subject, body):\n mail = \"\"\"echo \"From: %(from)s\\r\\nDate: $(date)\\r\\nSubject: %(subject)s\\r\\nMIME-Version: 1.0\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n%(body)s\" | ssmtp %(to)s\"\"\" % {\n \"to\": to_address,\n \"from\": from_address,\n \"subject\": subject,\n \"body\": body,\n }\n cmd(mail)", "def sendEmail(message):\n message_string = '\\n'.join(message)\n recipients = ['[email protected]', '[email protected]']\n msg = EmailMessage()\n msg['Subject'] = 'Finished training and predicting MEMM'\n msg['From'] = '[email protected]'\n msg['To'] = ', '.join(recipients)\n msg.set_content(message_string)\n sender = SMTP('localhost')\n sender.send_message(msg)\n sender.quit()", "def send_mail(to, sender, subject, message):\n\n msg = MIMEText(message)\n msg['From'] = sender\n msg['To'] = to\n msg['Subject'] = subject\n body = {'raw': base64.urlsafe_b64encode(msg.as_bytes()).decode()}\n MESSAGES.send(userId='me', body=body).execute()", "def sending(self, message):\n sending_mail.send(sender=self.__class__, message=message)", "def send_mail(self, subject):\r\n pass", "def send_email(self, to, content):\r\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login(self.from_, self.password)\r\n server.sendmail(self.from_, to, content)\r\n speak(\"Email has been sent Succesfully!\")\r\n return \"None\"", "def send_mail(subject, message):\n \n import smtplib\n \n msg = \"\\r\\n\".join([\n \"From: \" + gmail_sender,\n \"To: \" + email_to,\n \"Subject: \" + subject,\n \"\",\n message\n ])\n\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(gmail_sender,gmail_password)\n server.sendmail(gmail_sender, email_to, msg)\n 
server.quit()", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def _send_smtp(message, subject, to, to_name, sender, sender_name):\n host = app.config.get('MAIL_HOST')\n\n if not host:\n raise MailFailure('SMTP Server Not Configured')\n\n try:\n server = smtplib.SMTP(host)\n except (smtplib.SMTPConnectError, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error connecting to SMTP server.')\n\n msg = text.MIMEText(message)\n msg['Subject'] = subject\n msg['To'] = email.utils.formataddr((to_name, to))\n msg['From'] = email.utils.formataddr((sender_name, sender))\n\n try:\n if app.debug:\n server.set_debuglevel(True)\n server.sendmail(sender, [to], msg.as_string())\n except (smtplib.SMTPException, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error sending mail to SMTP server.')\n finally:\n try:\n server.quit()\n except smtplib.SMTPException:\n pass", "def send_mail(self, msg):\n mail_queue.put(msg)", "def send_email_via_api(self, to, subject, message):\n\n return self.mail.send(to, subject, message)", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "def quick_email(self, send_to, subject, body, style=None):\n message = Message(body, style=style)\n\n self.send_message(message, send_to, subject)", "def send_email(recipient,subject,message):\n msg = MIMEText(message)\n me = '[email protected]'\n \n msg['Subject'] = subject\n msg['From'] = me\n msg['To'] = recipient\n\n # Send the message via our own SMTP server, but don't include the\n # envelope header.\n username='cryolt2'\n password='Diamond=Geil!'\n\n server = smtplib.SMTP('smtp.gmail.com:587') \n server.starttls() \n server.login(username,password) \n server.sendmail(me, recipient, msg.as_string()) \n server.quit()", "def send_message(self, message, send_to, subject):\n message = message.mime()\n\n message['From'] = self.email_address\n message['To'] = send_to\n\n message['Subject'] = subject\n\n self._login()\n self.server.sendmail(self.email_address, send_to, message.as_string())\n self._logout()", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def send(self, address_to, message, emailSubject = \"Automated Email\", attachmentFilePath = None):\r\n\t\tmail = self._createEmail(address_to, message, emailSubject)\r\n\t\tif attachmentFilePath != None:\r\n\t\t\tmail.attachment = self._createAttachment(attachmentFilePath)\r\n\t\tsg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\r\n\t\tresponse = sg.send(mail)\r\n\t\tif response.status_code == 202:\r\n\t\t\tprint(\"Email sent\")\r\n\t\telse:\r\n\t\t\tprint(\"Email not sent. 
Please check error codes below - \")\r\n\t\t\tprint(response.status_code)\r\n\t\t\tprint(response.headers)", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None, **kwargs):\r\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\r\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def send_mail(self, html):\n message = Message(\n From=self._config['mail']['address'], To=self._config['mail']['to'],\n Subject=self._config['mail']['subject']\n )\n message.Html = html\n return self.sender.send(message)", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def email_user(self, subject: str, message: str, from_email: str = None) -> None:\n send_mail(subject, message, from_email, [self.email])", "def sent(self, message):\n sent_mail.send(sender=self.__class__, message=message)", "def send_email(subject, text):\n url = ('https://api.mailgun.net/v3/%s/messages' %\n cfg('mail:mailgun_domain'))\n auth = ('api', cfg('mail:mailgun_key'))\n data = {'from': 'Akari Bot <%s>' % cfg('mail:from'),\n 'to': [cfg('mail:to')],\n 'subject': subject, 'text': text}\n return requests.post(url, auth=auth, data=data)", "def _send(message: Message, application: Flask) -> None:\n\n with application.app_context():\n mail.send(message)", "def email_user(self, subject, message, from_email=None):\n\t\tsend_mail(subject, message, from_email, [self.email])", "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def send_email(self):\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(\"[email protected]\", \"tdcvgycwrzthjqgj\")\n\n subject = \"Price Fell Down\"\n body = \"Check the amazon link \" + self.__product_URL\n message = f\"Subject: {subject}\\n\\n{body}\"\n server.sendmail(\n \"[email protected]\",\n self.__email,\n message\n )\n #print(\"Our mail is sent!!!!\")", "def email_user(self, subject, message, from_email=None, **kwargs):\n\t\tsend_mail(subject, message, from_email, [self.email], **kwargs)", "def email_to_user(self, subject, message, sender=None, **kwargs):\n send_mail(subject, message, sender, [self.email], **kwargs)", "def send(self):\n return send_mail(self.subject, self.message, self.sender, self.recipients, fail_silently=False)", "def email_user(self, subject, 
message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def send_async_email(self, msg):\n with app.app_context():\n result = mail.send(msg)\n print result", "def send_email(sender: str, password: str) -> None:\r\n\r\n # Check for a email and password\r\n if not sender or not password:\r\n print('A recipent or password was not supplied')\r\n exit(1)\r\n \r\n initialize_server()\r\n\r\n webserver.login(sender, password)\r\n webserver.send_message(message)\r\n webserver.quit()", "def sendEmail(recipient, content):\n server = smtplib.SMTP(\"[email protected]\", 587)\n server.ehlo()\n server.starttls()\n server.login(\"[email protected]\", \"password\")\n server.sendmail(\"[email protected]\", recipient, content)\n server.close()", "def send(self):\n logger.debug('Sending Email')\n self.mimepgp.send()", "def send_email(subject, message):\n email_from = config.get('email', 'email_from')\n email_to = config.get('email', 'email_to')\n email_password = config.get('email', 'email_password')\n msg = MIMEMultipart()\n msg['From'] = email_from\n msg['To'] = email_to\n msg['Subject'] = subject\n body = message\n msg.attach(MIMEText(body, 'plain'))\n\n server = smtplib.SMTP(config.get('email', 'smtp_server'))\n server.ehlo()\n server.starttls()\n server.login(email_from, email_password)\n text = msg.as_string()\n server.sendmail(email_from, email_to, text)\n server.quit()", "def simple_send_email(sender, recipient, subject, message, server=EMAIL_HOST, 
port=EMAIL_PORT):\n headers = [\"From: \" + sender,\n \"Subject: \" + subject,\n \"To: \" + recipient,\n \"MIME-Version: 1.0\",\n \"Content-Type: text/plain\"]\n headers = \"\\r\\n\".join(headers)\n\n session = smtplib.SMTP(server, port)\n\n session.ehlo()\n session.starttls()\n session.ehlo()\n session.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)\n\n session.sendmail(sender, recipient, headers + \"\\r\\n\\r\\n\" + message)\n session.close()", "def send_test_email():\n from flask_mail import Message\n from website.core import mail\n\n to = ['[email protected]']\n subject = 'Test Email'\n template = '<h1>You Ok?</h1>'\n\n msg = Message(\n subject,\n recipients=to,\n html=template,\n sender=current_app.config['SECURITY_EMAIL_SENDER']\n )\n mail.send(msg)", "def __send_message(self, message):\n logging.debug(\"Sending message\")\n try:\n message = self.__email_api.messages.send(message=message)\n return message\n except Error as error:\n logging.error('An error occurred emailing a user: {0}'.format(error))\n raise error", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def send_email(recipient, subject, body) -> None:\n port = 465\n smtp_server = \"smtp.gmail.com\"\n sender_email = user['username']\n password = user['password']\n\n message = MIMEMultipart()\n message['From'] = sender_email\n message['To'] = recipient\n message['Subject'] = subject\n body = MIMEText(body) \n message.attach(body)\n\n server = smtplib.SMTP_SSL(smtp_server, port)\n server.login(sender_email, password)\n server.sendmail(sender_email, recipient, message.as_string())\n server.quit()", "def send_mail(self):\n try:\n mail = smtplib.SMTP('smtp.gmail.com', 587)\n mail.ehlo()\n mail.starttls()\n mail.login(self.mail_user, self.mail_pass)\n content = \"Subject: Test %s %s on host %s\\n\\n%s\\n logs are save at localhost path:\\n%s\" % (\n self.test_name, self.event, self.host_name, self.event_details, self.log_path\n )\n mail.sendmail(self.mail_user, self.target_mail, content)\n mail.close()\n except Exception as e:\n self.logger.error(\"Sending mail failed with Error %s\", e)\n\n else:\n self.logger.info(\"Mail sent to %s\", self.target_mail)", "def email_user(self, subject, message,\n from_email=settings.DEFAULT_FROM_EMAIL, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def send_email(subject, body, mail_to, reply_to=None):\n email_message = EmailMessage(\n subject=settings.EMAIL_SUBJECT.format(subject),\n body=body,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=mail_to,\n reply_to=reply_to,\n )\n email_message.content_subtype = 'html'\n email_message.send()", "def _send(self, email_message):\n if not email_message.to:\n return False\n try:\n if (isinstance(email_message,gmail.EmailMessage)):\n e = message\n elif (isinstance(email_message,mail.EmailMessage)):\n e = gmail.EmailMessage(sender=email_message.from_email,\n to=email_message.to,\n subject=email_message.subject,\n body=email_message.body)\n if email_message.extra_headers.get('Reply-To', None):\n e.reply_to = email_message.extra_headers['Reply-To']\n if email_message.bcc:\n e.bcc = list(email_message.bcc)\n #TODO - add support for html messages and attachments...\n e.send()\n except:\n if not self.fail_silently:\n raise\n return False\n return True", "def send_email(email_subject, recipient, message, config = None):\n try:\n config = current_app.config\n except:\n config = config\n\n sender = sendgrid.SendGridClient(config['SENDGRID_API_KEY'])\n\n email = sendgrid.Mail()\n\n 
email.set_subject(email_subject)\n email.add_to(recipient)\n email.set_from(config['FROM_EMAIL'])\n email.set_from_name(config['FROM_NAME'])\n email.set_replyto(config['FROM_NAME'])\n email.set_html(message)\n\n status, msg = sender.send(email)\n\n return status, msg", "def mail_send():\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n with open(f\"{report_file_path}/subject\", \"rb\") as subject_handler:\n subject = pickle.load(subject_handler)\n with open(f\"{report_file_path}/{'recipient'}\", \"rb\") as recipient_handler:\n recipient = pickle.load(recipient_handler)\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n try:\n if os.path.isfile(f\"{report_file_path}/mail_report.html\"):\n os.popen(\n f\"ssh -i {Common.get_config_value('build_server_pemfile')} \"\n f\"-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no\"\n f\" root@{Common.get_config_value('build_server_hostname')}\"\n f\" {Common.get_config_value('mail_script_location')}/\"\n f\"{Common.get_config_value('mail_script_name')} \"\n f\"{subject} {recipient}\"\n )\n Common.logger.info(\"Mail send successfully\")\n except Exception as ex:\n Common.logger.warning(f\"Mail sent failed due to exception: {ex}\")", "def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def _send_mail(self, subject, content, email):\n msg = MIMEText(content, 'plain', 'utf-8')\n\n msg['Subject'] = '%s' % (subject)\n msg['From'] = email\n msg['To'] = self._get_config_value('email', 'toemail')\n\n try:\n smtp_conn = SMTP('localhost')\n smtp_conn.sendmail(msg['From'], [msg['To']], msg.as_string())\n smtp_conn.quit()\n except (socket.error, SMTPException), e:\n self._logger.error('Mail could not be sent (%s)' % e)\n return False\n return True", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.username])", "def send_email(email: str, subject: str, message_body: str) -> bool:\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=email,\n subject=subject,\n plain_text_content=message_body,\n )\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_API_KEY\"))\n sg.send(message)\n return True" ]
[ "0.8239757", "0.80890095", "0.79037267", "0.78920776", "0.7768939", "0.7730623", "0.7727504", "0.76961035", "0.76466435", "0.7635667", "0.7613354", "0.7608988", "0.7607686", "0.7554999", "0.7533295", "0.7488125", "0.74320126", "0.74103194", "0.7409468", "0.7407818", "0.7407659", "0.7379054", "0.73781234", "0.7377925", "0.7371525", "0.73646206", "0.7337151", "0.733522", "0.73204684", "0.7319344", "0.7315561", "0.7312886", "0.72968775", "0.7276211", "0.7274837", "0.72666043", "0.72538304", "0.72514015", "0.72259086", "0.72171575", "0.7209841", "0.72038347", "0.7203337", "0.7203337", "0.7203337", "0.7203337", "0.7203337", "0.7203337", "0.7203337", "0.7203337", "0.7203047", "0.7203047", "0.7188015", "0.71835506", "0.7174273", "0.7171283", "0.71695065", "0.7169298", "0.7150135", "0.7144315", "0.71131754", "0.7108381", "0.70906496", "0.7089287", "0.7087746", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.7068474", "0.70606685", "0.70528996", "0.7043591", "0.7036554", "0.70276695", "0.701519", "0.7012193", "0.70107424", "0.7001337", "0.70003504", "0.69914514", "0.6991162", "0.69862646", "0.6980836", "0.69771427", "0.6975864", "0.69592273", "0.6957208", "0.6956571", "0.69260556" ]
0.0
-1
Shows basic usage of the Gmail API. Lists the user's Gmail labels.
def build_service():
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                f"{EMAIL_ACCOUNT_FILE}", SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('gmail', 'v1', credentials=creds)
    return service
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_labels():\n\n logging.info(\"Getting metadata about labels\")\n\n labels = []\n\n if len(args.labels) == 0:\n logging.warning(\"No labels specified, assuming all labels. If you have a lot of labels in your inbox you could hit API limits quickly.\")\n results = GMAIL_CLIENT.users().labels().list(userId='me').execute()\n\n labels = results.get('labels', [])\n else:\n logging.info('Using labels: %s ', args.labels)\n\n for label in args.labels:\n labels.append({'id': label})\n\n if not labels:\n logging.info('No labels found.')\n sys.exit()\n\n return labels", "def gmail(screen):\n\n\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n\n\n if not labels:\n print('No labels found.')\n else:\n if PRINT_CATEGORY: print('Labels:')\n for label in labels:\n if PRINT_CATEGORY: print(label['name'])\n if label['name']=='UNREAD':\n listMessages = ListMessagesWithLabels(service, 'me', label['name'])\n nbMessages = len(listMessages)\n nbMess = 0\n\n printTerminal('ENZO! Tu as ['+str(nbMessages)+'] messages non lus.',True)\n say('ENZO! Tu as: '+str(nbMessages)+' messages non lus.')\n\n for message in listMessages:\n #print(GetMessage(service, 'me', message['id'], False))\n nbMess+=1\n ggMessage = GetMessage(service, 'me', message['id'], False)\n #print(ggMessage)\n\n #msg_str = base64.urlsafe_b64decode(ggMessage['raw'].encode('ASCII'))\n #print(msg_str)\n\n for header in ggMessage['payload']['headers']:\n #print(header)\n if header['name']=='Subject':\n #unicode(text,'utf-8')\n #screen.addstr(0,1,\"\")\n if screen:\n screen.addstr(str(nbMess)+'] '+header['value'])\n say(header['value'])\n screen.refresh()\n else:\n print(str(nbMess)+'] '+header['value'])\n say(header['value'])\n #TTS(header['value'],'french', 50 ,2 )\n #status=subprocess.call([\"espeak\",\"-s 100 -v fr \",header['value']], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n #for part in ggMessage['payload']['parts']:\n # msg = base64.urlsafe_b64decode(part['body']['data'].encode('ASCII'))\n # print(removehtml(msg))\n #print(part['body']['data'])\n #say(part['body']['data'])\n if len(sys.argv) > 1:\n if sys.argv[1]=='-t':\n TTS(ggMessage,'french', 50 ,2 )\n #for toto in label:\n # print(toto)", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Call the Gmail API\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n if not labels:\n print('No labels found.')\n else:\n print('Labels:')\n for label in labels:\n print(label['name'])\n path = \"./ham\"\n try:\n os.mkdir(path)\n except OSError:\n print (\"Creation of the directory %s failed\" % path)\n else:\n 
print (\"Successfully created the directory %s \" % path)\n\n messages = []\n messages = ListMessagesMatchingQuery(service, 'me', 'in:inbox')\n idx = 0\n for message in messages:\n GetMimeMessage(service, 'me', message['id'], idx)\n idx+=1", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n\n service = build('gmail', 'v1', credentials=creds)\n\n labels = ListLabels(service, 'me')\n\n messages = ListMessagesWithLabels(service, 'me', label_ids=[\"CATEGORY_FORUMS\"])", "def getLabels(self) -> List[str]:\n\n results = self.service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n return labels", "def ListLabels(service, user_id):\n try:\n response = service.users().labels().list(userId=user_id).execute()\n labels = response['labels']\n for label in labels:\n print ('Label id: %s - Label name: %s' % (label['id'], label['name']))\n return labels\n except errors.HttpError as error:\n print ('An error occurred: %s' % error)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n user_id = 'me'\n\n ## get_labels ##\n #print_all_labels(service,user_id)\n #fetch_and_store(service,user_id)\n #apply_rules()", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n FPS = 30\n WINDOWWIDTH = 480\n WINDOWHEIGHT = 320\n\n WHITE = (255, 255, 255)\n RED = (255, 0, 0)\n BLUE = (0, 0, 255)\n GREEN = (0, 255, 0)\n BLACK = (0, 0, 0)\n LIGHTGREEN = (53, 230, 97)\n LIGHTBLUE = (53, 156, 230)\n LIGHTORANGE = (242, 109, 19)\n\n windowBgColor = WHITE\n\n pygame.init()\n FPSCLOCK = pygame.time.Clock()\n DISPLAYSURFACE = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('PLUTO')\n\n myfont = pygame.font.Font(None, 30)\n label = myfont.render(\"<-- Enter address\", 1, BLACK)\n\n buttonEnterEmail = pygbutton.PygButton((10, 10, 100, 80), 'Enter Email', bgcolor=LIGHTORANGE)\n buttonScan = pygbutton.PygButton((10, 100, 225, 210), 'Scan and Send', bgcolor=LIGHTGREEN, font=myfont)\n buttonReceive = pygbutton.PygButton((245, 100, 225, 210), 'Receive and Print', bgcolor=LIGHTBLUE, font=myfont)\n buttonPrintICR = pygbutton.PygButton((370, 10, 100, 80), 'Letterhead', bgcolor=RED)\n winBgButtons = (buttonEnterEmail, buttonScan, buttonReceive, buttonPrintICR)\n\n allButtons = winBgButtons\n\n userinput = \"\"\n\n while True:\n for event in pygame.event.get(): # event handling loop\n \n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n\n if 'click' in buttonReceive.handleEvent(event): #User called for printing of file\n \n idenListCurr = 
ListMessagesMatchingQuery(service, 'me', 'in:inbox')\n idenListLast = None\n \n #if idenListLast != idenListCurr: #to be used in future with inbox functionality\n\n #msgList = [] #should be a way to do this where only add the new messages\n \n #for each in idenListCurr:\n #iden = each[u'id']\n #mimeMsg = GetMimeMessage(service, 'me', iden)\n \n #msgList.append(mimeMsg)\n\n #idenListLast = idenListCurr\n \n\n #displayInterface(msgList)\n htmlMsg = GetRawMessageHtml(service, 'me', idenListCurr[0][u'id'])\n writeFile(htmlMsg, \"temp\", \"html\")\n \n try:\n pdfkit.from_file(\"/home/pi/git/PlutoTest/temp.html\", \"temp.pdf\") #change to your directory\n except IOError:\n pass\n \n popup = Popup(DISPLAYSURFACE)\n tempInput = popup.run(\"Your message will print\")\n printCups.executePrint(\"/home/pi/git/PlutoTest/temp.pdf\") #change to your directory\n os.remove(\"/home/pi/git/PlutoTest/temp.pdf\") #change to your directory\n os.remove(\"/home/pi/git/PlutoTest/temp.html\") #change to your directory\n time.sleep(5)\n \n if 'click' in buttonScan.handleEvent(event):#user called for scanning and sending of file\n scan.executeScan(\"temp\")\n \n message = CreateMessageWithAttachment(\"[email protected]\", userinput, \"Hello from Pluto!\", \"Enjoy!\",\n \"/home/pi/git/PlutoTest/\", \"temp.png\") #change to your email, directory\n SendMessage(service, 'me', message)\n \n \n os.remove(\"/home/pi/git/PlutoTest/temp.png\") #change to your directory\n popup = Popup(DISPLAYSURFACE)\n tempInput = popup.run(\"Your message has been sent\")\n print(\"sent\")\n time.sleep(5)\n \n if 'click' in buttonEnterEmail.handleEvent(event): #user called to enter e-mail address\n vkeybd = VirtualKeyboard(DISPLAYSURFACE)\n tempInput = vkeybd.run(\"...\")\n if tempInput != \"...\":\n userinput = tempInput\n label = myfont.render(\"To: \" + userinput, 1, BLACK)\n \n if 'click' in buttonPrintICR.handleEvent(event): #user called to print letterhead\n popup = Popup(DISPLAYSURFACE)\n tempInput = popup.run(\"The letterhead will print\")\n printCups.executePrint(\"/home/pi/git/PlutoTest/DemoPaper.png\") #change to your directory\n time.sleep(5)\n\n\n DISPLAYSURFACE.fill(windowBgColor)\n\n for b in allButtons:\n b.draw(DISPLAYSURFACE)\n\n # draw the text onto the surface\n DISPLAYSURFACE.blit(label, (120, 35, 350, 80))\n\n pygame.display.update()\n FPSCLOCK.tick(FPS)", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n user_id = 'me'\n label_id_one = 'INBOX'\n label_id_two = 'UNREAD'\n\n # Call the Gmail API\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n unread_msgs = service.users().messages().list(userId='me',labelIds=[label_id_one, label_id_two]).execute()\n mssg_list = unread_msgs['messages']\n print (\"Total unread messages in inbox: \", 
str(len(mssg_list)))\n final_list = [ ]\n\n for mssg in mssg_list:\n temp_dict = { }\n m_id = mssg['id'] # get id of individual message\n message = service.users().messages().get(userId=user_id, id=m_id).execute() # fetch the message using API\n payld = message['payload'] # get payload of the message \n headr = payld['headers'] # get header of the payload\n\n\n for one in headr: # getting the Subject\n if one['name'] == 'Subject':\n msg_subject = one['value']\n temp_dict['Subject'] = msg_subject\n else:\n pass\n\n\n for two in headr: # getting the date\n if two['name'] == 'Date':\n msg_date = two['value']\n date_parse = (parser.parse(msg_date))\n m_date = (date_parse.date())\n temp_dict['Date'] = str(m_date)\n else:\n pass\n\n for three in headr: # getting the Sender\n if three['name'] == 'From':\n msg_from = three['value']\n temp_dict['Sender'] = msg_from\n else:\n pass\n\n temp_dict['Snippet'] = message['snippet'] # fetching message snippet\n\n\n try:\n \n # Fetching message body\n mssg_parts = payld['parts'] # fetching the message parts\n part_one = mssg_parts[0] # fetching first element of the part \n part_body = part_one['body'] # fetching body of the message\n part_data = part_body['data'] # fetching data from the body\n clean_one = part_data.replace(\"-\",\"+\") # decoding from Base64 to UTF-8\n clean_one = clean_one.replace(\"_\",\"/\") # decoding from Base64 to UTF-8\n clean_two = base64.b64decode (bytes(clean_one, 'UTF-8')) # decoding from Base64 to UTF-8\n soup = BeautifulSoup(clean_two , \"lxml\" )\n mssg_body = soup.body()\n # mssg_body is a readible form of message body\n # depending on the end user's requirements, it can be further cleaned \n # using regex, beautiful soup, or any other method\n temp_dict['Message_body'] = mssg_body\n\n except :\n pass\n\n print (temp_dict)\n final_list.append(temp_dict) # This will create a dictonary item in the final list\n return final_list[:3]\n # This will mark the messagea as read\n #service.users().messages().list(userId=user_id, id=m_id,body={ 'removeLabelIds': ['UNREAD']}).execute() \n\n\n if not labels:\n print('No labels found.')\n else:\n print('Labels:')\n for label in labels:\n print(label['name'])", "def ListMessagesWithLabels(service, user_id, label_ids=[]):\n try:\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError:\n print(\"An error occurred\")", "def ListMessagesWithLabels(service, user_id, label_ids=[]):\n try:\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids,\n pageToken=page_token).execute()\n #print (response)\n messages.extend(response['messages'])\n for m in messages:\n print(m)\n DeleteMessage(service, 'me', m['id'])\n\n except errors.HttpError as error:\n print ('An error occurred: %s' % error)", "def get(self, request, format=None):\n user_social_auth = UserSocialAuth.objects.get(user=self.request.user)\n credentials = 
AccessTokenCredentials(user_social_auth.extra_data['access_token'],\n 'my-user-agent/1.0')\n http = httplib2.Http()\n http = credentials.authorize(http)\n service = discovery.build('gmail', 'v1', credentials=credentials)\n results = service.users().messages().list(userId='me').execute()\n messages = []\n for result in results['messages'][:100]:\n \n msg = service.users().messages().get(userId='me', id=result['id']).execute()\n subject = ''\n _from = ''\n for header in msg['payload']['headers']:\n if header['name'] == 'Subject':\n subject = header['value']\n elif header['name'] == 'From':\n _from = header['value']\n messages.append({'subject': subject, 'from': _from})\n \n return Response(messages)", "def ListMessagesWithLabels(service, user_id, label_ids=[]):\r\n try:\r\n response = service.users().messages().list(userId=user_id,\r\n labelIds=label_ids).execute()\r\n messages = []\r\n if \"messages\" in response:\r\n messages.extend(response[\"messages\"])\r\n\r\n while 'nextPageToken' in response:\r\n page_token = response[\"nextPageToken\"]\r\n response = service.users().messages().list(userId=user_id,\r\n labelIds=label_ids,\r\n pageToken=page_token).execute()\r\n messages.extend(response[\"messages\"])\r\n return messages\r\n except errors.HttpError as error:\r\n print(\"An error occurred: %s\" % error)", "def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('gmail', 'v1', http=http)\r\n\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"]).execute()\r\n messages = []\r\n if 'messages' in response:\r\n messages.extend(response['messages'])\r\n\r\n while 'nextPageToken' in response:\r\n page_token = response['nextPageToken']\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"], pageToken=page_token).execute()\r\n messages.extend(response['messages'])\r\n\r\n i = 0\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n message = service.users().messages().get(userId=USER_ID, id=msg_id).execute()\r\n for prop in message[\"payload\"][\"headers\"]:\r\n if prop[\"name\"] == \"From\":\r\n print(\"ID:\", i, \"\\tFrom:\", prop[\"value\"].encode('ascii','replace'), end=\"\\t\")\r\n elif prop[\"name\"] == \"Subject\":\r\n print(\"Subject:\", prop[\"value\"].encode('ascii','replace'))\r\n i += 1\r\n\r\n to_keep = raw_input(\"Do you want to keep any emails? [N / 0,1,...] 
\")\r\n if \",\" in to_keep:\r\n to_keep = to_keep.split(\",\")\r\n for i in range(len(to_keep)):\r\n to_keep[i] = int(to_keep[i])\r\n elif to_keep != \"N\":\r\n to_keep = [int(to_keep)]\r\n\r\n if isinstance(to_keep, list):\r\n for i in range(len(to_keep)-1,-1,-1):\r\n msg_labels = {'removeLabelIds': [\"SPAM\"], 'addLabelIds': [\"INBOX\"]}\r\n msg_id = messages[to_keep[i]][\"id\"]\r\n message = service.users().messages().modify(userId=USER_ID, id=msg_id, body=msg_labels).execute()\r\n del messages[to_keep[i]]\r\n\r\n # ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\r\n # filter0 = service.users().settings().filters().get(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # print(filter0)\r\n\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n # for prop in message[\"payload\"][\"headers\"]:\r\n # if prop[\"name\"] == \"From\":\r\n # start_email = prop[\"value\"].find(\"<\")\r\n # end_email = prop[\"value\"].find(\">\", start_email + 1)\r\n # email_address = prop[\"value\"][start_email + 1:end_email]\r\n # filter0[\"criteria\"][\"from\"] = filter0[\"criteria\"][\"from\"] + \" OR \" + email_address\r\n service.users().messages().delete(userId=USER_ID, id=msg_id).execute()\r\n\r\n # service.users().settings().filters().delete(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # service.users().settings().filters().create(userId=USER_ID, body=filter0).execute()\r\n print(\"All Spam Deleted!\")", "def ListMessagesWithLabels(service, user_id, label_ids=[]):\n try:\n response = service.users().messages().list(userId=user_id, labelIds=label_ids).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, labelIds=label_ids, pageToken=page_token).execute()\n messages.extend(response['messages'])\n return messages\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def ListMessagesWithLabels(service, user_id, label_ids=[]):\n try:\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def main():\n token = 'C:/Users/asif.rouf/PycharmProjects/pythonProject/AX_Admin_portal/Test/utils/google-api-token.json'\n credential = 'C:/Users/asif.rouf/PycharmProjects/pythonProject/AX_Admin_portal/Test/utils/google-api-credentials.json'\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(token):\n creds = Credentials.from_authorized_user_file(token, SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n credential, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n # with open('token.json', 'w') as token:\n # 
token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n\n # # Call the Gmail API\n # results = service.users().labels().list(userId='me').execute()\n # labels = results.get('labels', [])\n #\n # if not labels:\n # print('No labels found.')\n # else:\n # print('Labels:')\n # for label in labels:\n # print(label['name'])\n\n # Call the Gmail API to fetch INBOX\n results = service.users().messages().list(userId='me', labelIds=['INBOX']).execute()\n messages = results.get('messages', [])\n # message1 = messages[0]\n # print(message1)\n message1 = {'id': '17a5ca5f5f4bd0aa', 'threadId': '17a5b1bb861b3bc2'}\n message1 = {'id': '17a5cbc54c546465', 'threadId': '17a5b1bb861b3bc2'}\n\n # message1 = {'id': '17a5b852afe04a52', 'threadId': '17a50c997c059e68'}\n print(messages)\n print(message1)\n\n if not messages:\n print(\"No messages found.\")\n else:\n print(\"Message snippets:\")\n # for message in messages:\n # msg = service.users().messages().get(userId='me', id=message['id']).execute()\n # print(messages)\n # print(msg['snippet'])\n\n # msg = service.users().messages().get(userId='me', id=message1['id']).execute()\n # print(msg['snippet'])\n ###############################\n msg = service.users().messages().get(userId='me', id=message1['id'], format='raw').execute()\n msg_str = base64.urlsafe_b64decode(msg['raw'].encode('ASCII'))\n mime_msg = email.message_from_bytes(msg_str)\n print(msg['snippet'])\n print(mime_msg)\n print(mime_msg['Date'])\n print(mime_msg['From'])\n print(mime_msg['To'])\n print(mime_msg['Subject'])\n #\n # print(datetime.utcnow())\n\n ######################################################\n # msg = service.users().messages().get(userId='me', id=message1['id'], format='full').execute()\n # # parts can be the message body, or attachments\n # payload = msg['payload']\n # headers = payload.get(\"headers\")\n # parts = payload.get(\"parts\")\n # # print(payload)\n # # print(parts)\n # # print(headers)\n # for header in headers:\n # print(header['name'])\n # print(header['value'])\n #\n ######################################################\n msg = service.users().messages().get(userId='me', id=message1['id']).execute()\n\n # Use try-except to avoid any Errors\n try:\n # Get value of 'payload' from dictionary 'txt'\n payload = msg['payload']\n headers = payload['headers']\n subject = ''\n sender = ''\n\n # Look for Subject and Sender Email in the headers\n for d in headers:\n if d['name'] == 'Subject':\n subject = d['value']\n if d['name'] == 'From':\n sender = d['value']\n # The Body of the message is in Encrypted format. So, we have to decode it.\n # Get the data and decode it with base 64 decoder.\n parts = payload.get('parts')[0]\n data = parts['body']['data']\n data = data.replace(\"-\", \"+\").replace(\"_\", \"/\")\n decoded_data = base64.b64decode(data)\n\n # Now, the data obtained is in lxml. 
So, we will parse\n # it with BeautifulSoup library\n soup = BeautifulSoup(decoded_data, \"lxml\")\n body = soup.body()\n\n # Printing the subject, sender's email and message\n print(\"Subject: \", subject)\n print(\"From: \", sender)\n print(\"Message: \", body)\n # for link in soup.find_all('a', href=True):\n # print(link['href'])\n link = soup.find('a', href=True)\n print(link['href'])\n except:\n pass", "def new_label(self, context, payload):\n\n labels = GmailActions.labels(context)['labels']\n label_id = \"\"\n\n for label in labels:\n if label['name'] == payload['name']:\n label_id = label['id']\n break\n\n access_token = util.get_access_token(context['headers'])\n url = util.get_url(context) + f\"labels/{label_id}\"\n response = util.rest(\"GET\", url, access_token)\n\n if response.status_code > 400:\n raise Exception(\"Error \", response.text)\n\n return json.loads(response.text)", "def list_labels(service, repo):\n app = App()\n if repo:\n serv = app.get_service(service, repo=repo)\n else:\n serv = app.guess_service()\n repo_labels = serv.list_labels()\n if not repo_labels:\n print(\"No labels.\")\n return\n print(tabulate([\n (\n label.name,\n label.color,\n label.description\n )\n for label in repo_labels\n ], tablefmt=\"fancy_grid\"))", "def search(self, query, labels=[]):\n qstring = query + ' ' + self.opts.query\n if labels:\n query += ' (' + ' OR '.join(['label:' + l for l in labels]) + ')'\n print(query)\n cmd = self.service.users().messages()\n try:\n results = cmd.list(userId='me', q=query,\n includeSpamTrash=True).execute()\n if 'messages' not in results:\n return []\n gids = [m['id'] for m in results['messages']]\n \n while 'nextPageToken' in results:\n page_token = results['nextPageToken']\n results = cmd.list(userId='me', q=query,\n pageToken=page_token,\n includeSpamTrash=True).execute()\n gids.extend([m['id'] for m in results['messages']])\n return gids\n except errors.HttpError as ex:\n print('An error occurred: %s' % ex)\n return []", "def label_list(request):\n\n labels = Label.objects.all().order_by('group__id', 'name')\n\n return render_to_response('annotations/label_list.html', {\n 'labels': labels,\n },\n context_instance=RequestContext(request)\n )", "def ListMessagesWithLabels(service, user_id, label_ids=[]):\n try:\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id,\n labelIds=label_ids,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = 
build('gmail', 'v1', credentials=creds)\n\n # Calls the Gmail API to get Emails\n threads = listMessages(service, 'me', 'Jay Patel,')\n\n if not threads:\n print('No TUalerts found.')\n else:\n getCrimeLocation(service, 'me', threads)\n\n # Prints the TUlalerts (Mostly for testing purposes)\n printAlerts()", "def get_labels(self):\n return get_labels(self.api_key)", "def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])", "def get_label_list(\n self,\n project_id: int\n ) -> requests.models.Response:\n return self.get(\n 'v1/projects/{project_id}/labels'.format(\n project_id=project_id\n )\n )", "def labels_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"labels\", access_token)", "def GetAdGroupLabel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_labels():\n json_request = request.json # get the json from the server\n keys = sort_keys(json_request.keys()) # sort the keys (i.e. the token ids)\n labels = []\n for k in keys:\n # get the labels that the user input to the UI\n val = (json_request[k]['text'], json_request[k]['value'])\n labels.append(val)\n return labels", "def user_labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"user_labels\")", "def list_gpo(self, _):\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n for gpo in results:\n print(\"{cn}: {name}\".format(cn=gpo[\"cn\"], name=gpo[\"displayName\"]))", "def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n messageIds = []\n i = 0\n nextPageToken = None\n while (i <= 15):\n try:\n response = service.users().messages().list(userId='me', q='after:2016/09/01', maxResults=10000, pageToken=nextPageToken).execute()\n messages = response.get('messages')\n nextPageToken = response['nextPageToken']\n\n for m in messages:\n messageIds.append(m['id'])\n\n i+=1 \n except KeyError:\n break\n\n senders = []\n counter = 0\n for i in messageIds:\n data = service.users().messages().get(userId='me', id=i).execute()\n for d in data['payload']['headers']:\n if d['name'] == 'Received':\n print(d['value'][d['value'].find('; ')+1:d['value'].find('(PST)')])\n if d['name'] == 'From' and 'bounce' not in d['value']:\n senders.append(d['value'])\n print(counter, ' ', d['value'])\n counter += 1\n break\n\n emails = []\n with open('out.csv', 'wb') as f:\n writer = csv.writer(f, delimiter=',')\n for person in set(senders):\n cleaned = clean_data(person)\n name = cleaned[0]\n email = cleaned[1]\n if email not in emails:\n emails.append(email)\n if name != None and email != None:\n writer.writerow([name, email])", "def addmessageslabels(self, uidlist, labels):\n\n labels = labels - self.ignorelabels\n result = self._messagelabels_aux('+X-GM-LABELS', uidlist, labels)\n if result:\n for uid in uidlist:\n self.messagelist[uid]['labels'] = self.messagelist[uid]['labels'] | labels", "def build_user_labels_request(self):\n request = {\n \"systemLabels\": {\n \"name\": \"appName\",\n 
\"list_name\": [ \"a\",\"b\",\"c\"],\n \"boolean_value\": False\n },\n \"userLabels\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\"\n }\n }\n return request", "def get_emails():\n\n # generate the gmail api service\n service = build_gmail_api_v1()\n\n # compute date for one year ago\n today = date.today()\n one_year_ago = today - timedelta(days=365.25)\n start = one_year_ago - timedelta(days=1)\n end = one_year_ago + timedelta(days=1)\n start_string = start.strftime(\"%Y/%m/%d\")\n end_string = end.strftime(\"%Y/%m/%d\")\n query_string = f'after:{start_string} before:{end_string}'\n\n # generate the gmail api request (get list of messages from one year ago)\n request = service.users().messages().list(userId='me', q=query_string)\n\n # try to get the api response\n try:\n response = request.execute()\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n return []\n\n # get list of message ids from the api response\n messages = list(response[\"messages\"])\n ids = [message[\"id\"] for message in messages]\n\n # store all emails in a list\n data_to_display = []\n\n # loop through each message id\n for id in ids:\n\n try:\n # store email data in a dict\n email = {}\n\n # get message data by querying gmail api using message id\n request = service.users().messages().get(userId='me', id=id)\n response = request.execute()\n\n # get date, subject, from, to, etc from message header\n headers = list(response[\"payload\"][\"headers\"])\n looking_for = [\"Date\", \"Subject\", \"From\", \"To\"]\n for header in headers:\n if header[\"name\"] in looking_for:\n email[header[\"name\"]] = header[\"value\"]\n\n # try to get message body (base64) from response\n # the json structure varies a lot so that is why there are no many try/except\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][1][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n base64_message = \"Ti9B\"\n\n # decode the email body\n email[\"body\"] = base64.urlsafe_b64decode(\n base64_message).decode('utf-8')\n\n # populate list with email\n data_to_display.append(email)\n\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n\n return data_to_display", "def get_all(self):\n data = {\n 'readByQuery': {\n 'object': 'EEACCOUNTLABEL',\n 'fields': '*',\n 'query': None,\n 'pagesize': '1000'\n }\n }\n\n return self.format_and_send_request(data)['data']['eeaccountlabel']", "def main():\n #Gmail2TelegramClient(\"1234\") -- a person\n #Gmail2TelegramClient(\"-1234\") -- group chat", "def retrieve_labels(user_id: int) -> dict:\n user_label_table = dict()\n cur.execute('''SELECT USER_ID, NAME, CONTENT FROM \"labels\"''')\n rows = cur.fetchall()\n for row in rows:\n if user_id == row[0]:\n user_label_table[row[1]] = row[2]\n return user_label_table", "def lookup_label_id(service, labels):\n if not labels:\n return\n\n labelids = {} # label name => label id\n results = service.users().labels().list(userId='me').execute()\n mylabs = results.get('labels', [])\n for lab in mylabs:\n if len(labelids) == 
len(labels):\n break\n if lab['name'] in labels:\n labelids[lab['name']] = lab['id']\n return labelids", "def labels(self, number=-1, etag=None):\n url = self._build_url(\"labels\", base_url=self._api)\n return self._iter(int(number), url, label.ShortLabel, etag=etag)", "def build_gmail_api_v1():\n\n credentials = build_credentials()\n return googleapiclient.discovery.build('gmail', 'v1', credentials=credentials)", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n scores = {} # scores is an empty dict already\n\n if os.path.getsize('token.pickle') > 0: \n with open('token.pickle', \"rb\") as f:\n unpickler = pickle.Unpickler(f)\n # if file is not empty scores will be equal\n # to the value unpickled\n scores = unpickler.load()\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # Starts Gmail V1 with logged in user\n service = build('gmail', 'v1', credentials=creds)\n \n# ================================================================================== \n\n\n # MAIL CHECKHER ================================================================\n\n # get mails via gmail api\n results = service.users().messages().list(userId='me', labelIds=['INBOX']).execute()\n messages = results.get('messages', [])\n\n # mail number\n mail_nr = 0\n\n # variabel for how many mails we want to search through\n message_count = int(input(\"Hur många mails vill du söka igenom? 
\"))\n # if 0 mails are chosen\n if not messages:\n print('Inga mail i inkorgen')\n else:\n # looks through the email inbox for mails \"message_count\" amount of times\n for message in messages[:message_count]:\n # gets the email id's in full format so we can extraqct information via the gmail api\n msg = service.users().messages().get(userId='me', id=message['id'], format='full', metadataHeaders=None).execute()\n # gets the headers of the email in a variable\n headers = msg[\"payload\"][\"headers\"]\n # from headers gets the sender email, who it was from \n from_ = [i['value'] for i in headers if i[\"name\"]==\"From\"]\n # from headers gets the subject of the email\n subject = [i['value'] for i in headers if i[\"name\"]==\"Subject\"]\n # keeps count of the current email\n mail_nr += 1\n # if the email is from the security system email print it's information\n if from_ == ['Python Ormarna <[email protected]>'] or from_ == ['[email protected]']:\n # gets the email in raw format via gmail api\n rawmsg = service.users().messages().get(userId=\"me\", id=message[\"id\"], format=\"raw\", metadataHeaders=None).execute()\n print(\"=\"*100)\n print(\"\\nMail:\", mail_nr)\n print(\"Detta mail är från erat säkerhetssystem\")\n # variable the UNIX time of when the email was sent\n datum = int(msg['internalDate'])\n datum /= 1000\n # prints the date and time when the email was revived in local y/m/d/h/m/s\n print(\"Mottaget:\", datetime.fromtimestamp(datum).strftime('%Y-%m-%d %H:%M:%S'))\n print(\"Från:\", from_)\n print(\"Ämne:\", subject)\n # prints a snippet from the email\n print(msg['snippet'])\n print(\"\\n\")\n else:\n print(\"=\"*100)\n print(\"\\nMail:\", mail_nr)\n print(\"Detta mail är INTE från erat säkerhetssystem\\n\")\n time.sleep(1)\n print(\"Inga fler mail hittades\")", "def new_labeled_email(self, context, payload):\n\n access_token = util.get_access_token(context['headers'])\n url = util.get_url(context) + f\"messages/{payload['id']}\"\n response = util.rest(\"GET\", url, access_token)\n\n return GmailApi.get_email_data(json.loads(response.text))", "def test_get_form_label_email(self):\n self.assertEqual(\n self.user.get_form_label(email=True),\n 'testuser <[email protected]>',\n )", "def get_labels(self) -> List[str]:\n return self.labels", "def print_labels(self,labels):\n\t\tfor key in labels:\n\t\t\tprint key, ':\\t', labels[key]", "def get_project_labels(session=konfuzio_session()) -> List[dict]:\n url = get_project_url()\n r = retry_get(session, url)\n sorted_labels = sorted(r.json()['labels'], key=itemgetter('id'))\n return sorted_labels", "def GetLabels(args, client, instance_properties=False):\n labels_value = client.messages.Instance.LabelsValue\n if instance_properties:\n labels_value = client.messages.InstanceProperties.LabelsValue\n if args.labels:\n return labels_value(additionalProperties=[\n labels_value.AdditionalProperty(key=key, value=value)\n for key, value in sorted(six.iteritems(args.labels))\n ])\n return None", "def list_metering_labels(self, retrieve_all=True, **_params):\r\n return self.list('metering_labels', self.metering_labels_path,\r\n retrieve_all, **_params)", "def label_messages(self, org, messages, label):\n pass", "def list_labels(self, repository):\n data = self._get_all_data('/repos/{}/labels'.format(repository))\n return {l['name']: str(l['color']) for l in data}", "def get_label():\n inp = option_text('Input label name (leave blank for no label):')\n add_to_collected('label', inp)\n OPTIONS['label'] = inp\n return", "def labels_by_id(self, 
repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"labels\", access_token)", "def get_messages_from_navbar():\n\tlabels = frappe.get_all(\"Navbar Item\", filters={\"item_label\": (\"is\", \"set\")}, pluck=\"item_label\")\n\treturn [(\"Navbar:\", label, \"Label of a Navbar Item\") for label in labels]", "def labels(self, label_type = 'basic'):\n\t\tif label_type == None:\n\t\t\treturn {}\n\t\telif label_type == 'basic':\n\t\t\treturn self.dependency_labels()\n\t\telif label_type == 'SAMT':\n\t\t\treturn self.SAMT_labels()\n\t\telif label_type == 'all':\n\t\t\treturn self.label_all()\n\t\telse:\n\t\t\traise ValueError(\"%s is no valid labeltype\" %label_type)", "def get_labels():\n return if_found(dao.get_labels())", "def get_labels(self, uuid=None):\n return self._get_query('labels', self._build_params(uuid=uuid), Label)", "def label_list(entry):\n printing_resident_sheets(entry, rf'{constants.OUTPUTS_DIR}\\label_sheet.xlsx')\n printing_documents.create_label_list()", "def user_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_labels\")", "def get_labels(self, uuids=None, name=None, pager=None):\n params = self._build_params(uuid=uuids, name=name)\n return Label.deserialize_list(self._get_multiple('labels', params, pager))", "def display_name_labels(self):\n for name in self.names:\n # create a label for each name\n self.root.add_widget(Label(text=name))", "def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels", "def add_labels(self, labels: dict):\n self.status = \"Creating labels\"\n for lname, value in labels.items():\n self.labels.add_label(lname, value)", "def get(self):\n app_id = app_identity.get_application_id()\n users = User.query(User.email != None)\n for user in users:\n games = Game.query(Game.user == user.key, Game.game_over == False)\n if games:\n subject = \"This is a reminder!\"\n body = \"Hello {}, you have some unfinished games.\".format(\n user.name)\n\n mail.send_mail('noreply@{}.appspot.com'.format(app_id),\n user.email, subject, body)", "def labels(self):\r\n return labels.RepoLabels(self)", "def _add_user_label(self):\n user_label = tk.Label(parent, text='Пользователь: ' +\n self.user_info.ShortUserName + ' Версия ' + __version__,\n font=('Arial', 8))\n user_label.pack(side=tk.RIGHT, anchor=tk.NE)", "def pull_labels(self, org):\n pass", "def _get_labels_for_user(self, project, assignment, user):\n return Session.query(model.Label).filter(and_(model.Label.user_id==user.id,\n model.Label.project_id==project.id,\n model.Label.assignment_id==assignment.id)).all()", "def labels(self):\n return self._labels", "def user_labels_new(*args):\n return _ida_hexrays.user_labels_new(*args)", "def __get_emails(self):\n # This returns a list of Gmail message objects. 
Documentation can be found at\n # https://developers.google.com/gmail/api/v1/reference/users/messages/list\n return self.__service.users().messages().list(userId='me').execute()['messages']", "def labels(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"labels\")", "def get_labels():\n session = get_session()\n try:\n query = session.query(Album.label).distinct()\n final_query = query\n labels = query.all()\n label_list = []\n for label in labels:\n label_list.append(label[0])\n count = final_query.count()\n return jsonify({\n 'items': label_list,\n 'count': count\n })\n finally:\n session.close()", "def get(self, id):\n context = request.environ.get('context')\n obj = dbapi.netdevices_get_by_id(context, id)\n response = {\"labels\": list(obj.labels)}\n return response, 200, None", "def _messagelabels_aux(self, arg, uidlist, labels):\n labels = labels - self.ignorelabels\n uidlist = [uid for uid in uidlist if uid > 0]\n if len(uidlist) > 0:\n imapobj = self.imapserver.acquireconnection()\n try:\n labels_str = '(' + ' '.join([imaputil.quote(lb) for lb in labels]) + ')'\n # Coalesce uid's into ranges\n uid_str = imaputil.uid_sequence(uidlist)\n result = self._store_to_imap(imapobj, uid_str, arg, labels_str)\n\n except imapobj.readonly:\n self.ui.labelstoreadonly(self, uidlist, labels)\n return None\n\n finally:\n self.imapserver.releaseconnection(imapobj)\n\n if result:\n retlabels = imaputil.flags2hash(imaputil.imapsplit(result)[1])['X-GM-LABELS']\n retlabels = set([imaputil.dequote(lb) for lb in imaputil.imapsplit(retlabels)])\n return retlabels\n return None", "async def test__get_labels():\n # Uppercase 'C'\n assert ImageConfig._get_labels(json.loads('{\"Config\":{\"Labels\":{\"x\":\"5\"}}}')) == {\n \"x\": \"5\"\n }\n\n # Lowercase 'C'\n assert ImageConfig._get_labels(json.loads('{\"config\":{\"Labels\":{\"x\":\"5\"}}}')) == {\n \"x\": \"5\"\n }\n\n # Missing 'Labels'\n assert ImageConfig._get_labels(json.loads('{\"Config\":{}}')) == {}", "def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))", "def get_labels() -> list[Label]:\n\n labels_file = deepcopy(get_data(\"labels.yml\"))\n standard_labels = []\n for group_info in labels_file[\"groups\"]:\n labels = group_info.pop(\"labels\", [])\n group = LabelGroup(**group_info)\n for label_info in labels:\n label = Label(**label_info, group=group)\n standard_labels.append(label)\n for label_info in labels_file[\"standalone\"]:\n label = Label(**label_info)\n standard_labels.append(label)\n return standard_labels", "def get_all_labels(self):\n labels = self.wls_board.get_labels\n return labels", "def get_labels(self) -> List[str]:\n raise NotImplementedError()", "def build_expected_user_labels_response(self):\n labels = [\n {\n \"key\": \"key1\",\n \"value\": \"value1\"\n },\n {\n \"key\": \"key2\",\n \"value\": \"value2\"\n }\n ]\n return labels", "def user_labels_next(*args):\n return _ida_hexrays.user_labels_next(*args)", "def labels(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"labels\")", "def get_labels(self):\n\t\traise NotImplementedError()", "def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n 
return pulumi.get(self, \"labels\")", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def greet_users(names):\n for name in names:\n msg = \"Hello, \" + name.title() + \"!\"\n print(msg)", "def greet_users(names):\n for name in names:\n msg = \"Hello, \" + name.title() + \"!\"\n print(msg)", "def get_users(msg: telebot.types.Message):\n users = User.select()\n m = ''\n for user in users:\n menu_caption = \"In PVP game\" if user.state == states.USER_IN_PVP_GAME else \"In AI game\" if user.state == states.USER_IN_AI_GAME else \"In menu\"\n m += f'[{user.first_name}](tg://user?id={user.user_id}) - {menu_caption}\\n'\n\n bot.send_message(\n msg.from_user.id,\n m,\n parse_mode='Markdown'\n )", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def get_labels(self):\n return self.labels", "def get_label_list():\n f_name = os.path.join(FLAGS.labels_dir, FLAGS.labels_name)\n if os.path.exists(f_name):\n with open(f_name, 'rb') as f:\n try:\n label_list = [line.rstrip('\\n') for line in f]\n except:\n print(\"Could not read file:\" + f_name)\n sys.exit()\n return label_list", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return user_name_for( self.name )", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def ListAnnotations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.69438267", "0.67967373", "0.67262995", "0.64360845", "0.63330704", "0.62724566", "0.6237401", "0.5750789", "0.5673543", "0.5591335", "0.55725247", "0.5570501", "0.55700606", "0.5568798", "0.5558453", "0.55524904", "0.550932", "0.549548", "0.5492455", "0.5464197", "0.5463122", "0.5420545", "0.5341041", "0.52676064", "0.5200485", "0.51316243", "0.5107093", "0.50787425", "0.50407284", "0.50254977", "0.50041085", "0.49922433", "0.4991385", "0.49253178", "0.49183184", "0.490567", "0.48841432", "0.48712835", "0.48693612", "0.48648402", "0.48414052", "0.48378563", "0.48214775", "0.47722575", "0.47654605", "0.47642642", "0.47605622", "0.4754152", "0.4749384", "0.4711193", "0.47110534", "0.47039685", "0.46741915", "0.46413237", "0.4639425", "0.46300715", "0.4627345", "0.46050972", "0.45976943", "0.45974928", "0.4572201", "0.4571045", "0.45691425", "0.45686793", "0.45579994", "0.4556998", "0.45491806", "0.45491183", "0.45362765", "0.45296088", "0.4525228", "0.45229295", "0.45086512", "0.45086512", "0.44879213", "0.44854105", "0.4483515", "0.4482555", "0.4472183", "0.4472051", "0.44700742", "0.44667396", "0.44641134", "0.44639027", "0.44597068", "0.44554645", "0.44546765", "0.44546765", "0.44546765", "0.44518867", "0.44507888", "0.44507888", "0.44472745", "0.44424102", "0.44413784", "0.44405758", "0.44337413", "0.44319054", "0.44319054", "0.44319054", "0.44279084" ]
0.0
-1
Input of the Network
def __init__(self, shape, input_var=None): self.output = layers.InputLayer(shape, input_var=input_var)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def input(self):", "def input(self):\r\n pass", "def d_input(self):\n pass", "def inputs(self):\n pass", "def input(self):\n return self[\"input\"]", "def input(self):\n return self[\"input\"]", "def input_nodes(self):\n pass", "def processInputs(self):", "def input(self):\n connected_node = self.get_connected_node()\n if connected_node:\n #it is not possible to connect to an input\n return connected_node.output()\n return None", "def get_input(self):\n pass", "def read_input():\n input()\n size = int(input().split()[-1])\n nb_edges = int(input().split()[-1])\n\n g = UndirectedGraph()\n\n if parameters.DEBUG:\n print('Build nodes')\n\n nodes = [g.add_node() for _ in range(size)]\n\n if parameters.DEBUG:\n print('Build edges')\n edges = []\n weights = {}\n i = 0\n for i in range(nb_edges):\n if parameters.DEBUG:\n i += 1\n if i % 1000 == 0:\n print('Edge %d / %d' % (i, nb_edges))\n line = input()\n _, u, v, w = line.split()\n\n e = g.add_edge(nodes[int(u) - 1], nodes[int(v) - 1])\n weights[e] = int(w)\n\n edges.append((int(u), int(v), int(w)))\n\n line = input()\n while 'Terminals' not in line:\n line = input()\n if 'SECTION' in line:\n line = input()\n while 'Terminals' not in line:\n line = input()\n nb_terms = int(line.split()[-1])\n terms = []\n for i in range(nb_terms):\n line = input()\n _, t = line.split()\n terms.append(nodes[int(t) - 1])\n\n return instances.SteinerInstance(g, terms, weights)", "def _get_input(self):\n return self.__input", "def _get_input(self):\n return self.__input", "def _get_input(self):\n return self.__input", "def _get_input(self):\n return self.__input", "def _get_input(self):\n return self.__input", "def _get_input(self):\n return self.__input", "def _get_input(self):\n return self.__input", "def n_inputs(self):", "def input(self):\n return self._input", "def input(self):\n return self._input", "def input(self):\n return self._input", "def process_inputs(self, inputs):", "def input(self, name: str) -> bpy.types.NodeSocket:\n\t\treturn self.inputs[name]", "def input(self):\r\n\r\n if len(self.inputs) == 1:\r\n return self.inputs[0]\r\n else:\r\n raise Exception(\"Single input requested. 
Node has none or more than one input (%d).\"\r\n % len(self.inputs))", "def _input(self):\n\n return self.input_processor.readline().strip()", "def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")", "def get_inputs(self):\r\n raise NotImplementedError", "def get_inputs(self):\r\n raise NotImplementedError", "def read_input(E):\n # ---------- INSERT CODE BELOW ----------\n edge_list = []\n\n for _ in range(E):\n src, dst, cost = input('').rstrip('\\r\\n').split()\n edge_list.append((int(src),int(dst),int(cost)))\n \n return edge_list\n # ---------- INSERT CODE ABOVE ----------", "def input_handling():\r\n # get program parameters from user\r\n k_in, n_in, Random = io.get_values()\r\n\r\n # Generation of n data points (2d or 3d)\r\n n, K, d, data, labels = io.sk_generator(n_in, k_in, Random)\r\n # Print description of Inputs and Random choices\r\n io.print_description(k_in, n_in, K, n, Random, d)\r\n\r\n return n, K, d, data, labels, Random", "def read_input(self) -> None:\n raw_input = sys.stdin.read()\n\n self.input = raw_input.split('\\n')\n self.input = self.input[0:-1]\n\n self.packets = []\n for item in self.input:\n if item:\n self.packets.append(literal_eval(item))", "def QNetwork(input_var):\n n_actions = 2\n\n from lasagne.layers import batch_norm\n from lasagne.layers import DenseLayer\n from lasagne.layers import InputLayer\n from lasagne.nonlinearities import rectify, linear, sigmoid, softmax, tanh\n from lasagne.init import GlorotNormal\n network = InputLayer(shape=(None,4), input_var=input_var, name='Input')\n network = (DenseLayer(incoming=network,\n num_units=24,\n nonlinearity=rectify,\n W=GlorotNormal())\n )\n network = (DenseLayer(incoming=network,\n num_units=24,\n nonlinearity=rectify,\n W=GlorotNormal())\n\n# W=lasagne.init.HeUniform())\n )\n network = DenseLayer(incoming=network,\n num_units=n_actions,\n W=GlorotNormal(),\n b=lasagne.init.Constant(0),\n nonlinearity=linear)\n network = lasagne.layers.ReshapeLayer(network, (-1, n_actions))\n return network", "def get_input(inputs):\n return input(inputs)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def inputs(self):\n return NotImplementedError", "def net_input(self,X):\n return np.dot(X,self.w_[1:])+self.w_[0]", "def inputs(self) -> 'Input':\n return self.Input", "def parse(self, input):\n pass", "def get_input(self):\n if self.console_label is None:\n self.text_area.delete('1.0', END)\n print(\"The training has finished and the training file was created and sent to the server! 
Go Back.\")\n return\n\n valid_responses = {'y', 'n', 'u', 'f'}\n\n user_input = self.user_input.get()\n\n self.user_input.delete(0, END)\n\n if user_input not in valid_responses:\n return\n\n self.console_label.label_record_pair(user_input, self.current_record_pair)\n\n if user_input == 'f':\n self.upload_training_file()\n self.current_record_pair = None\n self.console_label = None\n self.text_area.delete('1.0', END)\n return\n\n self.text_area.yview(END)\n\n self.current_record_pair = self.console_label.get_uncertain_pair()", "def stdin(self):\n pass", "def __init__(self):\n self.inputs = {}", "def out(self, inputs):", "def trainNet():", "def input(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"input\")", "def process_input(self,r,g,b):\n pass", "def get_input_id(self):\n return self.net.inputs[0]", "def get_input(cls, message):\n data = input(message)\n return data", "def input(self, input):\n\n self._input = input", "def set_input(self, input):\n pass", "def set_input(self, input):\n pass", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def inp(text):\r\n input(text)", "def get_input_data(self, name='0'):\n return data", "def requestInput(st):\n return input(st+\": \")", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)", "def run(self, input):\n print self.print_meep(input)", "def get_input():\r\n operation = input()\r\n\r\n return operation", "def _nn_read_data(self):\n\t\treaData = True\n\t\tnnIncomingData = False\n\t\tnnData = \"\"\n\t\twhile reaData and self._neuralNetwork.poll()==None:\n\t\t\tnnIncomingMsg = self._neuralNetwork.stdout.readline().rstrip(\"\\n\").split()\n\t\t\tif \"COMM_OUT\" in nnIncomingMsg: nnIncomingData = True\n\t\t\telif \"END\" in nnIncomingMsg: reaData = False\n\t\t\telif nnIncomingData: nnData += \" \".join(nnIncomingMsg)+\"\\n\"\n\t\t\tprint \"\\t\\tNeuron: \"+\" \".join(nnIncomingMsg)\n\t\treturn nnData", "def _send_data_to_nn(self,wbtData):\n\t\tself._neuralNetwork.stdin.write(\"COMM IN\\n\") # this shitty COMM IN is not really needed..to modify in closedloop.py\n\t\tself._neuralNetwork.stdin.write(wbtData)", "def __init__(self, incoming, name='RNNInputLayer'):\n super(RNNInputLayer, self).__init__()\n self.incoming, self.incoming_shape = get_input(incoming)\n with tf.variable_scope(name) as 
self.layer_scope:\n self.out = self.incoming()\n self.name = name", "def writeInput(self) -> str:\n\n self.init()\n\n self.prepareConnectors()\n\n self.writeHeaders()\n self.writeMesh()\n self.writeNodeSets()\n self.writeElementSets()\n self.writeKinematicConnectors()\n self.writeMPCs()\n self.writeMaterials()\n self.writeMaterialAssignments()\n self.writeInitialConditions()\n self.writeAnalysisConditions()\n self.writeLoadSteps()\n\n return self._input", "def inputs(self):\n return self._inputs", "def input(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"input\")", "def hyper(self, input):\n return", "def getInput(self):\n return self.__string", "def input():\n return list_of_inputs.pop(0)", "def call(self, inputs, **kwargs):\n nodes, edges, edge_index = inputs\n n_in = self.lay_gather_in([nodes, edge_index])\n n_out = self.lay_gather_out([nodes, edge_index])\n msg = self.message_function([n_in, n_out, edges])\n pool_n = self.aggregate_message([nodes, msg, edge_index])\n n_new = self.update_nodes([nodes, pool_n])\n return n_new", "def __call__(self, inputs, training):\n\n\t\treturn self._build_network(inputs, training)", "def _buildInput (self):\n\n\t\tindata = self.config['input']\n\t\tif not isinstance (indata, dict):\n\t\t\tindata = ','.join(utils.alwaysList (indata))\t\t\t\n\t\t\tdepdchan = channel.fromChannels (*[d.channel for d in self.depends])\n\t\t\tindata = {indata: depdchan if self.depends else channel.fromArgv()}\n\t\t\t\n\t\t# expand to one key-channel pairs\n\t\tfor inkeys, invals in indata.iteritems():\n\t\t\tkeys = utils.split(inkeys, ',')\n\t\t\tif callable (invals):\n\t\t\t\tvals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())\n\t\t\t\tvals = vals.split()\n\t\t\telif isinstance (invals, basestring): # only for files: \"/a/b/*.txt, /a/c/*.txt\"\n\t\t\t\tvals = utils.split(invals, ',')\n\t\t\telif isinstance (invals, channel):\n\t\t\t\tvals = invals.split()\n\t\t\telif isinstance (invals, list):\n\t\t\t\tvals = channel.create(invals).split()\n\t\t\telse:\n\t\t\t\traise ValueError (\"%s: Unexpected values for input. Expect dict, list, str, channel, callable.\" % self._name())\n\t\t\t\n\t\t\twidth = len (vals)\n\t\t\tif len (keys) > width:\n\t\t\t\traise ValueError ('%s: Not enough data for input variables.\\nVarialbes: %s\\nData: %s' % (self._name(), keys, vals))\n\t\t\t\n\t\t\tfor i, key in enumerate(keys):\n\t\t\t\tintype = key.split(':')[-1]\n\t\t\t\tthekey = key.split(':')[0]\n\t\t\t\tval = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]\n\n\t\t\t\tif intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:\n\t\t\t\t\tintype = proc.IN_VARTYPE[0]\n\t\t\t\t\n\t\t\t\tif intype in proc.IN_FILESTYPE:\n\t\t\t\t\tfor x, v in enumerate(val):\n\t\t\t\t\t\tif isinstance (v, basestring):\n\t\t\t\t\t\t\tval[x] = channel.fromPath (v).toList()\n\t\t\t\t\n\t\t\t\tif self.length == 0: \n\t\t\t\t\tself.props['length'] = len (val)\n\t\t\t\tif self.length != len (val):\n\t\t\t\t\traise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' 
% (self._name(), self.length, len (val), key))\n\t\t\t\tself.props['indata'][thekey] = {\n\t\t\t\t\t'type': intype,\n\t\t\t\t\t'data': val\n\t\t\t\t}\n\t\t\tself.props['jobs'] = [None] * self.length", "def build_graph_from_input(self, input_node):\n raise NotImplementedError", "def revisar_input(self):\n pass", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return self.inputs", "def read_inputs(self):\n self.in_power.read()\n self.in_alert.read()", "def input(self, description):\n if isinstance(description, (int, long)):\n self._input = description\n self._sendCommand('%02dFN' % description)\n elif description in self.inputs:\n self._input = self.inputs[description]\n self._sendCommand('%02dFN' % self.inputs[description])\n else:\n raise Exception('No such input: %s' % description)", "def buildPackets(self):\n return self.input", "def net_input(self, X):\n return np.dot(X, self.w[1:]) + self.w[0]", "def build_user_input(self):\n pass", "def get_input():\n parser = argparse.ArgumentParser(description='Parameters')\n parser.add_argument('--host', help='adress of the host')\n parser.add_argument('--port', help='port of IPMI host')\n parser.add_argument('--user', help='user allowed to acces IPMI')\n parser.add_argument('--passwd', help='password for the specific user')\n parser.add_argument('--interval', help='seconds between each data reading')\n parser.add_argument('--nread', help='number of time to collect data')\n parser.add_argument('--store', action='store_true',\n help='save the data collected in a nosql db')\n args = parser.parse_args()\n return args, parser", "def load(self, input):", "def net_input(self, X):\n return np.dot(X, self.weight_[1:]) + self.weight_[0]", "def net_input(self, X):\n return np.dot(X, self.weight_[1:]) + self.weight_[0]", "def writeInput(self):\n\n #self.collect.writeInput()", "def input(self):\n\t\treturn self.image", "def generate_input_data(self):\n for socket_name, c in self.__taskobject.inputs.items():\n if c['optional']:\n self.__inputs[socket_name] = {'data': None, 'enable': False}\n else:\n self.__inputs[socket_name] = {'data': None, 'enable': True}", "def run(self, input):\n return {}", "def getInput(self):\n self.userInput = self.entry.get()", "def single_input_node(self, port: int):\n input_nodes = self.input_nodes(port)\n if len(input_nodes) != 1:\n raise Error('The amount of input nodes for port \"{}\" is not equal to 1. 
'.format(port) +\n refer_to_faq_msg(33))\n return input_nodes[0]", "def build_input(self):\n n_input = tf.placeholder(tf.int32, [None, None], name='n_input')\n t_input = tf.placeholder(tf.int32, [None, None], name='t_input')\n n_target = tf.placeholder(tf.int32, [None, None], name='n_target')\n t_target = tf.placeholder(tf.int32, [None, None], name='t_target')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n return n_input, t_input, n_target, t_target, keep_prob", "def input_type():\n pass", "def read_input(self) -> None:\n raw_input = sys.stdin.read()\n\n self._input = raw_input.split('\\n')\n self._input = self._input[0:-1]\n\n for line in self._input:\n direction, steps = line.split()\n self._instructions.append((direction, int(steps)))", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['nu','C', 'kernel', 'degree', 'gamma', 'coef0',\n 'tol', 'cache_size', 'shrinking', 'max_iter'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)", "def input(self, inputters, field, number=None):\n raise NotImplementedError" ]
[ "0.7633749", "0.7437", "0.68596274", "0.684022", "0.6768303", "0.6768303", "0.6635569", "0.6624624", "0.6530338", "0.644091", "0.6440604", "0.6437605", "0.6437605", "0.6437605", "0.6437605", "0.6437605", "0.6437605", "0.6437605", "0.64358646", "0.6357148", "0.6357148", "0.6357148", "0.63256097", "0.63188505", "0.62520593", "0.62299395", "0.6215211", "0.6170456", "0.6170456", "0.61694866", "0.61469495", "0.6097177", "0.6080556", "0.60722476", "0.60571694", "0.60571694", "0.60571694", "0.6019627", "0.5994837", "0.5971399", "0.5957929", "0.5955149", "0.5938013", "0.591181", "0.58880574", "0.5884213", "0.583897", "0.58353806", "0.58327454", "0.583251", "0.58272004", "0.58250463", "0.58250463", "0.58243096", "0.58243096", "0.58243096", "0.58237857", "0.5821127", "0.5807407", "0.580276", "0.57961625", "0.5792817", "0.57895315", "0.5771906", "0.57534945", "0.57451105", "0.57423997", "0.57403266", "0.5728213", "0.5725835", "0.57142794", "0.5710048", "0.5703046", "0.5703034", "0.5680818", "0.5679648", "0.56794214", "0.56794214", "0.56794214", "0.56794214", "0.5668195", "0.5667299", "0.5665239", "0.5654016", "0.5652643", "0.5642237", "0.56366974", "0.56300527", "0.56252944", "0.56252944", "0.56243384", "0.5597075", "0.5597029", "0.5584721", "0.557096", "0.55664986", "0.55631864", "0.55621743", "0.55562806", "0.55456275", "0.5539064" ]
0.0
-1
Allocate a TransposedConvLayer with shared variable internal parameters.
def __init__(self, input, num_filters, filter_size, stride=(2, 2), padding=(0, 0), activation=rectify): self.input = input self.output = layers.TransposedConv2DLayer(self.input, num_filters, filter_size, stride=stride, crop=padding, W=initialize_parameters()[0], b=initialize_parameters()[1], nonlinearity=activation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transpose(incoming, conv, nonlinearity, *args, **kwargs):\n return TransposedConv2DLayer(incoming, conv.input_shape[1],\n conv.filter_size, stride=conv.stride,\n crop=conv.pad, W=conv.W,\n flip_filters=not conv.flip_filters,\n nonlinearity=nonlinearity, *args,\n **kwargs)", "def create(self) -> None:\n if self.torch is not None: # type: ignore\n return\n self.torch = torch.nn.ConvTranspose2d(**self.args.arg_values)", "def TCN(input_dim): \r\n # Number of dilations in order to use for the temporal blocks.\r\n dilations = np.array([1, 2, 4, 8, 16, 32])\r\n\r\n input_dim.insert(0,1)\r\n print(f\"input_dim: {input_dim}\")\r\n input_layer = Input(shape=input_dim)\r\n cropping = 0\r\n assert (sum(dilations) * block_size + 1) == 127, \"Paper specifies receptive field size should be 127\"\r\n \r\n prev_layer, skip_layer, _ = add_temporal_block(input_layer, None, 1, 1, cropping)\r\n \r\n for dilation in dilations:\r\n prev_layer, skip_layer, cropping = add_temporal_block(prev_layer, skip_layer, 2, dilation, cropping)\r\n\r\n output_layer = PReLU(shared_axes=[2, 3])(skip_layer)\r\n output_layer = SpectralNormalization(Conv1D(fixed_filters, kernel_size=1))(output_layer)\r\n output_layer = PReLU(shared_axes=[2, 3])(output_layer)\r\n output_layer = SpectralNormalization(Conv1D(1, kernel_size=1))(output_layer)\r\n\r\n return Model(input_layer, output_layer)", "def convt_block(layer, concat, fsize, name):\n with tf.variable_scope(name):\n\n layer = tf.layers.conv2d_transpose(layer, filters=fsize, kernel_size=2, strides=2, \n kernel_regularizer=l2_reg(1e-1), name='convt')\n layer = tf.concat([layer, concat], axis=-1, name='concat')\n\n return layer", "def CustomConv3DTranspose(x_in, nf, strides=2, kernel_size = 3):\r\n\tx_out = Conv3DTranspose(nf, kernel_size=3, padding='same',kernel_initializer='he_normal', strides=strides)(x_in)\r\n\t#print(\"AAAAA\", x_out.shape)\r\n\tx_out = BatchNormalization()(x_out)\r\n\tx_out = LeakyReLU(0.2)(x_out)\r\n\treturn x_out", "def __init__(\n self,\n *,\n input_dims: Union[List[int], Tuple[int]],\n cnn_transpose_filter_specifiers: List[List[Union[int, List]]],\n cnn_transpose_use_bias: bool = True,\n cnn_transpose_activation: Optional[str] = \"relu\",\n cnn_transpose_use_layernorm: bool = False,\n ):\n super().__init__()\n\n assert len(input_dims) == 3\n\n cnn_transpose_activation = get_activation_fn(\n cnn_transpose_activation, framework=\"tf2\"\n )\n\n layers = []\n\n # Input layer.\n layers.append(tf.keras.layers.Input(shape=input_dims))\n\n for i, (num_filters, kernel_size, strides) in enumerate(\n cnn_transpose_filter_specifiers\n ):\n is_final_layer = i == len(cnn_transpose_filter_specifiers) - 1\n layers.append(\n tf.keras.layers.Conv2DTranspose(\n filters=num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"same\",\n # Last layer is never activated (regardless of config).\n activation=(\n None\n if cnn_transpose_use_layernorm or is_final_layer\n else cnn_transpose_activation\n ),\n # Last layer always uses bias (b/c has no LayerNorm, regardless of\n # config).\n use_bias=cnn_transpose_use_bias or is_final_layer,\n )\n )\n if cnn_transpose_use_layernorm and not is_final_layer:\n # Use epsilon=1e-5 here (instead of default 1e-3) to be unified with\n # torch. 
Need to normalize over all axes.\n layers.append(\n tf.keras.layers.LayerNormalization(axis=[-3, -2, -1], epsilon=1e-5)\n )\n layers.append(tf.keras.layers.Activation(cnn_transpose_activation))\n\n # Create the final CNNTranspose network.\n self.cnn_transpose = tf.keras.Sequential(layers)\n\n self.expected_input_dtype = tf.float32", "def conv2d_transpose(self, output_shape, filter_):\n return self.add_layer(conv2d_transpose, output_shape, filter_)", "def copy_conv(sess, tftensor, layer):\n\n W = sess.graph.get_tensor_by_name('{}/conv2d_params:0'.format(tftensor)).eval()\n W = W.transpose((3, 2, 0, 1))\n\n assert W.shape == layer.W.data.shape\n\n layer.W.data = W", "def test_transposed_conv2d_model(self):\n tf.compat.v1.reset_default_graph()\n\n _ = transposed_conv2d_model()\n\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['conv2d_transpose/BiasAdd'])\n self.assertEqual(conn_graph.get_all_ops()['conv2d_transpose/conv2d_transpose'].type, 'Conv2DTranspose')", "def __init__(self, img_size, latent_dim=10):\n super(DecoderBurgess, self).__init__()\n\n # Layer parameters\n hid_channels = 32\n kernel_size = 4\n hidden_dim = 256\n self.img_size = img_size\n # Shape required to start transpose convs\n self.reshape = (hid_channels, kernel_size, kernel_size)\n n_chan = self.img_size[0]\n self.img_size = img_size\n\n # Fully connected layers\n self.lin1 = nn.Linear(latent_dim, hidden_dim)\n self.lin2 = nn.Linear(hidden_dim, hidden_dim)\n self.lin3 = nn.Linear(hidden_dim, np.product(self.reshape))\n\n # Convolutional layers\n cnn_kwargs = dict(stride=2, padding=1)\n # If input image is 64x64 do fourth convolution\n if self.img_size[1] == self.img_size[2] == 64:\n self.convT_64 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs\n )\n\n self.convT1 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs\n )\n self.convT2 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs\n )\n self.convT3 = nn.ConvTranspose2d(\n hid_channels, n_chan, kernel_size, **cnn_kwargs\n )", "def __init__(self, img_size,\n latent_dim=10):\n super(DecoderBurgess, self).__init__()\n\n # Layer parameters\n hid_channels = 32\n kernel_size = 4\n hidden_dim = 256\n self.img_size = img_size\n # Shape required to start transpose convs\n self.reshape = (hid_channels, kernel_size, kernel_size)\n n_chan = self.img_size[0]\n self.img_size = img_size\n\n # Fully connected layers\n self.lin1 = nn.Linear(latent_dim, hidden_dim)\n self.lin2 = nn.Linear(hidden_dim, hidden_dim)\n self.lin3 = nn.Linear(hidden_dim, np.product(self.reshape))\n\n # Convolutional layers\n cnn_kwargs = dict(stride=2, padding=1)\n # If input image is 64x64 do fourth convolution\n if self.img_size[1] == self.img_size[2] == 64:\n self.convT_64 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n\n self.convT1 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.convT2 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.convT3 = nn.ConvTranspose2d(\n hid_channels, n_chan, kernel_size, **cnn_kwargs)", "def convert_conv2d_transpose(g, op, block):\n\n dilations = op.attr(\"dilations\")\n groups = op.attr(\"groups\")\n paddings = op.attr(\"paddings\")\n padding_algorithm = op.attr(\"padding_algorithm\")\n strides = op.attr(\"strides\")\n output_padding = op.attr(\"output_padding\") if op.attr(\"output_padding\") else [0, 0]\n\n kernel = g.get_node(op.input(\"Filter\")[0])\n input_x 
= g.get_node(op.input(\"Input\")[0])\n _, out_channels, k_h, k_w = infer_shape(kernel)\n k_size = [k_h, k_w]\n if padding_algorithm == \"VALID\":\n paddings = [0, 0]\n elif padding_algorithm == \"SAME\":\n # SAME padding of conv2d_transpose is not same with conv2d\n # We cannot use auto_pad here, only static shape is supported now\n dilations = [1, 1]\n input_shape = shape_of(input_x)\n h_w = _op.strided_slice(input_shape, [2], [4])\n try:\n h_w = infer_value(h_w, g.get_params()).numpy().tolist()\n except Exception as e:\n msg = \"The SAME padding algorithm of conv2d_transpose not support dynamic shape\"\n raise tvm.error.OpAttributeInvalid(msg) from e\n paddings = []\n for i in range(2):\n if strides[i] == 1 or h_w[i] % strides[i] == 0:\n pad = max(k_size[i] - strides[i], 0)\n else:\n pad = max(k_size[i] - (h_w[i] % strides[i]), 0)\n pad_before = pad // 2\n pad_after = pad - pad_before\n paddings.insert(-1, pad_before)\n paddings.append(pad_after)\n elif padding_algorithm == \"EXPLICIT\":\n if len(paddings) == 2:\n paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]\n elif len(paddings) == 4:\n paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]\n else:\n msg = f'Value {padding_algorithm} in attribute \"padding\" of operator Conv is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg)\n\n out = _op.nn.conv2d_transpose(\n input_x,\n kernel,\n strides=strides,\n padding=paddings,\n dilation=dilations,\n groups=groups,\n channels=out_channels * groups,\n kernel_size=k_size,\n output_padding=output_padding,\n )\n g.add_node(op.output(\"Output\")[0], out)", "def __init__(self, img_size,\n latent_dim=10):\n \n super(DecoderRezendeViola, self).__init__()\n\n # Layer parameters\n hid_channels = 32\n kernel_size = 4\n hidden_dim = 256\n self.img_size = img_size\n # Shape required to start transpose convs\n self.reshape = (hid_channels, kernel_size, kernel_size)\n n_chan = self.img_size[0]\n self.img_size = img_size\n\n # Fully connected layers\n self.lin1 = nn.Linear(latent_dim, hidden_dim)\n self.lin2 = nn.Linear(hidden_dim, hidden_dim)\n self.lin3 = nn.Linear(hidden_dim, np.product(self.reshape))\n\n # Convolutional layers\n cnn_kwargs = dict(stride=2, padding=1)\n # If input image is 64x64 do fourth convolution\n if self.img_size[1] == self.img_size[2] == 64:\n self.convT_64 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n\n self.convT1 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.convT2 = nn.ConvTranspose2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.convT3 = nn.ConvTranspose2d(\n hid_channels, 2 * n_chan, kernel_size, **cnn_kwargs)", "def conv_transpose1d(self, kernel, **kwargs):\n raise NotImplementedError(\"conv_transpose1d is not implemented\")", "def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif 
op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = cls._rename_operators.get('AvgPool2d')\n return node", "def __init__(self, in_channels, out_channels, kernel_size, num_layers=3,\n use_conv_transpose=True):\n super().__init__()\n\n _validate_args(in_channels, out_channels, kernel_size, num_layers)\n\n self.kernel_size = kernel_size\n self.use_conv_transpose = use_conv_transpose\n\n self.padding = (kernel_size-1)//2\n\n # All deconv blocks must contain at least one block mapping the\n # number of input channels to the number of output channels\n conv_list = [\n self._create_single_block(in_channels, out_channels)\n ]\n\n # Create the rest of the blocks\n for _ in range(num_layers - 1):\n conv_list.append(\n self._create_single_block(out_channels, out_channels)\n )\n\n self.convs = nn.Sequential(*conv_list)\n self.pool = nn.MaxUnpool2d((2, 2))", "def conv_transpose2d(self, kernel, **kwargs):\n raise NotImplementedError(\"conv_transpose2d is not implemented\")", "def __call__(self, inputs):\n with tf.variable_scope('conv_t_{}'.format(self.idx)):\n activation_fn = get_act_fn(self.act_fn)\n\n if self.cfg.VAR_ON_CPU:\n kernels = variable_on_cpu(\n name='kernels',\n shape=[self.kernel_size, self.kernel_size,\n self.n_kernel, inputs.get_shape().as_list()[3]],\n initializer=self.w_init_fn,\n dtype=tf.float32)\n conv_t = tf.nn.conv2d_transpose(\n value=inputs,\n filter=kernels,\n output_shape=self.output_shape,\n strides=[1, self.stride, self.stride, 1],\n padding=self.padding)\n\n if self.use_bias:\n biases = variable_on_cpu(\n name='biases',\n shape=[self.n_kernel],\n initializer=tf.zeros_initializer(),\n dtype=tf.float32)\n conv_t = tf.nn.bias_add(conv_t, biases)\n\n if activation_fn is not None:\n conv_t = activation_fn(conv_t)\n\n else:\n biases_initializer = tf.zeros_initializer() if self.use_bias else None\n conv_t = tf.contrib.layers.conv2d_transpose(\n inputs=inputs,\n num_outputs=self.n_kernel,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n activation_fn=activation_fn,\n weights_initializer=self.w_init_fn,\n biases_initializer=biases_initializer)\n\n return conv_t", "def __init__(self,\n intermediate_channels,\n output_channels,\n pred_key,\n name,\n conv_type='depthwise_separable_conv',\n bn_layer=tf.keras.layers.BatchNormalization):\n super(PanopticDeepLabSingleHead, self).__init__(name=name)\n self._pred_key = pred_key\n\n self.conv_block = convolutions.StackedConv2DSame(\n conv_type=conv_type,\n num_layers=1,\n output_channels=intermediate_channels,\n kernel_size=5,\n name='conv_block',\n use_bias=False,\n use_bn=True,\n bn_layer=bn_layer,\n activation='relu')\n self.final_conv = layers.Conv2D(\n output_channels,\n kernel_size=1,\n name='final_conv',\n kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01))", "def __init__(self, in_channels, out_channels, kernel_size, padding=0, **kwargs):\n ConstrainedLayer.__init__(self, nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=False), **kwargs)", "def _conv2d_transpose_block(in_f, out_f, *args, **kwargs):\n return nn.Sequential(\n nn.ConvTranspose2d(in_f, out_f, *args, **kwargs),\n nn.BatchNorm2d(out_f),\n nn.ReLU(),\n nn.Dropout2d(p=0.2)\n )", "def __init__(\n self,\n in_shape: Tuple,\n kernel_size: int,\n out_channels: int = None,\n stride: int = 1,\n aux_shape: Optional[Tuple] = None,\n downsampling_mode: str = \"convolutional\",\n upsampling_mode: str = \"convolutional\",\n transposed: bool = False,\n residual: 
bool = True,\n weightnorm: bool = True,\n gated: bool = True,\n activation: nn.Module = nn.ReLU,\n dropout: Optional[float] = None,\n ):\n super().__init__(in_shape=in_shape, transposed=transposed, residual=residual, aux_shape=aux_shape)\n\n # some parameters\n self.channels_in = in_shape[0]\n self.channels_out = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.resample_mode = upsampling_mode if transposed else downsampling_mode\n self.transposed = transposed\n self.residual = residual\n self.gated = gated\n self.activation_pre = activation() if self.residual else None\n\n # first convolution is always non-transposed and stride 1\n self.conv1 = TransposeableNormedSameConv2d(\n in_shape=in_shape,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=1,\n transposed=False,\n resample_mode=\"convolutional\",\n weightnorm=weightnorm,\n )\n\n # aux op\n if aux_shape is not None:\n self.activation_aux = activation()\n\n if list(aux_shape[1:]) > list(self.conv1.out_shape[1:]):\n # Downsample height and width (and match channels)\n aux_stride = tuple(np.asarray(aux_shape[1:]) // np.asarray(self.conv1.out_shape[1:]))\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=kernel_size,\n stride=aux_stride,\n transposed=False,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n elif list(aux_shape[1:]) < list(self.conv1.out_shape[1:]):\n # Upsample height and width (and match channels)\n aux_stride = tuple(np.asarray(self.conv1.out_shape[1:]) // np.asarray(aux_shape[1:]))\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=kernel_size,\n stride=aux_stride,\n transposed=True,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n elif aux_shape[0] != self.conv1.out_shape[0]:\n # Change only channels using 1x1 convolution\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=1,\n stride=1,\n transposed=False,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n else:\n # aux_shape and out_shape are the same\n assert aux_shape == self.conv1.out_shape\n self.aux_op = None\n else:\n self.aux_op = None\n\n self.activation_mid = activation()\n\n # dropout\n self.dropout = nn.Dropout(dropout) if dropout else dropout\n\n # second convolution is potentially transposed and potentially resampling\n gated_channels = 2 * out_channels if self.gated else out_channels\n self.conv2 = TransposeableNormedSameConv2d(\n in_shape=self.conv1.out_shape,\n out_channels=gated_channels,\n kernel_size=kernel_size,\n stride=self.stride,\n weightnorm=weightnorm,\n transposed=transposed,\n resample_mode=self.resample_mode,\n ) # doubled out channels for gating\n\n # output shape\n self._out_shape = (out_channels, *self.conv2.out_shape[1:]) # always out_channels regardless of gating\n\n # residual connections\n self.residual_op = ResidualConnectionConv2d(self._in_shape, self._out_shape, residual)", "def BasicConv3d(in_planes, out_planes, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0),\n bias=False, dw_t_conv=False):\n return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,\n stride=stride, padding=padding, bias=bias,\n groups=in_planes if dw_t_conv else 1)", "def intermediate_layer(layer, filters, kernel_size):\n layer = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=(2, 2), padding=\"same\")(layer)\n layer = 
LeakyReLU(alpha=LEAKY_RELU_ALPHA)(layer)\n return layer", "def conv_2d_transpose(incoming, nb_filter, filter_size, output_shape,\n strides=1, padding='same', activation='linear',\n bias=True, weights_init='uniform_scaling',\n bias_init='zeros', regularizer=None, weight_decay=0.001,\n trainable=True, restore=True, name=\"Conv2DTranspose\"):\n assert padding in ['same', 'valid', 'SAME', 'VALID'], \\\n \"Padding must be same' or 'valid'\"\n\n input_shape = utils.get_incoming_shape(incoming)\n assert len(input_shape) == 4, \"Incoming Tensor shape must be 4-D\"\n\n filter_size = utils.autoformat_filter_conv2d(filter_size,\n nb_filter,\n input_shape[-1])\n strides = utils.autoformat_kernel_2d(strides)\n padding = utils.autoformat_padding(padding)\n\n with tf.name_scope(name) as scope:\n\n W_init = initializations.get(weights_init)()\n W_regul = None\n if regularizer:\n W_regul = lambda x: losses.get(regularizer)(x, weight_decay)\n W = vs.variable(scope + 'W', shape=filter_size,\n regularizer=W_regul, initializer=W_init,\n trainable=trainable, restore=restore)\n # Track per layer variables\n tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)\n\n b = None\n if bias:\n b_init = initializations.get(bias_init)()\n b = vs.variable(scope + 'b', shape=nb_filter,\n initializer=b_init, trainable=trainable,\n restore=restore)\n # Track per layer variables\n tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)\n\n # Determine the complete shape of the output tensor.\n batch_size = tf.gather(tf.shape(incoming), tf.constant([0]))\n if len(output_shape) == 2:\n output_shape = output_shape + [nb_filter]\n elif len(output_shape) != 3:\n raise Exception(\"output_shape length error: \" \n + str(len(output_shape))\n + \", only a length of 2 or 3 is supported.\")\n complete_out_shape = tf.concat(0, [batch_size, tf.constant(output_shape)])\n \n inference = tf.nn.conv2d_transpose(incoming, W, complete_out_shape,\n strides, padding)\n \n # Reshape tensor so its shape is correct.\n inference.set_shape([None] + output_shape)\n\n if b: inference = tf.nn.bias_add(inference, b)\n\n if isinstance(activation, str):\n inference = activations.get(activation)(inference)\n elif hasattr(activation, '__call__'):\n inference = activation(inference)\n else:\n raise ValueError(\"Invalid Activation.\")\n\n # Track activations.\n tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)\n\n # Add attributes to Tensor to easy access weights.\n inference.scope = scope\n inference.W = W\n inference.b = b\n\n return inference", "def __init__(self, last_channels, this_channels, repeat_time, max_pool = False):\n super().__init__()\n\n self.repeat_time = repeat_time\n self.activate = nn.SELU()\n self.bn = nn.BatchNorm2d\n self.max_pool = None\n\n if max_pool:\n self.max_pool = nn.MaxPool2d(3, 2, 1)\n else:\n self.conv1_1 = nn.Sequential(\n nn.Conv2d(last_channels, this_channels, 3, stride=2, padding=1),\n self.bn(this_channels),\n )\n self.conv1_2 = nn.Sequential(\n nn.Conv2d(this_channels, this_channels, 3, stride=1, padding=1),\n self.bn(this_channels),\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv2d(this_channels, this_channels, 3, stride=1, padding=1),\n self.bn(this_channels),\n self.activate,\n nn.Conv2d(this_channels, this_channels, 3, stride=1, padding=1),\n self.bn(this_channels),\n )", "def __init__(self, channel_in, channel_out, kernel_size, stride, padding, output_padding=0):\n super().__init__()\n self.block = nn.Sequential(\n nn.ConvTranspose2d(channel_in, channel_out, kernel_size, stride, padding, 
output_padding),\n nn.BatchNorm2d(channel_out)\n )\n self.act = nn.ReLU()", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def pretrained_conv_layer(name, input_tensor, params):\n with tf.name_scope(name):\n weights = tf.constant(params[name+'_W'])\n biases = tf.constant(params[name+'_b'])\n\n conv = tf.nn.conv2d(input=input_tensor,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME',\n name='convolution')\n\n preactivations = tf.nn.bias_add(conv, biases, name='bias_addition')\n activations = tf.nn.relu(preactivations, name='activation')\n return activations", "def _append_conv_layer(self, param):\n self._parameterized_conv_layers.append(\n tf.keras.layers.Conv3D(\n padding='same',\n use_bias=False,\n kernel_regularizer=self._kernel_regularizer,\n **param,\n ))\n norm_layer_params = self._build_norm_layer_params(param)\n self._parameterized_conv_layers.append(self._norm(**norm_layer_params))\n\n relu_layer_params = self._build_activation_layer_params(param)\n self._parameterized_conv_layers.append(\n tf.keras.layers.Activation('relu', **relu_layer_params))", "def __init__(self, img_size, latent_dim=10):\n super(EncoderBurgess, self).__init__()\n\n # Layer parameters\n hid_channels = 32\n kernel_size = 4\n hidden_dim = 256\n self.latent_dim = latent_dim\n self.img_size = img_size\n # Shape required to start transpose convs\n self.reshape = (hid_channels, kernel_size, kernel_size)\n n_chan = self.img_size[0]\n\n # Convolutional layers\n cnn_kwargs = dict(stride=2, padding=1)\n self.conv1 = nn.Conv2d(n_chan, hid_channels, kernel_size, **cnn_kwargs)\n self.conv2 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.conv3 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n\n # If input image is 64x64 do fourth convolution\n if self.img_size[1] == self.img_size[2] == 64:\n self.conv_64 = nn.Conv2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs\n )\n\n # Fully connected layers\n self.lin1 = nn.Linear(np.product(self.reshape), hidden_dim)\n self.lin2 = nn.Linear(hidden_dim, hidden_dim)\n\n # Fully connected layers for mean and variance\n self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)", "def __init__(self, incoming, W=None, b=tf.zeros, ksize: int = None, num_outputs: int = None,\n weight_initializer=None, a=tf.nn.elu, strides=(1, 1, 1, 1), padding='ZEROPAD', dilation_rate=(1, 1),\n name='ConvLayer'):\n super(ConvLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, 
self.incoming_shape = get_input(incoming)\n \n # Set init for W and b\n if all(p is not None for p in [weight_initializer, ksize, num_outputs]):\n W = tofov(weight_initializer, shape=(ksize, ksize, self.incoming_shape[-1], num_outputs),\n var_params=dict(name='W_conv'))\n else:\n W = tofov(W, shape=None, var_params=dict(name='W_conv'))\n ksize = W.get_shape()[0].value\n if b is not None:\n b = tofov(b, shape=W.get_shape().as_list()[-1], var_params=dict(name='b_conv'))\n \n self.a = a\n self.b = b\n self.W = W\n self.padding = padding\n self.strides = strides\n self.dilation_rate = dilation_rate\n \n self.out = None\n self.name = name", "def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype):\n data_pad, kernel_transform = \\\n conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype)\n batch, in_c, in_h, in_w = data_pad.shape\n out_c, _, filter_h, filter_w = kernel_transform.shape\n stride_h, stride_w = strides\n\n # convolution stage\n out_c = simplify(out_c)\n out_h = simplify(in_h - filter_h + 1)\n out_w = simplify(in_w - filter_w + 1)\n dc = tvm.reduce_axis((0, in_c), name='dc')\n dh = tvm.reduce_axis((0, filter_h), name='dh')\n dw = tvm.reduce_axis((0, filter_w), name='dw')\n\n Output = tvm.compute(\n (batch, out_c, out_h, out_w),\n lambda b, c, h, w: tvm.sum(\n data_pad[b, dc, h+dh, w+dw].astype(out_dtype) *\n kernel_transform[c, dc, dh, dw].astype(out_dtype),\n axis=[dc, dh, dw]), tag=\"conv2d_transpose_nchw\")\n\n return Output", "def _conv_transpose(\n conv_type,\n nd_util,\n input,\n weight,\n bias=None,\n stride=1,\n padding=0,\n output_padding=0,\n groups=1,\n dilation=1,\n):\n weight_shape = list(weight.shape)\n return FunctionLib.apply(\n conv_type,\n input.device,\n [input, weight] + ([bias] if bias else []),\n in_channels=weight_shape[0],\n out_channels=weight_shape[1],\n kernel_shape=weight_shape[2:],\n strides=nd_util(stride),\n pads=nd_util(padding),\n dilations=nd_util(dilation),\n group=groups,\n output_padding=nd_util(output_padding),\n bias=bias is not None,\n dtype=weight.dtype,\n input_shape=list(input.shape),\n )", "def _conv2d(self, prev_layer, layer_idx, layer_name):\n W, b = self._weights(layer_idx, layer_name)\n W = tf.constant(W)\n b = tf.constant(np.reshape(b, (b.size)))\n return tf.nn.conv2d(prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b", "def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)", "def __init__(self,\n channels: int,\n kernel_size: int=15,\n activation: nn.Layer=nn.ReLU(),\n norm: str=\"batch_norm\",\n causal: bool=False,\n bias: bool=True,\n adaptive_scale: bool=False,\n init_weights: bool=False):\n assert check_argument_types()\n super().__init__()\n self.bias = bias\n self.channels = channels\n self.kernel_size = kernel_size\n self.adaptive_scale = adaptive_scale\n if self.adaptive_scale:\n ada_scale = self.create_parameter(\n [1, 1, channels], default_initializer=I.Constant(1.0))\n self.add_parameter('ada_scale', ada_scale)\n ada_bias = self.create_parameter(\n [1, 1, channels], default_initializer=I.Constant(0.0))\n self.add_parameter('ada_bias', ada_bias)\n\n self.pointwise_conv1 = Conv1D(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n\n # self.lorder is used to distinguish if it's a causal convolution,\n # if self.lorder > 0:\n # it's a causal convolution, the input will be 
padded with\n # `self.lorder` frames on the left in forward (causal conv impl).\n # else: it's a symmetrical convolution\n if causal:\n padding = 0\n self.lorder = kernel_size - 1\n else:\n # kernel_size should be an odd number for none causal convolution\n assert (kernel_size - 1) % 2 == 0\n padding = (kernel_size - 1) // 2\n self.lorder = 0\n\n self.depthwise_conv = Conv1D(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=padding,\n groups=channels,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n\n assert norm in ['batch_norm', 'layer_norm']\n if norm == \"batch_norm\":\n self.use_layer_norm = False\n self.norm = BatchNorm1D(channels)\n else:\n self.use_layer_norm = True\n self.norm = LayerNorm(channels)\n\n self.pointwise_conv2 = Conv1D(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n self.activation = activation\n\n if init_weights:\n self.init_weights()", "def _dense_connect_layer(self, input_data, name):\n with tf.variable_scope(name):\n conv_out = self._composite_conv(input_data=input_data, out_channel=self._growth_rate,\n name='composite_conv')\n concate_cout = tf.concat(values=[conv_out, input_data], axis=3, name='concatenate')\n\n return concate_cout", "def __init__(self, nfeat, nhid, nclass, dropout, alpha):\n super(GCN, self).__init__()\n self.dropout = dropout\n\n self.conv1 = GraphConvolutionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, not_final=True)\n \n self.add_module('conv1', self.conv1)\n\n self.conv2 = GraphConvolutionLayer(nhid, nclass, dropout=dropout, alpha=alpha, not_final=False)", "def __init__(\n self,\n latentspace,\n first_reshape_shape,\n channels=None,\n kernel_widths=None,\n strides=None,\n up_sampling=None,\n hidden_activation=\"relu\",\n output_activation=\"tanh\",\n **kwargs):\n # assign the default values if they were not specified\n channels = default_value([256, 128, 128, 64, 64, 3], channels)\n kernel_widths = default_value([4, 4, 4, 4, 4, 4], kernel_widths)\n strides = default_value([1, 1, 1, 1, 1, 1], strides)\n up_sampling = default_value([2, 2, 2, 2, 1, 1], up_sampling)\n\n h_Activation = lambda activation: LeakyReLU(0.02) if activation == \"leaky_relu\" else Activation(activation)\n initializer = tf.keras.initializers.RandomNormal(0, 0.02)\n\n inp = Input((latentspace,))\n\n # A Block\n A = Dense(np.prod(first_reshape_shape), kernel_initializer=initializer)(inp)\n A = LayerNormalization()(A)\n A = h_Activation(hidden_activation)(A)\n A = Reshape(first_reshape_shape)(A)\n\n # B Block\n B = Dense(64)(inp)\n B = LayerNormalization()(B)\n B = h_Activation(hidden_activation)(B)\n B = Reshape((1, 1, 64))(B)\n B = UpSampling2D(first_reshape_shape[0])(B)\n\n for channel, kernel_width, stride, up in zip(channels[:-1], kernel_widths[:-1], strides[:-1], up_sampling[:-1]):\n A, B = up_sampling_block(\n A, B, # Concat A&B to A\n up, # Up sampling for A&B\n channel, kernel_width, stride, initializer, # Conv2D for A\n # Layer normalization\n h_Activation(hidden_activation) # Activation for A\n ) # returns A, B\n\n # Final block which finally produces the output image.\n A = Concatenate()([A, B])\n A = Conv2D(channels[-1], kernel_widths[-1], strides[-1],\n padding=\"same\", kernel_initializer=initializer)(A)\n out = h_Activation(output_activation)(A)\n\n # Construct the functional model by calling the constructor of the Model super class.\n super(Decoder, self).__init__(inp, out, **kwargs)", "def 
Conv2DTranspose(\n inputs,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='same',\n data_format='channels_last',\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n dyn_hw=None):\n if kernel_initializer is None:\n if get_tf_version_tuple() <= (1, 12):\n kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0)\n else:\n kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')\n\n if get_tf_version_tuple() <= (1, 12):\n with rename_get_variable({'kernel': 'W', 'bias': 'b'}):\n layer = tf.layers.Conv2DTranspose(\n filters,\n kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n _reuse=tf.get_variable_scope().reuse)\n ret = layer.apply(inputs, scope=tf.get_variable_scope())\n ret = tf.identity(ret, name='output')\n ret.variables = VariableHolder(W=layer.kernel)\n if use_bias:\n ret.variables.b = layer.bias\n else:\n # Our own implementation, to avoid Keras bugs. https://github.com/tensorflow/tensorflow/issues/25946\n assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \\\n \"Unsupported arguments due to Keras bug in TensorFlow 1.13\"\n data_format = get_data_format(data_format, keras_mode=False)\n shape_dyn = tf.shape(inputs)\n strides2d = shape2d(strides)\n channels_in = inputs.shape[1 if data_format == 'NCHW' else 3]\n if data_format == 'NCHW':\n channels_in = inputs.shape[1]\n out_shape_dyn = tf.stack(\n [shape_dyn[0], filters,\n shape_dyn[2] * strides2d[0],\n shape_dyn[3] * strides2d[1]])\n out_shape3_sta = [filters,\n None if inputs.shape[2] is None else inputs.shape[2] * strides2d[0],\n None if inputs.shape[3] is None else inputs.shape[3] * strides2d[1]]\n else:\n channels_in = inputs.shape[-1]\n out_shape_dyn = tf.stack(\n [shape_dyn[0],\n shape_dyn[1] * strides2d[0],\n shape_dyn[2] * strides2d[1],\n filters])\n out_shape3_sta = [None if inputs.shape[1] is None else inputs.shape[1] * strides2d[0],\n None if inputs.shape[2] is None else inputs.shape[2] * strides2d[1],\n filters]\n\n kernel_shape = shape2d(kernel_size)\n W = tf.get_variable('W', kernel_shape + [filters, channels_in], initializer=kernel_initializer)\n if use_bias:\n b = tf.get_variable('b', [filters], initializer=bias_initializer)\n conv = tf.nn.conv2d_transpose(\n inputs, W, out_shape_dyn,\n shape4d(strides, data_format=data_format),\n padding=padding.upper(),\n data_format=data_format)\n conv.set_shape(tf.TensorShape([None] + out_shape3_sta))\n ret = activation(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')\n\n ret.variables = VariableHolder(W=W)\n if use_bias:\n ret.variables.b = b\n return ret", "def __init__(self, input_channel_dim: int, hidden_channel_dim: int = 64):\n\n super().__init__()\n layers = OrderedDict(\n [\n (\n \"conv_1\",\n nn.Sequential(\n nn.Conv2d(\n in_channels=input_channel_dim,\n out_channels=hidden_channel_dim,\n kernel_size=4,\n stride=2,\n ),\n nn.LeakyReLU(0.2, True),\n ),\n ),\n ]\n )\n current_dim = hidden_channel_dim\n for i in range(1, 4):\n prev_dim = current_dim\n current_dim *= 2\n layers[f\"conv_{i+1}\"] = nn.Sequential(\n nn.Conv2d(\n 
in_channels=prev_dim,\n out_channels=current_dim,\n kernel_size=4,\n stride=2,\n ),\n nn.LeakyReLU(0.2, True),\n )\n layers[\"conv_5\"] = nn.Sequential(\n nn.Conv2d(\n in_channels=current_dim,\n out_channels=1,\n kernel_size=4,\n stride=2,\n )\n )\n self.layers = nn.Sequential(layers)", "def build(self, input_shape: tf.Tensor):\n self.conv = tf.keras.layers.Conv2D(\n self.channels, (1, 1), input_shape=input_shape)\n self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)", "def conv1d_transpose(inputs,\n filters,\n kernel_size,\n strides=2,\n padding='same',\n data_format='channels_last',\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n reuse=None):\n\n # expand inputs dimension\n inputs = tf.expand_dims(inputs,axis=1) # (batch_size, 1, N, d)\n\n params = {\"inputs\":inputs, \"filters\":filters, \"kernel_size\":(1,kernel_size),\n \"strides\":(1,strides),\"padding\":padding,\"data_format\":data_format,\n \"activation\":activation,\"use_bias\":use_bias,\n \"kernel_initializer\":kernel_initializer,\"bias_initializer\":bias_initializer,\n \"kernel_regularizer\":kernel_regularizer,\"bias_regularizer\":bias_regularizer,\n \"activity_regularizer\":activity_regularizer,\"kernel_constraint\":kernel_constraint,\n \"bias_constraint\":bias_constraint,\"trainable\":trainable,\"name\":name,\"reuse\":reuse} \n\n conv_out = tf.squeeze(tf.layers.conv2d_transpose(**params),axis=1) # (batch_size, 2N, kernel_size)\n\n return conv_out", "def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)", "def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):\n data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(\n data, kernel, strides, padding, out_dtype, output_padding\n )\n batch, in_c, in_h, in_w = data_pad.shape\n out_c, _, filter_h, filter_w = kernel_transform.shape\n\n # convolution stage\n out_c = simplify(out_c)\n\n out_h = simplify(in_h - filter_h + 1)\n out_w = simplify(in_w - filter_w + 1)\n dc = te.reduce_axis((0, in_c), name=\"dc\")\n dh = te.reduce_axis((0, filter_h), name=\"dh\")\n dw = te.reduce_axis((0, filter_w), name=\"dw\")\n\n Output = te.compute(\n (batch, out_c, out_h, out_w),\n lambda b, c, h, w: te.sum(\n data_pad[b, dc, h + dh, w + dw].astype(out_dtype)\n * kernel_transform[c, dc, dh, dw].astype(out_dtype),\n axis=[dc, dh, dw],\n ),\n tag=\"conv2d_transpose_nchw\",\n )\n\n return Output", "def _composite_conv(self, input_data, out_channel, name):\n with tf.variable_scope(name):\n bn_1 = self.layer_bn(input_data=input_data, is_training=self._is_training, name='bn_1')\n\n relu_1 = self.relu(input_data=bn_1, name='relu_1')\n\n if self._with_bc:\n conv_1 = self.conv2d(input_data=relu_1, out_channel=out_channel, kernel_size=1,\n padding='SAME', stride=1, use_bias=False, name='conv_1')\n\n bn_2 = self.layer_bn(input_data=conv_1, is_training=self._is_training, name='bn_2')\n relu_2 = self.relu(input_data=bn_2, name='relu_2')\n conv_2 = self.conv2d(input_data=relu_2, out_channel=out_channel, kernel_size=3,\n padding='SAME', stride=1, use_bias=False, 
name='conv_2')\n\n else:\n conv_2 = self.conv2d(input_data=relu_1, out_channel=out_channel, kernel_size=3,\n padding='SAME', stride=1, use_bias=False, name='conv_2')\n\n return conv_2", "def resblock(input_tensor, num_channels):\n tensor = Conv2D(filters=num_channels, kernel_size=3, padding='same')(\n input_tensor)\n tensor = Activation('relu')(tensor)\n tensor = Conv2D(filters=num_channels, kernel_size=3, padding='same')(\n tensor)\n tensor = Add()([tensor, input_tensor])\n output_tensor = Activation('relu')(tensor)\n return output_tensor", "def allocate_primal_variables_conv(layer):\n\n assert layer.isconv\n\n gpu_vars = ((layer.l, [1]),\n (layer.k, [1]))\n cpu_vars = ()\n\n heavy_vars = ((layer.W_i, [Cfg.n_batches] + layer.W_shape),\n (layer.b_i, [Cfg.n_batches] + layer.b_shape),\n (layer.l_i, [Cfg.n_batches]))\n\n if Cfg.store_on_gpu:\n gpu_vars += heavy_vars\n else:\n cpu_vars += heavy_vars\n\n gpu_fun = update_gpu_fun(gpu_vars)\n cpu_fun = update_cpu_fun(cpu_vars, layer)\n\n def update_all_fun():\n gpu_fun()\n cpu_fun()\n\n return update_all_fun", "def _conv2d_layer(self, inputs, filters_num, kernel_size, name, use_bias=False, strides=1):\n if strides > 1: # modified 0327\n inputs = tf.pad(inputs, paddings=[[0, 0], [1, 0], [1, 0], [0, 0]], mode='CONSTANT')\n conv = tf.layers.conv2d(inputs=inputs, filters=filters_num,\n kernel_size=kernel_size, strides=[strides, strides],\n padding=('SAME' if strides == 1 else 'VALID'), # padding = 'SAME', #\n use_bias=use_bias,\n name=name) # , kernel_initializer = tf.contrib.layers.xavier_initializer()\n return conv", "def build(self,\r\n conv_filters=196,\r\n conv_size=13,\r\n conv_strides=4,\r\n act='relu',\r\n rnn_layers=2,\r\n LSTM_units=128,\r\n drop_out=0.8):\r\n i = Input(shape=self.input_size, name='input')\r\n x = Conv1D(conv_filters,\r\n conv_size,\r\n strides=conv_strides,\r\n name='conv1d')(i)\r\n x = BatchNormalization()(x)\r\n x = Activation(act)(x)\r\n for _ in range(rnn_layers):\r\n x = Bidirectional(LSTM(LSTM_units,\r\n return_sequences=True))(x)\r\n x = Dropout(drop_out)(x)\r\n x = BatchNormalization()(x)\r\n y_pred = TimeDistributed(Dense(self.output_size,\r\n activation='softmax'))(x)\r\n # ctc inputs\r\n labels = Input(name='the_labels', shape=[None, ], dtype='int32')\r\n input_length = Input(name='input_length', shape=[1], dtype='int32')\r\n label_length = Input(name='label_length', shape=[1], dtype='int32')\r\n # Keras doesn't currently support loss funcs with extra parameters\r\n # so CTC loss is implemented in a lambda layer\r\n loss_out = Lambda(ctc_lambda_func,\r\n output_shape=(1,),\r\n name='ctc')([y_pred,\r\n labels,\r\n input_length,\r\n label_length])\r\n self.tm = Model(inputs=i,\r\n outputs=y_pred)\r\n self.m = Model(inputs=[i,\r\n labels,\r\n input_length,\r\n label_length],\r\n outputs=loss_out)\r\n return self.m, self.tm", "def up_conv_2d(input_tensor, nb_filters, name):\n resize = UpSampling2D(size=(2, 2), interpolation='nearest')(input_tensor)\n paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])\n resize = tf.pad(resize, paddings, \"SYMMETRIC\")\n output_layer = Conv2D(\n filters=nb_filters,\n kernel_size=(3, 3),\n activation='relu',\n name=name)(\n resize)\n\n return output_layer", "def __init__(self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Sequence[int]],\n stride: Union[int, Sequence[int]] = 1,\n padding: PaddingArgType = PaddingMode.DEFAULT,\n output_padding: Union[int, Sequence[int]] = 0,\n dilation: Union[int, Sequence[int]] = 1,\n resize_at_exit: bool = False,\n 
use_shortcut: Optional[bool] = None,\n shortcut: Optional[LayerOrLayerFactory] = None,\n conv0: Optional[LayerOrLayerFactory] = None,\n conv1: Optional[LayerOrLayerFactory] = None,\n merge_context0: Optional[Module] = None,\n merge_context1: Optional[Module] = None,\n activation: Optional[LayerFactory] = None,\n normalizer: Optional[NormalizerFactory] = None,\n dropout: Optional[Union[float, LayerOrLayerFactory]] = None,\n weight_norm: WeightNormArgType = False,\n gated: bool = False,\n gate_bias: float = DEFAULT_GATE_BIAS,\n use_bias: Optional[bool] = None,\n weight_init: TensorInitArgType = DEFAULT_WEIGHT_INIT,\n bias_init: TensorInitArgType = DEFAULT_BIAS_INIT,\n data_init: Optional[DataInitArgType] = None,\n device: Optional[str] = None,\n ):\n def use_bias_or_else(default_val: bool):\n if use_bias is None:\n return default_val\n return use_bias\n\n def compile_layer_list(layers: List[Module]) -> Module:\n if len(layers) == 0:\n return Identity()\n elif len(layers) == 1:\n return layers[0]\n else:\n return Sequential(layers)\n\n spatial_ndims = self._get_spatial_ndims()\n is_deconv = self._is_deconv()\n\n # validate arguments\n in_channels = int(in_channels)\n out_channels = int(out_channels)\n\n kernel_size = validate_conv_size('kernel_size', kernel_size, spatial_ndims)\n stride = validate_conv_size('strides', stride, spatial_ndims)\n dilation = validate_conv_size('dilation', dilation, spatial_ndims)\n padding = validate_padding(padding, kernel_size, dilation, spatial_ndims)\n\n if output_padding != 0 and not is_deconv:\n raise ValueError(f'The `output_padding` argument is not allowed '\n f'by {self.__class__.__qualname__}.')\n output_padding = validate_output_padding(\n output_padding, stride, dilation, spatial_ndims)\n\n if conv0 is None:\n conv0 = self._default_conv_factory()\n\n if conv1 is None:\n conv1 = self._default_conv_factory()\n\n orig_merge_context0 = merge_context0\n if merge_context0 is None:\n merge_context0 = IgnoreContext()\n else:\n merge_context0 = validate_layer('merge_context0', merge_context0)\n\n if merge_context1 is None:\n merge_context1 = IgnoreContext()\n else:\n merge_context1 = validate_layer('merge_context1', merge_context1)\n\n if shortcut is not None:\n use_shortcut = True\n if use_shortcut is None:\n use_shortcut = (\n any(s != 1 for s in stride) or\n any(p[0] + p[1] != (k - 1) * d\n for p, k, d in zip(padding, kernel_size, dilation)) or\n in_channels != out_channels)\n\n if activation is not None:\n activation_factory = validate_layer_factory('activation', activation)\n else:\n activation_factory = None\n\n if normalizer is not None:\n normalizer_factory = validate_layer_factory('normalizer', normalizer)\n else:\n normalizer_factory = None\n\n if isinstance(dropout, float):\n dropout = Dropout(p=dropout)\n elif dropout is not None:\n dropout = get_layer_from_layer_or_factory('dropout', dropout)\n\n conv0_weight_norm = weight_norm\n if conv0_weight_norm is True:\n conv0_weight_norm = (\n WeightNormMode.FULL if normalizer is None or dropout is not None\n else WeightNormMode.NO_SCALE\n )\n\n kwargs = {'weight_init': weight_init, 'bias_init': bias_init,\n 'data_init': data_init, 'device': device}\n\n # build the shortcut path\n if use_shortcut:\n if shortcut is None:\n shortcut = self._default_conv_factory()\n if not isinstance(shortcut, Module):\n shortcut = get_layer_from_layer_or_factory(\n 'shortcut', shortcut, kwargs=dict(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n 
dilation=dilation,\n use_bias=use_bias_or_else(gated),\n weight_norm=weight_norm,\n **self._add_output_padding_to_kwargs(output_padding, kwargs)\n )\n )\n else:\n shortcut = Identity()\n\n # prepare the arguments for the residual path\n if resize_at_exit:\n conv0_out_channels = in_channels\n conv0_stride = 1\n conv0_padding = PaddingMode.HALF # such that it can keep the output shape\n conv0_kwargs = kwargs\n conv1_stride = stride\n conv1_padding = padding\n conv1_kwargs = self._add_output_padding_to_kwargs(output_padding, kwargs)\n else:\n conv0_out_channels = out_channels\n conv0_stride = stride\n conv0_padding = padding\n conv0_kwargs = self._add_output_padding_to_kwargs(output_padding, kwargs)\n conv1_stride = 1\n conv1_padding = PaddingMode.HALF # such that it can keep the output shape\n conv1_kwargs = kwargs\n\n conv1_out_channels = out_channels\n if gated:\n conv1_out_channels *= 2\n\n # pre_conv0\n pre_conv0 = []\n if normalizer_factory is not None:\n pre_conv0.append(normalizer_factory(in_channels))\n if activation_factory is not None:\n pre_conv0.append(activation_factory())\n pre_conv0 = compile_layer_list(pre_conv0)\n\n # conv0\n conv0 = get_layer_from_layer_or_factory( # conv0\n 'conv0', conv0, kwargs=dict(\n in_channels=in_channels,\n out_channels=conv0_out_channels,\n kernel_size=kernel_size,\n stride=conv0_stride,\n padding=conv0_padding,\n dilation=dilation,\n use_bias=use_bias_or_else(normalizer_factory is None or\n dropout is not None or\n orig_merge_context0 is not None),\n weight_norm=conv0_weight_norm,\n **conv0_kwargs,\n )\n )\n\n # pre_conv1\n pre_conv1 = []\n if dropout is not None:\n pre_conv1.append(dropout)\n if normalizer_factory is not None:\n pre_conv1.append(normalizer_factory(conv0_out_channels))\n if activation_factory is not None:\n pre_conv1.append(activation_factory())\n pre_conv1 = compile_layer_list(pre_conv1)\n\n # conv1\n conv1 = get_layer_from_layer_or_factory(\n 'conv1', conv1, kwargs=dict(\n in_channels=conv0_out_channels,\n out_channels=conv1_out_channels,\n kernel_size=kernel_size,\n stride=conv1_stride,\n padding=conv1_padding,\n dilation=dilation,\n use_bias=use_bias_or_else(True),\n weight_norm=weight_norm,\n **conv1_kwargs,\n )\n )\n\n # post_conv1\n if gated:\n post_conv1 = Gated(\n feature_axis=-(spatial_ndims + 1),\n num_features=out_channels,\n gate_bias=gate_bias,\n )\n else:\n post_conv1 = Identity()\n\n # construct the layer\n super().__init__()\n self.shortcut = shortcut\n self.pre_conv0 = pre_conv0\n self.merge_context0 = merge_context0\n self.conv0 = conv0\n self.pre_conv1 = pre_conv1\n self.merge_context1 = merge_context1\n self.conv1 = conv1\n self.post_conv1 = post_conv1", "def _make_conv_layers(self):\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=1), # padding=3 so, output is 224.\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, padding=1), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 
1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv", "def initParams(self):\n sizes = [self.inputDim]+self.layerSizes+[self.outputDim]\n scales = [np.sqrt(6)/np.sqrt(n+m) for n,m in zip(sizes[:-1],sizes[1:])]\n self.stack = [[np.random.rand(m,n)*2*s-s,np.zeros((m,1))] \\\n for n,m,s in zip(sizes[:-1],sizes[1:],scales)]\n self.hActs_M = [cm.empty((s,self.maxBatch)) for s in sizes]\n\n if self.train:\n # Now assuming that all layers are the same size\n self.grad = [[cm.empty(w.shape),cm.empty(b.shape)] for w,b in self.stack]\n self.deltasC_M = cm.empty((self.outputDim,self.maxBatch))\n self.deltasOut_M = cm.empty((sizes[1],self.maxBatch)) \n self.deltasIn_M = cm.empty((sizes[1],self.maxBatch)) \n self.tmpGrad_M = cm.empty((self.layerSize,self.maxBatch))\n \n # Allocate memory once here and reuse\n # Store probs\n self.probs_M = cm.empty((self.outputDim,self.maxBatch))\n # Store col max\n self.rowVec_M = cm.empty((1,self.maxBatch))\n \n self.stack = [[cm.CUDAMatrix(w),cm.CUDAMatrix(b)]\n for w,b in self.stack]\n\n if self.temporalLayer > 0:\n # dummy bias used for temporal layer\n dummy = cm.empty((1,1))\n dummy.assign(0.0)\n\n scale = np.sqrt(6)/np.sqrt(self.layerSize*2)\n wtf = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n wtb = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n self.stack.append([wtf,dummy])\n self.stack.append([wtb,dummy])\n\n # forward and backward activations for temporal layer\n self.hActsFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.hActsBack_M = cm.empty((self.layerSize,self.maxBatch))\n\n if self.train:\n dwtf = cm.empty(wtf.shape)\n self.grad.append([dwtf,dummy])\n dwtb = cm.empty(wtb.shape)\n self.grad.append([dwtb,dummy])\n\n self.tmpGradBack_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasBack_M = cm.empty((self.layerSize,self.maxBatch))", "def transition_layer(X, nb_filters, compression):\n\n init = K.initializers.he_normal(seed=None)\n\n bn1 = K.layers.BatchNormalization()(X)\n activation1 = K.layers.Activation('relu')(bn1)\n filters = int(nb_filters * compression)\n conv1 = K.layers.Conv2D(\n filters=filters,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=init\n )(activation1)\n\n avgpool = K.layers.AveragePooling2D(\n pool_size=(2, 2),\n strides=(2, 2),\n padding='same',\n )(conv1)\n\n return avgpool, filters", "def test_multiple_transpose_conv2d(self):\n\n tf.compat.v1.reset_default_graph()\n with tf.device('/cpu:0'):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Conv2DTranspose(1, (4, 4), input_shape=(28, 28, 3)))\n 
model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Conv2DTranspose(1, (4, 4), input_shape=(28, 28, 3)))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.summary()\n\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), [model.input.op.name], [model.output.op.name])\n self.assertEqual(conn_graph.get_all_ops()['conv2d_transpose/conv2d_transpose'].type, 'Conv2DTranspose')\n self.assertEqual(conn_graph.get_all_ops()['conv2d_transpose_1/conv2d_transpose'].type, 'Conv2DTranspose')", "def test_on_conv_transpose_2d_padding(self):\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 5., 10.], [11., 27., 32., 46.], [24., 66., 76., 86.], [40., 106., 116., 126.]]]])\n w_init = lambda s: jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=objax.ConvPadding.SAME, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='Same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='SAME', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 0), w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 0), (1, 0)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n y = [[[[2., 5., 5., 10., 8.], [11., 27., 32., 46., 32.], [24., 66., 76., 86., 56.],\n [40., 106., 116., 126., 80.], [39., 94., 101., 108., 64.]]]]\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=1, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 1), w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 1), (1, 1)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)", "def create_cnn(num_half_rows, num_half_columns, num_channels):\n\n error_checking.assert_is_integer(num_half_rows)\n error_checking.assert_is_integer(num_half_columns)\n error_checking.assert_is_integer(num_channels)\n\n error_checking.assert_is_greater(num_half_rows, 0)\n error_checking.assert_is_greater(num_half_columns, 0)\n error_checking.assert_is_greater(num_channels, 0)\n\n regularizer_object = keras.regularizers.l1_l2(l1=L1_WEIGHT, l2=L2_WEIGHT)\n\n num_grid_rows = 2 * num_half_rows + 1\n num_grid_columns = 2 * num_half_columns + 1\n input_layer_object = keras.layers.Input(\n shape=(num_grid_rows, num_grid_columns, num_channels)\n )\n\n current_num_filters = None\n current_layer_object = None\n\n # Add convolutional layers.\n for _ in range(NUM_CONV_LAYER_SETS):\n for _ in range(NUM_CONV_LAYERS_PER_SET):\n\n if current_num_filters is None:\n current_num_filters = (\n num_channels * NUM_CHANNELS_TO_FIRST_NUM_FILTERS)\n this_input_layer_object = input_layer_object\n\n else:\n current_num_filters *= 2\n this_input_layer_object = current_layer_object\n\n current_layer_object = keras.layers.Conv2D(\n filters=current_num_filters,\n kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),\n strides=(1, 1), padding='valid', data_format='channels_last',\n dilation_rate=(1, 1), activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n 
kernel_regularizer=regularizer_object\n )(this_input_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if CONV_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=CONV_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n current_layer_object = keras.layers.MaxPooling2D(\n pool_size=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n strides=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n padding='valid', data_format='channels_last'\n )(current_layer_object)\n\n these_dimensions = numpy.array(\n current_layer_object.get_shape().as_list()[1:], dtype=int)\n num_features = numpy.prod(these_dimensions)\n\n current_layer_object = keras.layers.Flatten()(current_layer_object)\n\n # Add intermediate dense layers.\n _, num_outputs_by_dense_layer = (\n architecture_utils.get_dense_layer_dimensions(\n num_input_units=num_features, num_classes=NUM_CLASSES,\n num_dense_layers=NUM_DENSE_LAYERS)\n )\n\n for k in range(NUM_DENSE_LAYERS - 1):\n current_layer_object = keras.layers.Dense(\n num_outputs_by_dense_layer[k], activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n # Add output layer (also dense).\n current_layer_object = keras.layers.Dense(\n NUM_CLASSES, activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.Activation(\n 'softmax'\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None and NUM_DENSE_LAYERS == 1:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n # Put the whole thing together and compile.\n cnn_model_object = keras.models.Model(\n inputs=input_layer_object, outputs=current_layer_object)\n cnn_model_object.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=LIST_OF_METRIC_FUNCTIONS)\n\n cnn_model_object.summary()\n return cnn_model_object", "def TCN_V3(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 128\n\n config = [ \n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n 
subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def __init__(self, dim, dim_head = 64, heads = 8, window_size = 16, skip_type = 'ConcatCross'):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n self.window_size = window_size\n self.skip_type = skip_type\n inner_dim = dim_head * heads\n\n self.to_q = nn.Conv3d(dim, inner_dim, 1, bias = False)\n self.to_kv = nn.Conv3d(dim, inner_dim * 2, 1, bias = False)\n self.to_out = nn.Conv3d(inner_dim, dim, 1)", "def convolutional(input_layer, filters_shape, downsample=False, activate=True, bn=True):\n if downsample:\n input_layer = ZeroPadding2D(((1, 0), (1, 0)))(input_layer)\n padding = 'valid'\n strides = 2\n else:\n strides = 1\n padding = 'same'\n\n conv = Conv2D(filters=filters_shape[-1], kernel_size=filters_shape[0], strides=strides,\n padding=padding, use_bias=not bn, kernel_regularizer=l2(0.0005),\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n bias_initializer=tf.constant_initializer(0.))(input_layer)\n if bn:\n conv = BatchNormalization()(conv)\n if activate == True:\n conv = LeakyReLU(alpha=0.1)(conv)\n\n return conv", "def create_conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def resblock(input_tensor, num_channels):\r\n step_1_conv = Conv2D(num_channels, (3, 3), padding='same')(input_tensor)\r\n step_2_relu = Activation('relu')(step_1_conv)\r\n step_3_conv = Conv2D(num_channels, (3, 3), padding='same')(step_2_relu)\r\n output = Add()([input_tensor, step_3_conv])\r\n return Activation('relu')(output)", "def _transition_layer(self, input_data, name):\n input_channels = input_data.get_shape().as_list()[3]\n\n with tf.variable_scope(name):\n # First batch norm\n bn = self.layer_bn(input_data=input_data, is_training=self._is_training, name='bn')\n\n # Second 1 X 1 conv\n if self._with_bc:\n out_channels = int(input_channels * self._bc_theta)\n conv = self.conv2d(input_data=bn, out_channel=out_channels, kernel_size=1,\n stride=1, use_bias=False, name='conv')\n # Third average pooling\n avgpool_out = self.avg_pooling(input_data=conv, kernel_size=2, stride=2, name='avgpool')\n else:\n conv = self.conv2d(input_data=bn, out_channel=input_channels, kernel_size=1,\n stride=1, use_bias=False, name='conv')\n # Third average pooling\n avgpool_out = self.avg_pooling(input_data=conv, kernel_size=2, stride=2, name='avgpool')\n\n return avgpool_out", "def deconv2d_bn_act(inputs, filters, kernel_size, kernel_init, activation, strides, padding=\"SAME\"):\n _tmp = 
tf.layers.conv2d_transpose(inputs=inputs, filters=filters, kernel_size=kernel_size,\n kernel_initializer=kernel_init, activation=None, strides=strides, padding=padding)\n _tmp = tf.contrib.layers.batch_norm(_tmp, center=True, scale=True, is_training=phase)\n _tmp = activation(_tmp)\n\n return _tmp", "def Conv2DTranspose_BN(x, filters, kernel_size, strides=(1,1), padding='same', activation='relu', kernel_initializer='he_normal', kernel_regularizer=None):\n\n x = Conv2DTranspose(filters, kernel_size=kernel_size, strides=strides, padding=padding, kernel_regularizer=kernel_regularizer)(x)\n x = BatchNormalization()(x)\n x = Activation(activation)(x)\n return x", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def contract(self, depth, kernel_cc_weight=0.0):\n\n def composite_layer(x):\n name = 'contr_%d' % depth\n num_filters = self.base_num_filters * 2 ** depth\n x = self.conv_depth(num_filters=num_filters, name=name, kernel_cc_weight=kernel_cc_weight)(x)\n self.contr_tensors[depth] = x\n x = MaxPoolingND(x)(x)\n return x\n\n return composite_layer", "def __init__(self, in_dim, activation):\r\n super(Self_Attn, self).__init__()\r\n self.input_channel = in_dim\r\n self.activation = activation\r\n self.k = 8\r\n self.query = nn.Conv2d(self.input_channel, self.input_channel // self.k, kernel_size=1)\r\n self.key = nn.Conv2d(self.input_channel, self.input_channel // self.k, kernel_size=1)\r\n self.value = nn.Conv2d(self.input_channel, self.input_channel, kernel_size=1)\r\n self.h = nn.Conv2d(self.input_channel, self.input_channel, kernel_size=1)\r\n self.gamma = nn.Parameter(torch.zeros(1), requires_grad=True)\r\n self.softmax = nn.Softmax(dim=-1)", 
"def _conv_block(x, filters, activation=True):\n h = tf.keras.layers.Conv2D(\n filters,\n kernel_size=[3, 3],\n kernel_initializer='he_normal',\n bias_initializer='zeros',\n strides=[1, 1],\n padding='same',\n use_bias=True)(\n x)\n if activation:\n h = tf.keras.layers.LeakyReLU(0.2)(h)\n return h", "def __init__(self, in_size, out_size, kernel_size=3, stride=2, padding=1, output_padding=1):\n super().__init__()\n ConvTransBlockList = nn.ModuleList()\n ConvTransBlockList.append(nn.ConvTranspose2d(in_size, out_size,\n kernel_size=kernel_size, stride=stride,\n padding=padding, output_padding=output_padding,\n bias=False)\n )\n ConvTransBlockList.append(nn.InstanceNorm2d(out_size))\n ConvTransBlockList.append(nn.ReLU())\n self.model = nn.Sequential(*ConvTransBlockList)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]", "def conv3x3(in_channels, out_channels, stride=1, \n padding=1, bias=True, groups=1):\n return nn.Conv2d(\n in_channels, \n out_channels, \n kernel_size=3, \n stride=stride,\n padding=padding,\n bias=bias,\n groups=groups)", "def __init__(self, indim, outdim, ksize=3, stride=1, activation=nn.ReLU):\n\n # Run initialization for super class\n super(ConvBlock, self).__init__()\n\n # Check ksize, stride requirements\n assert (ksize % 2) == 1\n assert stride == 1\n assert indim == outdim\n\n # Store proper activation function depending on configuration\n self.activ = activation\n\n # Compute padding according to `ksize`. Make sure\n # that this will not cause image width and height to change.\n padding = ksize // 2\n\n # We will follow the architecture in slide 76 of lecture 21, but with\n # our `_conv` function as our conv ``block''. We'll also use\n # nn.Sequential() and its `add_module' function. 
Note that the 64 and\n # 256 in that slide are just examples, and you should instead use indim\n # and outdim.\n #\n # Also note that we are creating these layers with support for\n # different `ksize`, `stride`, `padding`, unlike previous assignment.\n self.layers = nn.Sequential()\n self.layers.add_module(\"conv_1\", self._conv(indim, indim, 1, 1, 0))\n self.layers.add_module(\"conv_2\", self._conv(\n indim, indim, ksize, 1, padding))\n self.layers.add_module(\"conv_3\", self._conv(indim, outdim, 1, 1, 0))", "def conv_3d(self):\n # Model.\n model = Sequential()\n model.add(Conv3D(32, (3,3,3), activation='relu', input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(64, (3,3,3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(128, (3,3,3), activation='relu'))\n model.add(Conv3D(128, (3,3,3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(256, (2,2,2), activation='relu'))\n model.add(Conv3D(256, (2,2,2), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Dropout(0.5))\n model.add(Dense(1024))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model", "def create(self) -> None:\n if self.torch is not None: # type: ignore\n return\n self.torch = torch.nn.Conv2d(**self.args.arg_values)", "def _init_predictor(self):\n self.conv_cls_prev = self._init_branch(\n conv_channels=self.cls_branch,\n conv_strides=(1, ) * len(self.cls_branch))\n self.conv_cls = nn.Conv2d(self.cls_branch[-1], self.cls_out_channels,\n 1)\n # init regression head\n self.conv_reg_prevs = nn.ModuleList()\n # init output head\n self.conv_regs = nn.ModuleList()\n # group_reg_dims:\n # ((4, ), (2, ), (20, ), (3, ), (3, ), (8, 8), (1, ), (1, ))\n for i in range(len(self.group_reg_dims)):\n reg_dims = self.group_reg_dims[i]\n reg_branch_channels = self.reg_branch[i]\n out_channel = self.out_channels[i]\n reg_list = nn.ModuleList()\n if len(reg_branch_channels) > 0:\n self.conv_reg_prevs.append(\n self._init_branch(\n conv_channels=reg_branch_channels,\n conv_strides=(1, ) * len(reg_branch_channels)))\n for reg_dim in reg_dims:\n reg_list.append(nn.Conv2d(out_channel, reg_dim, 1))\n self.conv_regs.append(reg_list)\n else:\n self.conv_reg_prevs.append(None)\n for reg_dim in reg_dims:\n reg_list.append(nn.Conv2d(self.feat_channels, reg_dim, 1))\n self.conv_regs.append(reg_list)", "def conv_mpool_activation(scope, input_layer, n_channels=2, mpool=False, kernel_conv=(5, 5), \n stride_conv=(1, 1), kernel_pool=(2, 2), stride_pool=(2, 2), activation=tf.nn.relu):\n with tf.variable_scope(scope):\n # infer input_nchannels\n inp_channels = input_layer.shape.as_list()[-1]\n\n # define var for conv-filter\n filter_shape = tuple(kernel_conv) + (inp_channels, ) + (n_channels, )\n filter_weights = tf.Variable(tf.truncated_normal(shape=filter_shape, stddev=0.01), name='weights')\n\n # bias\n bias = tf.Variable(tf.zeros(shape=[n_channels]), name='bias')\n\n # apply the filter\n strides = (1, ) + tuple(stride_conv) + (1, )\n output = tf.nn.conv2d(input=input_layer, filter=filter_weights, strides=strides, padding='SAME')\n\n # bias\n output = output + bias\n\n # apply mpooling if needed\n if mpool:\n ksize = (1, ) + tuple(kernel_pool) + (1, )\n strides = (1, ) + tuple(stride_pool) + (1, )\n output = tf.nn.max_pool(output, ksize=ksize, 
strides=strides, padding='SAME')\n\n return tf.identity(activation(output), name='output')", "def c3d(self):\n model = Sequential()\n # 1st layer group\n model.add(Conv3D(64, 3, 3, 3, activation='relu',\n border_mode='same', name='conv1',\n subsample=(1, 1, 1),\n input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),\n border_mode='valid', name='pool1'))\n # 2nd layer group\n model.add(Conv3D(128, 3, 3, 3, activation='relu',\n border_mode='same', name='conv2',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool2'))\n # 3rd layer group\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool3'))\n # 4th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool4'))\n\n # 5th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5b',\n subsample=(1, 1, 1)))\n model.add(ZeroPadding3D(padding=(0, 1, 1)))\n # model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n # border_mode='valid', name='pool5', dim_ordering=\"tf\"))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool5', dim_ordering=\"tf\"))\n model.add(Flatten())\n\n # FC layers group\n model.add(Dense(4096, activation='relu', name='fc6'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu', name='fc7'))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n for layer in model.layers:\n print(layer.output_shape)\n return model", "def setUp(self):\n\n super(Conv2DTransposeTest, self).setUp()\n\n self.batch_size = 100\n self.in_height = 32\n self.in_width = 32\n self.in_channels = 3\n self.out_channels = 10\n self.kernel_shape_h = 5\n self.kernel_shape_w = 5\n self.strides = (1, 1, 1, 1)\n self.padding = snt.SAME\n\n self.in_shape = (self.batch_size, self.in_height, self.in_width,\n self.in_channels)\n\n self.out_shape = (self.in_height, self.in_width)\n\n self.kernel_shape = (self.kernel_shape_h, self.kernel_shape_w)\n\n self.kernel_shape2 = (self.kernel_shape_h, self.kernel_shape_w,\n self.out_channels, self.in_channels)", "def TCN_V1(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = 
Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def transition_up(self, x, filters, name):\n with tf.name_scope(name):\n x = tf.layers.conv2d_transpose(x,\n filters=filters,\n kernel_size=[3, 3],\n strides=[2, 2],\n padding='SAME',\n activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name=name+'_trans_conv3x3')\n\n return x", "def preprocess_module(mod):\n\n def alter_conv(attrs, inputs, tinfos, out_type):\n new_attrs = dict(attrs)\n data_info = tinfos[0]\n weight_info = tinfos[1]\n (desired_data_layout, desired_kernel_layout) = (\"NCHW\", \"OIHW\")\n new_attrs[\"data_layout\"] = desired_data_layout\n new_attrs[\"kernel_layout\"] = desired_kernel_layout\n\n if is_depthwise_conv2d(\n data_info.shape,\n attrs[\"data_layout\"],\n weight_info.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n ):\n dkl = desired_kernel_layout\n new_attrs[\"kernel_layout\"] = dkl[1] + dkl[0] + dkl[2] + dkl[3]\n return relay.nn.conv2d(*inputs, **new_attrs)\n\n with OpAttrContext(\"nn.conv2d\", \"FTVMAlterOpLayout\", alter_conv):\n seq = tvm.transform.Sequential(\n [\n transform.ConvertLayout({\"nn.conv2d\": [\"NCHW\", \"OIHW\"]}),\n transform.ConvertLayout({\"nn.conv2d_transpose\": [\"NCHW\", \"OIHW\"]}),\n transform.AlterOpLayout(),\n transform.FoldConstant(),\n ]\n )\n with tvm.transform.PassContext(opt_level=3):\n preprocessed_mod = seq(mod)\n return preprocessed_mod", "def _get_model(\n shape,\n kernel_h,\n kernel_w,\n input_zp,\n input_sc,\n kernel_zp,\n kernel_sc,\n output_zp,\n output_sc,\n stride,\n dilation,\n groups,\n kernel_layout,\n dtype,\n out_channels,\n bias,\n):\n a = relay.var(\"a\", shape=shape, dtype=dtype)\n p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, stride)\n weight_shape = (shape[3], out_channels // groups, kernel_h, kernel_w)\n\n weight_data = tvm.nd.array(\n np.random.randint(\n np.iinfo(dtype).min,\n high=(np.iinfo(dtype).max + 1),\n size=weight_shape,\n dtype=dtype,\n )\n )\n weights = relay.const(weight_data, dtype)\n op = relay.qnn.op.conv2d_transpose(\n a,\n weights,\n input_zero_point=relay.const(input_zp, \"int32\"),\n input_scale=relay.const(input_sc, \"float32\"),\n kernel_zero_point=relay.const(kernel_zp, \"int32\"),\n kernel_scale=relay.const(kernel_sc, \"float32\"),\n kernel_size=(kernel_h, kernel_w),\n padding=p,\n strides=stride,\n dilation=dilation,\n data_layout=\"NHWC\",\n kernel_layout=kernel_layout,\n 
out_dtype=\"int32\",\n channels=out_channels,\n groups=groups,\n )\n if bias:\n bias_data = tvm.nd.array(\n np.random.randint(\n np.iinfo(dtype).min,\n high=np.iinfo(dtype).max + 1,\n size=(out_channels,),\n dtype=\"int32\",\n )\n )\n biasc = relay.const(bias_data, \"int32\")\n op = relay.nn.bias_add(op, biasc, axis=3)\n\n if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):\n req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]\n else:\n req_input_sc = input_sc * kernel_sc\n\n op = relay.qnn.op.requantize(\n op,\n input_zero_point=relay.const(input_zp, \"int32\"),\n input_scale=relay.const(req_input_sc, \"float32\"),\n output_zero_point=relay.const(output_zp, \"int32\"),\n output_scale=relay.const(output_sc, \"float32\"),\n axis=3,\n rounding=\"UPWARD\",\n out_dtype=dtype,\n )\n params = {\"w\": weight_data}\n if bias:\n params[\"b\"] = bias_data\n return op, params", "def ConvBlock(tensor, nb_filters, kernel_size=3, padding='same', initializer='he_normal', activation=\"relu\", regularization=None):\n\n\n x = Conv2D(filters=nb_filters, kernel_size=kernel_size, padding=padding, kernel_initializer=initializer, kernel_regularizer=regularization)(tensor)\n x = BatchNormalization()(x)\n x = Activation(activation)(x)\n x = Conv2D(filters=nb_filters, kernel_size=kernel_size, padding=padding, kernel_initializer=initializer, kernel_regularizer=regularization)(x)\n x = BatchNormalization()(x)\n x = Activation(activation)(x)\n return x", "def create_conv2d(self, x, w, b, stride = 1, name = None):\n x = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='VALID', name = name)\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)", "def __init__(self):\n # def __init__(self, embed_size, input_channels):\n super(OffsetCNN, self).__init__()\n self.conv0 = nn.Conv3d(1, 1, (1, 1, 1), padding=(0,0,0))\n self.bn0 = nn.BatchNorm3d(1)\n self.lrelu = nn.LeakyReLU()", "def tfconv2d_transpose(in_channels,\n out_channels,\n kernel_size,\n stride=1,\n output_padding = 0,\n groups=1,\n bias=True,\n dilation=1,\n tf_padding_type = None):\n modules = []\n if tf_padding_type == 'same':\n padding = nn.ZeroPad2d(0)\n hook = hook_factory_tf_inverse_padding_same(kernel_size, stride)\n padding.register_forward_pre_hook(hook)\n modules.append(padding)\n\n # eliminate the effect of the in-build padding (is not capable of asymmeric padding)\n if isinstance(kernel_size, int):\n padding = kernel_size - 1\n else:\n padding = (kernel_size[0] - 1, kernel_size[1] - 1)\n\n modules.append(nn.ConvTranspose2d(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n output_padding,\n groups,\n bias,\n dilation))\n\n return nn.Sequential(*modules)", "def _build_conv_layer_params(self, input_shape):\n conv_layer_params = []\n if self._conv_type == '3d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[self._kernel_size] * 3,\n strides=self._strides,\n dilation_rate=self._rates,\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n elif self._conv_type == '2d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, self._kernel_size],\n strides=[1, self._strides[1], self._strides[2]],\n dilation_rate=[1, self._rates[1], self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n elif self._conv_type == '1+2d':\n channels_in = input_shape[self._channel_axis]\n conv_layer_params.append(\n dict(\n filters=channels_in,\n kernel_size=[self._kernel_size, 1, 1],\n 
strides=[self._strides[0], 1, 1],\n dilation_rate=[self._rates[0], 1, 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._temporal_conv_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, self._kernel_size],\n strides=[1, self._strides[1], self._strides[2]],\n dilation_rate=[1, self._rates[1], self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n elif self._conv_type == '2+1d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, self._kernel_size],\n strides=[1, self._strides[1], self._strides[2]],\n dilation_rate=[1, self._rates[1], self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[self._kernel_size, 1, 1],\n strides=[self._strides[0], 1, 1],\n dilation_rate=[self._rates[0], 1, 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._temporal_conv_initializer),\n ))\n elif self._conv_type == '1+1+1d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, 1, self._kernel_size],\n strides=[1, 1, self._strides[2]],\n dilation_rate=[1, 1, self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, 1],\n strides=[1, self._strides[1], 1],\n dilation_rate=[1, self._rates[1], 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[self._kernel_size, 1, 1],\n strides=[self._strides[0], 1, 1],\n dilation_rate=[self._rates[0], 1, 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n else:\n raise ValueError('Unsupported conv_type: {}'.format(self._conv_type))\n return conv_layer_params", "def TBLCCCNN_Model(pan_image_height_size, pan_image_width_size, ms_to_pan_ratio, n_bands, n1_pan, n2_pan, n3_pan, \r\n n1_ms, n2_ms, n3_ms, dropout_rate, n_classes, l_r):\r\n \r\n if (pan_image_height_size % ms_to_pan_ratio) != 0 or (pan_image_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both pan_image_height_size and pan_image_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n pan_img_input = Input(shape = (pan_image_height_size, pan_image_width_size, 1))\r\n conv_1_pan = Conv2D(n1_pan, (7, 7), padding = 'same', activation = 'relu')(pan_img_input)\r\n max_pool_1_pan = MaxPooling2D(pool_size = (2, 2))(conv_1_pan)\r\n conv_2_pan = Conv2D(n2_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_1_pan)\r\n max_pool_2_pan = MaxPooling2D(pool_size = (2, 2))(conv_2_pan)\r\n conv_3_pan = Conv2D(n3_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_2_pan)\r\n glob_max_pool_pan = GlobalMaxPooling2D()(conv_3_pan)\r\n glob_max_pool_pan = Dropout(dropout_rate)(glob_max_pool_pan)\r\n \r\n ms_img_input = Input(shape = (int(pan_image_height_size / ms_to_pan_ratio), int(pan_image_width_size / ms_to_pan_ratio), \r\n n_bands))\r\n conv_1_ms = Conv2D(n1_ms, (3, 3), padding = 'same', activation = 'relu')(ms_img_input)\r\n conv_2_ms = Conv2D(n2_ms, (3, 3), padding = 'same', activation = 'relu')(conv_1_ms)\r\n conv_3_ms = Conv2D(n3_ms, (3, 3), padding = 'same', activation = 'relu')(conv_2_ms)\r\n glob_max_pool_ms = GlobalMaxPooling2D()(conv_3_ms)\r\n glob_max_pool_ms = 
Dropout(dropout_rate)(glob_max_pool_ms)\r\n \r\n all_features = concatenate([glob_max_pool_pan, glob_max_pool_ms])\r\n \r\n pred_layer = Dense(n_classes, activation = 'softmax')(all_features)\r\n \r\n tblcccnn_model = Model(inputs = [ms_img_input, pan_img_input], outputs = pred_layer)\r\n tblcccnn_model.compile(loss = 'categorical_crossentropy', optimizer = Adam(lr = l_r), \r\n metrics = ['categorical_crossentropy'])\r\n \r\n return tblcccnn_model", "def __init__(self, channels, momentum):\n super(PointNetConv2Layer, self).__init__()\n self.channels = channels\n self.momentum = momentum", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def __init__(self, in_channels, num_classes):\n super(InceptionAux, self).__init__()\n self.relu = nn.LeakyReLU()\n self.dropout1 = nn.Dropout(p=0.5)\n self.pool = nn.AvgPool2d(kernel_size=5, stride=3)\n self.conv = conv_block(in_channels, 128, kernel_size=1)\n self.fc1 = nn.Linear(12800, 1024)\n self.fc2 = nn.Linear(1024, 420)\n self.dropout2 = nn.Dropout(p=0.4)\n self.fc3 = nn.Linear(420,128)\n self.dropout3 = nn.Dropout(p=0.4)\n self.fc4 = nn.Linear(128,num_classes)", "def __init__(self, dropout=0, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\r\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0, \r\n use_batch_norm=False, dtype=np.float32):\r\n self.use_dropout = dropout > 0\r\n self.use_batch_norm = use_batch_norm\r\n self.params = {}\r\n self.reg = reg\r\n self.num_layers = 3\r\n self.dtype = dtype\r\n self.pool_height = 2\r\n self.pool_width = 2\r\n self.pool_stride = 2\r\n\r\n ############################################################################\r\n # TODO: Initialize weights and biases for the three-layer convolutional #\r\n # network. Weights should be initialized from a Gaussian with standard #\r\n # deviation equal to weight_scale; biases should be initialized to zero. #\r\n # All weights and biases should be stored in the dictionary self.params. #\r\n # Store weights and biases for the convolutional layer using the keys 'W1' #\r\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\r\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\r\n # of the output affine layer. 
#\r\n ############################################################################\r\n # NUmber of channels\r\n C, H, W = input_dim\r\n self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale\r\n self.params['b1'] = np.zeros(num_filters)\r\n H_pool = (H - self.pool_height) / 2 + 1\r\n W_pool = (W - self.pool_width) / 2 + 1\r\n self.params['W2'] = np.random.randn(np.prod((num_filters, H_pool, W_pool)), hidden_dim) * weight_scale\r\n self.params['b2'] = np.zeros(hidden_dim)\r\n self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale\r\n self.params['b3'] = np.zeros(num_classes)\r\n\r\n # Initialize the parameters for batch normalization if necessary\r\n if self.use_batch_norm:\r\n self.params['gamma1'] = np.ones(num_filters) \r\n self.params['beta1'] = np.zeros(num_filters)\r\n self.params['gamma2'] = np.ones(hidden_dim)\r\n self.params['beta2'] = np.zeros(hidden_dim)\r\n\r\n # Set dropout parameters if necessary\r\n self.dropout_param={}\r\n if self.use_dropout:\r\n self.dropout_param ={'mode':'train', 'p':dropout}\r\n\r\n self.bn_params = []\r\n if self.use_batch_norm:\r\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\r\n\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n for k, v in self.params.items():\r\n self.params[k] = v.astype(dtype)", "def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])", "def conv_layer( x, params, training ):\n\n batch_norm = params[4] # Boolean\n\n if batch_norm:\n activation = None\n else:\n activation = tf.nn.relu\n\n kernel_initializer = tf.contrib.layers.variance_scaling_initializer()\n bias_initializer = tf.constant_initializer( value=0.0 )\n\n top = tf.layers.conv2d( x, \n filters=params[0],\n kernel_size=params[1],\n padding=params[2],\n activation=activation,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n name=params[3] )\n if batch_norm:\n top = norm_layer( top, training, params[3]+'/batch_norm' )\n top = tf.nn.relu( top, name=params[3]+'/relu' )\n\n return top", "def init(self):\n self.reparam_layers = []\n if self.model_type == \"GCN\":\n for i in range(self.num_layers):\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1),\n GCNConv(self.num_features if i == 
0 else self.latent_size,\n self.latent_size if i != self.num_layers - 1 else self.num_classes,\n cached=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n sample_size=self.sample_size,\n bias=True if self.with_relu else False,\n val_use_mean=self.val_use_mean,\n normalize=self.normalize,\n ))\n # self.conv1 = ChebConv(self.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, self.num_features, K=2)\n\n elif self.model_type == \"GAT\":\n latent_size = int(self.latent_size / 2) # Under the default setting, latent_size = 8\n for i in range(self.num_layers):\n if i == 0:\n input_size = self.num_features\n else:\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n input_size = latent_size * 8 * 2\n else:\n input_size = latent_size * 8\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n setattr(self, \"conv{}_1\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n # On the Pubmed dataset, use heads=8 in conv2.\n \n else:\n raise Exception(\"Model_type {} is not valid!\".format(self.model_type))\n\n self.reparam_layers = sorted(self.reparam_layers)\n \n if self.model_type == \"GCN\":\n if self.with_relu:\n reg_params = [getattr(self, \"conv{}\".format(i+1)).parameters() for i in range(self.num_layers - 1)]\n self.reg_params = itertools.chain(*reg_params)\n self.non_reg_params = getattr(self, \"conv{}\".format(self.num_layers)).parameters()\n else:\n self.reg_params = OrderedDict()\n self.non_reg_params = self.parameters()\n else:\n self.reg_params = self.parameters()\n self.non_reg_params = OrderedDict()\n self.to(self.device)", "def testTransposeNDHWC(self, use_bias):\n\n conv3_transpose = snt.Conv3DTranspose(\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=self.strides,\n name=\"conv3_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NDHWC)\n conv3 = conv3_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv3_transpose.kernel_shape, conv3.kernel_shape)\n self.assertEqual((1,) + self.strides + (1,), conv3.stride)\n self.assertEqual(conv3_transpose.padding, conv3.padding)\n\n # Before conv3_transpose is connected, we cannot know how many\n # 
`output_channels` conv1 should have.\n err = \"Variables in conv3_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv3.output_channels\n\n # After connection the number of `output_channels` is known.\n x = tf.constant(np.random.randn(self.batch_size,\n self.in_depth,\n self.in_height,\n self.in_width,\n self.in_channels),\n dtype=np.float32)\n conv3_transpose(x)\n self.assertEqual(self.in_channels, conv3.output_channels)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv3_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv3_transpose.output_shape, conv3.input_shape)", "def _cnn(self, state):\n\n x = self.relu(self.bn1(self.conv1(state)))\n x = self.relu(self.bn2(self.conv2(x)))\n x = self.relu(self.bn3(self.conv3(x)))\n x = x.reshape(x.size(0), -1)\n\n return x" ]
[ "0.63744015", "0.5864105", "0.586131", "0.5837967", "0.5762193", "0.5750853", "0.57461226", "0.57346827", "0.57076824", "0.5697772", "0.56805575", "0.5673117", "0.5603193", "0.5555773", "0.5510417", "0.5501909", "0.5458268", "0.5454152", "0.5436939", "0.54160196", "0.5399038", "0.5372593", "0.5354265", "0.53359497", "0.5329868", "0.5324369", "0.5316291", "0.5275935", "0.527529", "0.5274735", "0.52554935", "0.5250095", "0.5249805", "0.5241396", "0.52408975", "0.5237673", "0.5217749", "0.52164125", "0.5203961", "0.51971924", "0.5193027", "0.5169981", "0.5166881", "0.5160659", "0.51566744", "0.5151312", "0.5146965", "0.5131167", "0.5129982", "0.51272506", "0.51125085", "0.5103719", "0.51014584", "0.5098909", "0.50944656", "0.50888586", "0.5088253", "0.50842744", "0.5081012", "0.5079887", "0.50786436", "0.5075608", "0.5066643", "0.506659", "0.50654864", "0.50645113", "0.50586843", "0.50512624", "0.5050964", "0.5050336", "0.5045266", "0.5042272", "0.50382125", "0.50376624", "0.5027292", "0.5026898", "0.5022594", "0.5020507", "0.501732", "0.5014824", "0.5010148", "0.5006858", "0.50068164", "0.5005773", "0.50027585", "0.49993378", "0.49941555", "0.49938512", "0.49931356", "0.49913782", "0.49899685", "0.49841", "0.49810472", "0.49802715", "0.49791726", "0.49772426", "0.4971043", "0.49682346", "0.49646273", "0.49598143" ]
0.61901104
1
Allocate an Embedding Layer.
def __init__(self, input, input_size, embedding_size):
    self.input = input
    self.output = layers.EmbeddingLayer(self.input, input_size, embedding_size, W=initialize_parameters()[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_", "def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed", "def __init__(self, layer_id,\n shape, X):\n prefix = 'Embedding' + layer_id\n self.n_words, self.in_size = shape\n\n # weights for embedding, the only parameters\n self.W = init_weights(shape=(self.n_words, self.in_size),\n name=prefix + '#W')\n\n self.params = [self.W]\n\n # Compute the embedded samples\n self.n_timesteps = X.shape[0]\n self.n_samples = X.shape[1]\n\n self.activation = self.W[X.flatten()].reshape([self.n_timesteps,\n self.n_samples,\n self.in_size])", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)", "def __init__(self, input_size, config):\r\n super(EmbeddingLayer, self).__init__()\r\n\r\n if config.emtraining:\r\n self.embedding = nn.Sequential(OrderedDict([\r\n ('embedding', nn.Embedding(input_size, config.emsize)),\r\n ('dropout', nn.Dropout(config.dropout))\r\n ]))\r\n else:\r\n self.embedding = nn.Embedding(input_size, config.emsize)\r\n self.embedding.weight.requires_grad = False", "def __init__(self, cfg, name=''):\n nn.Layer.__init__(self)\n self.cfg = cfg\n d_model = cfg['hidden_size']\n d_emb = cfg.get('emb_size', cfg['hidden_size'])\n d_vocab = cfg['vocab_size']\n d_pos = cfg['max_position_embeddings']\n # d_sent = cfg.get(\"sent_type_vocab_size\", 4) or cfg.get('type_vocab_size', 4)\n if cfg.get('sent_type_vocab_size'):\n d_sent = cfg['sent_type_vocab_size']\n else:\n d_sent = cfg.get('type_vocab_size', 2)\n self.n_head = cfg['num_attention_heads']\n self.return_additional_info = cfg.get('return_additional_info', False)\n self.initializer = nn.initializer.TruncatedNormal(std=cfg['initializer_range'])\n\n self.ln = _build_ln(d_model, name=append_name(name, 'pre_encoder'))\n self.word_emb = nn.Embedding(d_vocab,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'word_embedding'),\n initializer=self.initializer))\n self.pos_emb = nn.Embedding(d_pos,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'pos_embedding'),\n initializer=self.initializer))\n # self.sent_emb = nn.Embedding(\n # d_sent,\n # d_emb,\n # weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'), initializer=self.initializer))\n self._use_sent_id = cfg.get('use_sent_id', True)\n self._use_sent_id = 
False\n if self._use_sent_id:\n self.sent_emb = nn.Embedding(d_sent,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'),\n initializer=self.initializer))\n self._use_task_id = cfg.get('use_task_id', False)\n self._use_task_id = False\n if self._use_task_id:\n self._task_types = cfg.get('task_type_vocab_size', 3)\n logging.info('using task_id, #task_types:{}'.format(self._task_types))\n self.task_emb = nn.Embedding(self._task_types,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'task_embedding'),\n initializer=self.initializer))\n\n prob = cfg['hidden_dropout_prob']\n self.dropout = nn.Dropout(p=prob)\n\n self.encoder_stack = ErnieEncoderStack(cfg, append_name(name, 'encoder'))\n\n if cfg.get('has_pooler', True):\n self.pooler = _build_linear(cfg['hidden_size'], cfg['hidden_size'], append_name(name, 'pooled_fc'),\n self.initializer)\n else:\n self.pooler = None\n\n self.key_tag = None\n self._checkpoints = []\n self.train()", "def __init__(self, heropool_size, embedding_dim):\r\n super().__init__()\r\n self.embedding_dim = embedding_dim\r\n self.embeddings = nn.Embedding(heropool_size, embedding_dim)\r\n self.affine = nn.Linear(embedding_dim, heropool_size)\r\n self.init_emb()", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)", "def __init__(self,\n embedding_dim: int = 64,\n scale: bool = False,\n vocab_size: int = 0,\n padding_idx: int = 1,\n freeze: bool = False,\n **kwargs):\n super(Embeddings, self).__init__()\n\n 
self.embedding_dim = embedding_dim\n self.scale = scale\n self.vocab_size = vocab_size\n self.lut = nn.Embedding(vocab_size, self.embedding_dim,\n padding_idx=padding_idx)\n\n if freeze:\n freeze_params(self)", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n return embedding_layer", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def __init__(self, vocab_size, embedding_size, context_size, hid_dim, out_dim):\n super(Net, self).__init__()\n self.E = nn.Embedding(vocab_size, embedding_size) # Embedding matrix\n self.after_embed_size = embedding_size * context_size\n self.lin = nn.Linear(self.after_embed_size, hid_dim)\n self.lin2 = nn.Linear(hid_dim, out_dim)", "def __init__(self, embed_size, device, pool_size, attribute_dim):\n super(EncoderClothing, self).__init__()\n self.device = device\n self.linear = nn.Linear(512*pool_size*pool_size, embed_size)\n self.relu = nn.ReLU()\n #self.module_list = nn.ModuleList([nn.Linear(embed_size, att_size) for att_size in attribute_dim])\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.dropout = nn.Dropout(0.5)\n self.pool_size = pool_size\n\n self.module_list = nn.ModuleList([self.conv_bn(512, 256, 1, embed_size, att_size) for att_size in attribute_dim])", "def __init__(self, input_dim, output_dim, name='embedding_layer'):\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.name = name\n\n # Randomly generate weights\n self.embeddings = shared((input_dim, output_dim),\n self.name + '__embeddings')\n\n # Define parameters\n self.params = [self.embeddings]", "def __init__(self, embed_size, device, pool_size, attribute_dim):\n super(EncoderClothing, self).__init__()\n self.device = device\n self.linear = nn.Linear(512*pool_size*pool_size, embed_size)\n self.module_list = nn.ModuleList([nn.Linear(512*pool_size*pool_size, att_size) for att_size in attribute_dim])\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.pool_size = pool_size", "def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length", "def embedding_layer(n_categories, embedding_dim, name=None):\n\n input_tensor = Input(shape=(1,))\n x = Embedding(n_categories, embedding_dim, name=name)(input_tensor)\n x = Reshape(target_shape=(embedding_dim,))(x)\n\n return input_tensor, x", "def __init__(self, embed_size):\n super(ImgAttentionEncoder, self).__init__()\n vggnet_feat = models.vgg19(pretrained=True).features\n modules = list(vggnet_feat.children())[:-2]\n self.cnn = nn.Sequential(*modules)\n self.fc = nn.Sequential(nn.Linear(self.cnn[-3].out_channels, 
embed_size),\n nn.Tanh()) # feature vector of image", "def add_embedding(self, prefix=''):\n with tf.variable_scope(prefix + 'embed'):\n if self.cfg.fix_emb:\n assert (hasattr(self.cfg, 'W_emb'))\n W_emb = pkl.load(open(self.cfg.W_emb_path, 'rb'))\n W = tf.get_variable('W', initializer= W_emb, trainable=True)\n print(\"iniitalize word embedding finished\")\n else:\n weightInit = tf.random_uniform_initializer(-0.001, 0.001)\n vocab = pkl.load(open(self.cfg.vocab_path, 'rb'))\n W = tf.get_variable('W', [len(vocab), self.cfg.emb_size], initializer=weightInit)\n if hasattr(self.cfg, 'relu_w') and self.cfg.relu_w:\n W = tf.nn.relu(W)\n return W", "def __init__(self, vocab, embed_size=512, dropout_rate=0.1, max_len=200):\n super(DecoderEmbeddings, self).__init__()\n pad_token_idx = 0 #vocab.tokenizer.ids_to_tokens[0]\n assert vocab.tokenizer.ids_to_tokens[0] == '[PAD]'\n self.embeddings = nn.Embedding(len(vocab.tokenizer.ids_to_tokens), embed_size, padding_idx=pad_token_idx)\n self.positional_encoding = PositionalEncoding(d_model=embed_size, dropout=dropout_rate, max_len=max_len)", "def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)", "def pretrained_embedding_layer(model,model2,model3, word_to_index,emb_dim_max):\n words_ignored = []\n vocab_len = len(word_to_index) + 1 \n emb_matrix = np.zeros([vocab_len,emb_dim_max])\n \n print(' Total words would be processed : '+str(vocab_len))\n for word, idx in word_to_index.items():\n if word in model:\n emb_matrix[idx,:200] = model[word]\n emb_matrix[idx,200:] = 0\n if word in model2:\n emb_matrix[idx, :100] = model2[word]\n emb_matrix[idx, 100:] = 0\n if word in model3.keys():\n emb_matrix[idx,:] = model3[word]\n else:\n words_ignored.append(word)\n print(str(len(words_ignored))+\" words ignored\")\n print(emb_matrix.shape) \n \n \n embedding_layer = Embedding(vocab_len,emb_dim_max,trainable = True)\n \n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer,words_ignored", "def __init__(self, embed_size, vocab):\n super(ModelEmbeddings, self).__init__()\n \n self.embed_size = embed_size\n self.char_embed_size = 50\n self.max_word_len = 21\n self.dropout_rate = 0.3\n self.vocab = vocab \n \n ## A4 code\n pad_token_idx = vocab.char2id['<pad>']\n self.embedding = nn.Embedding(num_embeddings =len(vocab.char2id),\n embedding_dim =self.char_embed_size,\n padding_idx =pad_token_idx,)\n \n self.CNN = CNN(char_embed_size=self.char_embed_size,\n num_filters=embed_size,\n max_word_length=self.max_word_len,)\n self.Highway = Highway(word_embed_size=self.embed_size)\n self.dropout = nn.Dropout(p=self.dropout_rate)\n ## End A4 code\n\n ### YOUR CODE HERE for part 1j\n\n\n ### END YOUR CODE", "def _add_embedding_layer(model_1, model_2):\n result_layer = torch.nn.Embedding(\n model_1.num_embeddings, model_1.embedding_dim + model_2.embedding_dim\n )\n result_layer.weight = torch.nn.Parameter(\n torch.cat((model_1.weight.data, model_2.weight.data), dim=1)\n )\n return result_layer", "def add_out_layer(self, embedding):\n self.ac_dim = self.ac_space.shape[-1] # num dims\n embedding = snt.Linear(output_size=self.ac_dim, name='final',\n initializers=self.out_initializers)(embedding)\n return embedding", "def make_embedding(src_emb_hparams, src_token_to_id_map,\n tgt_emb_hparams=None, tgt_token_to_id_map=None,\n emb_init_share=False):\n src_embedding = MonoTextData.make_embedding(src_emb_hparams,\n src_token_to_id_map)\n\n if emb_init_share:\n tgt_embedding = src_embedding\n else:\n tgt_emb_file = tgt_emb_hparams[\"file\"]\n tgt_embedding = None\n if tgt_emb_file is not None and tgt_emb_file != \"\":\n tgt_embedding = Embedding(tgt_token_to_id_map, tgt_emb_hparams)\n\n return src_embedding, tgt_embedding", "def __init__(self, embed_size):\n super(Encoder, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules) \n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.pooling = nn.MaxPool2d(2,stride = 2)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def get_keras_layer(self, trainable=False, **kwargs):\n\n try:\n from keras.layers.embeddings import Embedding\n except:\n raise ImportError('Keras not found')\n\n return Embedding(self.vocab_size, self.dim, weights=[self._matrix], trainable=trainable, **kwargs)", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = 
nn.BatchNorm1d(embed_size, momentum=0.01)", "def __init__(self, num_words, embedding_size, use_cuda):\n super(StandardEmbedding, self).__init__()\n self.embedding_size = embedding_size\n self.num_hash_functions = 0\n self.embeddings = nn.Embedding(num_words, embedding_size)\n self.embeddings = self.embeddings.cuda() if use_cuda else self.embeddings", "def build(self, unused_input_shapes):\n if self.embedding_lookup is None:\n self.embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self.config.vocab_size,\n embedding_width=self.config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.config.initializer_range),\n name=\"target_embeddings\")\n self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=False,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer=tf.keras.initializers.VarianceScaling(\n scale=self.config.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\"),\n name=\"embedding_postprocessor\")\n # Decoder can use a different intermediate size.\n self.multi_channel_cross_attention = self.config.get(\n \"multi_channel_cross_attention\", False)\n self.decoder = TransformerDecoder(\n num_hidden_layers=self.config.num_decoder_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_decoder_attn_heads,\n intermediate_size=self.config.decoder_intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=\"decoder\")\n super(Decoder, self).build(unused_input_shapes)", "def _embed(self):\n batch_size = tf.shape(self.p)[0]\n with tf.variable_scope(\"emb\"):\n with tf.variable_scope(\"char\"):\n pc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.pc), \n [batch_size * self.max_p_len, self.max_w_len, self.vocab.char_embed_dim])\n qc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.qc), \n [batch_size * self.max_q_len, self.max_w_len, self.vocab.char_embed_dim])\n cell_fw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n cell_bw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, pc_emb, self.pc_length, dtype=tf.float32)\n pc_emb = tf.concat([state_fw, state_bw], axis=1)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, qc_emb, self.qc_length, dtype=tf.float32)\n qc_emb = tf.concat([state_fw, state_bw], axis=1)\n pc_emb = tf.reshape(pc_emb, [batch_size, self.max_p_len, 2 * self.char_hidden_size])\n qc_emb = tf.reshape(qc_emb, [batch_size, self.max_q_len, 2 * self.char_hidden_size])\n\n with tf.name_scope(\"word\"):\n p_emb = tf.nn.embedding_lookup(self.word_embed, self.p)\n q_emb = tf.nn.embedding_lookup(self.word_embed, self.q)\n\n with tf.name_scope(\"pos\"):\n p_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.p_pos)\n q_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.q_pos)\n \n with tf.name_scope(\"em\"):\n sh = tf.shape(self.p_em)\n resh = [sh[0], sh[1], 1]\n p_em_feat = tf.reshape(tf.cast(self.p_em, dtype=tf.float32), shape=resh)\n\n self.p_emb = tf.concat([p_emb, pc_emb, p_pos_emb, p_em_feat], axis=2)\n self.q_emb = tf.concat([q_emb, qc_emb, q_pos_emb], axis=2)", "def __init__(self, 
embeddings, char_embeddings=None,\n pos_embeddings=None, **kwargs):\n super(WordRepresentationLayer, self).__init__()\n self.embeddings = embeddings\n self.embedding_dim = embeddings.embedding_dim\n self.char_embeddings = char_embeddings\n self.train_char_embeddings = kwargs.get('train_char_embeddings',\n False)\n self.use_cuda = kwargs.get('cuda', True)\n\n if self.char_embeddings:\n self.char_merging_method = kwargs.get('char_merging_method', 'sum')\n char_hidden_dim = kwargs.get('char_hidden_dim', 50)\n bidirectional = kwargs.get('bidirectional', False)\n\n if self.char_merging_method == 'lstm':\n self.char_encoder = LSTMCharEncoder(\n char_embeddings,\n char_hidden_dim,\n bidirectional,\n train_char_embeddings=self.train_char_embeddings,\n cuda=self.use_cuda)\n\n self.embedding_dim += char_hidden_dim\n\n elif self.char_merging_method in ['mean', 'sum']:\n self.char_encoder = LinearCharEncoder(\n char_embeddings,\n train_char_embeddings=self.train_char_embeddings,\n char_merging_method=self.char_merging_method)\n\n self.embedding_dim += self.char_embeddings.embedding_dim\n else:\n raise NotImplementedError\n\n self.pos_embeddings = pos_embeddings\n if self.pos_embeddings:\n self.embedding_dim += self.pos_embeddings.embedding_dim", "def __init__(self, embed_size):\n super(ImgEncoder, self).__init__()\n model = models.vgg19(pretrained=True)\n in_features = model.classifier[-1].in_features # input size of feature vector\n model.classifier = nn.Sequential(\n *list(model.classifier.children())[:-1]) # remove last fc layer\n\n self.model = model # loaded model without last fc layer\n self.fc = nn.Linear(in_features, embed_size) # feature vector of image", "def get_embedding(self, resp):\n\n feed_dict = {self.anchor: resp}\n embedding = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n return embedding", "def instantiate_weights(self):\n with tf.variable_scope(\"embedding_projection\"), tf.device('/cpu:0'): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],\n initializer=self.initializer)\n # self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size],\n # dtype=tf.float32) # ,initializer=self.initializer\n # self.W_projection = tf.get_variable(\"W_projection\", shape=[self.sequence_length * self.d_model, self.num_classes],\n # initializer=self.initializer) # [embed_size,label_size]\n # self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])", "def __init__(self,\n vocab_size=None,\n embed_dim=None,\n existing_vocab=None,\n initializers=None,\n partitioners=None,\n regularizers=None,\n trainable=True,\n custom_getter=None,\n name=\"embed\"):\n if vocab_size is None and existing_vocab is None:\n raise ValueError(\"Must provide on of vocab_size or existing_vocab.\")\n\n if existing_vocab is not None and not all(\n x is None for x in [vocab_size, embed_dim, initializers, partitioners]):\n raise ValueError(\"If existing_vocab is provided, none of vocab_size, \"\n \"embedding_dim, initializers, or partitioners is \"\n \"needed.\")\n\n super(Embed, self).__init__(custom_getter=custom_getter, name=name)\n self._existing_vocab = None\n if existing_vocab is None:\n self._vocab_size = vocab_size\n self._embed_dim = embed_dim or _embedding_dim(self._vocab_size)\n else:\n self._existing_vocab = tf.convert_to_tensor(\n existing_vocab, dtype=tf.float32)\n existing_vocab_shape = self._existing_vocab.get_shape().with_rank(2)\n existing_vocab_shape.assert_is_fully_defined()\n 
self._vocab_size, self._embed_dim = existing_vocab_shape.as_list()\n\n self._initializers = util.check_initializers(\n initializers, self.POSSIBLE_INITIALIZER_KEYS)\n self._partitioners = util.check_partitioners(\n partitioners, self.POSSIBLE_INITIALIZER_KEYS)\n self._regularizers = util.check_regularizers(\n regularizers, self.POSSIBLE_INITIALIZER_KEYS)\n self._trainable = trainable", "def _create_layer() -> Image:\n data = np.random.random((32, 16))\n return Image(data)", "def add_layer(self, layer_pos, lay_dims, init_w_function, init_a_function, dropout, drop_prob, batch_norm):\n\n # If not within feasible bounds, return\n if layer_pos < 0 or layer_pos >= self.number_hidden_layers:\n return\n\n # We create the new layer and add it to the network descriptor\n self.dims = np.insert(self.dims, layer_pos, lay_dims)\n self.init_functions = np.insert(self.init_functions, layer_pos, init_w_function)\n self.act_functions = np.insert(self.act_functions, layer_pos, init_a_function)\n\n # Finally the number of hidden layers is updated\n self.number_hidden_layers = self.number_hidden_layers + 1\n if not (isinstance(self.batch_norm, tuple) or self.batch_norm.shape[0] == 0):\n self.batch_norm = np.insert(self.batch_norm, layer_pos, batch_norm)\n if not (isinstance(self.dropout, tuple) or self.dropout.shape[0] == 0):\n self.dropout = np.insert(self.dropout, layer_pos, dropout)\n self.dropout_probs = np.insert(self.dropout_probs, layer_pos, drop_prob)", "def __init__(self, vocab_size, embedding_size, output_size, lat_dim):\n super(Encoder, self).__init__()\n\n self.vocab_size = vocab_size\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n self.lstm = nn.LSTM(embedding_size, output_size)\n \n self.hidden2mean = nn.Linear(output_size, lat_dim)\n self.hidden2logv = nn.Linear(output_size, lat_dim)\n \n self.cell2mean = nn.Linear(output_size, lat_dim)\n self.cell2logv = nn.Linear(output_size, lat_dim)", "def __init__(self, embed_size, hidden_size, vocab_size, num_layers, max_seq_length=20):\n super(Decoder, self).__init__()\n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.max_seg_length = max_seq_length", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n with tf.variable_scope('vc'):\n with tf.variable_scope('lstm', reuse=True):\n embedding_mat = tf.get_variable(\"embedding_mat\", [config.num_vocab, config.embed_dim])\n init_we = tf.assign(embedding_mat, embedding_mat_val)\n return [init_we]", "def __init__(self, embed_size, hidden_size, vocab_size, num_layers):\n super(DecoderRNN, self).__init__()\n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.init_weights()", "def add_embedding(self):\n #with tf.variable_scope(\"RNN\", reuse = tf.AUTO_REUSE):\n embeddings = tf.get_variable(\"embeddings\", initializer = self.pretrained_embeddings,trainable=True)\n inputs = self.input_placeholder\n inputs = tf.reshape(inputs, [self.config.batch_size, -1 , self.config.n_features])\n embeddings = tf.nn.embedding_lookup(embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [self.config.batch_size, -1, self.config.n_features* self.config.embed_size])\n embeddings = tf.cast(embeddings, tf.float32)\n return embeddings", "def __init__(self, embed_size, hidden_size, vocab_size, num_layers,\n 
max_seq_length=20):\n # Decoder\n super(CaptioningModel, self).__init__()\n resnet = models.resnet18(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear1 = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01) \n # Encoder \n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear2 = nn.Linear(hidden_size, vocab_size)\n self.max_seg_length = max_seq_length", "def __init__(self, word_embed_size, vocab):\n super(ModelEmbeddings, self).__init__()\n\n ### YOUR CODE HERE for part 1h\n dropout_rate = 0.3\n n_chars = len(vocab.char2id)\n self.char_embed_size = 50\n self.word_embed_size = word_embed_size\n self.vocab = vocab\n self.char_embed = nn.Embedding(n_chars, self.char_embed_size)\n self.conv = CNN(self.char_embed_size, word_embed_size)\n self.highway = Highway(word_embed_size)\n self.dropout = nn.Dropout(dropout_rate)\n ### END YOUR CODE", "def _use_embeddings(self, word):\n if word == \"@PAD@\":\n return torch.zeros(self.embeddings_dim)\n else:\n return self.embeddings[word]", "def build(self,unused):\n # (word_embeddings_shape, _) = input_shapes\n # width = word_embeddings_shape.as_list()[-1]\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"token_type_embeddings\",\n shape=[self.token_type_vocab_size, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=LAYER_NORM_NAME, axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super().build(unused)", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def __init__(self,\n vocab_size,\n embed_dim,\n dropout,\n pretrained,\n embedding=None,\n num_gpus=1,\n default_gpu_id=0,\n regularizer=None,\n random_seed=0,\n trainable=True,\n scope=\"word_feat\"):\n self.vocab_size = vocab_size\n self.embed_dim = embed_dim\n self.dropout = dropout\n self.pretrained = pretrained\n self.embedding = embedding\n self.num_gpus = num_gpus\n self.default_gpu_id = default_gpu_id\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n \n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, self.pretrained,\n self.embedding, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable)\n \n self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, self.random_seed)", "def __init__(self, vocab_size, emb_size, bias=True, M=None, b=None, pretrained_emb=None, fix_w_emb=False):\n super(AttentionEncoder, self).__init__()\n\n self.lookup = nn.Embedding(vocab_size, emb_size)\n if pretrained_emb is None:\n xavier_uniform(self.lookup.weight.data)\n else:\n assert pretrained_emb.size() == (vocab_size, emb_size), \\\n \"Word embedding matrix has incorrect size: {} instead of {}\".format(w_emb.size(), (vocab_size, emb_size))\n self.lookup.weight.data.copy_(pretrained_emb)\n self.lookup.weight.requires_grad = not fix_w_emb\n\n self.M = nn.Parameter(torch.Tensor(emb_size, emb_size))\n if M is None:\n xavier_uniform(self.M.data)\n else:\n self.M.data.copy_(M)\n if bias:\n self.b = nn.Parameter(torch.Tensor(1))\n if b is None:\n self.b.data.zero_()\n else:\n self.b.data.copy_(b)\n else:\n self.b = None", "def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:\n emb = TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)\n return emb", "def embedding_setup(self, embedding, emb_trainable):\n if emb_trainable == True:\n emb_variable = tf.get_variable(\n name=\"embedding_matrix\", shape=embedding.shape,\n initializer = tf.constant_initializer(embedding))\n return emb_variable\n else:\n return embedding", "def __init__(self, vocab_size, embedding_size, output_size):\n super(VanillaEncoder, self).__init__()\n\n self.vocab_size = vocab_size\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n self.gru = nn.GRU(embedding_size, output_size)", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def pool(embed, seq_len):\n attention_w = ops.last_dim_weighted_sum(embed, \"w\")\n attention_w = ops.mask_logits(attention_w, seq_len)\n attention_w = tf.expand_dims(tf.nn.softmax(attention_w), 1)\n\n # [batch, 1, len] * [batch, len, dim] -> [batch, 1, dim]\n return 
tf.squeeze(tf.matmul(attention_w, embed), 1)", "def build(self, input_shapes):\n (word_embeddings_shape, _) = input_shapes\n width = word_embeddings_shape.as_list()[-1]\n self.type_embeddings = None\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"type_embeddings\",\n shape=[self.token_type_vocab_size, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super(EmbeddingPostprocessor, self).build(input_shapes)", "def EmbeddingLayers_pooling(pretrained=False, progress=True, **kwargs):\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n # return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n # **kwargs) \n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)", "def __init__(self, emb_dim=100, window_size=3, init_emb=None,\n hidden_dim=100, vocab_size=0, splitter=u' ', add_dim=0,\n PAD_IDX=None):\n assert window_size % 2 == 1, 'window_size must be odd.'\n dim = emb_dim\n hidden_dim = hidden_dim + add_dim\n self.add_dim = add_dim\n self.hidden_dim = hidden_dim\n super(BaseCNNEncoder, self).__init__(emb=L.EmbedID(vocab_size, emb_dim, ignore_label=-1),\n conv=L.Convolution2D(1, hidden_dim, ksize=(window_size, dim),\n stride=(1, dim), pad=(window_size // 2, 0)))\n self.splitter = splitter\n self.char_level_flag = True if self.splitter is None else False\n self.word_level_flag = not self.char_level_flag\n self.emb_dim = emb_dim\n self.window_size = window_size\n self.dim = dim\n self.PAD_IDX = PAD_IDX\n self.train = True\n # initialize embeddings\n if init_emb is not None:\n self.emb.W = init_emb", "def __init__(self,embedding_size):\n super(ResNetEncoder,self).__init__()\n resnet = models.resnet50(pretrained=True)\n modules = list(resnet.children())[:-1]\n #Create a sequential models upto top fc layer add a custom fc layer compatible with embedding size of decoder RNN\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features,embedding_size)\n self.bn = nn.BatchNorm1d(embedding_size,momentum=0.01)\n self.init_weights()", "def _build(self, ids):\n # Construct embeddings.\n if self._existing_vocab is None:\n if self.EMBEDDINGS not in self._initializers:\n self._initializers[self.EMBEDDINGS] = basic.create_linear_initializer(\n self._vocab_size)\n self._embeddings = tf.get_variable(\n \"embeddings\",\n shape=[self._vocab_size, self._embed_dim],\n dtype=tf.float32,\n initializer=self._initializers[self.EMBEDDINGS],\n partitioner=self._partitioners.get(self.EMBEDDINGS, None),\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n else:\n self._embeddings = tf.get_variable(\n \"embeddings\",\n dtype=tf.float32,\n initializer=self._existing_vocab,\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n\n # Lookup embeddings\n return tf.nn.embedding_lookup(\n self._embeddings, ids, name=\"embedding_lookup\")", "def get_embed(input_data, 
vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n\n return embed", "def get_embed(input_data, vocab_size, embed_dim):\n # todo 需要编程:\n # 1、构建嵌入矩阵的查找表\n lookup_w = tf.Variable(\n initial_value=tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0)\n )\n # 2、获得嵌入输出\n embed = tf.nn.embedding_lookup(params=lookup_w, ids=input_data)\n # [N, n_steps, embed_size]\n return embed", "def build_embeddings(opt, word_dict, for_encoder='src'):\n if for_encoder=='src':\n embedding_dim = opt.src_word_vec_size #512\n elif for_encoder=='tgt':\n embedding_dim = opt.tgt_word_vec_size\n elif for_encoder=='structure':\n embedding_dim = 64\n\n word_padding_idx = word_dict.stoi[Constants.PAD_WORD]\n num_word_embeddings = len(word_dict)\n \n if for_encoder=='src' or for_encoder=='tgt':\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")\n elif for_encoder=='structure':\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=False,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")", "def _configure_embeddings(self):\r\n # TODO(omalleyt): Add integration tests.\r\n from tensorflow.python.keras.layers import embeddings\r\n try:\r\n from tensorboard.plugins import projector\r\n except ImportError:\r\n raise ImportError('Failed to import TensorBoard. Please make sure that '\r\n 'TensorBoard integration is complete.\"')\r\n config = projector.ProjectorConfig()\r\n for layer in self.model.layers:\r\n if isinstance(layer, embeddings.Embedding):\r\n embedding = config.embeddings.add()\r\n embedding.tensor_name = layer.embeddings.name\r\n\r\n if self.embeddings_metadata is not None:\r\n if isinstance(self.embeddings_metadata, str):\r\n embedding.metadata_path = self.embeddings_metadata\r\n else:\r\n if layer.name in embedding.metadata_path:\r\n embedding.metadata_path = self.embeddings_metadata.pop(layer.name)\r\n\r\n if self.embeddings_metadata:\r\n raise ValueError('Unrecognized `Embedding` layer names passed to '\r\n '`keras.callbacks.TensorBoard` `embeddings_metadata` '\r\n 'argument: ' + str(self.embeddings_metadata.keys()))\r\n\r\n class DummyWriter(object):\r\n \"\"\"Dummy writer to conform to `Projector` API.\"\"\"\r\n\r\n def __init__(self, logdir):\r\n self.logdir = logdir\r\n\r\n def get_logdir(self):\r\n return self.logdir\r\n\r\n writer = DummyWriter(self.log_dir)\r\n projector.visualize_embeddings(writer, config)", "def TransformerTokenEmbedding(\n num_embeddings, embedding_dim, padding_idx, freeze_embed=False\n):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n if freeze_embed:\n m.weight.requires_grad = False\n return m", "def _load_byte_embedding(self):\n char_embed_weights = self.npz_weights['char_embed']\n num_tags = TAGS.__len__()\n weights = np.zeros((char_embed_weights.shape[0] + num_tags + 1, char_embed_weights.shape[1]), dtype='float32')\n weights[1:-num_tags, :] = char_embed_weights\n self.embed_chars = rnn.Embedding(num_embeddings=self.num_embeddings, embedding_dim=self.char_embed_dim, padding_idx=self.padding_idx, freeze_embed=self._finetune_pretrained_weights)\n 
self.embed_chars.weight.data.copy_(torch.FloatTensor(weights))", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNNFeatures, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n\n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def embed(self, features, feature_name, params):\n with tf.variable_scope(\"%s_embed\" % feature_name):\n embed_matrix = tf.get_variable(\"embedding_matrix\",\n [getattr(params, \"%s_vocab_size\" % feature_name), \n getattr(params, \"%s_embed_size\" % feature_name)])\n return tf.nn.embedding_lookup(embed_matrix, features[feature_name])", "def Aut(A):\n return Embeddings(A,A)", "def __init__(self, # noqa: R0913\n hid_dim,\n n_layers,\n n_heads,\n pf_dim,\n dropout,\n max_length=100):\n super().__init__()\n\n self.pos_embedding = nn.Embedding(max_length, hid_dim)\n\n self.layers = nn.ModuleList([TransformerEncoderLayer(hid_dim,\n n_heads,\n pf_dim,\n dropout)\n for _ in range(n_layers)])\n\n self.dropout = nn.Dropout(dropout)\n\n self.register_buffer('scale', torch.sqrt(torch.FloatTensor([hid_dim])))", "def add_embedding(self, token, embedding):\n self.word2idx[token] = self.vocab_size\n self.vocab_size += 1\n\n self.embedding = np.vstack((self.embedding, embedding))", "def __init__(self, \n k=DEFAULT_EMBEDDING_SIZE, \n eta=DEFAULT_ETA, \n epochs=DEFAULT_EPOCH, \n batches_count=DEFAULT_BATCH_COUNT, \n seed=DEFAULT_SEED,\n embedding_model_params={'norm':DEFAULT_NORM_TRANSE, \n 'normalize_ent_emb':DEFAULT_NORMALIZE_EMBEDDINGS,\n 'negative_corruption_entities':DEFAULT_CORRUPTION_ENTITIES},\n optimizer=DEFAULT_OPTIM, \n optimizer_params={'lr':DEFAULT_LR},\n loss=DEFAULT_LOSS, \n loss_params={},\n regularizer=DEFAULT_REGULARIZER, \n regularizer_params={},\n verbose=DEFAULT_VERBOSE):\n super().__init__(k=k, eta=eta, epochs=epochs, batches_count=batches_count, seed=seed,\n embedding_model_params=embedding_model_params,\n optimizer=optimizer, optimizer_params=optimizer_params,\n loss=loss, loss_params=loss_params,\n regularizer=regularizer, regularizer_params=regularizer_params,\n verbose=verbose)", "def CreateWavelet(self):\n self.__context.builder.WaveletCreate(self.GetId())", "def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding", "def __init__(self, name, config, embed_mat):\n if 'tune_embeds' not in config.keys():\n raise ValueError('config must define \"tune_embeds\".')\n super(TextModel, self).__init__(name, config)\n self.embeds = nn.Embedding(embed_mat.shape[0], embed_mat.shape[1])\n self.embeds.weight = nn.Parameter(torch.from_numpy(embed_mat),\n requires_grad=self.tune_embeds)", "def __init__(self, out_embed_dims, vocab_size, vocab_reduction_module=None, fixed_weights=None, hidden_layer_size=32, activation_fn=torch.nn.ReLU, logit_fn=torch.exp):\n super().__init__(out_embed_dims, vocab_size, vocab_reduction_module)\n if fixed_weights is None:\n self.fixed_weights = None\n self.gating_network = nn.Sequential(Linear(sum(out_embed_dims), hidden_layer_size, bias=True), activation_fn(), Linear(hidden_layer_size, len(out_embed_dims), bias=True))\n self.logit_fn = logit_fn\n else:\n assert len(fixed_weights) == len(out_embed_dims)\n 
self.fixed_weights = maybe_cuda(torch.Tensor(fixed_weights).view(1, 1, -1))", "def init_embedding(self):\n self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings)", "def build(self, input_shape):\n hidden_dim = input_shape[2]\n self.W = self.add_weight(\n name='{}_W'.format(self.name),\n shape=(hidden_dim, hidden_dim,),\n initializer='uniform',\n trainable=True)\n self.b = self.add_weight(\n name='{}_b'.format(self.name),\n shape=(hidden_dim,),\n initializer='zeros',\n trainable=True)\n self.u = self.add_weight(\n name='{}_u'.format(self.name),\n shape=(hidden_dim,),\n initializer='uniform',\n trainable=True)\n super(AttentionLayer, self).build(input_shape)", "def add_embedding_layer(self, emb_matrix, ans2id, context2id, use_same=True):\n with vs.variable_scope(\"embeddings\"):\n qn_embedding_matrix = tf.Variable(emb_matrix, dtype=tf.float32,\n name=\"qn_emb_matrix\") # shape (400002, embedding_size)\n self.qn_embs = embedding_ops.embedding_lookup(qn_embedding_matrix, self.qn_ids)\n\n self.context_embedding_matrix = tf.Variable(tf.one_hot(range(len(context2id)), len(context2id)),\n name=\"context_emb_matrix\")\n if not use_same:\n self.ans_embedding_matrix = tf.Variable(tf.one_hot(range(len(ans2id)), len(ans2id)),\n dtype=tf.float32, name=\"ans_emb_matrix\")\n else:\n self.ans_embedding_matrix = tf.identity(self.context_embedding_matrix, name=\"ans_emb_matrix\")\n\n if self.FLAGS.use_raw_graph:\n self.context_embs = embedding_ops.embedding_lookup(self.context_embedding_matrix, self.context_ids)\n else:\n self.context_embs = tf.identity(self.context_embedding)\n self.ans_embs = embedding_ops.embedding_lookup(self.ans_embedding_matrix, self.ans_ids)", "def embed_word(self):\n return self.emb.get_keras_embedding(dropout = self.emb_dropout,\n trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def __init__(self, embed_size, hidden_size, vocab_size, vocab, num_layers, max_seq_length=30):\n super(DecoderClothing, self).__init__()\n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.max_seg_length = max_seq_length\n self.vocab = vocab\n self.clothing_class = {\n 'shirt':0, 'jumper':1, 'jacket':2, 'vest':3, 'coat':4,\n 'dress':5, 'pants':6, 'skirt':7, 'scarf':8, 'cane':9, 'bag':10, 'shoes':11,\n 'hat':12, 'face':13, 'glasses':14 }", "def forward(self, tgt, m, enc_embed, mask):\n bs = tgt.shape[0]\n enc_embed = enc_embed.permute(2, 0, 1)\n m = m.permute(2, 0, 1)\n tgt = tgt.permute(2, 0, 1)\n dec_embed = self.dec_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n\n out = tgt\n for layer in self.decoder_layers:\n out = layer(out, m, \n pos=enc_embed,\n query_pos=dec_embed\n )\n \n return self.decoder_norm(out).permute(1, 2, 0), dec_embed.permute(1, 2, 0)", "def embedding(org_input):\n # Create the embedding list\n for f in range(Config.num_feature):\n num_cat_value = Config.schema[f]\n\n if num_cat_value == 1:\n 
pass\n elif num_cat_value > 1:\n embed_dict[f] = tf.get_variable(\n name=\"embed_\" + str(f),\n shape=[num_cat_value, Config.embed_size[f]],\n trainable=True)\n else:\n raise ValueError(\"Schema values should be positive integers!\")\n\n # Create embedded inputs\n f_size = np.sum(Config.embed_size)\n embedded_input = embed_events(org_input, f_size)\n\n return embedded_input", "def build(self, input_shape):\n self.embedding = layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN)\n self.conv_1 = layers.Conv1D(16, kernel_size=5, name=\"conv_1\", activation=\"relu\")\n self.pool_1 = layers.MaxPool1D(name=\"pool_1\")\n self.conv_2 = layers.Conv1D(\n 128, kernel_size=2, name=\"conv_2\", activation=\"relu\"\n )\n self.pool_2 = layers.MaxPool1D(name=\"pool_2\")\n self.flatten = layers.Flatten()\n self.dense = layers.Dense(1, activation=\"sigmoid\")\n super(CnnModel, self).build(input_shape)", "def embed(self, word: Any) -> dy.Expression:\n raise NotImplementedError('embed must be implemented in Embedder subclasses')", "def embed_word(self):\n return self.emb.get_keras_embedding(trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def randomly_init_embeddings(self, embed_dim):\n self.embed_dim = embed_dim\n self.embeddings = np.random.rand(self.size(), embed_dim)\n for term in [self.pad_term, self.unk_term, self.eos_term]:\n self.embeddings[self.get_id(term)] = np.zeros([self.embed_dim])", "def __init__(self, \n k=DEFAULT_EMBEDDING_SIZE, \n eta=DEFAULT_ETA, \n epochs=DEFAULT_EPOCH, \n batches_count=DEFAULT_BATCH_COUNT, \n seed=DEFAULT_SEED,\n embedding_model_params={'normalize_ent_emb':DEFAULT_NORMALIZE_EMBEDDINGS,\n 'negative_corruption_entities':DEFAULT_CORRUPTION_ENTITIES},\n optimizer=DEFAULT_OPTIM, \n optimizer_params={'lr':DEFAULT_LR},\n loss=DEFAULT_LOSS, \n loss_params={},\n regularizer=DEFAULT_REGULARIZER, \n regularizer_params={},\n verbose=DEFAULT_VERBOSE):\n super().__init__(k=k, eta=eta, epochs=epochs, batches_count=batches_count, seed=seed,\n embedding_model_params=embedding_model_params,\n optimizer=optimizer, optimizer_params=optimizer_params,\n loss=loss, loss_params=loss_params,\n regularizer=regularizer, regularizer_params=regularizer_params,\n verbose=verbose)", "def build(self, observation):\n raise NotImplementedError(\n 'Needs to be implemented as part of Embedder Interface')", "def set_embeddings(self):", "def initialize_model(self):\n\n input_layer = Input(\n shape=(self.input_length,), \n dtype='int32', \n name='input'\n )\n\n if self.embedding_matrix is None:\n embedding = Embedding(\n output_dim=self.embedding_size,\n input_dim=self.vocabulary_size + 1, # for mask\n input_length=self.input_length,\n mask_zero=True,\n name='embedding'\n )(input_layer)\n else:\n embedding = Embedding(\n output_dim=self.embedding_size,\n input_dim=self.vocabulary_size + 1,\n input_length=self.input_length,\n mask_zero=True,\n weights=[np.vstack((np.zeros((1, self.embedding_size)),\n self.embedding_matrix))],\n name='embedding'\n )(input_layer)\n\n encoder = self.recurrent_cell(\n self.latent_dim,\n dropout=self.dropout,\n recurrent_dropout=self.dropout,\n name='encoder',\n recurrent_regularizer=l1_l2(*self.regularization)\n )\n\n if self.use_bidirection:\n encoder = Bidirectional(\n encoder,\n merge_mode='concat'\n )\n\n encoder = encoder(embedding)\n\n dense_1 = Dense(\n 1024,\n activation='tanh',\n name='dense_1',\n kernel_regularizer=l1_l2(*self.regularization)\n )(encoder)\n\n dense_2 = Dense(\n 512,\n activation='tanh',\n name='dense_2',\n 
kernel_regularizer=l1_l2(*self.regularization)\n )(dense_1)\n\n dropout = Dropout(self.dropout)(\n dense_2\n )\n\n prediction = Dense(\n 1,\n activation='sigmoid',\n name='prediction'\n )(dropout)\n\n model = Model(inputs=input_layer, outputs=prediction)\n\n # sparse_categorical_crossentropy\n model.compile(optimizer=Adam(lr=self.learning_rate),\n loss='binary_crossentropy',\n metrics=['acc'])\n\n self.model = model\n\n if self.verbose > 0:\n model.summary()\n\n return [model]", "def init_emb(self):\n # Initialize users and items' embeddings\n nn.init.xavier_uniform_(self.user_embedding.weight)\n nn.init.xavier_uniform_(self.item_embedding.weight)", "def __init__(self, embed_size, hidden_size, vocab_size, num_layers, max_seq_length=20):\n super(DecoderRNN, self).__init__()\n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.max_seg_length = max_seq_length", "def embeddings_layers_init(self):\n\n user_embeddings = tf.keras.layers.Embedding(\n self.n_users, self.user_dim, input_length=1)\n\n item_embeddings = tf.keras.layers.Embedding(\n self.n_items, self.item_dim, input_length=1)\n\n return user_embeddings, item_embeddings", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size,embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n #print (\"embed_dim: \",embed_dim) # 向量表达维度为 256\n #print (\"input_data.shape: \",input_data.shape) # (50, 5)\n #print (\"embed.shap: \", embed.shape) # word 的向量表达 ==特征 (50, 5, 256) ==(batch_size, num_step, embed_dim)\n return embed # 返回input的向量表达" ]
[ "0.69322747", "0.64097595", "0.64067", "0.63635826", "0.63276964", "0.6208275", "0.6184051", "0.6084168", "0.60155404", "0.6006011", "0.6005224", "0.6002711", "0.60010916", "0.59911853", "0.5990504", "0.5958701", "0.5928769", "0.592201", "0.59150296", "0.59118164", "0.591048", "0.5881971", "0.58623266", "0.58346045", "0.5829561", "0.5825973", "0.58034843", "0.5797413", "0.5789574", "0.5770195", "0.5751782", "0.57445216", "0.57393354", "0.57042795", "0.5690474", "0.5676684", "0.56438285", "0.5636173", "0.563384", "0.56259835", "0.56242114", "0.55724645", "0.5550681", "0.554225", "0.5516452", "0.55067986", "0.5503801", "0.5492672", "0.54789984", "0.54765326", "0.5467126", "0.54629487", "0.54627794", "0.54477084", "0.54386514", "0.5437972", "0.5432589", "0.5426777", "0.5410019", "0.53922826", "0.5386396", "0.5374121", "0.5373288", "0.5370031", "0.5369764", "0.5369487", "0.53670293", "0.5366179", "0.53563356", "0.5355865", "0.5350761", "0.53452796", "0.5340862", "0.5336025", "0.5334177", "0.5326047", "0.5324481", "0.5314751", "0.5314164", "0.5312264", "0.5307636", "0.53076035", "0.53020674", "0.52993596", "0.5289326", "0.5288775", "0.5261143", "0.52540314", "0.52507263", "0.52435184", "0.52415895", "0.5235062", "0.52336156", "0.5223143", "0.52226466", "0.5220503", "0.521546", "0.5208608", "0.52029437", "0.51843596" ]
0.63652354
3
Return list of filename corresponding to JobSpider
def get_spiders_files(spiders_directory=None): if spiders_directory is None: spiders_directory = dirname(__file__) + '/spiders/' return [file for file in glob.glob(spiders_directory + "/*.py") if isfile(file) and not file.endswith('__init__.py')]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_job_names(self):\n return []", "def filenames(self):\n pass", "def get_job_list():\n\tdirlist = os.listdir(\".\")\n\tjoblist = [x for x in dirlist if \"job.sh\" in x and x in job_dict]\n\ttmplist = [x for x in dirlist if \"job.sh\" in x and x not in job_dict]\n\tdef compare_function(s: str):\n\t\treturn job_dict[s].order\n\tjoblist.sort(key=compare_function)\n\tjoblist.extend(tmplist)\n\treturn joblist", "def get_filenames(self):\n return self.filenames", "def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []", "def GetFileNames(self):\n return self.files", "def get_jobs():\n jobs = [os.path.join(JOBS_DIR, job)\n for job in os.listdir(JOBS_DIR)\n if job != '.gitignore']\n return jobs", "def get_file_names(self, prediction=False):\n if prediction is False:\n filename_list = []\n for objects in self.bucket.objects.filter(Prefix=self.training_data_dir):\n filename = str(objects.key).split('/')[-1]\n if filename != \"\":\n filename_list.append(filename)\n return filename_list\n else:\n filename_list = []\n for objects in self.bucket.objects.filter(Prefix=self.prediction_data_dir):\n filename_list.append(str(objects.key).split('/')[-1])\n return filename_list", "def get_filenames(is_training, data_dir):\n if is_training:\n return [os.path.join(data_dir, 'LIP_train5.record')]\n else:\n return [os.path.join(data_dir, 'LIP_val5.record')]", "def filenames(self):\n return self._filenames", "def get_all_job_files(jobFolder):\n job_files = {}\n for job_file in glob.glob(os.path.join(jobFolder, '*.json')):\n __, j = os.path.split(job_file)\n job_files[j] = ''\n return job_files", "def get_filenames(is_training, data_dir):\n if is_training:\n return [\n os.path.join(data_dir, 'train-%05d-of-01024' % i)\n for i in range(_NUM_TRAIN_FILES)]\n else:\n return [\n os.path.join(data_dir, 'validation-%05d-of-00128' % i)\n for i in range(_NUM_VAL_FILES)]", "def filenames(self):\n names = []\n for furi in np.asarray(self.fileuris).flat:\n names.append(furi)\n return names", "def processed_file_names(self):\n # For 'trainval', we use files from 'train' and 'val' to save\n # memory\n if self.stage == 'trainval' and self.val_mixed_in_train:\n return [\n osp.join('train', self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n if self.stage == 'trainval':\n return [\n osp.join(s, self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n return [\n osp.join(self.stage, self.pre_transform_hash, f'{w}.h5')\n for w in self.cloud_ids]", "def _ls_waiting_jobs(self):\n \n jobs = [j for j in os.listdir(pjoin(self._jobsdir, \"00_waiting\")) if j.endswith(self._job_ext)]\n \n if self._job_filter:\n jobs = [j for j in jobs if self._job_filter(pjoin(self._jobsdir, \"00_waiting\", j), j)]\n \n return jobs", "def get_job_list(self):\n return self.job_list", "def get_job_list(self):\n return self.job_list", "def files(self):\n files = [self.submission]\n if self.kind == 'script':\n files.append(self.exec_script)\n if self.kind == 'function':\n files.append(self.function)\n return files", "def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]", "def GetExpectationFilepaths(self) -> List[str]:\n raise NotImplementedError()", "def get_filenames(is_training, data_dir):\n\n return [ os.path.join(data_dir, 'train_'+str(shard_id)+'.tfrecord') for shard_id in range(_NUM_SHARDS)]", "def GetWorkloadFileList() -> list[str]:\n return [data.ResourcePath(workload) for workload in 
FLAGS.ycsb_workload_files]", "def processed_file_names(self):\n if self.force_reprocess == True:\n self.force_reprocess = False\n return 'reprocess.pt'\n \n ''' HR 01/06/22 Workaround to avoid FileNotFoundError '''\n print('self.processed_dir:', self.processed_dir)\n # folder,file = os.path.split(self.processed_dir)\n folder = self.processed_dir\n if not os.path.isdir(folder):\n print(' Making folder', folder)\n os.makedirs(folder)\n \n processedfiles = [f for f in os.listdir(self.processed_dir) if os.path.isfile(\n os.path.join(self.processed_dir, f))]\n if 'pre_filter.pt' in processedfiles:\n processedfiles.remove('pre_filter.pt')\n if 'pre_transform.pt' in processedfiles:\n processedfiles.remove('pre_transform.pt')\n # 'not_implimented.pt' #[f'data_{i}.pt' for i in list(self.data.index)]\n return processedfiles", "def get_filenames(self):\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n lookup_name = 'train'\n elif self.mode == tf.estimator.ModeKeys.EVAL:\n lookup_name = 'validation'\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n lookup_name = self.predict_split\n filenames = tf.gfile.Glob(\n os.path.join(self.data_dir, '{}-*-of-*'.format(lookup_name)))\n if tf.estimator.ModeKeys.PREDICT:\n # Sort so that TFRecords will be read out deterministically.\n filenames = sorted(filenames)\n return filenames", "def get_filenames(self):\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n lookup_name = 'train'\n elif self.mode == tf.estimator.ModeKeys.EVAL:\n lookup_name = 'validation'\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n lookup_name = self.predict_split\n filenames = tf.gfile.Glob(\n os.path.join(self.data_dir, '{}-*-of-*'.format(lookup_name)))\n if tf.estimator.ModeKeys.PREDICT:\n # Sort so that TFRecords will be read out deterministically.\n filenames = sorted(filenames)\n return filenames", "def _get_filepaths(self):\n self._printer(str(self.__len__()) + \" file paths have been parsed in \" + str(self.timer.end))\n if self._hash_files:\n return pool_hash(self.filepaths)\n else:\n return self.filepaths", "def get_file_names():\n all_file_names = []\n cwd = os.getcwd()\n # Change to dir with result files to analyze\n os.chdir(args.dir)\n \n for file in glob.glob(\"*.csv\"):\n all_file_names.append(file)\n\n # Return to current working directory\n os.chdir(cwd)\n return all_file_names", "def get_jobs_list(self, response):\n pass", "def get_file_names(self):\n return glob.glob(os.path.join(self.path, '*.csv'))", "def get_file_list() -> List[str]:\n filenames = []\n os.makedirs(\"sequence\", exist_ok=True)\n for file in glob.glob(\"sequence/*.smp\"):\n filenames.append(file.replace(\"sequence/\", \"\"))\n return filenames", "def _find_files(job_schedule_info):\n files = []\n\n job_status = _status_info(job_schedule_info)\n assert job_status is not None, \\\n \"We should not have received an empty status\"\n\n properties = job_status.get(\"properties\")\n\n if not properties:\n LOG.error(str(job_status))\n raise Exception(\"The status of the job is expected to have a \"\n \"properties key, however, it is missing.\")\n\n LOG.debug(\"We want to find the files needed to trigger %s\" %\n properties[\"buildername\"])\n\n if \"packageUrl\" in properties:\n files.append(properties[\"packageUrl\"])\n if \"testsUrl\" in properties:\n files.append(properties[\"testsUrl\"])\n\n return files", "def output_files(self):\n output_files = []\n for split in self.split_files:\n output_files.extend(split.filepaths)\n return output_files", "def mapping_names(self):\n return [self.basename]", "def 
get_filenames():\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n\n return filenames", "def list_filenames(self):\n l = []\n for path, dirs, files in os.walk(self.archive_path):\n for file in files:\n l.append(os.path.relpath(os.path.join(path,file),self.archive_path))\n l.sort()\n return l", "def get_jobs(dumpruninfo):\n if \"jobs\" not in dumpruninfo:\n return []\n return dumpruninfo[\"jobs\"].keys()", "def get_result_filenames(self,directory):\n return [os.path.join(directory,name) for name in os.listdir(directory)\n if os.path.isfile(os.path.join(directory,name)) and\n os.path.splitext(name)[1].lower() == '.trf']", "def _getfilenames(self):\n\n # Set up the path and file prefix depending on the filetype.\n if self._filetype == 'nightwatch':\n fileprefix = 'qcframe'\n\n if self._location == 'nersc':\n prefix = '/global/project/projectdirs/desi/spectro/nightwatch/kpno'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n elif self._filetype == 'redux':\n fileprefix = 'sframe'\n\n if self._location == 'nersc':\n prefix = '/global/project/projectdirs/desi/spectro/redux/daily/exposures'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n else:\n raise ValueError('Unknown file type {}'.format(self._filetype))\n\n # Find the exposures files.\n exfiles = {}\n for ex in self._exposures:\n folder = '{}/{}/{:08d}'.format(prefix, self._date, ex)\n files = sorted(glob('{}/{}*.fits'.format(folder, fileprefix)))\n exfiles[ex] = files\n\n return exfiles", "def get_filenames_reid(is_training, data_dir):\n if is_training:\n return [os.path.join(data_dir, 'train-512-170.tfrecords')]\n else:\n return [os.path.join(data_dir, 'val-512-170.tfrecords')]", "def getHTMLFileNames(self):\n HTMLFiles = []\n for _file in os.listdir(self.dir):\n if _file.lower().endswith(\".html\"):\n HTMLFiles.append(_file)\n elif _file.lower().endswith(\".htm\"):\n HTMLFiles.append(_file)\n HTMLFiles.append(\"statistics.html\")\n return HTMLFiles", "def raw_file_names(self):\n return self.filename", "def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)", "def output_files(self):\n return [self.input_files()[0].replace(\".lhe.gz\", \".stdhep\").replace(\".lhe\", \".stdhep\")]", "def get_submission():\n result_files = []\n for filename in os.listdir(\".\"):\n if filename.endswith(\"_output.csv\"):\n result_files.append(filename)\n return result_files[0]", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def index_files(self) -> List[str]:\n return self.get(\"index_files\", [\"index.html\", \"index.htm\"])", "def extract_files(self) -> list:\n pass", "def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed", "def namelist(self):\n\n # try to create a name from the archive name\n # because a gzipped file doesn't have information about the\n # original filename\n # gzipping a file creates the archive name by appending \".gz\"\n genericfilename = self._archivename\n\n if not genericfilename:\n genericfilename = \"generic.unknown.gz\"\n\n try:\n # get list of file extensions\n fileendinglist = 
Archivehandle.avail_archive_extensionlist4type['gz']\n replacedict = {\"wmz\": \"wmf\",\n \"emz\": \"emf\"}\n for ending in fileendinglist:\n endingwithdot = \".\"+ending\n if genericfilename.endswith(endingwithdot):\n if ending in replacedict:\n genericfilename = genericfilename[:-len(ending)]+replacedict[ending]\n else:\n genericfilename = genericfilename[:-len(endingwithdot)]\n break\n\n except Exception as e:\n print(e)\n pass\n return [genericfilename]", "def genJobList():\n nit=10\n reply=[]\n while len(reply)<10: #assume qstat fails if less that 10 jobs on cluster\n reply=chomp(os.popen('qstat|expand|tr -s \\' \\'|cut -d\\' \\' -f 1,2,5').readlines())\n nit+=1\n if nit>10: break\n return reply", "def getLogFileNames():\r\n return [\"Server1.txt\", \"Server2.txt\", \"Client1.txt\", \"Client2.txt\"]", "def discover(self):\n ids = []\n for f in os.listdir(self.dirname):\n if self.file_prefix in f:\n ids.append(self.inv_filename(f))\n return sorted(ids)", "def getMemberNames(self):\r\n # On Windows NT/2k/XP and Unix, if path is a Unicode object, the result \r\n # will be a list of Unicode objects. \r\n # Undecodable filenames will still be returned as string objects \r\n # If we don't request unicode, for example Vista may return a '?' \r\n # instead of a special character. The name would then be unusable to\r\n # build a distinct URL that references this resource.\r\n\r\n nameList = []\r\n\r\n for item in self.nibbler.listdir(self.path):\r\n name = to_str(item.name)\r\n nameList.append(name)\r\n\r\n for item in self.provider.cache_fs.get_dir_content(self.path):\r\n if item not in nameList:\r\n nameList.append(to_str(item))\r\n\r\n #this magic does not allow load the whole content for crazy Finder on MacOS\r\n magic_files = ['.ql_disablecache', '.ql_disablethumbnails']\r\n if nameList:\r\n for magic_file in magic_files:\r\n if magic_file not in nameList:\r\n f_obj = FSItem(magic_file, is_dir=False) \r\n self.provider.cache_fs.put(os.path.join(self.path, magic_file), f_obj)\r\n nameList.append(magic_file)\r\n\r\n return nameList", "def getExternalFiles(self):\n return []", "def get_files(self):\n m = []\n for post in self:\n m.append(post.FileName)\n return list(sorted(set(m), reverse=True))", "def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]", "def list_of_expected_arrow_fq_files(self):\n def iter_script_to_get_fq(script_filename):\n for line in open(script_filename):\n # line might be like:\n # bash <arrow_dir>/c0to9.sh\n sh_file = line.strip().split()[-1]\n assert sh_file.endswith('.sh')\n yield sh_file[:-3] + '.arrowed.fastq'\n\n\n sge_ids = []\n submitted = {} # expected fq --> (\"local\" or SGE jobid, script used to get this)\n for line in open(self.arrow_submission_run_file):\n jobid, script = line.strip().split('\\t')\n # read the script to see which c<i>to<j>.sh files are associated with this\n for fq in iter_script_to_get_fq(script):\n submitted[fq] = (jobid, script)\n if jobid!='local':\n sge_ids.append(jobid)\n\n return sge_ids, submitted", "def list_jobs(exproot, **kwargs):\n for jobname, args, results in load_all(exproot):\n print jobname, args, results", "def files(self) -> List[str]:\n return [packet.name for packet in self.packets.file_description.values()]", "def filenames(self):\n return self._files.keys()", "def filenames(self):\n return self._files.keys()", "def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # 
If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)", "def list_files(self):\n ret = []\n for fname in self.files:\n ret.append('filename: %s\\t replica locations: %s' %\n (fname, ','.join(self.files[fname])))\n return ret", "def find_files():\n \n p = re.compile(REGEX_PART_NUMBER)\n job_files = []\n \n for root, dirs, files in os.walk(project_path): # r at start of string need to prevent unicode error\n for filename in files:\n re_part_number = p.match(filename)\n if re_part_number:\n file_ext = filename.split(\".\")[-1].lower() # extract file extension \n file_size = os.path.getsize((os.path.join(root, filename))) # filesize in bytes \n \n part_number = re_part_number.group() # extract part number from regular expression match\n part_code = part_number.split(\"-\")[0]\n \n destinations = [] # destinations is a list in case a filetype is both a source and output filetype\n \n if (file_ext in EXTS_SOURCE_FILES) and flag_find_source_files:\n destinations.append(os.path.join(target_source_path,part_code,part_number)) \n \n if (file_ext in EXTS_OUTPUT_FILES) and flag_find_output_files:\n destinations.append(os.path.join(target_source_path,part_code,part_number)) \n \n if destinations: \n job_files.append(File(filename,root,file_size,destinations,part_number,part_code))\n print(f\"Found: {filename}\")\n \n return job_files", "def job_names(self):\n resp = self._cmd(uri = '/jenkins_jobs')\n names = []\n for item in resp.get('jobs'):\n names.append(item.get('job_name'))\n return sorted(names)", "def get_pipeline_names() -> Iterable[str]:\n for item in sorted((SRC / \"pipelines\").iterdir()):\n if not item.name.startswith(\"_\") and not item.is_file():\n yield item.name", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def filenames(self) -> dict[str, str]:\r\n ...", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def ListJobs(self, token=None):\n return aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token).ListChildren()", "def list_state_files(self, force_update: bool = False) -> List[str]:\n self._update_results(force_update)\n state_files = self._detailedOutputs['states']\n file_prefix = self._detailedOutputs['jobOutputPath']\n return [f\"{file_prefix}/{s}\" for s in state_files]", "def listFiles(self):\n pass", "def jobs(self):\n return self.get_jobs()", "def filepaths(self):\n pass", "def job_ids(self) -> List[str]:\n return self._db_data.job_ids", "def handle_list(self, job):\n print(\"User requested list of files.\")\n # Send LIST job to all servers\n self.put_job_in_all_queues(job)\n list_job_results = self.get_internal_results_from_all_servers()\n if len(list_job_results) == 0:\n # There were no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers running\"))\n return\n\n # Concatenate the lists of files\n total_files_list = []\n for result in list_job_results:\n files_list = result.result[\"files_list\"]\n for each_file in files_list:\n if each_file not in total_files_list:\n total_files_list.append(each_file)\n\n # Return the files list\n response_result = 
copy.deepcopy(list_job_results[0])\n response_result.result[\"files_list\"] = total_files_list\n\n self.put_external_result(response_result)", "def _filenames(self, dir_or_file):\n if os.path.isdir(dir_or_file):\n return glob(os.path.join(dir_or_file, \"*.txt\"))\n else:\n return [dir_or_file]", "def _get_filenames():\n src_dir = os.path.join(FLAGS.dataset_dir, FLAGS.src_dir)\n filenames = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if\n f.endswith(\".tfrecord\") and\n all([blackflag not in f for blackflag in TEMP_BLACK_LIST])]\n shuffle(filenames)\n return filenames", "def list(self):\n return [os.splitext(el)[0] for el in\n os.listdir(str(self.model_dir))]", "def _get_files_list(self):\n ts_filepaths = []\n conn_filepaths = []\n ts_filepaths_from_dir = sorted(os.listdir(self.ts_dir))\n conn_filepaths_from_dir = sorted(os.listdir(self.conn_dir))\n for sub_id in self.ids:\n for ts_file in ts_filepaths_from_dir:\n if sub_id in ts_file:\n ts_filepaths += [os.path.join(self.ts_dir, ts_file)]\n ts_filepaths_from_dir.remove(ts_file)\n break\n for conn_file in conn_filepaths_from_dir:\n if sub_id in conn_file:\n conn_filepaths += [os.path.join(self.conn_dir, conn_file)]\n conn_filepaths_from_dir.remove(conn_file)\n break\n\n return ts_filepaths, conn_filepaths", "def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()", "def list_requested_files(request):\n request_datetime_range = datetime_range(\n request.start_datetime,\n request.end_datetime,\n request.time_resolution\n )\n # Translate request datetime into files\n request_file_names = [\n datetime_to_file_name(ts) for ts in request_datetime_range\n ]\n return request_file_names", "def process_jobs_(jobs):\n out = []\n for job in jobs:\n out_ = MultiProcessingFunctions.expand_call(job)\n out.append(out_)\n return out", "def joplin_file_name(joplin_dir: str) -> str:\n for (_, _, files) in os.walk(joplin_dir):\n for filename in files:\n file_full_name = os.path.join(joplin_dir, filename)\n if os.path.isfile(file_full_name):\n yield file_full_name", "def _get_file_names():\n file_names = {}\n file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 6)]\n file_names['test'] = ['test_batch']\n\n return file_names", "def get_jobs(self):\n return list(self._jobs.values())", "def get_path_list(self, suffix=img_type):\n img_list = list(filter(lambda x: x.endswith(suffix), self.path_list))\n return img_list", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def _list(self):\n\n files = self.read_all_pages(\n self.metadata_url + 'nodes/' + self.backup_target_id +\n '/children?filters=kind:FILE')\n\n self.names_to_ids = {f['name']: f['id'] for f in files}\n\n return self.names_to_ids.keys()", "def _get_output_filenames(output_path, dpp=None):\n ret = []\n for fname in os.listdir(output_path):\n ext = _ext(dpp)\n if re.match(r\"get[^_]+[_free\\d?]?\" + ext, fname):\n ret.append(fname)\n return ret", "def get_filenames_strains(self, file_path_template_newick_tree):\n\t\tassert self.validate_file(file_path_template_newick_tree)\n\t\tlist_of_filenames_strains = []\n\t\ttree = Phylo.read(file_path_template_newick_tree, 'newick')\n\t\tfor leaf in tree.get_terminals():\n\t\t\tprefix = leaf.name\n\t\t\tif prefix.lower() == \"ancestor\":\n\t\t\t\tcontinue\n\t\t\tlist_of_filenames_strains.append(\"{prefix}.fasta\".format(prefix=prefix))\n\t\treturn list_of_filenames_strains", "def get_files(self) -> tp.Iterable[str]:\n return os.listdir(self.path)", "def getFilenameList(path, 
pattern='*.nii.gz'):\n\n filename = [os.path.basename(x) for x in sorted(glob.glob(os.path.join(path, pattern)))]\n\n return filename", "def importMethodFileNames(directory):\n\n allfileNames = os.listdir(directory)\n methodsRunList = []\n\n for filename in allfileNames:\n if \"_MethodRunLog_\" in filename:\n methodsRunList.append(filename)\n return methodsRunList", "def get_files_to_be_indexed(self):\n\t\tfiles = self.get_all_files()\n\t\tfiles_list = []\n\t\tfor name in files:\n\t\t\tif(name.split('.')[-1] in self.accepted_formats and os.stat(os.path.join(self.root, name)).st_size < 5000000):\n\t\t\t\tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def urls(self) -> list[str]:\r\n ...", "def queue_job_names(self):\n return [attrs[self.QCOL_NAME] for attrs in self.queue.values()]", "def get_filename(self, dt):\n if self.period:\n flist = self.files_matching(dt)\n else:\n raise NotImplementedError\n #Now figure out the priority..", "def get_filenames(self, bucket, directory, delimiter=''):\n b = self.conn.get_bucket(bucket)\n rs = b.list(directory, delimiter)\n return [key.name for key in rs if '$folder$' not in key.name]", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def get_html_filenames():\n filenames = []\n file_folder = os.getcwd() + \"\\\\htmls\"\n for file in os.listdir(file_folder):\n if file.endswith(\".html\"):\n filenames.append('htmls\\\\' + file)\n return filenames" ]
[ "0.6951387", "0.6711222", "0.6683628", "0.6501705", "0.64207095", "0.6337588", "0.6322085", "0.6264899", "0.62041765", "0.6117254", "0.61150414", "0.6107383", "0.6075868", "0.60367036", "0.6032433", "0.6024823", "0.6024823", "0.6017", "0.6010282", "0.5977642", "0.59388155", "0.5893192", "0.58910984", "0.5885156", "0.5885156", "0.58743596", "0.58691245", "0.58625114", "0.5851566", "0.5829595", "0.5815875", "0.5813537", "0.58107287", "0.58093995", "0.5801752", "0.5791863", "0.578466", "0.5782432", "0.5777231", "0.57763326", "0.5756263", "0.57313377", "0.57166743", "0.5678608", "0.56747305", "0.5671772", "0.5668712", "0.5666337", "0.5653752", "0.56384534", "0.56355464", "0.5634077", "0.5632714", "0.56298697", "0.56268454", "0.5624901", "0.5619775", "0.5590431", "0.5586394", "0.55804753", "0.55804753", "0.558037", "0.5578246", "0.5577049", "0.5571723", "0.55703366", "0.55631286", "0.55614835", "0.5557646", "0.55511683", "0.5539901", "0.5537065", "0.55368024", "0.55327445", "0.5530444", "0.5528505", "0.55279344", "0.55197763", "0.5515078", "0.55092037", "0.5506776", "0.55044216", "0.54955655", "0.54911655", "0.5487684", "0.548213", "0.5480755", "0.546994", "0.5467194", "0.5458641", "0.54573023", "0.5454444", "0.54531425", "0.5447987", "0.54479134", "0.5433635", "0.5431097", "0.54287976", "0.54248", "0.5422535", "0.5418382" ]
0.0
-1
Do the crawn job (see crawl function) from spider class name (eg. pyjobs_crawlers.spiders.myspider.MySpiderClass)
def crawl_from_class_name(spider_class_name, connector, spider_error_callback=None): module_name = '.'.join(spider_class_name.split('.')[:-1]) class_name = spider_class_name.split('.')[-1] spider_module = import_module(module_name) spider_class = getattr(spider_module, class_name) return crawl([spider_class], connector, spider_error_callback)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runSpider(spiderClass):\n\n\tprocess = CrawlerProcess({\n\t\t# 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n\t})\n\n\tprocess.crawl(spiderClass)\n\tprocess.start() # the script will block here until the crawling is finished", "def crawl(spider: str, book_id: int):\n proc = CrawlerProcess(get_project_settings())\n\n proc.crawl(spider, book_id=book_id)\n\n proc.start()", "def crawl(spiders_classes, connector, debug=False, spider_error_callback=stdout_error_callback):\n if debug:\n dispatcher.connect(spider_error_callback, signals.spider_error)\n\n process = CrawlerProcess({\n 'ITEM_PIPELINES': {\n 'pyjobs_crawlers.pipelines.RecordJobPipeline': 1,\n },\n 'connector': connector,\n 'LOG_ENABLED': False\n })\n\n for spider_class in spiders_classes:\n process.crawl(spider_class)\n\n spiders = []\n for crawler in list(process.crawlers):\n spiders.append(crawler.spider)\n process.start()\n\n return spiders", "def spidercls_for_request(spider_loader, request, default_spidercls: Optional[Any] = ..., log_none: bool = ..., log_multiple: bool = ...):\n ...", "def runSpider(spider, searchterm = None, fullink = None, spiderbotid = -1):\n sclogic.runSpider(spider, searchterm, fullink, spiderbotid)", "def main():\n\n from scrapy.crawler import CrawlerProcess\n from scrapy.utils.project import get_project_settings\n\n process = CrawlerProcess(get_project_settings())\n process.crawl(NCBIGeoSpider)\n process.start()", "def run_spider_on_zyte(spider_name):\n print(f'Running spider {spider_name}...')\n data = dict(project=project_id, spider=spider_name)\n response = requests.post('https://app.scrapinghub.com/api/run.json', data=data, auth=(api_key, ''))\n return response.json()['jobid']", "def runCrawler(crawlerid):\n sclogic.runCrawler(crawlerid)", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def main():\n # get input params\n args = parm_parser.get_args()\n # init log config\n log.init_log('./log/mini_spider')\n if args:\n # read config file spider.conf\n conf_params = parm_parser.set_config_by_file(args.conf)\n # use config set up spider initial params\n spider = SpiderWorker(conf_params)\n # init result_path, make it complete\n spider.set_path()\n # init url queue\n spider.set_url_queue()\n # start to crawl url\n spider.start_crawl_work()\n\n return", "def start_crawling(crawler_instance: Crawler):\n print('start crawling!')\n logger.info('started a new crawl!')\n crawler_instance.crawl()\n threading.Timer(Config.CRAWL_TIMEOUT, start_crawling,\n [crawler_instance]).start()", "def crawl(self):\n\n # create helper process and setup IPC\n self.socket.listen(1)\n help_out_fd = open(self.helper_outfile, \"w\")\n with subprocess.Popen(\"./crawl_helper.py\", stdout=help_out_fd, stderr=subprocess.STDOUT) as proc:\n self.helper_pid = proc.pid\n try:\n conn, _ = self.socket.accept()\n # create initial params for crawler helper and send them\n new_urls = set()\n setup_params = {\"start_urls\": self.start_urls, \"allowed_domains\": [self.domain],\n \"cookies\": self.cookies, \"user_agent\": self.config[\"user_agent\"]}\n ipc_operations.send_object(conn, setup_params)\n\n # loop: receive a response object, then send new URLs to crawl. 
Catch & handle problems.\n while True:\n try:\n proc.wait(timeout=0.001)\n break\n except subprocess.TimeoutExpired:\n response = ipc_operations.receive_object(conn)\n if not response: # socket is dead / closed\n break\n new_urls = self.process_response(response)\n ipc_operations.send_object(conn, new_urls)\n except socket.timeout:\n util.printit(\"Unix socket connection to scrapy crawler unexpectedly broke. \" +\n \"Quitting crawling of %s\" % self.base_url, color=util.RED)\n break\n finally:\n # ensure connection is closed and helper process killed in any case\n conn.close()\n proc.kill()\n\n # after the actual crawling, extract all the gathered cookies from Selenium\n if self.config[\"use_selenium\"].lower() == \"true\":\n selenium_cookies = self.driver.get_cookies()\n for cookie in selenium_cookies:\n if not any(cookie[\"name\"] == c[\"name\"] and cookie[\"path\"] == c[\"path\"] and\n cookie[\"domain\"] == c[\"domain\"] for c in self.found_cookies):\n parsed_cookie = {}\n for key in (\"name\", \"path\", \"domain\", \"httpOnly\", \"secure\"):\n parsed_cookie[key] = cookie[key]\n self.found_cookies.append(parsed_cookie)\n\n help_out_fd.close()\n return self.create_results()", "def WebscraperTask(job_id):\n logger.debug(\"Entering dispatch_wordscraper with job ID: %s\" % str(job_id))\n\n # Obtain all required crawler arguments\n job = Job.objects.get(pk=job_id)\n word_list = [(term.term_id, term.term)\n for term in Term.objects.filter(job=job)]\n site_list = [(site.site_id, site.url, job.max_depth, 'English')\n for site in Site.objects.filter(job=job)]\n\n # Initialize the crawling process\n logger.debug(\"Configuring crawl controller with job_id: \" +\n \"%d, terms: %s, sites: %s\" % (job_id, str(word_list),\n str(site_list),))\n controller = init_controller()\n controller.crawl(job_id, site_list, word_list)\n logger.debug(\"Crawl complete for job: %d\" % job_id)\n\n # Cache the results in the database so the results are dated as close to the\n # crawl date as possible.\n results = controller.query(site_list, word_list)\n result_cache = Result()\n result_cache.job = job\n result_cache.output = results\n result_cache.save()", "def run_crawl(self, args):\n\n # Crawler process declaration\n process = CrawlerProcess({\n 'USER_AGENT': 'David Avs Crawler',\n 'FEED_FORMAT': 'json',\n 'FEED_URI': 'stdout:',\n 'LOG_ENABLED': args.verbose,\n })\n kwargs = dict(\n start_urls=[args.url],\n allowed_domains=[urlparse.urlparse(args.url).netloc.lower()],\n )\n\n # Run crawling\n old_stdout = sys.stdout\n sys.stdout = args.file\n try:\n process.crawl(DavidAvsSpider, **kwargs)\n process.start()\n finally:\n sys.stdout = old_stdout", "def start_crawlers(spider_name: str, rules: List[Rule]) -> None:\n runner = CrawlerRunner(settings)\n crawlers = runner.spider_loader.list()\n crawlers = [c for c in crawlers if c.__contains__(spider_name)]\n if crawlers:\n for rule in rules:\n runner.crawl(crawlers[0], rule=rule)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n launch_logger.debug('all finished.')\n else:\n launch_logger.warning('provide the right spider name.')", "def start(self):\n self.start_spider()\n self.start_ranker()\n\n concurrent.futures.wait(self.spider_thread_futures) # wait for spiders to finish\n self.logger.info(\"Done crawling\")\n self.ranker.done_crawling.set()\n\n self.ranker.print_ranks()", "def open_spider(self,Spider):\n pass", "def crawl_start(crawl_obj):\n \n res = None\n\n if crawl_obj.type in ['user', 'song'] :\n res = eval('crawl_' + 
crawl_obj.type)(crawl_obj)\n elif crawl_obj.type in ['artist', 'album'] :\n web_data = requests.get(crawl_obj.url, headers = cheat_headers)\n soup = bs4.BeautifulSoup(web_data.text, 'lxml')\n\n res = eval('crawl_' + crawl_obj.type)(crawl_obj.type, soup)\n else:\n print(\"Object type UNKNOWN!\")\n \n return res", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(autos_detran_terceiros_sp_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider", "def run_spiders():\n hour_limit = 3600 # in seconds\n\n settings = get_project_settings()\n\n # Uncomment the following block of code if you want to test the manager on\n # the \"onepagetest\" to make sure the manager is working.\n \"\"\"\n settings[\"HTTPCACHE_ENABLED\"] = 1\n settings[\"HTTPCACHE_EXPIRATION_SECS\"] = 0\n settings[\"HTTPCACHE_STORAGE\"] = \"scrapy.extensions.httpcache.FilesystemCacheStorage\"\n settings[\"HTTPCACHE_IGNORE_MISSING\"] = 1\n settings[\"HTTPCACHE_DIR\"] = \"onepagetest\"\n \"\"\"\n\n runner = CrawlerRunner(settings)\n begin_time = datetime.now()\n\n d = runner.crawl(\"sb_spider\")\n d.addBoth(lambda _: continue_crawl(d, runner, begin_time, hour_limit))\n reactor.run()", "def run_generic_spider( user_id\t\t\t\t= None,\n\t\t\t\t\t\tspider_id\t\t\t= None,\n\t\t\t\t\t\tdatamodel\t\t\t= None,\n\t\t\t\t\t\trun_spider_config\t= None,\n\t\t\t\t\t\ttest_limit\t\t\t= None\n\t\t\t\t\t\t):\n\n\tprint ()\n\tlog_scrap.info(\"--- run_generic_spider / spider_id : %s \", spider_id )\n\n\t### WARNING !!! --> TEMPORARY SOLUTION\n\t### remove spider folder for spider_id in JOBDIR\n\tlog_scrap.debug(u\"--- run_generic_spider / cwd : %s\", os.getcwd() )\n\ttry :\n\t\tshutil.rmtree( os.getcwd() + \"/\" + JOBDIR_FOLDER + \"/\" + spider_id )\n\texcept:\n\t\tpass\n\tlog_scrap.debug(u\"--- run_generic_spider / removed folder : {}/{}\".format(JOBDIR_FOLDER, spider_id) )\n\n\t# !!! 
spider is launched from main.py level !!!\n\t# all relative routes referring to this...\n\tlog_scrap.info(\"--- run_generic_spider / os.getcwd() : %s \", os.getcwd() )\n\n\t### flattening run_spider_config : from nested to flat dict\n\tlog_scrap.info(\"--- run_generic_spider / 'flattenSpiderConfig()' on 'run_spider_config' --> 'spider_config_flat' ...\" )\n\tspider_config_flat = flattenSpiderConfig( run_spider_config )\n\n\n\t### settings for crawler\n\t# cf : https://hackernoon.com/how-to-crawl-the-web-politely-with-scrapy-15fbe489573d\n\n\t### global settings for scrapy processes (see upper)\n\tlog_scrap.info(\"--- run_generic_spider / BOT_NAME : %s \", settings.get('BOT_NAME') )\n\tlog_scrap.info(\"--- run_generic_spider / USER_AGENT : %s \", settings.get('USER_AGENT') )\n\tlog_scrap.info(\"--- run_generic_spider / ITEM_PIPELINES : %s \", settings.get('ITEM_PIPELINES').__dict__ )\n\n\n\t# specific settings for this scrapy process\n\n\t# settings.set( \"RETRY_TIMES\"\t\t\t\t\t\t, RETRY_TIMES )\n\t# settings.set( \"CONCURRENT_ITEMS\"\t\t\t\t, CONCURRENT_ITEMS )\n\t# settings.set( \"CONCURRENT_REQUESTS\"\t\t\t\t, CONCURRENT_REQUESTS )\n\t# settings.set( \"CONCURRENT_REQUESTS_PER_DOMAIN\"\t, CONCURRENT_REQUESTS_PER_DOMAIN )\n\t# settings.set( \"REDIRECT_MAX_TIMES\"\t\t\t\t, REDIRECT_MAX_TIMES )\n\t# settings.set( \"DOWNLOAD_MAXSIZE\" \t\t\t\t, DOWNLOAD_MAXSIZE )\n\t# settings.set( \"DEPTH_PRIORITY\"\t\t\t\t\t, DEPTH_PRIORITY )\n\t# settings.set( \"SCHEDULER_DISK_QUEUE\"\t\t\t, SCHEDULER_DISK_QUEUE )\n\t# settings.set( \"DEPTH_PRIORITY\"\t\t\t\t\t, SCHEDULER_MEMORY_QUEUE )\n\n\t# settings.set( \"RANDOMIZE_DOWNLOAD_DELAY\"\t\t, RANDOMIZE_DOWNLOAD_DELAY )\n\t# cf : https://doc.scrapy.org/en/latest/topics/jobs.html#job-directory\n\tsettings.set( \"JOBDIR\"\t\t\t\t\t\t\t, JOBDIR_FOLDER + \"/\" + spider_id )\n\n\t## https://scrapy.readthedocs.io/en/0.12/topics/extensions.html#module-scrapy.contrib.closespider\n\n\tsettings.set( \"CURRENT_SPIDER_ID\" \t\t\t, spider_id )\n\tsettings.set( \"RETRY_TIMES\"\t\t\t\t\t\t\t, spider_config_flat[\"RETRY_TIMES\"] )\n\tsettings.set( \"CLOSESPIDER_ITEMCOUNT\"\t\t, spider_config_flat[\"LIMIT_ITEMS\"] )\n\t# settings.set( \"CLOSESPIDER_PAGECOUNT\"\t\t, spider_config_flat[\"LIMIT_PAGES\"] )\n\tsettings.set( \"DOWNLOAD_DELAY\" \t\t\t\t\t, spider_config_flat[\"download_delay\"] )\n\tsettings.set( \"CONCURRENT_ITEMS\"\t\t\t\t, spider_config_flat[\"CONCURRENT_ITEMS\"] )\n\tsettings.set( \"CONCURRENT_REQUESTS\"\t\t\t, spider_config_flat[\"CONCURRENT_REQUESTS\"] )\n\t# settings.set( \"DOWNLOAD_DELAY\" \t\t\t\t, DOWNLOAD_DELAY )\n\n\tsettings.set( \"BOT_NAME\"\t\t\t\t\t\t\t\t\t, spider_config_flat[\"BOT_NAME\"] )\n\tsettings.set( \"USER_AGENT\"\t\t\t\t\t\t\t\t, spider_config_flat[\"USER_AGENT\"] )\n\tsettings.set( \"ROBOTSTXT_OBEY\"\t\t\t\t\t\t, spider_config_flat[\"ROBOTSTXT_OBEY\"] )\n\tsettings.set( \"AUTOTHROTTLE_ENABLED\"\t\t\t, spider_config_flat[\"AUTOTHROTTLE_ENABLED\"] )\n\tsettings.set( \"HTTPCACHE_ENABLED\"\t\t\t\t\t, spider_config_flat[\"HTTPCACHE_ENABLED\"] )\n\tsettings.set( \"RANDOMIZE_DOWNLOAD_DELAY\"\t, spider_config_flat[\"RANDOMIZE_DOWNLOAD_DELAY\"] )\n\n\t### initiating crawler process\n\tlog_scrap.info(\"--- run_generic_spider / instanciate process ...\" \t )\n\tprocess = CrawlerRunner( settings = settings )\n\n\t### adding CrawlerRunner as deferred\n\tdef f(q):\n\t\ttry:\n\t\t\t### send/create custom spider from run_spider_config\n\t\t\t### cf : 
https://stackoverflow.com/questions/35662146/dynamic-spider-generation-with-scrapy-subclass-init-error\n\n\t\t\tdeferred = process.crawl( \tGenericSpider,\n\t\t\t\t\t\t\t\t\t\t\tuser_id\t\t\t\t\t\t\t= user_id,\n\t\t\t\t\t\t\t\t\t\t\tdatamodel \t\t\t\t\t= datamodel ,\n\t\t\t\t\t\t\t\t\t\t\tspider_id \t\t\t\t\t= spider_id ,\n\t\t\t\t\t\t\t\t\t\t\tspider_config_flat\t= spider_config_flat,\n\t\t\t\t\t\t\t\t\t\t\ttest_limit\t\t\t\t\t= test_limit\n\t\t\t\t\t\t\t\t\t)\n\t\t\tdeferred.addBoth(lambda _: reactor.stop())\n\t\t\treactor.run()\n\t\t\tq.put(None)\n\t\texcept Exception as e:\n\t\t\tq.put(e)\n\n\t### putting task in queue and start\n\tq = Queue()\n\tp = Process(target=f, args=(q,))\n\tp.start()\n\tresult = q.get()\n\tp.join()\n\n\tif result is not None:\n\t\traise result\n\n\n\n\tprint (\"\\n\\n{}\\n\".format(\"> > > \"*20))", "def crawl(self):\n self.get('http://code.google.com/p/webscraping/')\n self.get('http://code.google.com/p/sitescraper/')\n QTimer.singleShot(5000, self.app.quit)", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(telecom_vivo_movel_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider", "def run(self):\n \n try:\n logging.info('Thread:{} starting'.format(self.thread_id))\n\n self.crawl_url()\n self.parse_html()\n except IOError as e:\n self.thread_post_processing()\n logging.error('CrawlUrlError url:{} msg:{}'.format(self.url, e))\n\n self.thread_post_processing()", "def run_crawler(self) -> List[JobEventSchema]:\n print(f\"Ready for scraping, current task: {self.tasks}\")\n\n crawling_result = []\n for task in self.tasks:\n result = task.run()\n crawling_result.extend(result)\n return crawling_result", "def open_spider(self, spider):\n pass", "def _scrape(self):", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def parse(self, response):\n page_jobs=[]\n\n # Calling abstarct method get_jobs_list() and iterating...\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstarct method get_job_dict()\n job_dict=self.get_job_dict(div)\n\n if not job_dict['url'] or not job_dict['title'] :\n # At least url, title data is loaded from the list of job posting ...\n raise ValueError( \"Could not find valid job information ('url' and 'title') in data:\\n\" + \n str(div.get()) + \"\\nScraped infos:\\n\" + str(job_dict) + \"\\nReport this issue on github!\" )\n \n # Store source as the name of the spider aka website\n job_dict['source']=self.name\n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - it's a new job (not in database)\n - load_full_jobs=Yes\n - the method parse_full_job_page() has been re-wrote by the Scraper subclass\n \"\"\"\n if ( (not self.db or self.db.find_job(job_dict)==None)\n and self.load_full_jobs ):\n if type(self).parse_full_job_page != Scraper.parse_full_job_page:\n # load_full_jobs=Yes and it's supported by scraper\n # Call parse_full_job_page() with job URL\n\n # Handle SeleniumRequest if use_selenium=True\n if self.use_selenium:\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n 
cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict))\n else:\n yield Job(job_dict)\n else:\n yield Job(job_dict)\n\n \"\"\" Just printing in one line \"\"\"\n if self.load_full_jobs:\n if type(self).parse_full_job_page == Scraper.parse_full_job_page:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True and load_all_new_pages=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True, some informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. load_all_new_pages=False and load_full_jobs=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url))\n else:\n self.log.info(\"Scraped {} jobs from {}. load_full_jobs=False, some informations might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n If all page jobs are new and \n The method get_next_page_url() has been re-wrote by the Scraper subclass\n Scrape next page\n \"\"\"\n if self.load_all_new_pages==True:\n if self.db and any( [self.db.find_job(job_dict)!=None for job_dict in page_jobs] ):\n # All new job postings loaded\n pass\n else:\n if self.get_next_page_url(response)!=None :\n # Loading next page...\n if self.use_selenium:\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(\n url=self.get_next_page_url(response),\n callback=self.parse)\n else:\n if type(self).get_next_page_url != Scraper.get_next_page_url:\n # Last page loaded\n pass\n else:\n self.log.info(\"Scraper {} does not support load_all_new_pages=True, some new job postings might be missing\".format(self.name))", "def crawl(self, url):\n return None", "def addCrawler(name):\n global allCrawlerNames\n if name == 'scihub':\n allCrawlers.append(ScihubCrawler())\n allCrawlerNames = [ c.name for c in allCrawlers ]", "def spider_and_save_thread(\n self, source_url, crawl_timeout, target_url_patten, max_depth, output_directory):\n url_result = self.crawl(\n source_url,\n crawl_timeout,\n target_url_patten,\n max_depth)\n self.save_url(url_result, crawl_timeout, output_directory)", "def scrape(self):\n\n self.jobs_load()\n self.new_jobs = []\n\n for bot in self.bot_squad:\n self.new_jobs += bot.scrape_all_pages()\n\n self.statistics(self.new_jobs)\n print('SCRAPE COMPLETE. 
NOTE: Resulting job list still in RAM')\n print('We observed %d new jobs' % len(self.new_jobs))", "def crawl(self, keyword, since=None, to=None):\n no_of_pages = self.get_no_of_pages(keyword, since, to)\n print('pages: ' + str(no_of_pages))\n self.keyword = keyword\n self.since = since\n self.to = to\n p = Pool()\n p.map(self.distribute, range(1, no_of_pages+1))", "def __init__(self):\n self.SPIDER = \"spider\"", "def parse(self, response):\n\n\t\t### close spider if exception\n\t\tif 'Bandwidth exceeded' in response.body:\n\t\t\traise CloseSpider('bandwidth_exceeded')\n\n\t\tlog_scrap.debug(u\"\\n>>> NEW PARSING >>>\\n\" )\n\t\tlog_scrap.info(\"--- GenericSpider.parse ...\" )\n\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s\" , response)\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s \\n\" , response.__dict__.keys() )\n\n\t\t# for k, v in response.__dict__.iteritems() :\n\t\t# \tlog_scrap.info(\"\\n--- [k] {} : [v] {} : \".format(k,v))\n\t\t# print response._body\n\t\tstart_url = response.meta[\"start_url\"]\n\t\tlog_scrap.info(\"--- GenericSpider.parse / start_url : %s\", start_url )\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start request with API crawler\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t# if self.spider_config_flat[\"parse_api\"] == True :\n\t\tif self.parse_api == True :\n\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting request on API endpoint... \" )\n\t\t\tjsonresponse = json.loads(response.body_as_unicode())\n\t\t\t# log_scrap.info(\"--- GenericSpider.parse / jsonresponse : \\n%s\", jsonresponse )\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / jsonresponse received...\" )\n\n\t\t\traw_items_list = get_dictvalue_from_xpath(jsonresponse, self.item_xpath)\n\t\t\t# raw_items_list = jsonresponse[self.item_xpath]\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / raw_items_list[0] : \\n%s\\n...\", pformat(raw_items_list[0]) )\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - API\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. / START LOOPING raw_items_list WITH API ...\" )\n\n\t\t\t\t# while self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - API - item n°{} >>> \\n\".format(self.item_count) )\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. 
core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_api_rest=True, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - API\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - API - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t# follow_link_raw = raw_data[ self.follow_xpath ]\n\t\t\t\t\t\t\tfollow_link_raw = get_dictvalue_from_xpath(raw_data, self.follow_xpath)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link_raw),follow_link_raw) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\turl_follow = self.page_url\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link_raw, url_root=url_follow)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\tfollow_is_api = self.follow_is_api\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url, 'item_n' : self.item_count , 'parse_api' : follow_is_api })\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t# log_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - API\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (API) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\turl_next = \"\"\n\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\turl_next = self.api_pagination_root\n\t\t\t\t\telse :\n\t\t\t\t\t\turl_next = self.page_url\n\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\tnext_page = url_next + str(self.page_count)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} \".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} \".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with pure Scrapy requests\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telif self.spider_config_flat[\"parse_reactive\"] == False :\n\t\t# elif self.parse_reactive == False :\n \n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting requests with Scrapy... \" )\n\t\t\t# self.parse_scrapy(response)\n\n\t\t\t### find items list\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / self.item_xpath : %s\", self.item_xpath )\n\t\t\traw_items_list = response.xpath(self.item_xpath)\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / len(raw_items_list) : %d \", len(raw_items_list) )\n\n\n\t\t\t### - - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - SCRAPY\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. 
/ START LOOPING raw_items_list WITH SCRAPY ...\" )\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Scrapy - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t# print \">>> raw_data : \\n\", raw_data.extract()\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - SCRAPY\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SCRAPY - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\tfollow_link \t= raw_data.xpath( self.follow_xpath ).extract_first()\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t# log_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t# yield Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url } )\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False} )\n\t\t\t\t\t\t\t\t# log_scrap.warning(u\">>> FOLLOWING LINK --> url : {} / WORKED !!! \".format(url) )\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\n\t\t\t\t\t\t\tlog_scrap.warning(u\">>> NO FOLLOW LINK ... 
\" )\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.items() : \\n %s\", item.items() )\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.keys() : \\n %s\", item.items() )\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t# print (\"\\n>>> NEXT ITEM \" + \">>> >>> \"*10, \"\\n\")\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - SCRAPY\n\t\t\t### check if there is a test_limit\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (Scrapy) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tis_next_page, next_page = self.get_next_page(response, start_url)\n\n\t\t\t\t\tif is_next_page :\n\n\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\turl_next = \"\"\n\t\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\t\turl_next = self.api_pagination_root\n\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE I : %s\", next_page )\n\t\t\t\t\t\tnext_page = self.clean_link(next_page, url_root=url_next)\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / NO MORE PAGE TO SCRAP - pages count : {} \".format(self.page_count) )\n\t\t\t\t\t\t# raise CloseSpider('NO MORE PAGE TO SCRAP')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with Selenium\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telse :\n\t\t\t### initiate selenium browser\n\t\t\t### cf : https://github.com/voliveirajr/seleniumcrawler/blob/master/seleniumcrawler/spiders/seleniumcrawler_spider.py\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting Selenium driver... \" )\n\n\t\t\t# retrieve exec path for chromedriver from settings_scrapy.py\n\t\t\t### GET APP MODE FROM ENV VARS\n\t\t\tapp_mode \t\t\t\t\t\t= os.environ.get('APP_MODE', 'default')\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / APP_MODE : %s\", app_mode)\n\t\t\tchromedriver_path \t= CHROMEDRIVER_PATH_LIST[ app_mode ]\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / chromedriver_path : %s\", chromedriver_path)\n\n\t\t\t### specify executable path to launch webdriver-->\n\t\t\t# cf : where chromedriver was installed when `brew install chromedriver`\n\t\t\tself.driver = webdriver.Chrome(executable_path=chromedriver_path, chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Chrome(chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Firefox()\n\t\t\t# self.driver = webdriver.Chrome()\n\t\t\t# self.driver = webdriver.PhantomJS() ### deprecated\n\n\t\t\t### setup waiting times\n\t\t\t# self.driver.set_page_load_timeout(60)\n\t\t\tself.wait_driver\t= WebDriverWait(self.driver, self.delay_driver)\n\t\t\tself.wait_page \t\t= WebDriverWait(self.driver, self.delay_new_page)\n\t\t\tself.driver.implicitly_wait(self.delay_implicit)\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_driver : %s\", self.delay_driver )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_new_page : %s\", self.delay_new_page )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_implicit : %s\", self.delay_implicit )\n\n\n\t\t\t### start parsing with selenium\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / response._url : %s\", response._url )\n\t\t\ttry :\n\t\t\t\tself.driver.get(response._url)\n\n\t\t\t\t### try scroll_down if needed in config\n\t\t\t\tif self.spider_config_flat['scroll_down'] : \n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / scroll_down is TRUE ... \" )\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericsSpider. / scroll_down - self.spider_config_flat : \\n%s\", pformat(self.spider_config_flat) )\n\n\t\t\t\t\tscroll_pause_time = self.spider_config_flat[\"scroll_pause_time\"]\n\t\t\t\t\tmax_loops \t\t\t\t= self.spider_config_flat[\"scroll_loops\"]\n\t\t\t\t\tself.driver = scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\t\t# scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. / url '{}' is loaded ... 
\".format( response._url ))\n\t\t\t\n\t\t\texcept :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tself.driver.close()\n\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\traise CloseSpider('DRIVER NOT RESPONDING')\n\n\n\t\t\t### clean original xpath from strings\n\t\t\tstrings_to_clean = [\n\t\t\t\t'/@src',\n\t\t\t\t'/@href',\n\t\t\t\t'/text()',\n\t\t\t\t'/@*[name()=\"xlink:href\"]',\n\t\t\t\t'/@datetime'\n\t\t\t]\n\n\t\t\t# while self.there_is_more_items_to_scrap :\n\t\t\twhile self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap )\n\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap_dict[start_url] )\n\n\t\t\t\ttry :\n\n\t\t\t\t\t### wait / debug page content\n\t\t\t\t\tpage_source_code = self.driver.page_source.encode(\"utf-8\")\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / page_source_code : \\n %s \", page_source_code )\n\t\t\t\t\ttime.sleep(self.delay_new_page)\n\n\t\t\t\t\t### start parsing page :\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.item_xpath : %s\", self.item_xpath )\n\t\t\t\t\traw_items_list \t= self.driver.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / raw_items_list length : %s\", len(raw_items_list) )\n\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / raw_items_list[0].text : \\n%s\", raw_items_list[0].text )\n\n\t\t\t\t\t# current_item_index = 0\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### PARSING PAGE - SELENIUM\n\t\t\t\t\t# loop through data items in page in response\n\t\t\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / START PARSING WITH SELENIUM ...\\n\" )\n\n\t\t\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\t\t\tprint()\n\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / START LOOPING raw_items_list WITH SELENIUM ...\" )\n\n\t\t\t\t\t\t\t### add +1 to items count\n\t\t\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} \".format(str(self.spider_name), self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} \".format(self.spider_name, self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - item n°{} \".format(self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ VARIABLES - spider_name : '%s' - item n°%s \" %(self.spider_name, self.item_count) )\n\n\t\t\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Selenium - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t\t\t### FOLLOW LINK - SELENIUM\n\t\t\t\t\t\t\t\t### find follow link to open detailled item view\n\t\t\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SELENIUM - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t\t\t### follow link with Scrapy\n\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Scrapy ...\" )\n\n\t\t\t\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ get href of follow_link ...\" )\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link_xpath : %s \", follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link\t\t\t= raw_data.find_element_by_xpath( follow_link_xpath ).get_attribute('href')\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW : %s \", follow_link )\n\n\t\t\t\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {}\".format(type(follow_link), follow_link ) )\n\n\t\t\t\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\t\t\t\turl\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False})\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\n\t\t\t\t\t\t\t\t\t### follow link with Selenium\n\t\t\t\t\t\t\t\t\t### FIND A WEBSITE TEST FOR REACTIVE DETAILLED PAGES\n\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Selenium ...\" )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_link_xpath : %s\", self.follow_link_xpath )\n\t\t\t\t\t\t\t\t\t\tfollow_link \t\t= raw_data.find_element_by_xpath( follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\t### open link in new tab ?\n\t\t\t\t\t\t\t\t\t\tfollow_link.click()\n\n\t\t\t\t\t\t\t\t\t\t### get data and save data\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / get data and save data ...\" )\n\t\t\t\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t\t\t\t### back to previous page and scrap from where it left\n\t\t\t\t\t\t\t\t\t\t\t### cf : https://selenium-python.readthedocs.io/navigating.html#navigation-history-and-location\n\t\t\t\t\t\t\t\t\t\t\tself.driver.back()\n\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF LIMIT_ITEMS')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF ITEMS - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### NEXT PAGE - SELENIUM\n\t\t\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\t\t\tif self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\t\t\t\tprint ()\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.info(\" --- GenericSpider.parse (Selenium) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t\t\t\t### add +1 to parsed pages\n\t\t\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\n\t\t\t\t\t\t\t\t### find next page btn in current view\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.next_page : %s\", self.next_page )\n\t\t\t\t\t\t\t\tnext_page_xpath = clean_xpath_for_reactive(self.next_page, strings_to_clean)\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page_xpath : %s\", next_page_xpath )\n\t\t\t\t\t\t\t\t# next_page \t= re.sub(\"|\".join(strings_to_clean), \"\", next_page )\n\n\t\t\t\t\t\t\t\t# try :\n\t\t\t\t\t\t\t\t# element_present = EC.presence_of_element_located((By.XPATH, next_page_xpath ))\n\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page present : %s\", element_present )\n\t\t\t\t\t\t\t\t# self.wait.until(element_present)\n\t\t\t\t\t\t\t\t# next_page = self.wait.until( EC.element_to_be_clickable(element_present) )\n\t\t\t\t\t\t\t\t# next_page \t\t= self.driver.find_element_by_xpath( next_page_xpath )\n\t\t\t\t\t\t\t\tnext_page \t\t= self.driver.find_element(By.XPATH, next_page_xpath )\n\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page : %s\", next_page )\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.text : %s\", next_page.text )\n\n\t\t\t\t\t\t\t\t# except TimeoutException:\n\t\t\t\t\t\t\t\t# except :\n\t\t\t\t\t\t\t\t# \tlog_scrap.error(\"--- GenericSpider. 
/ Timed out waiting for page to load\")\n\n\t\t\t\t\t\t\t\t### click next button and wait for ajax calls to complete (post and get)\n\t\t\t\t\t\t\t\t### cf : http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html\n\n\t\t\t\t\t\t\t\t# def wait_for(condition_function):\n\t\t\t\t\t\t\t\t# \t\tstart_time = time.time()\n\t\t\t\t\t\t\t\t# \twhile time.time() < start_time + 3:\n\t\t\t\t\t\t\t\t# \t\tif condition_function():\n\t\t\t\t\t\t\t\t# \t\t\treturn True\n\t\t\t\t\t\t\t\t# \t\telse:\n\t\t\t\t\t\t\t\t# \t\t\ttime.sleep(0.1)\n\t\t\t\t\t\t\t\t# \traise Exception ('Timeout waiting for {}'.format(condition_function.__name__) )\n\n\t\t\t\t\t\t\t\t# def link_has_gone_stale():\n\t\t\t\t\t\t\t\t# \t\ttry:\n\t\t\t\t\t\t\t\t# \t\t# poll the link with an arbitrary call\n\t\t\t\t\t\t\t\t# \t\tnext_page.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\t\t\t\t# \t\treturn False\n\t\t\t\t\t\t\t\t# \texcept StaleElementReferenceException :\n\t\t\t\t\t\t\t\t# \t\treturn True\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- ... ---\")\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.click() \" )\n\t\t\t\t\t\t\t\t\tnext_page.click()\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page.send_keys( \\ n )\" )\n\t\t\t\t\t\t\t\t\t# next_page.send_keys(\"\\n\")\n\t\t\t\t\t\t\t\t\t# added this step for compatibility of scrolling to the view\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / ALTERNATIVE next_page.click() \" )\n\t\t\t\t\t\t\t\t\t# self.driver.execute_script(\"return arguments[0].scrollIntoView();\", next_page)\n\t\t\t\t\t\t\t\t\t# next_page.click()\n\t\t\t\t\t\t\t\t\tself.driver.execute_script(\"arguments[0].click();\", next_page)\n\n\t\t\t\t\t\t\t\t### wait after click\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / wait for ajax to finish... \" )\n\t\t\t\t\t\t\t\t\t# wait_for(link_has_gone_stale)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return jQuery.active') == 0)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return document.readyState') == 'complete')\n\t\t\t\t\t\t\t\t\t# time.sleep(self.delay_implicit)\n\t\t\t\t\t\t\t\t\ttime.sleep(self.delay_new_page)\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / !!! FAIL / wait for ajax to finish... \" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF PAGES TO SCRAP - page n°{} / except -> break\".format(self.page_count) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF PAGES TO SCRAP')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\texcept :\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ NO MORE ITEMS TO SCRAP - item_count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\tself.driver.close()\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\traise CloseSpider('NO MORE ITEMS TO SCRAP')\n\t\t\t\t\tbreak", "def parse_item(self, response):\n NewhouseSpider.crawled_urls.append(response.url)\n item = FocusedScrapyCrawlerItem()\n item['url'] = response.url\n item['link_text'] = response.meta.get('link_text', '') if response.meta else ''\n soup = BeautifulSoup(response.body, 'html.parser')\n\n item['body_p_tags'] = self._getBodyText(soup)\n item['head_title'] = self._getHeadTitle(soup)\n item['last_crawled'] = time.time()\n links = self._getLinks(response, soup)\n\n # get score of the page based upon classifier\n if self.classifier:\n score = self.classifier.score(item['link_text'], item['head_title'], item['body_p_tags'])\n else:\n score = 0.0\n\n item['score'] = score\n yield item\n if score <= 0:\n self.log(\"item={} does not belong to new home so stop crawling\".format(item),\n logging.INFO)\n else:\n for link in links:\n req = Request(link, priority=int(score * 1000000), # after the request is done, run parse_item to train the apprentice\n callback=self.parse_item)\n yield req", "def generate_crawler(self):\n \n targets = ['https://community.upwork.com/t5/Announcements/bd-p/news', \\\n 'https://community.upwork.com/t5/Freelancers/bd-p/freelancers', \\\n 'https://community.upwork.com/t5/Clients/bd-p/clients', \\\n 'https://community.upwork.com/t5/Agencies/bd-p/Agencies']\n target = None\n for tar in targets:\n if tar in sys.argv:\n target = tar\n\n #Regenerate crawler object depending on params\n if target is None:\n if '-d' in sys.argv:\n crawler = Crawler(self.webdriver, self.db, debug=True)\n else:\n crawler = Crawler(self.webdriver, self.db)\n else:\n if '-d' in sys.argv:\n crawler = Crawler(self.webdriver, self.db, debug=True, link=target)\n else:\n crawler = Crawler(self.webdriver, self.db, link=target)\n\n return crawler", "def scrap_keywords():\n ParScr = ParallelScraper()\n ParScr.create_and_run_threads()\n return", "def scrap_site(link):\n pass # Scrapy or BeautifulSoup", "def parse(self, response):\n content = response.body\n if not content:\n return\n sel = Selector(response)\n #print sel.xpath('//table[@class=\"board-list tiz\"]/tr').extract()\n for job in sel.xpath('//ul[@class=\"sojob-list\"]/li'):\n #print 'd',job\n info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"job-info\"]')\n com_info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"company-info nohover\"]')\n title = info.xpath('h3/a/text()').extract_first().lower()\n if title.find('python') != -1:\n url = info.xpath('h3/a/@href').extract_first()\n request = scrapy.Request(url=url,\n callback=self.parse_items,\n headers=self.spider.headers,\n cookies=self.cookies)\n company_item, job_item = CompanyItem(), JobItem()\n company_item['name'] = com_info.xpath('p[@class=\"company-name\"]/a/text()').extract_first()\n company_item['homepage'] = com_info.xpath('p[@class=\"company-name\"]/a/@href').extract_first()\n job_item['pub_time'] = info.xpath('p[@class=\"time-info clearfix\"]/time/text()').extract_first()\n year = str(date.today().year)\n if str(year) not in job_item['pub_time']:\n if job_item['pub_time'] == u'昨天':\n job_item['pub_time'] = (date.today()-timedelta(days=1)).strftime(\"%Y-%m-%d\")\n elif job_item['pub_time'] == u'前天':\n job_item['pub_time'] = (date.today() - 
timedelta(days=2)).strftime(\"%Y-%m-%d\")\n else:\n job_item['pub_time'] = date.today().strftime(\"%Y-%m-%d\")\n job_item['title'] = title\n job_item['welfare'] = ' '.join(com_info.xpath('p[@class=\"temptation clearfix\"]/span/text()').extract())\n job_item['salary'] = info.xpath('p[@class=\"condition clearfix\"]/span[@class=\"text-warning\"]/text()').extract_first()\n request.meta['company_item'] = company_item\n request.meta['job_item'] = job_item\n yield request", "def scrape(location='', *args, **kwargs):\n if not location:\n location = Courses.location\n\n logger = setup_logging()\n\n logger.info('Starting Courses scrape')\n queue = Queue()\n\n for _ in Courses.LETTERS:\n course_worker = CourseWorker(queue, location)\n course_worker.daemon = True\n course_worker.start()\n\n for letter in Courses.LETTERS:\n queue.put(letter)\n\n queue.join()\n logger.info('Completed Courses scrape')", "def scrape(self):\n pass", "def crawlSite(siteUrl, siteName):\n\n\t#add the site to crawl & resources list. Also get the site name\n\tvariables.crawlFrontier = [siteUrl]\n\tvariables.resources = [siteUrl]\n\n\t#scan the site's home page\n\tscanPage(siteUrl, siteName)\n\n\t#repeatedly scan all the pages in crawlFrontier\n\tposLink = 1\t\t#at 0th index is the site's home page\n\n\twhile posLink < len(variables.crawlFrontier):\n\t\tprint('url=',variables.crawlFrontier[posLink])\n\n\t\turl = variables.crawlFrontier[posLink]\n\t\tscanPage(url, siteName)\n\n\t\tposLink += 1", "def run(self):\n print(\"INFO: start crawling comic, \" + self.comicSource.getComicName())\n\n if not self.onlyGetCurVol:\n pageNotFoundCounter = 0\n while self.comicSource.isCrawlAtEnd() == False:\n print(\"INFO: crawling volume \" + str(self.volumeNum) + \"...\")\n\n if self.comicSource.crawler(self.volumeNum, self.downloadDir) == False:\n pageNotFoundCounter += 1\n else:\n pageNotFoundCounter = 0\n\n if pageNotFoundCounter >= 400:\n print(\"ERROR: comic \" + self.comicSource.getComicName() + \" not found, exit\")\n break\n\n self.volumeNum += 1\n else:\n print(\"INFO: crawling volume \" + str(self.volumeNum) + \"...\")\n self.comicSource.crawl(self.volumeNum)\n\n print(\"INFO: retrieve comic\" + self.comicSource.getComicName() + \" end!\")\n self.comicSource.quit()", "def __init__(self, urls_file_, file_spider_='no', target_format_='', ignored_links_file_='',\n allow_clean_url_='no', time_out_=60, work_path_='./',\n max_recursion_depth_=0, one_bite_='no', white_list_path_=\"\"):\n self.__urls = Crawler.__read_file(urls_file_)\n self.__file_spider = file_spider_\n self.__target_format = target_format_\n self.__allow_clean_url = allow_clean_url_\n self.__one_bite = one_bite_\n self.__white_list_path = white_list_path_\n self.__white_list = []\n\n # loads white list in beginning in case an argument was passed for it\n if self.__file_spider == 'yes' and self.__white_list_path != '':\n self.__white_list = Crawler.__read_white_list(self.__white_list_path)\n\n # link titles that should be ignored during recursions\n self.__ignored_links = Crawler.__read_file(ignored_links_file_)\n\n self.__time_out = time_out_\n self.__work_path = os.path.join(work_path_.rstrip('/')+'/', 'DATA')\n self.__recursion_max_depth = max_recursion_depth_\n self.__extensions = ['txt', 'html', 'csv', 'tsv', 'tar', 'raw']\n\n logging.info('''Crawler Has been Initialized With The Below Configurations:\n-------------------------------------------------------------------\n-urls: %s\n-file_spider: %s\n-target_format: %s\n-ignored_links_file: %s\n-allow_clean_url: 
%s\n-time_out: %s\n-work_path: %s\n-max_recursion_depth: %s\n-one_bite: %s\n-white_list_path: %s\n''', self.__urls, self.__file_spider, self.__target_format, self.__ignored_links,\n self.__allow_clean_url, self.__time_out, self.__work_path,\n self.__recursion_max_depth, self.__one_bite, self.__white_list_path)", "def setup_crawler(self, crawlers: List[BaseCrawler]) -> None:\n self.tasks.extend(crawlers)", "def threaded_crawler(seed_url, delay=5, cache=None, scrape_callback=None, proxies=None, num_retries=1, max_threads=10):\n crawl_queue = MongoQueue(timeout=30)\n crawl_queue.clear()\n crawl_queue.push(seed_url)\n D = Downloader(\n cache=cache,\n delay=delay,\n proxies=proxies,\n num_retries=num_retries\n )\n\n def process_queue():\n while True:\n try:\n url = crawl_queue.pop()\n # print(url)\n except KeyError:\n # crawl queue is empty\n break\n else:\n html = D(url)\n if scrape_callback:\n try:\n links = scrape_callback(url, html) or []\n crawl_queue.complete(url)\n except Exception as e:\n print('Error in callback for: {}: {}'.format(url, e))\n\n # wait for all download threads to finish\n threads = []\n while threads or crawl_queue:\n # the crawl is still active\n for thread in threads:\n if not thread.is_alive():\n # remove the stopped threads\n threads.remove(thread)\n while len(threads) < max_threads and crawl_queue:\n # can start some more threads\n thread = threading.Thread(target=process_queue)\n thread.setDaemon(True) # set daemon so main thread can exit when receives ctrl-c\n thread.start()\n threads.append(thread)\n # all threads have been processed\n # sleep temporarily so CPU can focus execution on other threads\n time.sleep(SLEEP_TIME)", "def crawl_page(self, keyword, since=None, to=None, page=None):\n data = self.get_news(keyword, since, to, page)\n print(current_process())\n print('crawling page no.: ' + str(page))\n urls = self.get_urls(data)\n p = Process()\n p.start(urls)", "def genspider(ctx, name, agency_name, start_urls):\n start_urls = start_urls.split(',')\n domains = _get_domains(start_urls)\n _gen_spider(name, agency_name, domains, start_urls)\n _gen_tests(name)\n _gen_html(name, start_urls)", "def parse(self, response):\n page_source = self.upwork_controller.get_source_home()\n\n # Hand-off between Selenium and Scrapy happens here\n sel = Selector(text=page_source)\n # Extract data\n sections = sel.xpath(\"//section/div\")\n\n for section in sections:\n selector = Selector(text=section.get())\n jobtitle = selector.xpath(\"//div/div/div/h4/a/text()\")\n jobdescription = selector.xpath(\"//div/div/div/div/div/div/div/span/span/text()\")\n hourlypay = selector.xpath(\"//div/div/div/div/small/span/strong/text()\")\n proposals = selector.xpath(\"//div/div/div/div/div/span/small/strong/text()\")\n country = selector.xpath(\"//div/div/div/div/small/span/span/span/span/strong[@class='text-muted client-location ng-binding']/text()\")\n\n job = Job(jobtitle=jobtitle.get(),\n jobdescription=jobdescription.get(),\n hourlypay=hourlypay.get(),\n proposals=proposals.get(),\n country=country.get())\n job.serialize()\n yield job.dict()", "def parse_inner_urls(self, response):\n s = Selector(response)\n\n jobs_per_site = s.xpath('//div[@class=\"col-lg-12 col-md-12 col-sm-12 aggelia-view-title\"]//a/@href').extract()\n print(jobs_per_site)\n\n for inner_site in jobs_per_site:\n url = urljoin(\"https://www.skywalker.gr/\", inner_site)\n yield scrapy.Request(url, callback=self.parse_items)", "def scrape(site=''):\n scraper.scrape(get_site_config(site))", "def continue_crawl(d, 
runner, begin_time, hour_limit):\n end_time = datetime.now()\n\n # Convert elapsed time to seconds\n elapsed_time = end_time - begin_time\n elapsed_time_seconds = elapsed_time.days * 86400\n elapsed_time_seconds += elapsed_time.seconds\n\n if elapsed_time_seconds < hour_limit:\n wait_time = hour_limit - elapsed_time_seconds\n # print(\"Waiting for {0} seconds\".format(wait_time))\n time.sleep(wait_time)\n\n d = runner.crawl(\"sb_spider\")\n\n # end_time is the new \"start\" time for this iteration of the spider.\n # Pass it in as the 3rd argument.\n d.addBoth(lambda _: continue_crawl(d, runner, end_time, hour_limit))", "def crawler():\n job_entries = []\n for job in job_info(URL):\n labels = \"\"\n if job[\"labels\"]:\n for label in job[\"labels\"]:\n labels += label[\"name\"]\n if job[\"labels\"].index(label) != len(job[\"labels\"]) - 1:\n labels += \",\"\n job_entries.append((job[\"number\"], job[\"id\"],\n job[\"title\"], job[\"html_url\"], labels))\n\n conn = sqlite3.connect('jobber/jobber.db')\n c = conn.cursor()\n c.executemany(('INSERT OR IGNORE INTO job_entries '\n 'VALUES (?,?,?,?,?)'), job_entries)\n conn.commit()\n conn.close()", "def __init__(self, *args, **kwargs):\n super(AlibabaCompanySpider, self).__init__(*args, **kwargs)", "def run(self):\n\n # The url is too deep, skip the url.. Work is done!\n if self.depth_ > self.depth:\n return\n\n # Get doc id corresponds to the url. Add a new entry into doc index if there is no entry.\n doc_id = self.crawler.document_id(self.curr_url)\n\n # Check if the doc_id has been visited/processed by any of crawler_threads. Add doc_id to seen if not so.\n if self.crawler.checkDocVisitedAndUpdate(doc_id):\n return\n\n # Process the document corresponds to the url\n socket = None\n try:\n socket = urllib2.urlopen(self.curr_url, timeout=self.timeout)\n soup = BeautifulSoup(socket.read())\n self._curr_depth = self.depth_ + 1\n self._curr_doc_id = doc_id\n # Traverse the document as deep as possible and add those newly discovered urls into url queue\n self._index_document(soup)\n # Store (wordId, docId) and (word, url) into inverted_index and resolved_inverted_index respectively.\n self.crawler._add_words_to_document(self._curr_words, self._curr_doc_id)\n except:\n pass\n finally:\n if socket:\n socket.close()", "def single_crawl(self, urlitem: str):\n # print(\"Item: \", urlitem)\n try:\n hdr = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 \",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Charset\": \"ISO-8859-1,utf-8;q=0.7,*;q=0.3\",\n \"Accept-Encoding\": \"none\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"Connection\": \"keep-alive\",\n }\n try:\n req = Request(urlitem, headers=hdr)\n html_page = urlopen(req)\n soup = BeautifulSoup(html_page, \"lxml\")\n links = [\n requests.compat.urljoin(urlitem, link.get(\"href\"))\n for link in soup.findAll(\"a\")\n ]\n links = [x for x in links if \"#\" not in x]\n except Exception as e:\n # print(e)\n pass\n return links\n\n except:\n pass", "def crawl(self, query, start_date, end_date, *args, **kwargs):\n pass", "def __init__(self, board='Gossiping', pages=1, file='tmp.json', title_lim=[], jsonf=None, copy_data=[], simple_mode=True):\n if copy_data:\n self.extend(copy_data)\n return\n os.chdir(os.path.split(os.path.realpath(__file__))[0])\n print(os.getcwd())\n com = 'scrapy crawl ptt ' if not simple_mode else 'scrapy crawl ptt_url '\n # output json file name\n com += '-o %s 
' % (file)\n # page\n com += '-a pages=%d ' % (pages)\n # board\n com += '-a board=%s ' % (board)\n\n # title limit\n if title_lim:\n com += '-a title_lim=\"'\n for lim in title_lim:\n com += \"%s,\" % (str(lim))\n com += '\" '\n # not opened by json_file\n if not jsonf:\n # start crawl\n print('Command: ' + com)\n os.system('rm -f {}'.format(file))\n os.system('{}'.format(com))\n # opened by json file\n else:\n file = jsonf\n\n # all data save in self\n self.load_json(file)\n self.com = com\n self.file = file", "def scrape_pipeline(args):\n kickoff = args.kickoff\n fname = args.fname\n d = DbHelper()\n s = Scraper()\n c = Crawler(20)\n if fname is not None:\n app_names = pd.read_csv(fname)['packageName'].tolist()\n apps = [list(a) for a in zip(app_names, d.app_names_to_uuids(app_names))]\n else:\n apps = None\n\n # start by updating top apps\n if not args.skip_top:\n logger.info(\"getting top apps...\")\n new_top_list = c.get_top_apps_list()\n logger.info(\"scraping top apps not in DB...\")\n s.scrape_missing(new_top_list, compare_top=True)\n logger.info(\"updating top apps...\")\n d.update_top_apps(new_top_list)\n\n if kickoff == True:\n s = None\n if fname is None:\n # use crawler to get list of package names\n logger.error(\"Crawler for package names not implemented yet\")\n return\n else:\n # use specified file of package names\n s = Scraper(input_file=fname)\n\n # use scraper\n logger.info(\"Starting efficient scrape...\")\n s.efficient_scrape()\n logger.info(\"...efficient scrape done\")\n else:\n # use updater\n logger.info(\"Starting updater...\")\n if fname is None:\n u = Updater()\n else:\n u = Updater(input_file=fname)\n u.update_apps()\n logger.info(\"...update done\")\n\n # crawl privacy policies\n c.crawl_app_privacy_policies(app_list=apps)\n\n if args.no_decompile:\n # download only\n logger.info(\"Starting download...\")\n downloader = Downloader()\n if apps is None:\n downloader.download_all_from_db(top=True)\n else:\n downloader.download(apps)\n logger.info(\"...done\")\n else:\n # download/decompile\n logger.info(\"Starting download and decompile...\")\n download_decompile_all()\n logger.info(\"...download and decompile done\")\n logger.info(\"run analysis pipeline now\")", "def parse(self, response):\n content_type = self.get_content_type(response.headers)\n\n sitescan = response.meta.get('sitescan')\n\n if 'text/html' not in self.get_content_type(response.headers):\n\n # For linked content, find the urlscan it linked from\n urlscan = model.URLScan.objects.get(\n site_scan=sitescan,\n page_url_hash=sha256(response.meta['referrer']).hexdigest())\n else:\n # Only create urlscans for text/html\n urlscan, us_created = model.URLScan.objects.get_or_create(\n\n site_scan=sitescan,\n page_url_hash=sha256(response.url).hexdigest(),\n defaults={'page_url': response.url,\n 'timestamp': self.get_now_time()})\n\n # Continue crawling\n # Parse stylesheet links, scripts, and hyperlinks\n hxs = HtmlXPathSelector(response)\n\n # Extract other target links\n try:\n css_links = hxs.select('//link/@href').extract()\n except TypeError:\n css_links = []\n\n try:\n js_links = hxs.select('//script/@src').extract()\n except TypeError:\n js_links = []\n\n try:\n hyperlinks = hxs.select('//a/@href').extract()\n except TypeError:\n hyperlinks = []\n\n # Using a set removes duplicate links.\n all_links = set(hyperlinks + js_links + css_links)\n\n # Examine links, yield requests if they are valid\n for url in all_links:\n\n if not url.startswith('http://'):\n # ensure that links are to real sites\n 
if url.startswith('javascript:'):\n continue\n else:\n url = urljoin(response.url, url)\n\n ua = response.meta['user_agent']\n\n request = Request(url)\n request.headers.setdefault('User-Agent', ua.ua_string)\n request.meta['referrer'] = response.url\n request.meta['sitescan'] = sitescan\n request.meta['user_agent'] = ua\n request.meta['content_type'] = None\n\n yield request\n\n # The response contains a user agent, we should yield an item\n item = MarkupItem()\n item['content_type'] = self.get_content_type(response.headers)\n item['filename'] = os.path.basename(urlparse(response.url).path)\n item['headers'] = unicode(response.headers)\n item['meta'] = response.meta\n item['raw_content'] = response.body\n item['sitescan'] = sitescan\n item['urlscan'] = urlscan\n item['url'] = response.url\n item['user_agent'] = response.meta.get('user_agent')\n item['redirected_from'] = response.meta.get('redirected_from',\n u'')\n yield item", "def kickoff(self):\n settings = Settings()\n\n # settings.set(\"USER_AGENT\", \"Test\")\n settings.set('JOBDIR', self.args.data_dir)\n self.spider = MavenDataSpider()\n\n # Wrap with crawler, configure\n crawler = Crawler(self.spider, settings)\n crawler.signals.connect(spider_closing, signal=signals.spider_closed)\n\n logger.info('Starting crawler')\n crawler.crawl(self.spider, app=self, dbsess=self.session)\n\n self.spider = crawler.spider\n self.spider.link_queue_mode = False\n if self.args.debug:\n coloredlogs.install(level=logging.DEBUG)\n\n # Keeping thread working\n reactor.run()", "def start_requests(self):\n url = self.start_urls[0]\n yield scrapy.Request(url=url, callback=self.parse)", "def run(self):\n print \"CAlled run in querythread\"\n #global config.mqchannel\n self.qstatus = \"Running\"\n start = time.clock()\n \n self.crawl_async_result = crawl.apply_async(args=[self.start_url, self.max_depth, self.parser], serializer=\"json\")\n while not self.crawl_async_result.ready():\n time.sleep(0)\n \n # self.crawl_async_result is a list of { URLs, links, htmls } to be parsed\n \n self.crawlstatus = \"Done\"\n self.elapsed = (time.clock() - start)\n print \"Crawl Done\"\n print json.dumps(self.crawl_async_result.result, indent=4)\n \n self.__insert_into_db(self.crawl_async_result.result)\n content = json.dumps({\"query_id\":self.qid, \"message\":\"done\", \"dbkey\":str(self.dbkey), \"time\":self.elapsed});\n config.mqchannel.basic_publish(exchange=config.get(\"MQEXCHANGE\"), routing_key='', body=content)", "def __init__(self, config, processors):\n source = HackernewsStories()\n source.configure(config)\n\n super(HackernewsCrawlJob, self).__init__(source, processors)", "def from_crawler(cls, crawler, *args, **kwargs):\n pipeline = cls()\n crawler.signals.connect(pipeline.handle_list_of_items, signal=signals.spider_idle)\n return pipeline", "def start_requests(self):\n\n with open(os.path.join(os.path.dirname(__file__), \"../resources/mapemall_categories.csv\")) as categories:\n for category in csv.DictReader(categories):\n category_text=category[\"category\"]\n url=str(MapemallCrawlerSpider.start_urls[0])+category_text\n # The meta is used to send our search text into the parser as metadata\n yield scrapy.Request(url, callback = self.parse, meta = {\"category_text\": category_text})", "def iter_spider_classes(module):\n ...", "def main(call_args):\n\n args = parse_args(call_args)\n init_logging(args.loglevel)\n\n _log.info(\"'scrape-jobs' called with args: %s\", args)\n runner.run_with_config_file(args.site, args.config_file)", "def next_spider(self, 
spider):\n for spidercls, urls, isdata in self.nextnodes:\n if self.spider_name(spider) == self.spider_name(spidercls):\n if isinstance(spidercls, (str, unicode,)):\n if '.' in spidercls:\n modulestr, classtr = spidercls.rsplit('.', 1)\n else:\n modulestr, classtr = self.__class__.__module__, spidercls\n spidercls = getattr(__import__(modulestr),classtr)\n return spidercls, urls, isdata\n \n raise ValueError(\"The spider of {0} not in nextnodes of spidernode {1}\".format(\n self.spider_name(spider), self.__class__.__name__))", "def __init__(self):\n self.gdc = GdocsCrawler()", "def crawler(self):\n\n\t\tfor page in range(self.first_page, self.last_page+1):\n\t\t\tprint(\"\\nCrawling Page \" + str(page))\n\t\t\tpage_url = self.site_url + \"?page=\" + str(page) +\\\n\t\t\t \"&index=prod_all_products_term_optimization\"\n\t\t\t\n\t\t\tself.scrape_features(page_url)", "def _genspider(self, module, name, domain, template_name, template_file):\n tvars = {\n 'project_name': settings.get('BOT_NAME'),\n 'ProjectName': string_camelcase(settings.get('BOT_NAME')),\n 'module': module,\n 'name': name,\n 'domain': domain,\n 'classname': '%sSpider' % ''.join([s.capitalize() \\\n for s in module.split('_')])\n }\n\n spiders_module = __import__(settings['NEWSPIDER_MODULE'], {}, {}, [''])\n spiders_dir = abspath(dirname(spiders_module.__file__))\n spider_file = \"%s.py\" % join(spiders_dir, module)\n\n shutil.copyfile(template_file, spider_file)\n render_templatefile(spider_file, **tvars)\n print \"Created spider %r using template %r in module:\" % (name, \\\n template_name)\n print \" %s.%s\" % (spiders_module.__name__, module)", "def genspider(ctx, name, domain):\n spider_filename = _gen_spider(name, domain)\n print('Created {0}'.format(spider_filename))\n\n test_filename = _gen_tests(name, domain)\n print('Created {0}'.format(test_filename))", "def run(self):\n if self.is_full():\n return\n for crawler in self.crawlers:\n logger.info(f'crawler {crawler} to get proxy')\n proxies = crawler.run()\n if proxies:\n for proxy in proxies:\n self.redis.add(proxy)\n logger.info(f'crawled {len(proxies)} proxies from {crawler}')\n else:\n logger.info(f'cannot crawl proxies from {crawler}')", "def open_spider(self, spider):\n now = spider.get_now_time()\n\n # Create initial batch\n spider.batch = model.Batch.objects.create(\n kickoff_time=now, finish_time=now)\n spider.batch.save()\n\n # save initial site list\n file_content = ContentFile('\\n'.join(spider.start_urls))\n filename = str(spider.batch).replace(' ', '')\n spider.batch.sitelist.save(filename, file_content)\n spider.batch.sitelist.close()\n spider.batch.save()\n\n spider.batch_user_agents = []\n\n # Give the spider a set of batch user agents, which preserve historical\n # user agent data\n for ua in list(model.UserAgent.objects.all()):\n batch_user_agent = model.BatchUserAgent.objects.create(\n batch=spider.batch,\n ua_string=ua.ua_string,\n primary_ua=ua.primary_ua,\n ua_type=ua.ua_type,\n ua_human_name=ua.ua_human_name\n )\n spider.batch_user_agents.append(batch_user_agent)\n\n if not spider.batch_user_agents:\n raise ValueError(\n \"No user agents; add some with 'manage.py useragents --add'\")", "def distribute(self, page):\n self.crawl_page(self.keyword, self.since, self.to, page)", "def __init__(self, thread_id, depth, spider_config, url, pattern):\n\n super(CrawlUrl, self).__init__()\n\n self.thread_id = thread_id\n self.depth = depth\n self.spider_config = spider_config\n self.url = url\n self.pattern = pattern\n self.page = ''", "def 
crawler(self):\n\n def decorator(f: Callable) -> Callable:\n self.register_crawler(f)\n return f\n\n return decorator", "def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url, callback=self.parse)", "def start_requests(self):\n self.spider = Base_Spider(LpCfg)\n self.first_url = 'https://www.liepin.com/zhaopin/' \\\n '?industries=&dqs=010&salary=15%2440' \\\n '&jobKind=2&pubTime=3&compkind=&compscale=' \\\n '&industryType=&searchType=1&clean_condition=' \\\n '&isAnalysis=&init=1&sortFlag=15&flushckid=1' \\\n '&fromSearchBtn=2&headckid=0b5a9690a5cb1d82&key=Python'\n urls = []\n s = self.spider.get_content(self.first_url)\n self.cookies = self.spider.session.cookies.get_dict()\n del s\n self.spider.headers.update({'Cookie': self.cookies})\n for page in range(1,5):\n url = self.first_url + '&curPage=%d'%page\n urls.append(url)\n for url in urls:\n print url\n yield scrapy.Request(url=url,\n callback=self.parse,\n headers=self.spider.headers,\n cookies=self.cookies\n )", "def scrape_main() -> None:\n\n logger.info(\"Starting scrape\")\n search_info = construct_scrape_regex_patterns(grab_scrape_info())\n links = run_scrape(\n url=search_info['url'],\n seasons_regex=search_info['seasons'],\n episodes_regex=search_info['episodes']\n )\n if links:\n logger.debug(\"Writing urls to file\")\n with open('urls.txt', 'w') as f:\n for link in links:\n f.write(link + '\\n')\n else:\n logger.warning(\"No links available\")", "def run(self):\n\n for url in self.urls:\n try:\n # Use requests to retrieve web page data\n print(url)\n response = session.get(url, ) # allow_redirects=True)\n\n if response.status_code != 200:\n print('Failed to retrieve page, URL: {0}, error: {1}\\n'.format(url, response.status_code))\n return\n\n # Get web page data from HTML response\n content = get_json_data(response.text)\n\n # Compile data into dictionary to be used for reporting\n summary_data = generate_report(content)\n\n # Generate/print report\n print_report(summary_data)\n\n except Exception as error:\n print('Scraper failed to run for URL {0}, error: {1}, {2}\\n'.format(\n url, type(error).__name__, error\n ))\n\n # time.sleep(1) # for load concerns", "def parse(self, response):\n for cate_selector in response.css(\"div.sub-nav-cont ul li a\"):\n link = cate_selector.css(\"::attr(href)\").extract_first()\n if response.urljoin(link) in self.start_urls:\n continue\n cate_name = cate_selector.css(\"::attr(c-bname)\").extract_first()\n meta = {\"cate_name\": cate_name, \"page_num\": 1,\n \"base_url\": link.rsplit(\".\", 1)[0]}\n yield response.follow(link, callback=self.parse_category, meta=meta)", "def start_requests(self):\r\n try:\r\n\r\n for url in self.start_urls:\r\n yield scrapy.Request(url,\r\n callback=self.navigate_to)\r\n except Exception as err:\r\n logger.error(f'TekDefenceScraper : start_requests : {err}')\r\n raise err", "def start_requests(self):\n # Load sitemap JSON - generate queues\n if self.args.sitemap_json is None:\n yield Request('https://repo1.maven.org/maven2/', callback=self.spider.parse_page, meta=dict())\n return\n\n for req in self.gen_links(self.args.sitemap_json):\n yield req", "def crawlerAdd(name):\n sclogic.crawlerAdd(name)", "def __init__(self):\n self.redis = RedisClient()\n self.crawlers = [crawler_cls() for crawler_cls in crawlers_cls]", "def test_with_progress_start_requests_all_visited(mock_spider_state_cls, mock_site_pager_cls, _):\n data = CategoryBasedSpiderData(\"some-progress-file-dir\", \"some-spider-name\", \"http://some-recipes.com\")\n 
mock_request_factory = Mock()\n\n mock_current_category_parent = Mock()\n mock_current_category_parent.parent = None\n mock_current_category_parent.children = [Mock(), Mock()]\n mock_current_category_parent.children[0].visit_state = VisitState.VISITED\n mock_current_category_parent.children[1].visit_state = VisitState.VISITED\n mock_current_category_node = Mock()\n mock_current_category_node.parent = mock_current_category_parent\n\n mock_spider_state_instance = __prepare_mock_spider_instance(True)\n mock_spider_state_instance.site_structure.get_node_at_path.return_value = mock_current_category_node\n mock_spider_state_cls.return_value = mock_spider_state_instance\n\n mock_site_pager_cls_instance = Mock()\n mock_site_pager_cls.return_value = mock_site_pager_cls_instance\n\n spider = CategoryBasedSpider(Mock(), Mock(), data)\n spider.request_factory = mock_request_factory\n list(spider.start_requests())\n mock_site_pager_cls_instance.start.assert_called()\n\n site_page_callbacks = mock_site_pager_cls.call_args[0][3]\n site_page_callbacks.on_page_finished(\"http://some-recipes.com/next-page\")\n site_page_callbacks.on_paging_finished()\n mock_spider_state_instance.save.assert_called()\n mock_current_category_parent.set_visit_state.assert_called()", "def main(state=None, overwrite=False):\n \n # Initialize process\n process = initialize_process()\n\n if overwrite:\n # TODO: delete all (and only) files to be regenerated\n pass\n\n # Add a spider instance for each state to be run\n if state:\n add_state_to_process(state, process=process)\n else:\n for s in list(CONFIG.keys()):\n add_state_to_process(s, process=process)\n\n # Run scrape\n process.start()", "def crawl(self):\n retrievedSubs = []\n reddit = praw.Reddit(\n client_id='QRl_4bwjckcg9A',\n client_secret='dsavqFoOk5NgWEOWtMf9NknwxRIoIw',\n password='P@ssword123',\n user_agent='cluelessv1',\n username='theclueless1009'\n )\n submissions = reddit.subreddit('all').search(self.keyword, sort='relevance', limit=50, time_filter='week')\n\n for sub in submissions:\n self.data = [sub.selftext, sub.upvote_ratio, sub.score,\n sub.title, sub.id, sub.total_awards_received, sub.created_utc]\n self.data = tuple(self.data)\n retrievedSubs.append(self.data)\n\n return retrievedSubs", "def add_crawler(self, crawler: BaseCrawler) -> None:\n self.tasks.append(crawler)", "def crawl(url):\n\n if not url in visited_urls:\n try:\n r = sess.get(url, headers={'User-Agent': user_agent}, stream=True)\n visited_urls.add(url)\n # Don't download non html files\n if r.headers['content-type'].startswith(\"text/html\"):\n # TODO: Use console logger\n print url, datetime.datetime.now()\n links = extract_links(r.text.encode(\"utf-8\"))\n update_queue(url, links)\n return r.text.encode('utf-8', 'ignore')\n # what if url is email address\n except requests.exceptions.MissingSchema, e:\n print(e)\n return \"\"\n except requests.ConnectionError, e:\n # Any requests exception, log and don't quit.\n print(e)\n logger.error(e)", "def scrape_pdfs(db):\n process = CrawlerProcess()\n process.crawl(PdfSpider, db=db)\n process.start()", "def work(self):\n while True:\n url, depth = self.crawl_queue.get(timeout=self.crawl_queue_time_out)\n self.crawl_queue.task_done()\n try:\n if depth <= self.depth_limit:\n with Spider.seen_urls_lock:\n seen_already = url in self.seen_urls\n if not seen_already:\n page, links = self.crawl_page(url, depth, self.domain_name)\n self._add_links_to_crawl_queue(links, depth)\n self._add_page_to_rank_queue(page)\n self._add_page_to_storage(page)\n with 
Spider.seen_urls_lock:\n self.seen_urls.add(url)\n except Exception as e:\n self.logger.debug(e)", "def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n scrape_url(parsed_args.url)", "async def crawl(self):\n fetch_urls = [self.start_url]\n results = []\n while len(fetch_urls):\n \"\"\"\n slicing array urls with max_async_call arg and then run extract_data_urls\n extract_data_urls return a object that contains url, data, found_urls, and all_urls\n url is a url that we crawled\n data is Html content of the url\n found_urls are new urls that we have to crawl that\n all_urls are all links in the html page\n \"\"\"\n urls = await self.extract_data_urls(fetch_urls[0:self.max_async_call])\n del fetch_urls[0:self.max_async_call]\n for url, data, found_urls, all_urls in urls:\n fetch_urls.extend(found_urls)\n result = self.parse_html_content(data)\n result['urls'] = all_urls\n results.append((url, result))\n return results", "def daily_task():\n global CATEGORIES_PAGES\n global BROWSER\n global DATE\n global OBSERVATION\n log.info('Scraper started')\n # Refresh date\n DATE = str(datetime.date.today())\n OBSERVATION = 0\n # Initiate headless web browser\n log.info('Initializing browser')\n BROWSER = webdriver.Chrome(executable_path=PROJECT_PATH + \"/bin/chromedriver\",\n options=OPTIONS)\n # Download topsite and get categories directories\n base_file_name = \"All_cat_\" + DATE + \".html\"\n fetch_html(BASE_URL, base_file_name, PATH_HTML, attempts_limit=1000)\n html_file = open(PATH_HTML + base_file_name).read()\n CATEGORIES_PAGES = get_category_list(html_file)\n log.info('Found ' + str(len(CATEGORIES_PAGES)) + ' categories')\n # Read each categories pages and scrape for data\n for cat in track(CATEGORIES_PAGES,\n description = \"[green]Scraping...\",\n total = len(CATEGORIES_PAGES)):\n cat_file = \"cat_\" + cat['name'] + \"_\" + DATE + \".html\"\n download = fetch_html(cat['directlink'], cat_file, PATH_HTML)\n if download:\n scrap_data(cat)\n # Close browser\n BROWSER.close()\n BROWSER.service.process.send_signal(signal.SIGTERM)\n BROWSER.quit()", "def schedule(args):\n job_args = [(x[0], x[1]) for x in (y.split(\"=\", 1) for y in args.arg)]\n _projects = lib.get_projects(\n args.target, args.project, username=args.username, password=args.password\n )\n for project in _projects:\n _spiders = lib.get_spiders(\n args.target,\n project,\n args.spider,\n username=args.username,\n password=args.password,\n )\n for spider in _spiders:\n job_id = lib.schedule(\n args.target,\n project,\n spider,\n job_args,\n username=args.username,\n password=args.password,\n )\n print(f\"{project} / {spider} => {job_id}\")" ]
[ "0.73961866", "0.6965943", "0.6719651", "0.6231977", "0.6110582", "0.60992587", "0.6060535", "0.6008926", "0.5995375", "0.5984809", "0.5936894", "0.59176165", "0.59090364", "0.5877148", "0.58751583", "0.5838686", "0.5806472", "0.57944125", "0.57699037", "0.57601506", "0.5735935", "0.57135326", "0.57116354", "0.5597671", "0.5583405", "0.5578234", "0.5562777", "0.5553221", "0.55443513", "0.5541015", "0.55283964", "0.55125195", "0.5489356", "0.54739016", "0.5466806", "0.5466082", "0.54315734", "0.54226536", "0.5407359", "0.540429", "0.5357472", "0.53524363", "0.5339555", "0.5333631", "0.531721", "0.5316352", "0.52996457", "0.528219", "0.527444", "0.5250569", "0.52137125", "0.52099156", "0.520455", "0.5203667", "0.51851654", "0.5180572", "0.517663", "0.51686335", "0.515485", "0.51335174", "0.51299375", "0.51291525", "0.5125681", "0.5117038", "0.509505", "0.50915104", "0.50797415", "0.5079427", "0.5056364", "0.50474745", "0.503386", "0.5026094", "0.50149435", "0.49695602", "0.49643198", "0.49618998", "0.49577165", "0.49514976", "0.4933116", "0.49280772", "0.4924991", "0.49241325", "0.491499", "0.4904311", "0.48882332", "0.48845848", "0.48770577", "0.4876546", "0.4869455", "0.48608375", "0.48413327", "0.48407328", "0.48400724", "0.48324504", "0.48315194", "0.4824769", "0.48243928", "0.48222598", "0.4819525", "0.48167318" ]
0.70772463
1
Launch crawl job for JobSpider class
def crawl(spiders_classes, connector, debug=False, spider_error_callback=stdout_error_callback): if debug: dispatcher.connect(spider_error_callback, signals.spider_error) process = CrawlerProcess({ 'ITEM_PIPELINES': { 'pyjobs_crawlers.pipelines.RecordJobPipeline': 1, }, 'connector': connector, 'LOG_ENABLED': False }) for spider_class in spiders_classes: process.crawl(spider_class) spiders = [] for crawler in list(process.crawlers): spiders.append(crawler.spider) process.start() return spiders
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n # get input params\n args = parm_parser.get_args()\n # init log config\n log.init_log('./log/mini_spider')\n if args:\n # read config file spider.conf\n conf_params = parm_parser.set_config_by_file(args.conf)\n # use config set up spider initial params\n spider = SpiderWorker(conf_params)\n # init result_path, make it complete\n spider.set_path()\n # init url queue\n spider.set_url_queue()\n # start to crawl url\n spider.start_crawl_work()\n\n return", "def crawl(spider: str, book_id: int):\n proc = CrawlerProcess(get_project_settings())\n\n proc.crawl(spider, book_id=book_id)\n\n proc.start()", "def runSpider(spiderClass):\n\n\tprocess = CrawlerProcess({\n\t\t# 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n\t})\n\n\tprocess.crawl(spiderClass)\n\tprocess.start() # the script will block here until the crawling is finished", "def main():\n\n from scrapy.crawler import CrawlerProcess\n from scrapy.utils.project import get_project_settings\n\n process = CrawlerProcess(get_project_settings())\n process.crawl(NCBIGeoSpider)\n process.start()", "def parse(self, response):\n page_jobs=[]\n\n # Calling abstarct method get_jobs_list() and iterating...\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstarct method get_job_dict()\n job_dict=self.get_job_dict(div)\n\n if not job_dict['url'] or not job_dict['title'] :\n # At least url, title data is loaded from the list of job posting ...\n raise ValueError( \"Could not find valid job information ('url' and 'title') in data:\\n\" + \n str(div.get()) + \"\\nScraped infos:\\n\" + str(job_dict) + \"\\nReport this issue on github!\" )\n \n # Store source as the name of the spider aka website\n job_dict['source']=self.name\n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - it's a new job (not in database)\n - load_full_jobs=Yes\n - the method parse_full_job_page() has been re-wrote by the Scraper subclass\n \"\"\"\n if ( (not self.db or self.db.find_job(job_dict)==None)\n and self.load_full_jobs ):\n if type(self).parse_full_job_page != Scraper.parse_full_job_page:\n # load_full_jobs=Yes and it's supported by scraper\n # Call parse_full_job_page() with job URL\n\n # Handle SeleniumRequest if use_selenium=True\n if self.use_selenium:\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict))\n else:\n yield Job(job_dict)\n else:\n yield Job(job_dict)\n\n \"\"\" Just printing in one line \"\"\"\n if self.load_full_jobs:\n if type(self).parse_full_job_page == Scraper.parse_full_job_page:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True and load_all_new_pages=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True, some informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. 
load_all_new_pages=False and load_full_jobs=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url))\n else:\n self.log.info(\"Scraped {} jobs from {}. load_full_jobs=False, some informations might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n If all page jobs are new and \n The method get_next_page_url() has been re-wrote by the Scraper subclass\n Scrape next page\n \"\"\"\n if self.load_all_new_pages==True:\n if self.db and any( [self.db.find_job(job_dict)!=None for job_dict in page_jobs] ):\n # All new job postings loaded\n pass\n else:\n if self.get_next_page_url(response)!=None :\n # Loading next page...\n if self.use_selenium:\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(\n url=self.get_next_page_url(response),\n callback=self.parse)\n else:\n if type(self).get_next_page_url != Scraper.get_next_page_url:\n # Last page loaded\n pass\n else:\n self.log.info(\"Scraper {} does not support load_all_new_pages=True, some new job postings might be missing\".format(self.name))", "def start(self):\n self.start_spider()\n self.start_ranker()\n\n concurrent.futures.wait(self.spider_thread_futures) # wait for spiders to finish\n self.logger.info(\"Done crawling\")\n self.ranker.done_crawling.set()\n\n self.ranker.print_ranks()", "def WebscraperTask(job_id):\n logger.debug(\"Entering dispatch_wordscraper with job ID: %s\" % str(job_id))\n\n # Obtain all required crawler arguments\n job = Job.objects.get(pk=job_id)\n word_list = [(term.term_id, term.term)\n for term in Term.objects.filter(job=job)]\n site_list = [(site.site_id, site.url, job.max_depth, 'English')\n for site in Site.objects.filter(job=job)]\n\n # Initialize the crawling process\n logger.debug(\"Configuring crawl controller with job_id: \" +\n \"%d, terms: %s, sites: %s\" % (job_id, str(word_list),\n str(site_list),))\n controller = init_controller()\n controller.crawl(job_id, site_list, word_list)\n logger.debug(\"Crawl complete for job: %d\" % job_id)\n\n # Cache the results in the database so the results are dated as close to the\n # crawl date as possible.\n results = controller.query(site_list, word_list)\n result_cache = Result()\n result_cache.job = job\n result_cache.output = results\n result_cache.save()", "def __init__(self, config, processors):\n source = HackernewsStories()\n source.configure(config)\n\n super(HackernewsCrawlJob, self).__init__(source, processors)", "def crawler():\n job_entries = []\n for job in job_info(URL):\n labels = \"\"\n if job[\"labels\"]:\n for label in job[\"labels\"]:\n labels += label[\"name\"]\n if job[\"labels\"].index(label) != len(job[\"labels\"]) - 1:\n labels += \",\"\n job_entries.append((job[\"number\"], job[\"id\"],\n job[\"title\"], job[\"html_url\"], labels))\n\n conn = sqlite3.connect('jobber/jobber.db')\n c = conn.cursor()\n c.executemany(('INSERT OR IGNORE INTO job_entries '\n 'VALUES (?,?,?,?,?)'), job_entries)\n conn.commit()\n conn.close()", "def main(call_args):\n\n args = parse_args(call_args)\n init_logging(args.loglevel)\n\n _log.info(\"'scrape-jobs' called with args: %s\", args)\n runner.run_with_config_file(args.site, args.config_file)", "def kickoff(self):\n settings = Settings()\n\n # settings.set(\"USER_AGENT\", \"Test\")\n settings.set('JOBDIR', self.args.data_dir)\n self.spider = MavenDataSpider()\n\n # Wrap with crawler, configure\n crawler = Crawler(self.spider, 
settings)\n crawler.signals.connect(spider_closing, signal=signals.spider_closed)\n\n logger.info('Starting crawler')\n crawler.crawl(self.spider, app=self, dbsess=self.session)\n\n self.spider = crawler.spider\n self.spider.link_queue_mode = False\n if self.args.debug:\n coloredlogs.install(level=logging.DEBUG)\n\n # Keeping thread working\n reactor.run()", "def run_crawl(self, args):\n\n # Crawler process declaration\n process = CrawlerProcess({\n 'USER_AGENT': 'David Avs Crawler',\n 'FEED_FORMAT': 'json',\n 'FEED_URI': 'stdout:',\n 'LOG_ENABLED': args.verbose,\n })\n kwargs = dict(\n start_urls=[args.url],\n allowed_domains=[urlparse.urlparse(args.url).netloc.lower()],\n )\n\n # Run crawling\n old_stdout = sys.stdout\n sys.stdout = args.file\n try:\n process.crawl(DavidAvsSpider, **kwargs)\n process.start()\n finally:\n sys.stdout = old_stdout", "def run_crawler(self) -> List[JobEventSchema]:\n print(f\"Ready for scraping, current task: {self.tasks}\")\n\n crawling_result = []\n for task in self.tasks:\n result = task.run()\n crawling_result.extend(result)\n return crawling_result", "def run_spider_on_zyte(spider_name):\n print(f'Running spider {spider_name}...')\n data = dict(project=project_id, spider=spider_name)\n response = requests.post('https://app.scrapinghub.com/api/run.json', data=data, auth=(api_key, ''))\n return response.json()['jobid']", "def open_spider(self,Spider):\n pass", "def runSpider(spider, searchterm = None, fullink = None, spiderbotid = -1):\n sclogic.runSpider(spider, searchterm, fullink, spiderbotid)", "def parse(self, response):\n content = response.body\n if not content:\n return\n sel = Selector(response)\n #print sel.xpath('//table[@class=\"board-list tiz\"]/tr').extract()\n for job in sel.xpath('//ul[@class=\"sojob-list\"]/li'):\n #print 'd',job\n info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"job-info\"]')\n com_info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"company-info nohover\"]')\n title = info.xpath('h3/a/text()').extract_first().lower()\n if title.find('python') != -1:\n url = info.xpath('h3/a/@href').extract_first()\n request = scrapy.Request(url=url,\n callback=self.parse_items,\n headers=self.spider.headers,\n cookies=self.cookies)\n company_item, job_item = CompanyItem(), JobItem()\n company_item['name'] = com_info.xpath('p[@class=\"company-name\"]/a/text()').extract_first()\n company_item['homepage'] = com_info.xpath('p[@class=\"company-name\"]/a/@href').extract_first()\n job_item['pub_time'] = info.xpath('p[@class=\"time-info clearfix\"]/time/text()').extract_first()\n year = str(date.today().year)\n if str(year) not in job_item['pub_time']:\n if job_item['pub_time'] == u'昨天':\n job_item['pub_time'] = (date.today()-timedelta(days=1)).strftime(\"%Y-%m-%d\")\n elif job_item['pub_time'] == u'前天':\n job_item['pub_time'] = (date.today() - timedelta(days=2)).strftime(\"%Y-%m-%d\")\n else:\n job_item['pub_time'] = date.today().strftime(\"%Y-%m-%d\")\n job_item['title'] = title\n job_item['welfare'] = ' '.join(com_info.xpath('p[@class=\"temptation clearfix\"]/span/text()').extract())\n job_item['salary'] = info.xpath('p[@class=\"condition clearfix\"]/span[@class=\"text-warning\"]/text()').extract_first()\n request.meta['company_item'] = company_item\n request.meta['job_item'] = job_item\n yield request", "def crawl(self):\n\n # create helper process and setup IPC\n self.socket.listen(1)\n help_out_fd = open(self.helper_outfile, \"w\")\n with subprocess.Popen(\"./crawl_helper.py\", stdout=help_out_fd, 
stderr=subprocess.STDOUT) as proc:\n self.helper_pid = proc.pid\n try:\n conn, _ = self.socket.accept()\n # create initial params for crawler helper and send them\n new_urls = set()\n setup_params = {\"start_urls\": self.start_urls, \"allowed_domains\": [self.domain],\n \"cookies\": self.cookies, \"user_agent\": self.config[\"user_agent\"]}\n ipc_operations.send_object(conn, setup_params)\n\n # loop: receive a response object, then send new URLs to crawl. Catch & handle problems.\n while True:\n try:\n proc.wait(timeout=0.001)\n break\n except subprocess.TimeoutExpired:\n response = ipc_operations.receive_object(conn)\n if not response: # socket is dead / closed\n break\n new_urls = self.process_response(response)\n ipc_operations.send_object(conn, new_urls)\n except socket.timeout:\n util.printit(\"Unix socket connection to scrapy crawler unexpectedly broke. \" +\n \"Quitting crawling of %s\" % self.base_url, color=util.RED)\n break\n finally:\n # ensure connection is closed and helper process killed in any case\n conn.close()\n proc.kill()\n\n # after the actual crawling, extract all the gathered cookies from Selenium\n if self.config[\"use_selenium\"].lower() == \"true\":\n selenium_cookies = self.driver.get_cookies()\n for cookie in selenium_cookies:\n if not any(cookie[\"name\"] == c[\"name\"] and cookie[\"path\"] == c[\"path\"] and\n cookie[\"domain\"] == c[\"domain\"] for c in self.found_cookies):\n parsed_cookie = {}\n for key in (\"name\", \"path\", \"domain\", \"httpOnly\", \"secure\"):\n parsed_cookie[key] = cookie[key]\n self.found_cookies.append(parsed_cookie)\n\n help_out_fd.close()\n return self.create_results()", "def scrape(self):\n\n self.jobs_load()\n self.new_jobs = []\n\n for bot in self.bot_squad:\n self.new_jobs += bot.scrape_all_pages()\n\n self.statistics(self.new_jobs)\n print('SCRAPE COMPLETE. NOTE: Resulting job list still in RAM')\n print('We observed %d new jobs' % len(self.new_jobs))", "def run(self):\n \n try:\n logging.info('Thread:{} starting'.format(self.thread_id))\n\n self.crawl_url()\n self.parse_html()\n except IOError as e:\n self.thread_post_processing()\n logging.error('CrawlUrlError url:{} msg:{}'.format(self.url, e))\n\n self.thread_post_processing()", "def run_generic_spider( user_id\t\t\t\t= None,\n\t\t\t\t\t\tspider_id\t\t\t= None,\n\t\t\t\t\t\tdatamodel\t\t\t= None,\n\t\t\t\t\t\trun_spider_config\t= None,\n\t\t\t\t\t\ttest_limit\t\t\t= None\n\t\t\t\t\t\t):\n\n\tprint ()\n\tlog_scrap.info(\"--- run_generic_spider / spider_id : %s \", spider_id )\n\n\t### WARNING !!! --> TEMPORARY SOLUTION\n\t### remove spider folder for spider_id in JOBDIR\n\tlog_scrap.debug(u\"--- run_generic_spider / cwd : %s\", os.getcwd() )\n\ttry :\n\t\tshutil.rmtree( os.getcwd() + \"/\" + JOBDIR_FOLDER + \"/\" + spider_id )\n\texcept:\n\t\tpass\n\tlog_scrap.debug(u\"--- run_generic_spider / removed folder : {}/{}\".format(JOBDIR_FOLDER, spider_id) )\n\n\t# !!! 
spider is launched from main.py level !!!\n\t# all relative routes referring to this...\n\tlog_scrap.info(\"--- run_generic_spider / os.getcwd() : %s \", os.getcwd() )\n\n\t### flattening run_spider_config : from nested to flat dict\n\tlog_scrap.info(\"--- run_generic_spider / 'flattenSpiderConfig()' on 'run_spider_config' --> 'spider_config_flat' ...\" )\n\tspider_config_flat = flattenSpiderConfig( run_spider_config )\n\n\n\t### settings for crawler\n\t# cf : https://hackernoon.com/how-to-crawl-the-web-politely-with-scrapy-15fbe489573d\n\n\t### global settings for scrapy processes (see upper)\n\tlog_scrap.info(\"--- run_generic_spider / BOT_NAME : %s \", settings.get('BOT_NAME') )\n\tlog_scrap.info(\"--- run_generic_spider / USER_AGENT : %s \", settings.get('USER_AGENT') )\n\tlog_scrap.info(\"--- run_generic_spider / ITEM_PIPELINES : %s \", settings.get('ITEM_PIPELINES').__dict__ )\n\n\n\t# specific settings for this scrapy process\n\n\t# settings.set( \"RETRY_TIMES\"\t\t\t\t\t\t, RETRY_TIMES )\n\t# settings.set( \"CONCURRENT_ITEMS\"\t\t\t\t, CONCURRENT_ITEMS )\n\t# settings.set( \"CONCURRENT_REQUESTS\"\t\t\t\t, CONCURRENT_REQUESTS )\n\t# settings.set( \"CONCURRENT_REQUESTS_PER_DOMAIN\"\t, CONCURRENT_REQUESTS_PER_DOMAIN )\n\t# settings.set( \"REDIRECT_MAX_TIMES\"\t\t\t\t, REDIRECT_MAX_TIMES )\n\t# settings.set( \"DOWNLOAD_MAXSIZE\" \t\t\t\t, DOWNLOAD_MAXSIZE )\n\t# settings.set( \"DEPTH_PRIORITY\"\t\t\t\t\t, DEPTH_PRIORITY )\n\t# settings.set( \"SCHEDULER_DISK_QUEUE\"\t\t\t, SCHEDULER_DISK_QUEUE )\n\t# settings.set( \"DEPTH_PRIORITY\"\t\t\t\t\t, SCHEDULER_MEMORY_QUEUE )\n\n\t# settings.set( \"RANDOMIZE_DOWNLOAD_DELAY\"\t\t, RANDOMIZE_DOWNLOAD_DELAY )\n\t# cf : https://doc.scrapy.org/en/latest/topics/jobs.html#job-directory\n\tsettings.set( \"JOBDIR\"\t\t\t\t\t\t\t, JOBDIR_FOLDER + \"/\" + spider_id )\n\n\t## https://scrapy.readthedocs.io/en/0.12/topics/extensions.html#module-scrapy.contrib.closespider\n\n\tsettings.set( \"CURRENT_SPIDER_ID\" \t\t\t, spider_id )\n\tsettings.set( \"RETRY_TIMES\"\t\t\t\t\t\t\t, spider_config_flat[\"RETRY_TIMES\"] )\n\tsettings.set( \"CLOSESPIDER_ITEMCOUNT\"\t\t, spider_config_flat[\"LIMIT_ITEMS\"] )\n\t# settings.set( \"CLOSESPIDER_PAGECOUNT\"\t\t, spider_config_flat[\"LIMIT_PAGES\"] )\n\tsettings.set( \"DOWNLOAD_DELAY\" \t\t\t\t\t, spider_config_flat[\"download_delay\"] )\n\tsettings.set( \"CONCURRENT_ITEMS\"\t\t\t\t, spider_config_flat[\"CONCURRENT_ITEMS\"] )\n\tsettings.set( \"CONCURRENT_REQUESTS\"\t\t\t, spider_config_flat[\"CONCURRENT_REQUESTS\"] )\n\t# settings.set( \"DOWNLOAD_DELAY\" \t\t\t\t, DOWNLOAD_DELAY )\n\n\tsettings.set( \"BOT_NAME\"\t\t\t\t\t\t\t\t\t, spider_config_flat[\"BOT_NAME\"] )\n\tsettings.set( \"USER_AGENT\"\t\t\t\t\t\t\t\t, spider_config_flat[\"USER_AGENT\"] )\n\tsettings.set( \"ROBOTSTXT_OBEY\"\t\t\t\t\t\t, spider_config_flat[\"ROBOTSTXT_OBEY\"] )\n\tsettings.set( \"AUTOTHROTTLE_ENABLED\"\t\t\t, spider_config_flat[\"AUTOTHROTTLE_ENABLED\"] )\n\tsettings.set( \"HTTPCACHE_ENABLED\"\t\t\t\t\t, spider_config_flat[\"HTTPCACHE_ENABLED\"] )\n\tsettings.set( \"RANDOMIZE_DOWNLOAD_DELAY\"\t, spider_config_flat[\"RANDOMIZE_DOWNLOAD_DELAY\"] )\n\n\t### initiating crawler process\n\tlog_scrap.info(\"--- run_generic_spider / instanciate process ...\" \t )\n\tprocess = CrawlerRunner( settings = settings )\n\n\t### adding CrawlerRunner as deferred\n\tdef f(q):\n\t\ttry:\n\t\t\t### send/create custom spider from run_spider_config\n\t\t\t### cf : 
https://stackoverflow.com/questions/35662146/dynamic-spider-generation-with-scrapy-subclass-init-error\n\n\t\t\tdeferred = process.crawl( \tGenericSpider,\n\t\t\t\t\t\t\t\t\t\t\tuser_id\t\t\t\t\t\t\t= user_id,\n\t\t\t\t\t\t\t\t\t\t\tdatamodel \t\t\t\t\t= datamodel ,\n\t\t\t\t\t\t\t\t\t\t\tspider_id \t\t\t\t\t= spider_id ,\n\t\t\t\t\t\t\t\t\t\t\tspider_config_flat\t= spider_config_flat,\n\t\t\t\t\t\t\t\t\t\t\ttest_limit\t\t\t\t\t= test_limit\n\t\t\t\t\t\t\t\t\t)\n\t\t\tdeferred.addBoth(lambda _: reactor.stop())\n\t\t\treactor.run()\n\t\t\tq.put(None)\n\t\texcept Exception as e:\n\t\t\tq.put(e)\n\n\t### putting task in queue and start\n\tq = Queue()\n\tp = Process(target=f, args=(q,))\n\tp.start()\n\tresult = q.get()\n\tp.join()\n\n\tif result is not None:\n\t\traise result\n\n\n\n\tprint (\"\\n\\n{}\\n\".format(\"> > > \"*20))", "def start_requests(self):\n url = self.start_urls[0]\n yield scrapy.Request(url=url, callback=self.parse)", "def run_spiders():\n hour_limit = 3600 # in seconds\n\n settings = get_project_settings()\n\n # Uncomment the following block of code if you want to test the manager on\n # the \"onepagetest\" to make sure the manager is working.\n \"\"\"\n settings[\"HTTPCACHE_ENABLED\"] = 1\n settings[\"HTTPCACHE_EXPIRATION_SECS\"] = 0\n settings[\"HTTPCACHE_STORAGE\"] = \"scrapy.extensions.httpcache.FilesystemCacheStorage\"\n settings[\"HTTPCACHE_IGNORE_MISSING\"] = 1\n settings[\"HTTPCACHE_DIR\"] = \"onepagetest\"\n \"\"\"\n\n runner = CrawlerRunner(settings)\n begin_time = datetime.now()\n\n d = runner.crawl(\"sb_spider\")\n d.addBoth(lambda _: continue_crawl(d, runner, begin_time, hour_limit))\n reactor.run()", "def runCrawler(crawlerid):\n sclogic.runCrawler(crawlerid)", "def start_crawling(crawler_instance: Crawler):\n print('start crawling!')\n logger.info('started a new crawl!')\n crawler_instance.crawl()\n threading.Timer(Config.CRAWL_TIMEOUT, start_crawling,\n [crawler_instance]).start()", "def parse(self, response):\n page_source = self.upwork_controller.get_source_home()\n\n # Hand-off between Selenium and Scrapy happens here\n sel = Selector(text=page_source)\n # Extract data\n sections = sel.xpath(\"//section/div\")\n\n for section in sections:\n selector = Selector(text=section.get())\n jobtitle = selector.xpath(\"//div/div/div/h4/a/text()\")\n jobdescription = selector.xpath(\"//div/div/div/div/div/div/div/span/span/text()\")\n hourlypay = selector.xpath(\"//div/div/div/div/small/span/strong/text()\")\n proposals = selector.xpath(\"//div/div/div/div/div/span/small/strong/text()\")\n country = selector.xpath(\"//div/div/div/div/small/span/span/span/span/strong[@class='text-muted client-location ng-binding']/text()\")\n\n job = Job(jobtitle=jobtitle.get(),\n jobdescription=jobdescription.get(),\n hourlypay=hourlypay.get(),\n proposals=proposals.get(),\n country=country.get())\n job.serialize()\n yield job.dict()", "def __init__(self,\n config,\n cache_manager=None,\n jobs=None,\n scraper_search=None,\n session=None,\n db_lock=None,\n cache_lock=None,\n start_page_pos=1,\n search_engine=None,\n search_type=None,\n proxy=None,\n progress_queue=None):\n\n self.config = config\n self.cache_manager = cache_manager\n\n jobs = jobs or {}\n self.search_engine_name = search_engine['engine'].lower()\n self.search_instance = search_engine\n\n assert self.search_engine_name, 'You need to specify an search_engine'\n\n if not search_type:\n self.search_type = self.config.get('search_type', 'normal')\n else:\n self.search_type = search_type\n\n self.jobs = jobs\n\n # the 
keywords that couldn't be scraped by this worker\n self.missed_keywords = set()\n # the number of keywords\n self.num_keywords = len(self.jobs)\n # The actual keyword that is to be scraped next\n self.query = ''\n # The default pages per keywords\n self.pages_per_keyword = [1, ]\n # The number that shows how many searches have been done by the worker\n self.search_number = 1\n # The parser that should be used to parse the search engine results\n self.parser = Parsing().get_parser_by_search_engine(\n self.search_engine_name\n )(config=self.config)\n # The number of results per page\n self.num_results_per_page = int(self.config.get('num_results_per_page', 10))\n\n # The page where to start scraping. By default the starting page is 1.\n if start_page_pos:\n self.start_page_pos = 1 if start_page_pos < 1 else start_page_pos\n else:\n self.start_page_pos = int(self.config.get('search_offset', 1))\n\n # The page where we are right now\n self.page_number = self.start_page_pos\n\n # Install the proxy if one was provided\n self.proxy = proxy\n if isinstance(proxy, Proxies().Proxy):\n self.set_proxy()\n self.requested_by = self.proxy.host + ':' + self.proxy.port\n else:\n self.requested_by = 'localhost'\n\n # the scraper_search object\n self.scraper_search = scraper_search\n # the scrape mode\n # to be set by subclasses\n self.scrape_method = ''\n # Whether the instance is ready to run\n self.startable = True\n # set the database lock\n self.db_lock = db_lock\n # init the cache lock\n self.cache_lock = cache_lock\n # a queue to put an element in whenever a new keyword is scraped.\n # to visualize the progress\n self.progress_queue = progress_queue\n # set the session\n self.session = session\n # the current request time\n self.requested_at = None\n # The name of the scraper\n self.name = '[{}]'.format(self.search_engine_name) + self.__class__.__name__\n\n # How long to sleep (in seconds) after every request\n self.sleeping_min = self.config.get('sleeping_min')\n self.sleeping_max = self.config.get('sleeping_max')\n\n # the default timeout\n self.timeout = 5\n # the status of the thread after finishing or failing\n self.status = 'successful'\n self.html = ''", "def open_spider(self, spider):\n pass", "def start_requests(self):\n # Load sitemap JSON - generate queues\n if self.args.sitemap_json is None:\n yield Request('https://repo1.maven.org/maven2/', callback=self.spider.parse_page, meta=dict())\n return\n\n for req in self.gen_links(self.args.sitemap_json):\n yield req", "def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url, callback=self.parse)", "def schedule(args):\n job_args = [(x[0], x[1]) for x in (y.split(\"=\", 1) for y in args.arg)]\n _projects = lib.get_projects(\n args.target, args.project, username=args.username, password=args.password\n )\n for project in _projects:\n _spiders = lib.get_spiders(\n args.target,\n project,\n args.spider,\n username=args.username,\n password=args.password,\n )\n for spider in _spiders:\n job_id = lib.schedule(\n args.target,\n project,\n spider,\n job_args,\n username=args.username,\n password=args.password,\n )\n print(f\"{project} / {spider} => {job_id}\")", "def run(self):\n print \"CAlled run in querythread\"\n #global config.mqchannel\n self.qstatus = \"Running\"\n start = time.clock()\n \n self.crawl_async_result = crawl.apply_async(args=[self.start_url, self.max_depth, self.parser], serializer=\"json\")\n while not self.crawl_async_result.ready():\n time.sleep(0)\n \n # self.crawl_async_result is a list of { URLs, 
links, htmls } to be parsed\n \n self.crawlstatus = \"Done\"\n self.elapsed = (time.clock() - start)\n print \"Crawl Done\"\n print json.dumps(self.crawl_async_result.result, indent=4)\n \n self.__insert_into_db(self.crawl_async_result.result)\n content = json.dumps({\"query_id\":self.qid, \"message\":\"done\", \"dbkey\":str(self.dbkey), \"time\":self.elapsed});\n config.mqchannel.basic_publish(exchange=config.get(\"MQEXCHANGE\"), routing_key='', body=content)", "def start_job(self):\n # POST /jobs/{job_id}/results\n pass", "def start_crawlers(spider_name: str, rules: List[Rule]) -> None:\n runner = CrawlerRunner(settings)\n crawlers = runner.spider_loader.list()\n crawlers = [c for c in crawlers if c.__contains__(spider_name)]\n if crawlers:\n for rule in rules:\n runner.crawl(crawlers[0], rule=rule)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n launch_logger.debug('all finished.')\n else:\n launch_logger.warning('provide the right spider name.')", "def start_screenshot_job_if_needed(self):\n url2scrape = None\n if self.task_data.get('product_url', self.task_data.get('url', None)):\n url2scrape = self.task_data.get('product_url', self.task_data.get('url', None))\n # TODO: searchterm jobs? checkout scrapers?\n if url2scrape:\n # scrapy_path = \"/home/spiders/virtual_environment/bin/scrapy\"\n # python_path = \"/home/spiders/virtual_environment/bin/python\"\n output_path = self.get_output_path()\n cmd = ('cd {repo_base_path}/product-ranking'\n ' && scrapy crawl url2screenshot_products'\n ' -a product_url=\"{url2scrape}\" '\n ' -a width=1280 -a height=1024 -a timeout=90 '\n ' -s LOG_FILE=\"{log_file}\"'\n ' -o \"{output_file}\" &').format(\n repo_base_path=REPO_BASE_PATH,\n log_file=output_path+'.screenshot.log', url2scrape=url2scrape,\n output_file=output_path+'.screenshot.jl')\n logger.info('Starting a new parallel screenshot job: %s' % cmd)\n os.system(cmd) # use Popen instead?", "def crawl(self, url):\n return None", "def start_requests(self):\r\n try:\r\n\r\n for url in self.start_urls:\r\n yield scrapy.Request(url,\r\n callback=self.navigate_to)\r\n except Exception as err:\r\n logger.error(f'TekDefenceScraper : start_requests : {err}')\r\n raise err", "def start(self, job: PandaJob) -> None:\n raise NotImplementedError(\"Base method not implemented\")", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def open_spider(self, spider):\n now = spider.get_now_time()\n\n # Create initial batch\n spider.batch = model.Batch.objects.create(\n kickoff_time=now, finish_time=now)\n spider.batch.save()\n\n # save initial site list\n file_content = ContentFile('\\n'.join(spider.start_urls))\n filename = str(spider.batch).replace(' ', '')\n spider.batch.sitelist.save(filename, file_content)\n spider.batch.sitelist.close()\n spider.batch.save()\n\n spider.batch_user_agents = []\n\n # Give the spider a set of batch user agents, which preserve historical\n # user agent data\n for ua in list(model.UserAgent.objects.all()):\n batch_user_agent = model.BatchUserAgent.objects.create(\n batch=spider.batch,\n ua_string=ua.ua_string,\n primary_ua=ua.primary_ua,\n ua_type=ua.ua_type,\n ua_human_name=ua.ua_human_name\n )\n spider.batch_user_agents.append(batch_user_agent)\n\n if not spider.batch_user_agents:\n raise ValueError(\n \"No user agents; add some with 'manage.py useragents --add'\")", "def scrape_pipeline(args):\n kickoff = args.kickoff\n fname = 
args.fname\n d = DbHelper()\n s = Scraper()\n c = Crawler(20)\n if fname is not None:\n app_names = pd.read_csv(fname)['packageName'].tolist()\n apps = [list(a) for a in zip(app_names, d.app_names_to_uuids(app_names))]\n else:\n apps = None\n\n # start by updating top apps\n if not args.skip_top:\n logger.info(\"getting top apps...\")\n new_top_list = c.get_top_apps_list()\n logger.info(\"scraping top apps not in DB...\")\n s.scrape_missing(new_top_list, compare_top=True)\n logger.info(\"updating top apps...\")\n d.update_top_apps(new_top_list)\n\n if kickoff == True:\n s = None\n if fname is None:\n # use crawler to get list of package names\n logger.error(\"Crawler for package names not implemented yet\")\n return\n else:\n # use specified file of package names\n s = Scraper(input_file=fname)\n\n # use scraper\n logger.info(\"Starting efficient scrape...\")\n s.efficient_scrape()\n logger.info(\"...efficient scrape done\")\n else:\n # use updater\n logger.info(\"Starting updater...\")\n if fname is None:\n u = Updater()\n else:\n u = Updater(input_file=fname)\n u.update_apps()\n logger.info(\"...update done\")\n\n # crawl privacy policies\n c.crawl_app_privacy_policies(app_list=apps)\n\n if args.no_decompile:\n # download only\n logger.info(\"Starting download...\")\n downloader = Downloader()\n if apps is None:\n downloader.download_all_from_db(top=True)\n else:\n downloader.download(apps)\n logger.info(\"...done\")\n else:\n # download/decompile\n logger.info(\"Starting download and decompile...\")\n download_decompile_all()\n logger.info(\"...download and decompile done\")\n logger.info(\"run analysis pipeline now\")", "def job(name):\n current_app.logger.info(\"Running custom job: {}\".format(name))", "def start_requests(self):\n self.spider = Base_Spider(LpCfg)\n self.first_url = 'https://www.liepin.com/zhaopin/' \\\n '?industries=&dqs=010&salary=15%2440' \\\n '&jobKind=2&pubTime=3&compkind=&compscale=' \\\n '&industryType=&searchType=1&clean_condition=' \\\n '&isAnalysis=&init=1&sortFlag=15&flushckid=1' \\\n '&fromSearchBtn=2&headckid=0b5a9690a5cb1d82&key=Python'\n urls = []\n s = self.spider.get_content(self.first_url)\n self.cookies = self.spider.session.cookies.get_dict()\n del s\n self.spider.headers.update({'Cookie': self.cookies})\n for page in range(1,5):\n url = self.first_url + '&curPage=%d'%page\n urls.append(url)\n for url in urls:\n print url\n yield scrapy.Request(url=url,\n callback=self.parse,\n headers=self.spider.headers,\n cookies=self.cookies\n )", "def process_job():\n r = redis.StrictRedis()\n while True:\n curr_job = r.blpop('job_queue', 0)[1]\n r.hset('status', curr_job, 'processing')\n print('current job ID:', curr_job)\n # convert byte to string\n url = r.hget('urls', curr_job).decode(\"utf-8\")\n print('Current URL:', url)\n\n # if this url has not been requested before/is not in the db\n if Site.query.filter_by(url=url).first():\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n else:\n # fetches url page source\n try:\n html = str(get_html(url))\n print('Successfully retrieved HTML')\n # add results to database\n db.session.add(Site(url=url, html=html))\n db.session.commit()\n print('Added to database')\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n except ValueError:\n r.hset('status', curr_job, 'abort')\n print('Job', curr_job, 'Aborted')\n except TimeoutError:\n r.hset('status', curr_job, 'timeout')\n print('Job', curr_job, 'Timed Out')\n return", "def __process_job(self, job):\r\n\r\n 
job_a_tag = job.find_element_by_tag_name('a')\r\n job_href = job_a_tag.get_attribute('href')\r\n # Removing all extraneous indeed url query string parameters\r\n job_href = job_href.split('&from')[0] \r\n self.nav(job_href)", "def crawl(self, keyword, since=None, to=None):\n no_of_pages = self.get_no_of_pages(keyword, since, to)\n print('pages: ' + str(no_of_pages))\n self.keyword = keyword\n self.since = since\n self.to = to\n p = Pool()\n p.map(self.distribute, range(1, no_of_pages+1))", "def crawl(self):\n self.get('http://code.google.com/p/webscraping/')\n self.get('http://code.google.com/p/sitescraper/')\n QTimer.singleShot(5000, self.app.quit)", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def _start_scrapy_process(self):\n cmd = self._parse_task_and_get_cmd()\n self.process = Popen(cmd, shell=True, stdout=PIPE,\n stderr=PIPE, preexec_fn=os.setsid)\n if self.task_data.get('with_best_seller_ranking', False):\n logger.info('With best seller ranking')\n cmd = self._parse_task_and_get_cmd(True)\n self.process_bsr = Popen(cmd, shell=True, stdout=PIPE,\n stderr=PIPE, preexec_fn=os.setsid)\n else:\n logger.info('Skipping best seller')\n logger.info('Scrapy process started for task #%s',\n self.task_data.get('task_id', 0))", "def parse(self, response):\n\n\t\t### close spider if exception\n\t\tif 'Bandwidth exceeded' in response.body:\n\t\t\traise CloseSpider('bandwidth_exceeded')\n\n\t\tlog_scrap.debug(u\"\\n>>> NEW PARSING >>>\\n\" )\n\t\tlog_scrap.info(\"--- GenericSpider.parse ...\" )\n\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s\" , response)\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s \\n\" , response.__dict__.keys() )\n\n\t\t# for k, v in response.__dict__.iteritems() :\n\t\t# \tlog_scrap.info(\"\\n--- [k] {} : [v] {} : \".format(k,v))\n\t\t# print response._body\n\t\tstart_url = response.meta[\"start_url\"]\n\t\tlog_scrap.info(\"--- GenericSpider.parse / start_url : %s\", start_url )\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start request with API crawler\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t# if self.spider_config_flat[\"parse_api\"] == True :\n\t\tif self.parse_api == True :\n\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting request on API endpoint... \" )\n\t\t\tjsonresponse = json.loads(response.body_as_unicode())\n\t\t\t# log_scrap.info(\"--- GenericSpider.parse / jsonresponse : \\n%s\", jsonresponse )\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / jsonresponse received...\" )\n\n\t\t\traw_items_list = get_dictvalue_from_xpath(jsonresponse, self.item_xpath)\n\t\t\t# raw_items_list = jsonresponse[self.item_xpath]\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / raw_items_list[0] : \\n%s\\n...\", pformat(raw_items_list[0]) )\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - API\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. 
/ START LOOPING raw_items_list WITH API ...\" )\n\n\t\t\t\t# while self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - API - item n°{} >>> \\n\".format(self.item_count) )\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_api_rest=True, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - API\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - API - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t# follow_link_raw = raw_data[ self.follow_xpath ]\n\t\t\t\t\t\t\tfollow_link_raw = get_dictvalue_from_xpath(raw_data, self.follow_xpath)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link_raw),follow_link_raw) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\turl_follow = self.page_url\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link_raw, url_root=url_follow)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\tfollow_is_api = self.follow_is_api\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url, 'item_n' : self.item_count , 'parse_api' : follow_is_api })\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t# log_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - API\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (API) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\turl_next = \"\"\n\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\turl_next = self.api_pagination_root\n\t\t\t\t\telse :\n\t\t\t\t\t\turl_next = self.page_url\n\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\tnext_page = url_next + str(self.page_count)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} \".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} \".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with pure Scrapy requests\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telif self.spider_config_flat[\"parse_reactive\"] == False :\n\t\t# elif self.parse_reactive == False :\n \n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting requests with Scrapy... \" )\n\t\t\t# self.parse_scrapy(response)\n\n\t\t\t### find items list\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / self.item_xpath : %s\", self.item_xpath )\n\t\t\traw_items_list = response.xpath(self.item_xpath)\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / len(raw_items_list) : %d \", len(raw_items_list) )\n\n\n\t\t\t### - - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - SCRAPY\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. 
/ START LOOPING raw_items_list WITH SCRAPY ...\" )\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Scrapy - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t# print \">>> raw_data : \\n\", raw_data.extract()\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - SCRAPY\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SCRAPY - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\tfollow_link \t= raw_data.xpath( self.follow_xpath ).extract_first()\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t# log_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t# yield Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url } )\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False} )\n\t\t\t\t\t\t\t\t# log_scrap.warning(u\">>> FOLLOWING LINK --> url : {} / WORKED !!! \".format(url) )\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\n\t\t\t\t\t\t\tlog_scrap.warning(u\">>> NO FOLLOW LINK ... 
\" )\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.items() : \\n %s\", item.items() )\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.keys() : \\n %s\", item.items() )\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t# print (\"\\n>>> NEXT ITEM \" + \">>> >>> \"*10, \"\\n\")\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - SCRAPY\n\t\t\t### check if there is a test_limit\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (Scrapy) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tis_next_page, next_page = self.get_next_page(response, start_url)\n\n\t\t\t\t\tif is_next_page :\n\n\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\turl_next = \"\"\n\t\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\t\turl_next = self.api_pagination_root\n\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE I : %s\", next_page )\n\t\t\t\t\t\tnext_page = self.clean_link(next_page, url_root=url_next)\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / NO MORE PAGE TO SCRAP - pages count : {} \".format(self.page_count) )\n\t\t\t\t\t\t# raise CloseSpider('NO MORE PAGE TO SCRAP')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with Selenium\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telse :\n\t\t\t### initiate selenium browser\n\t\t\t### cf : https://github.com/voliveirajr/seleniumcrawler/blob/master/seleniumcrawler/spiders/seleniumcrawler_spider.py\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting Selenium driver... \" )\n\n\t\t\t# retrieve exec path for chromedriver from settings_scrapy.py\n\t\t\t### GET APP MODE FROM ENV VARS\n\t\t\tapp_mode \t\t\t\t\t\t= os.environ.get('APP_MODE', 'default')\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / APP_MODE : %s\", app_mode)\n\t\t\tchromedriver_path \t= CHROMEDRIVER_PATH_LIST[ app_mode ]\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / chromedriver_path : %s\", chromedriver_path)\n\n\t\t\t### specify executable path to launch webdriver-->\n\t\t\t# cf : where chromedriver was installed when `brew install chromedriver`\n\t\t\tself.driver = webdriver.Chrome(executable_path=chromedriver_path, chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Chrome(chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Firefox()\n\t\t\t# self.driver = webdriver.Chrome()\n\t\t\t# self.driver = webdriver.PhantomJS() ### deprecated\n\n\t\t\t### setup waiting times\n\t\t\t# self.driver.set_page_load_timeout(60)\n\t\t\tself.wait_driver\t= WebDriverWait(self.driver, self.delay_driver)\n\t\t\tself.wait_page \t\t= WebDriverWait(self.driver, self.delay_new_page)\n\t\t\tself.driver.implicitly_wait(self.delay_implicit)\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_driver : %s\", self.delay_driver )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_new_page : %s\", self.delay_new_page )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_implicit : %s\", self.delay_implicit )\n\n\n\t\t\t### start parsing with selenium\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / response._url : %s\", response._url )\n\t\t\ttry :\n\t\t\t\tself.driver.get(response._url)\n\n\t\t\t\t### try scroll_down if needed in config\n\t\t\t\tif self.spider_config_flat['scroll_down'] : \n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / scroll_down is TRUE ... \" )\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericsSpider. / scroll_down - self.spider_config_flat : \\n%s\", pformat(self.spider_config_flat) )\n\n\t\t\t\t\tscroll_pause_time = self.spider_config_flat[\"scroll_pause_time\"]\n\t\t\t\t\tmax_loops \t\t\t\t= self.spider_config_flat[\"scroll_loops\"]\n\t\t\t\t\tself.driver = scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\t\t# scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. / url '{}' is loaded ... 
\".format( response._url ))\n\t\t\t\n\t\t\texcept :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tself.driver.close()\n\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\traise CloseSpider('DRIVER NOT RESPONDING')\n\n\n\t\t\t### clean original xpath from strings\n\t\t\tstrings_to_clean = [\n\t\t\t\t'/@src',\n\t\t\t\t'/@href',\n\t\t\t\t'/text()',\n\t\t\t\t'/@*[name()=\"xlink:href\"]',\n\t\t\t\t'/@datetime'\n\t\t\t]\n\n\t\t\t# while self.there_is_more_items_to_scrap :\n\t\t\twhile self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap )\n\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap_dict[start_url] )\n\n\t\t\t\ttry :\n\n\t\t\t\t\t### wait / debug page content\n\t\t\t\t\tpage_source_code = self.driver.page_source.encode(\"utf-8\")\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / page_source_code : \\n %s \", page_source_code )\n\t\t\t\t\ttime.sleep(self.delay_new_page)\n\n\t\t\t\t\t### start parsing page :\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.item_xpath : %s\", self.item_xpath )\n\t\t\t\t\traw_items_list \t= self.driver.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / raw_items_list length : %s\", len(raw_items_list) )\n\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / raw_items_list[0].text : \\n%s\", raw_items_list[0].text )\n\n\t\t\t\t\t# current_item_index = 0\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### PARSING PAGE - SELENIUM\n\t\t\t\t\t# loop through data items in page in response\n\t\t\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / START PARSING WITH SELENIUM ...\\n\" )\n\n\t\t\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\t\t\tprint()\n\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / START LOOPING raw_items_list WITH SELENIUM ...\" )\n\n\t\t\t\t\t\t\t### add +1 to items count\n\t\t\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} \".format(str(self.spider_name), self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} \".format(self.spider_name, self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - item n°{} \".format(self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ VARIABLES - spider_name : '%s' - item n°%s \" %(self.spider_name, self.item_count) )\n\n\t\t\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Selenium - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t\t\t### FOLLOW LINK - SELENIUM\n\t\t\t\t\t\t\t\t### find follow link to open detailled item view\n\t\t\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SELENIUM - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t\t\t### follow link with Scrapy\n\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Scrapy ...\" )\n\n\t\t\t\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ get href of follow_link ...\" )\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link_xpath : %s \", follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link\t\t\t= raw_data.find_element_by_xpath( follow_link_xpath ).get_attribute('href')\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW : %s \", follow_link )\n\n\t\t\t\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {}\".format(type(follow_link), follow_link ) )\n\n\t\t\t\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\t\t\t\turl\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False})\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\n\t\t\t\t\t\t\t\t\t### follow link with Selenium\n\t\t\t\t\t\t\t\t\t### FIND A WEBSITE TEST FOR REACTIVE DETAILLED PAGES\n\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Selenium ...\" )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_link_xpath : %s\", self.follow_link_xpath )\n\t\t\t\t\t\t\t\t\t\tfollow_link \t\t= raw_data.find_element_by_xpath( follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\t### open link in new tab ?\n\t\t\t\t\t\t\t\t\t\tfollow_link.click()\n\n\t\t\t\t\t\t\t\t\t\t### get data and save data\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / get data and save data ...\" )\n\t\t\t\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t\t\t\t### back to previous page and scrap from where it left\n\t\t\t\t\t\t\t\t\t\t\t### cf : https://selenium-python.readthedocs.io/navigating.html#navigation-history-and-location\n\t\t\t\t\t\t\t\t\t\t\tself.driver.back()\n\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF LIMIT_ITEMS')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF ITEMS - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### NEXT PAGE - SELENIUM\n\t\t\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\t\t\tif self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\t\t\t\tprint ()\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.info(\" --- GenericSpider.parse (Selenium) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t\t\t\t### add +1 to parsed pages\n\t\t\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\n\t\t\t\t\t\t\t\t### find next page btn in current view\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.next_page : %s\", self.next_page )\n\t\t\t\t\t\t\t\tnext_page_xpath = clean_xpath_for_reactive(self.next_page, strings_to_clean)\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page_xpath : %s\", next_page_xpath )\n\t\t\t\t\t\t\t\t# next_page \t= re.sub(\"|\".join(strings_to_clean), \"\", next_page )\n\n\t\t\t\t\t\t\t\t# try :\n\t\t\t\t\t\t\t\t# element_present = EC.presence_of_element_located((By.XPATH, next_page_xpath ))\n\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page present : %s\", element_present )\n\t\t\t\t\t\t\t\t# self.wait.until(element_present)\n\t\t\t\t\t\t\t\t# next_page = self.wait.until( EC.element_to_be_clickable(element_present) )\n\t\t\t\t\t\t\t\t# next_page \t\t= self.driver.find_element_by_xpath( next_page_xpath )\n\t\t\t\t\t\t\t\tnext_page \t\t= self.driver.find_element(By.XPATH, next_page_xpath )\n\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page : %s\", next_page )\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.text : %s\", next_page.text )\n\n\t\t\t\t\t\t\t\t# except TimeoutException:\n\t\t\t\t\t\t\t\t# except :\n\t\t\t\t\t\t\t\t# \tlog_scrap.error(\"--- GenericSpider. 
/ Timed out waiting for page to load\")\n\n\t\t\t\t\t\t\t\t### click next button and wait for ajax calls to complete (post and get)\n\t\t\t\t\t\t\t\t### cf : http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html\n\n\t\t\t\t\t\t\t\t# def wait_for(condition_function):\n\t\t\t\t\t\t\t\t# \t\tstart_time = time.time()\n\t\t\t\t\t\t\t\t# \twhile time.time() < start_time + 3:\n\t\t\t\t\t\t\t\t# \t\tif condition_function():\n\t\t\t\t\t\t\t\t# \t\t\treturn True\n\t\t\t\t\t\t\t\t# \t\telse:\n\t\t\t\t\t\t\t\t# \t\t\ttime.sleep(0.1)\n\t\t\t\t\t\t\t\t# \traise Exception ('Timeout waiting for {}'.format(condition_function.__name__) )\n\n\t\t\t\t\t\t\t\t# def link_has_gone_stale():\n\t\t\t\t\t\t\t\t# \t\ttry:\n\t\t\t\t\t\t\t\t# \t\t# poll the link with an arbitrary call\n\t\t\t\t\t\t\t\t# \t\tnext_page.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\t\t\t\t# \t\treturn False\n\t\t\t\t\t\t\t\t# \texcept StaleElementReferenceException :\n\t\t\t\t\t\t\t\t# \t\treturn True\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- ... ---\")\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.click() \" )\n\t\t\t\t\t\t\t\t\tnext_page.click()\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page.send_keys( \\ n )\" )\n\t\t\t\t\t\t\t\t\t# next_page.send_keys(\"\\n\")\n\t\t\t\t\t\t\t\t\t# added this step for compatibility of scrolling to the view\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / ALTERNATIVE next_page.click() \" )\n\t\t\t\t\t\t\t\t\t# self.driver.execute_script(\"return arguments[0].scrollIntoView();\", next_page)\n\t\t\t\t\t\t\t\t\t# next_page.click()\n\t\t\t\t\t\t\t\t\tself.driver.execute_script(\"arguments[0].click();\", next_page)\n\n\t\t\t\t\t\t\t\t### wait after click\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / wait for ajax to finish... \" )\n\t\t\t\t\t\t\t\t\t# wait_for(link_has_gone_stale)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return jQuery.active') == 0)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return document.readyState') == 'complete')\n\t\t\t\t\t\t\t\t\t# time.sleep(self.delay_implicit)\n\t\t\t\t\t\t\t\t\ttime.sleep(self.delay_new_page)\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / !!! FAIL / wait for ajax to finish... \" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF PAGES TO SCRAP - page n°{} / except -> break\".format(self.page_count) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF PAGES TO SCRAP')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\texcept :\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ NO MORE ITEMS TO SCRAP - item_count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\tself.driver.close()\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\traise CloseSpider('NO MORE ITEMS TO SCRAP')\n\t\t\t\t\tbreak", "def spider_and_save_thread(\n self, source_url, crawl_timeout, target_url_patten, max_depth, output_directory):\n url_result = self.crawl(\n source_url,\n crawl_timeout,\n target_url_patten,\n max_depth)\n self.save_url(url_result, crawl_timeout, output_directory)", "def start_requests(self):\n yield scrapy.Request(url=self.start_urls[0])", "def executor(self, job):\n job.connect(self)\n job.trigger(wait=True, **job.trigger_args) # Wait until job is ready\n job.start() # Notify star of job\n # Run specific action for this job\n job.action(**job.action_args)", "def work(self, job):\n pass", "def daemon_job(interval):\n time.sleep(3) # Wait for api server to start first\n while True:\n try:\n crawl()\n process_notification()\n except Exception:\n traceback.print_exc()\n time.sleep(interval)", "def run(self) -> None:\n\n # These are set here, because user may\n # change settings (i.e. app.config['LOG_LEVEL] = 'DEBUG')\n # after instantiation\n self.logger = create_logger(self.config[\"LOG_LEVEL\"], self.config[\"LOG_FILE\"])\n\n self._check_valid_config()\n self.exporter = Exporter(self.config[\"OUT_FILE\"], self.config[\"OUT_FORMAT\"])\n self.crawler = Crawler(\n self.logger,\n self.exporter,\n wait=self.config[\"WAIT\"],\n timeout=self.config[\"TIMEOUT\"],\n concurrency=self.config[\"CONCURRENCY\"],\n max_retries=self.config[\"MAX_RETRIES\"],\n )\n\n self.logger.info(\"Starting crawler\")\n indent = \" \" * 4\n for key, val in self.config.items():\n self.logger.info(f\"{indent}{key}: {val}\")\n\n # Create a new event loop for each execution\n # Allows run() to be called multiple times\n try:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n rc = loop.run_until_complete(self.crawler.crawl(self._starting_requests))\n finally:\n loop.close()\n self.end()\n\n if rc == ReturnCode.SUCCESS:\n self.logger.info(\"Crawler ended successfully\")\n else:\n self.logger.critical(\"Crawler ended with error\")", "def start_requests(self):\n yield Request(self.base_url, \n callback=self.parse_urls)", "def scrap_keywords():\n ParScr = ParallelScraper()\n ParScr.create_and_run_threads()\n return", "def _run_express_job(self, class_name, options=\"\"):\n cmd = \"source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}\"\n cmd = cmd.format(\n bento_home=self.bento_home,\n jar=os.path.join(self.movie_advisor_home, self.express_jar),\n myclass=class_name,\n kiji_uri=self.kiji_uri,\n ) + \" \" + options\n print(run(cmd))", "def parse_inner_urls(self, response):\n s = Selector(response)\n\n jobs_per_site = s.xpath('//div[@class=\"col-lg-12 col-md-12 col-sm-12 aggelia-view-title\"]//a/@href').extract()\n print(jobs_per_site)\n\n for inner_site in jobs_per_site:\n url = urljoin(\"https://www.skywalker.gr/\", inner_site)\n yield scrapy.Request(url, callback=self.parse_items)", "def launch_job(self,\n job_id: Text,\n parent: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> None:\n pass", "def scrape_job_page(driver, job_title, job_location):\n \n current_date = str(datetime.datetime.now(pytz.timezone('US/Mountain')))\n json_dct = {'search_title': job_title, \\\n 'search_location': job_location, \\\n 'search_date': current_date, 'job_site': 
'glassdoor'}\n\n jobs = driver.find_elements_by_class_name('jobListing')\n\n mongo_update_lst = [query_for_data(driver, json_dct, job, idx) for \n idx, job in enumerate(jobs[:-1])]\n\n store_in_mongo(mongo_update_lst, 'job_postings', 'glassdoor')", "def t_run_process(self, *args, **kwargs):\n\n str_cmd = \"\"\n d_request = {}\n d_meta = {}\n\n for k,v in kwargs.items():\n if k == 'request': d_request = v\n\n d_meta = d_request['meta']\n\n if d_meta:\n self.jid = d_meta['jid']\n self.auid = d_meta['auid']\n str_cmd = d_meta['cmd']\n\n if isinstance(self.jid, int):\n self.jid = str(self.jid)\n\n self.dp.qprint(\"spawing and starting poller thread\")\n\n # Start the 'poller' worker\n self.poller = Poller(cmd = str_cmd,\n debugToFile = self.b_debugToFile,\n debugFile = self.str_debugFile)\n self.poller.start()\n\n str_timeStamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S.%f')\n str_uuid = uuid.uuid4()\n str_dir = '%s_%s' % (str_timeStamp, str_uuid)\n self.str_jobRootDir = str_dir\n\n b_jobsAllDone = False\n\n p = self._ptree\n\n p.cd('/')\n p.mkcd(str_dir)\n p.touch('d_meta', json.dumps(d_meta))\n p.touch('cmd', str_cmd)\n if len(self.auid):\n p.touch('auid', self.auid)\n if len(self.jid):\n p.touch('jid', self.jid)\n\n p.mkdir('start')\n p.mkdir('end')\n\n jobCount = 0\n p.touch('jobCount', jobCount)\n\n while not b_jobsAllDone:\n try:\n b_jobsAllDone = self.poller.queueAllDone.get_nowait()\n except queue.Empty:\n self.dp.qprint('Waiting on start job info')\n d_startInfo = self.poller.queueStart.get()\n str_startDir = '/%s/start/%d' % (self.str_jobRootDir, jobCount)\n p.mkdir(str_startDir)\n p.cd(str_startDir)\n p.touch('startInfo', d_startInfo.copy())\n p.touch('/%s/startInfo' % str_dir, d_startInfo.copy())\n\n self.dp.qprint('Waiting on end job info')\n d_endInfo = self.poller.queueEnd.get()\n str_endDir = '/%s/end/%d' % (self.str_jobRootDir, jobCount)\n p.mkdir(str_endDir)\n p.cd(str_endDir)\n p.touch('endInfo', d_endInfo.copy())\n p.touch('/%s/endInfo' % str_dir, d_endInfo.copy())\n\n p.touch('/%s/jobCount' % str_dir, jobCount)\n jobCount += 1\n self.dp.qprint('All jobs processed.')", "def start_requests(self):\n for url in self.start_urls:\n yield SplashRequest(\n url=url,\n callback=self.parse,\n method=\"GET\",\n endpoint=\"execute\",\n args={\"wait\": 15.0, \"lua_source\": self.lua_script},\n )", "def __init__(self, urls_file_, file_spider_='no', target_format_='', ignored_links_file_='',\n allow_clean_url_='no', time_out_=60, work_path_='./',\n max_recursion_depth_=0, one_bite_='no', white_list_path_=\"\"):\n self.__urls = Crawler.__read_file(urls_file_)\n self.__file_spider = file_spider_\n self.__target_format = target_format_\n self.__allow_clean_url = allow_clean_url_\n self.__one_bite = one_bite_\n self.__white_list_path = white_list_path_\n self.__white_list = []\n\n # loads white list in beginning in case an argument was passed for it\n if self.__file_spider == 'yes' and self.__white_list_path != '':\n self.__white_list = Crawler.__read_white_list(self.__white_list_path)\n\n # link titles that should be ignored during recursions\n self.__ignored_links = Crawler.__read_file(ignored_links_file_)\n\n self.__time_out = time_out_\n self.__work_path = os.path.join(work_path_.rstrip('/')+'/', 'DATA')\n self.__recursion_max_depth = max_recursion_depth_\n self.__extensions = ['txt', 'html', 'csv', 'tsv', 'tar', 'raw']\n\n logging.info('''Crawler Has been Initialized With The Below Configurations:\n-------------------------------------------------------------------\n-urls: 
%s\n-file_spider: %s\n-target_format: %s\n-ignored_links_file: %s\n-allow_clean_url: %s\n-time_out: %s\n-work_path: %s\n-max_recursion_depth: %s\n-one_bite: %s\n-white_list_path: %s\n''', self.__urls, self.__file_spider, self.__target_format, self.__ignored_links,\n self.__allow_clean_url, self.__time_out, self.__work_path,\n self.__recursion_max_depth, self.__one_bite, self.__white_list_path)", "def start_requests(self):\r\n yield Request(url=MooreSpider.start_url,\r\n callback=self.parse_directory_list,\r\n method=\"GET\")", "def start(self):\n\t\tself.app.printflush('Sitemap: ' + self.sitemap_url)\n\t\tself.getUrlsList()\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count))\n\t\tself.app.printflush('Processes: ' + str(self.processes))\n\t\tself.CheckURLs()\n\t\tself.printReport()", "def crawl_job_detail(positionId, positionName):\n request_url = 'https://m.lagou.com/jobs/' + str(positionId) + '.html'\n\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Host': 'm.lagou.com',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/600.1.3 (KHTML, like Gecko) Version/8.0 Mobile/12A4345d Safari/600.1.4'\n }\n\n response = requests.get(request_url, headers=headers, timeout=10, cookies=init_cookies(), proxies=PROXIES)\n\n if response.status_code == 200:\n soup = BeautifulSoup(response.text, 'html5lib')\n items = soup.find('div', class_='items')\n jobnature = items.find('span', class_='item jobnature').span.text.strip()\n workyear = items.find('span', class_='item workyear').span.text.strip()\n education = items.find('span', class_='item education').span.text.strip()\n jd = soup.find_all('div', class_='content')[0].get_text().strip().replace('\\n', '').replace('&nbps;', '') # jd\n\n elif response.status_code == 403:\n print('request is forbidden by the server...')\n else:\n print(response.status_code)\n return [positionId, positionName, jobnature, workyear, education, jd]", "def parse_item(self, response):\n NewhouseSpider.crawled_urls.append(response.url)\n item = FocusedScrapyCrawlerItem()\n item['url'] = response.url\n item['link_text'] = response.meta.get('link_text', '') if response.meta else ''\n soup = BeautifulSoup(response.body, 'html.parser')\n\n item['body_p_tags'] = self._getBodyText(soup)\n item['head_title'] = self._getHeadTitle(soup)\n item['last_crawled'] = time.time()\n links = self._getLinks(response, soup)\n\n # get score of the page based upon classifier\n if self.classifier:\n score = self.classifier.score(item['link_text'], item['head_title'], item['body_p_tags'])\n else:\n score = 0.0\n\n item['score'] = score\n yield item\n if score <= 0:\n self.log(\"item={} does not belong to new home so stop crawling\".format(item),\n logging.INFO)\n else:\n for link in links:\n req = Request(link, priority=int(score * 1000000), # after the request is done, run parse_item to train the apprentice\n callback=self.parse_item)\n yield req", "def crawl_page(self, keyword, since=None, to=None, page=None):\n data = self.get_news(keyword, since, to, page)\n print(current_process())\n print('crawling page no.: ' + str(page))\n urls = self.get_urls(data)\n p = Process()\n p.start(urls)", "def submit(self, **kwargs):\n pwd = curdir\n wd = dirname(self.logFile)\n chdir(wd)\n d = OrderedDict()\n #d['universe'] = 'vanilla'\n #d['executable'] = self.command\n\td['job-name'] = 
self.name\n\td['nodes'] = 1\n\td['partition'] = defaults.get('queue')\n\td['time'] = defaults.get(\"cputime\")\n\td['mem'] = defaults.get(\"memory\")\n d['output'] = op_join(wd,\"output.log\")\n d['error'] = op_join(wd,\"output.err\")\n csi_file = open(\"submit.sh\", \"w\")\n\tcsi_file.write(\"#!/bin/bash\\n\")\n data = [\"#SBATCH --%s=%s\\n\" % (k, v) for k, v in d.iteritems()]\n csi_file.write(\"\".join(data))\n\tcsi_file.write(\"export DAMPE_WORKFLOW_SERVER_URL=%s\\n\"%DAMPE_WORKFLOW_URL)\n csi_file.write(\"bash script\\n\")\n csi_file.close()\n output = self.__run__(\"sbatch submit.sh\")\n chdir(pwd)\n return self.__regexId__(output)", "def run(): # pragma: no cover\n\n register_handler(job_configs.config)\n bot = Bot()\n bot.run()", "def start_requests(self):\n\n with open(os.path.join(os.path.dirname(__file__), \"../resources/mapemall_categories.csv\")) as categories:\n for category in csv.DictReader(categories):\n category_text=category[\"category\"]\n url=str(MapemallCrawlerSpider.start_urls[0])+category_text\n # The meta is used to send our search text into the parser as metadata\n yield scrapy.Request(url, callback = self.parse, meta = {\"category_text\": category_text})", "def __init__(self, *args, **kwargs):\n super(AlibabaCompanySpider, self).__init__(*args, **kwargs)", "def parse_items(self, response):\n items = JobcrawlerItem()\n\n current_date = datetime.now()\n current_date_str = current_date.strftime(\"%b %d %Y %H:%M:%S\")\n\n items[\"timestamp\"] = current_date_str\n items[\"site\"] = self.allowed_domains[0]\n items[\"full_html\"] = response.text\n items[\"job_post_url\"] = response.request.url\n items[\"full_text\"] = \" \".join(response.xpath('//div[@id=\"aggelia-text\"]//text()').re('(\\w+)'))\n\n extracted_title = response.xpath('//h3[@id=\"aggelia-title\"]/text()').extract()\n if extracted_title:\n items[\"job_title\"] = extracted_title[0]\n else:\n items[\"job_title\"] = \"\"\n\n job_requirements = response.xpath(self.requirements_xpath).extract()\n requirements_list = list(filter(lambda item: item.strip() != '', job_requirements))\n items[\"job_requirements\"] = \" \".join(requirements_list).replace('\\n', '')\n\n return items", "def continue_crawl(d, runner, begin_time, hour_limit):\n end_time = datetime.now()\n\n # Convert elapsed time to seconds\n elapsed_time = end_time - begin_time\n elapsed_time_seconds = elapsed_time.days * 86400\n elapsed_time_seconds += elapsed_time.seconds\n\n if elapsed_time_seconds < hour_limit:\n wait_time = hour_limit - elapsed_time_seconds\n # print(\"Waiting for {0} seconds\".format(wait_time))\n time.sleep(wait_time)\n\n d = runner.crawl(\"sb_spider\")\n\n # end_time is the new \"start\" time for this iteration of the spider.\n # Pass it in as the 3rd argument.\n d.addBoth(lambda _: continue_crawl(d, runner, end_time, hour_limit))", "def _initJobs(self):\n super(DigestManager, self)._initJobs()\n conf = self.config.container_manager\n\n job4 = LoopingCall(self.performRequestedScan)\n job4.start(float(conf.activescan_interval))\n self.jobs.append(job4)", "def parse(self, response: scrapy.http.Response):\n for job in self.parse_jobs(response):\n yield job\n\n if self.has_next_page(response):\n yield scrapy.Request(self.next_url,\n meta={'cookiejar': response.meta['cookiejar'],\n 'queries': response.meta['queries']},\n dont_filter=True)", "def runNewSearch(self):\n self.__searchJob = self.__startSearch()\n\n self.monitorSearchJob()", "def main():\n try:\n jobs = {}\n scraped_jobs = []\n\n scraped_jobs = read_from_text_file(\"\", 
scraped_jobs_filename)\n\n for URL in URLs:\n if \"www.linkedin.com\" in URL:\n (jobs, scraped_jobs) = linkedin_meta_search(URL, jobs, scraped_jobs)\n if \"www.python.org\" in URL:\n (jobs, scraped_jobs) = pythondotorg_meta_search(URL, jobs, scraped_jobs)\n if \"www.ziprecruiter.com\" in URL:\n (jobs, scraped_jobs) = ziprecruiter_meta_search(URL, jobs, scraped_jobs)\n\n write_to_text_file(\"\", \"scraped_jobs.txt\", scraped_jobs)\n\n for id in jobs.keys():\n score, link, title, company, date_posted, location, full_text = jobs[id]\n\n print(f\"\\nEvaluating {title} at {company} ({id}).\")\n\n try:\n if \"yesterday\" in date_posted:\n age_in_days = 1\n elif \"just now\" in date_posted:\n age_in_days = 0\n elif \"hours ago\" in date_posted or \"hour ago\" in date_posted:\n age_in_days = float(date_posted.split()[0]) / 24\n else:\n datetimePosted = parser.parse(date_posted)\n age = datetime.now().timestamp() - datetimePosted.timestamp()\n age_in_days = int(age / 24 / 60 / 60)\n except parser.ParserError as e:\n print(f\"{e}: {date_posted} couldn't be parsed properly from {link}\")\n age_in_days = 0\n age_reduction = int(age_in_days ** 2)\n score -= age_reduction\n if age_reduction > 0:\n print(\n f\"... docking {age_reduction} from the score ({score}) \\\n because the listing is {age_in_days} days old.\"\n )\n\n for search_phrase in search_phrases.keys():\n if search_phrase.lower() in title.lower():\n\n score_adjustment = 2 * search_phrases[search_phrase]\n score += score_adjustment\n if score_adjustment > 0:\n score_adjustment = \"+\" + str(score_adjustment)\n print(\n f\"... {score_adjustment} points ({score}) for \\\n {search_phrase} being in {title}.\"\n )\n if search_phrase.lower() in full_text.lower():\n score_adjustment = search_phrases[search_phrase]\n score += score_adjustment\n if score_adjustment > 0:\n score_adjustment = \"+\" + str(score_adjustment)\n print(\n f\"... 
{score_adjustment} points ({score}) for {search_phrase} \\\n being in the text of the listing.\"\n )\n jobs[id][0] = score\n\n print(\"\\n\")\n\n send_email(jobs)\n\n except Exception as e:\n send_email(f\"Not working: {e}\") # \".message}, {e.args}\")", "def parse_job(content: scrapy.selector.Selector, job: Job):\n\n job['title'] = content.xpath('.//div[@class=\"hidden-xs likeH2\"]/text()').extract_first().strip()\n job['organization'] = content.xpath('.//div[@class=\"hidden-xs likeH3\"]/text()').extract_first().strip()\n column1_elements = content.xpath('.//div[@class=\"jobHitColumn1\"]/p')\n job['regions'] = column1_elements[0].xpath('text()').extract_first().strip()\n job['area'] = ' '.join(column1_elements[1].xpath('text()').extract_first().strip().split())\n job['url'] = 'https://www.mta-dialog.de/' + \\\n content.xpath('.//a[@class=\"detailLinkParent\"]/@href').extract_first().strip()\n return MTADialogSpider.parse_job_details(job)", "def _launch_job(self, job):\n details = self.sm.get_job_details(job.jobId)\n handler = self.handlers[details[0]['method']]\n type = details[0]['type']\n resultId = details[0]['resultid']\n job.set_phase('EXECUTING')\n job.set_start_time(datetime.utcnow().isoformat())\n job.add_result(resultId, 'http://localhost:8000/%s/%s/results/details' % (type, job.jobId))\n self.sm.update_job(job = job)\n self.threads.append(Future(handler, job.jobId, job))", "def execute(self, job):\n raise NotImplementedError", "def execute(self):\r\n\r\n if username and password:\r\n job_name = input('Please enter the name of the job that you would like to apply for: ')\r\n job_location = input('Please enter where you would like to work: ')\r\n self.login_to_linkedin()\r\n self.driver.maximize_window()\r\n time.sleep(5)\r\n self.job_filter(job_name, job_location)\r\n time.sleep(5)\r\n self.get_job_listings()\r\n else:\r\n print('Please provide a username and password')", "def initialize_process():\n\n settings = Settings({'BOT_NAME': 'warnnoticebot',\n 'LOG_LEVEL': 'INFO',\n 'ITEM_PIPELINES': {'modules.pipelines.PerStateJsonlinesExportPipeline': 300},\n 'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36', # This is my actual user agent when using a browser\n 'COOKIES_ENABLED': False,\n 'ROBOTSTXT_OBEY': True,\n 'DOWNLOAD_DELAY': 5.0,\n 'DEFAULT_REQUEST_HEADERS': {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n 'Upgrade-Insecure-Requests': 1}\n })\n \n process = CrawlerProcess(settings) \n\n return process", "def get_worker(self):\n worker = self.worker(job_id=self.job_id, spider=self.spider,\n http_session={'url': self.url, 'timeout': self.timeout},\n **self.kwargs)\n return worker", "def __init__(self, board='Gossiping', pages=1, file='tmp.json', title_lim=[], jsonf=None, copy_data=[], simple_mode=True):\n if copy_data:\n self.extend(copy_data)\n return\n os.chdir(os.path.split(os.path.realpath(__file__))[0])\n print(os.getcwd())\n com = 'scrapy crawl ptt ' if not simple_mode else 'scrapy crawl ptt_url '\n # output json file name\n com += '-o %s ' % (file)\n # page\n com += '-a pages=%d ' % (pages)\n # board\n com += '-a board=%s ' % (board)\n\n # title limit\n if title_lim:\n com += '-a title_lim=\"'\n for lim in title_lim:\n com += \"%s,\" % (str(lim))\n com += '\" '\n # not opened by json_file\n if not jsonf:\n # start crawl\n print('Command: ' + com)\n os.system('rm -f {}'.format(file))\n os.system('{}'.format(com))\n # opened by json 
file\n else:\n file = jsonf\n\n # all data save in self\n self.load_json(file)\n self.com = com\n self.file = file", "def crawl_start(crawl_obj):\n \n res = None\n\n if crawl_obj.type in ['user', 'song'] :\n res = eval('crawl_' + crawl_obj.type)(crawl_obj)\n elif crawl_obj.type in ['artist', 'album'] :\n web_data = requests.get(crawl_obj.url, headers = cheat_headers)\n soup = bs4.BeautifulSoup(web_data.text, 'lxml')\n\n res = eval('crawl_' + crawl_obj.type)(crawl_obj.type, soup)\n else:\n print(\"Object type UNKNOWN!\")\n \n return res", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(telecom_vivo_movel_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider", "def scrape_main() -> None:\n\n logger.info(\"Starting scrape\")\n search_info = construct_scrape_regex_patterns(grab_scrape_info())\n links = run_scrape(\n url=search_info['url'],\n seasons_regex=search_info['seasons'],\n episodes_regex=search_info['episodes']\n )\n if links:\n logger.debug(\"Writing urls to file\")\n with open('urls.txt', 'w') as f:\n for link in links:\n f.write(link + '\\n')\n else:\n logger.warning(\"No links available\")", "def run(self):\n self.submit()\n self.start()", "def setup_crawler(self, crawlers: List[BaseCrawler]) -> None:\n self.tasks.extend(crawlers)", "def run(self):\n\n # The url is too deep, skip the url.. Work is done!\n if self.depth_ > self.depth:\n return\n\n # Get doc id corresponds to the url. Add a new entry into doc index if there is no entry.\n doc_id = self.crawler.document_id(self.curr_url)\n\n # Check if the doc_id has been visited/processed by any of crawler_threads. Add doc_id to seen if not so.\n if self.crawler.checkDocVisitedAndUpdate(doc_id):\n return\n\n # Process the document corresponds to the url\n socket = None\n try:\n socket = urllib2.urlopen(self.curr_url, timeout=self.timeout)\n soup = BeautifulSoup(socket.read())\n self._curr_depth = self.depth_ + 1\n self._curr_doc_id = doc_id\n # Traverse the document as deep as possible and add those newly discovered urls into url queue\n self._index_document(soup)\n # Store (wordId, docId) and (word, url) into inverted_index and resolved_inverted_index respectively.\n self.crawler._add_words_to_document(self._curr_words, self._curr_doc_id)\n except:\n pass\n finally:\n if socket:\n socket.close()", "def jobs(self):\n raise NotImplementedError()", "def parse_jobs(self, response: scrapy.http.Response):\n hits = response.xpath('//div[@class=\"jobHit\"]')\n for hit in hits:\n job = self.default_job()\n job['queries'] = response.meta['queries']\n for i in MTADialogSpider.parse_job(hit, job):\n yield i", "def get_job_soup(what = WHAT, where = WHERE, start = 0):\n\tjob_page = rqs.get(get_job_url(what, where, start))\n\treturn BeautifulSoup(job_page.text, 'html.parser')", "def crawl(self, query, start_date, end_date, *args, **kwargs):\n pass", "def _initJobs(self):\n pass", "def startScrape(latest=False):\n print '*** STARTED SCRAPING: DEFAULT: ***' \n cache.set('default_scrape','true')\n cache.set('scrape_mode','default')\n \n for key in ['scrape_friends','scrape_followers','scrape_tweets']:\n cache.set(key,'')\n \n doDefaultScrape.delay(latest=latest)", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(autos_detran_terceiros_sp_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider" ]
[ "0.68515605", "0.68069863", "0.6725972", "0.65931946", "0.6564873", "0.6453477", "0.6431938", "0.6309634", "0.62984973", "0.62603843", "0.62601477", "0.62413216", "0.61893225", "0.6176784", "0.6167868", "0.6138753", "0.61035246", "0.6056674", "0.60286516", "0.6023728", "0.6009284", "0.600814", "0.6007173", "0.5971846", "0.5928023", "0.58617646", "0.5830549", "0.5825821", "0.5824449", "0.5806738", "0.5753789", "0.5743707", "0.57295007", "0.5709358", "0.5700973", "0.5678749", "0.56782776", "0.5673895", "0.56604576", "0.56124383", "0.5605982", "0.56001574", "0.55969274", "0.5587366", "0.5568115", "0.55632335", "0.55599356", "0.5553495", "0.55489844", "0.55464584", "0.55376637", "0.55355144", "0.55269945", "0.55084133", "0.54970974", "0.54842186", "0.5477556", "0.54768616", "0.546674", "0.54664046", "0.54637283", "0.54476255", "0.54456925", "0.5433455", "0.54218805", "0.5419778", "0.5414361", "0.54134494", "0.5405959", "0.54038984", "0.5389293", "0.53880864", "0.53877354", "0.53747374", "0.53727424", "0.5358302", "0.535826", "0.5346432", "0.53455037", "0.5338231", "0.53070855", "0.5305761", "0.5305291", "0.5303153", "0.5302508", "0.5295953", "0.5294991", "0.52945274", "0.52826613", "0.52767074", "0.52729386", "0.52504766", "0.52498454", "0.5247935", "0.52472043", "0.52397066", "0.5239149", "0.52236176", "0.5217961", "0.5217588" ]
0.58343834
26
Return a parsed BIOM table.
def load_biom_table(table_f):
    return parse_biom_table(table_f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_table(text):\n\n text = str(text)\n try:\n text = text.split(\"<pre>\")[1]\n text = text.split(\"</pre>\")[0]\n text = text.split(\"To save this output\")[0]\n lines = text.split(\"\\n\")\n except Exception as exc:\n raise NNDCRequestError(f\"Unable to parse text:\\n{exc}\\n{text}\")\n table = {}\n headers = None\n for line in lines:\n tokens = line.split(\"\\t\")\n tokens = [t.strip() for t in tokens]\n if len(tokens) <= 1:\n continue\n if headers is None:\n headers = tokens\n headers = _parse_headers(headers)\n for header in headers:\n table[header] = []\n else:\n if len(tokens) != len(headers):\n raise NNDCRequestError(\n \"Too few data in table row\\n\"\n + f' Headers: \"{headers}\"\\n'\n + f' Row: \"{tokens}\"'\n )\n for header, token in zip(headers, tokens):\n table[header].append(token)\n return table", "def load_biom_table_with_file_contents(biom_f):\n table = parse_biom_table(biom_f)\n if hasattr(biom_f, 'seek'):\n biom_f.seek(0)\n return table, biom_f", "def getBiomData(self, data):\r\n try:\r\n if isfile(data):\r\n otu_table = parse_biom_table(qiime_open(data, 'U'))\r\n return otu_table\r\n except TypeError:\r\n if any([type(data) in\r\n [DenseFunctionTable,\r\n DenseGeneTable,\r\n DenseMetaboliteTable,\r\n DenseOTUTable,\r\n DenseOrthologTable,\r\n DensePathwayTable,\r\n DenseTable,\r\n DenseTaxonTable,\r\n FunctionTable,\r\n GeneTable,\r\n MetaboliteTable,\r\n OTUTable,\r\n OrthologTable,\r\n PathwayTable,\r\n SparseFunctionTable,\r\n SparseGeneTable,\r\n SparseMetaboliteTable,\r\n SparseOTUTable,\r\n SparseOrthologTable,\r\n SparsePathwayTable,\r\n SparseTable,\r\n SparseTaxonTable]]):\r\n otu_table = data\r\n return otu_table\r\n else:\r\n raise TypeError('Data is neither a path to a biom table or a' +\r\n ' biom table object.')", "def parse_table(table):\n rows = table.find_all('tr')\n if not rows:\n raise ValueError(\"No rows for table\")\n pages = []\n table_tag = \"<table>\"\n tbl_headers = get_tbl_headers(rows)\n table_tag += \"<tr>\"\n for header in tbl_headers.keys():\n table_tag += conf.ADD_TH_TAG(header)\n table_tag += \"</tr>\"\n for row in rows:\n cols = row.find_all('td')\n if not cols:\n continue\n for page_name in cols[0].find_all('a'):\n if not page_name:\n continue\n pages.append(page_name.text)\n table_tag += '<tr>'\n for header, col in tbl_headers.items():\n try:\n table_tag += f\"<td>{preprocess_data(f'{header} : {cols[col].text}')} \\t</td>\"\n except IndexError:\n pass\n table_tag += '</tr>'\n table_tag += '</table>'\n if conf.DOWNLOAD_IMAGES:\n download_images(pages)\n return table_tag", "def get_mist_eep_table():\n fp = Path(DATA_PATH, \"mist_eep_table.csv\")\n return pd.read_csv(fp, comment=\"#\")", "def wmo_code_table():\n import pandas as pd\n from ..fun import get_data\n\n return pd.read_csv(get_data('Common_C02_20181107_en.txt'))", "def parse(self):\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n url = CostOfLiving.URL.format(self.city)\r\n req = Request(url, headers=hdr)\r\n page = urlopen(req)\r\n soup = BeautifulSoup(page, \"html.parser\")\r\n self.table = soup.find(\"table\", attrs={\"class\": \"data_wide_table\"})", "def parse_sp500_wiki_page(html_page):\n soup = BeautifulSoup(html_page, 'html.parser')\n table = soup.find(\"table\", {\"id\": \"constituents\"})\n \n data = []\n column_names = [col_name.text.strip() for col_name in table.find_all('th')]\n for row in table.find_all('tr'):\n data_row = [col_name.text.strip() for col_name in row.find_all('td')]\n if data_row:\n data.append(data_row)\n return data, column_names", "def 
get_main(self) -> 'table[category: str][label: str][date: date]':\n\n raw_table = self.get_raw_table(\"M\")\n categories = raw_table[0]\n labels = raw_table[1]\n dates = self.get_dates(raw_table)\n\n def next_cat_col(i):\n n = 1\n while True:\n if i+n > len(categories)-1:\n return i\n if categories[i+n]:\n return i+n\n n += 1\n\n def get_category_labels(i):\n end_col = next_cat_col(i)\n return zip(range(i, end_col), labels[i:end_col])\n\n def get_label_cells(category, label):\n ci = categories.index(category)\n i = labels.index(label, ci)\n cells = {}\n for j, d in enumerate(dates):\n cell = raw_table[j+2][i]\n if cell and cell != \"#VALUE!\":\n cells[d] = cell\n return cells\n\n table = {}\n for i, cat in enumerate(categories):\n if not cat:\n continue\n table[cat] = {}\n for i, label in get_category_labels(i):\n table[cat][label] = get_label_cells(cat, label)\n\n return table", "def matrix2Table(self, matrix): \n M = TableModel()\n M.addColumn('Mutations')\n\n fields = matrix.columnHeaders()\n for f in fields:\n M.addColumn(f)\n i = matrix.indexOfColumnWithHeader('Mutations')\n for row in matrix:\n mutationSet = Core.Data.MutationSet(row[i])\n code = '+'.join(mutationSet.mutationCodes(reduced=True))\n M.addRow(code)\n for f in fields:\n j = matrix.indexOfColumnWithHeader(f)\n if f == 'Mutations':\n M.data[code]['Mutations'] = code\n else: \n M.data[code][f] = str(row[j])\n return M", "def format_biom_table(biom_table):\r\n generated_by_str = \"QIIME \" + get_qiime_library_version()\r\n return biom_table.getBiomFormatJsonString(generated_by_str)", "def get_table_from_file():\n with open(\"story.csv\", \"r\") as file:\n lines = file.readlines()\n table = [element.replace(\"\\n\", \"\").split(\";\") for element in lines]\n return table", "def test_parse_classic_otu_table_consensus_lineage(self):\r\n data = \"\"\"#Full OTU Counts\r\n#OTU ID\tFing\tKey\tNA\tconsensusLineage\r\n0\t19111\t44536\t42\tBacteria; Actinobacteria; Actinobacteridae; Propionibacterineae; Propionibacterium\r\n1\t1216\t3500\t6\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Lactobacillales; Lactobacillales; Streptococcaceae; Streptococcus\r\n2\t1803\t1184\t2\tBacteria; Actinobacteria; Actinobacteridae; Gordoniaceae; Corynebacteriaceae\r\n3\t1722\t4903\t17\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Staphylococcaceae\r\n4\t589\t2074\t34\tBacteria; Cyanobacteria; Chloroplasts; vectors\"\"\"\r\n data_f = StringIO(data)\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n [['Bacteria', 'Actinobacteria', 'Actinobacteridae', 'Propionibacterineae', 'Propionibacterium'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Lactobacillales',\r\n 'Lactobacillales',\r\n 'Streptococcaceae',\r\n 'Streptococcus'],\r\n ['Bacteria',\r\n 'Actinobacteria',\r\n 'Actinobacteridae',\r\n 'Gordoniaceae',\r\n 'Corynebacteriaceae'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Staphylococcaceae'],\r\n ['Bacteria', 'Cyanobacteria', 'Chloroplasts', 'vectors']])\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])", "def parse_table(soup, start_gen, end_gen):\n pokes = []\n for cell in soup.find_all(\"td\", attrs={'style': None}):\n for name in cell.find_all(\"a\"):\n pokes.append(name.string)\n\n start_index = 
pokes.index(GEN_STARTS_WITH[start_gen])\n end_index = pokes.index(GEN_ENDS_WITH[end_gen]) + 1\n\n # Doesn't have to be ordered, just personal preference.\n unique_list = OrderedSet(pokes[start_index:end_index])\n\n if start_gen != end_gen:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} to {end_gen} were fetched.\")\n else:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} were fetched.\")\n\n pkmn_string = ', '.join(unique_list)\n\n for key, value in NIDORAN_CASE.items():\n # Handling of Nidoran male/female symbols.\n pkmn_string = pkmn_string.replace(key, value)\n\n return pkmn_string", "def get_biGramTable(self):\n\t\treturn self._state.biGramTable", "def table_from_pmids(pmid):\n soup = try_or(lambda: get_soup(pmid)) \n jsoup = get_soup(pmid,retmode=\"json\")\n #print(pmid)\n cit_count, scopus_json = get_cit_by_pmid(pmid)\n print(f\"scopus_json: {scopus_json}\")\n result = {\n \"pmid\": pmid,\n \"publ_date\": try_or(lambda: get_pub_date(soup)),\n \"pi_name\": try_or(lambda: get_PI(soup)),\n \"full_affil\": try_or(lambda: get_aff_countries(pmid,PI=False)), # !!! change the function to retrieve PI and othe affiliations simultaneously\n \"pi_affil\": try_or(lambda: get_aff_countries(pmid,PI=True)), # !!! change the function to retrieve PI and othe affiliations simultaneously\n \"pi_publ_count\": try_or(lambda: count_results(f\"{get_PI(soup)}[Author - Last]\")),\n \"journal\": try_or(lambda: get_journal(soup)),\n \"journal_publ_count\": try_or(lambda: count_results(f\"{get_journal(soup)}[Journal]\")),\n \"authors_count\": try_or(lambda: get_n_authors(soup)),\n \"cit_count\": cit_count,\n \"summary\": try_or(lambda: json.loads(jsoup.text)),\n \"scopus_json\": scopus_json,\n }\n\n print(pmid)\n return(result)", "def extract_main_table_from_html(html):\n soup = bs(html, 'html.parser')\n table = soup.find('table')\n return(table)", "def _parse_table(value):\n lines = value.split('\\n')\n header = None\n rows = []\n\n for l in lines:\n if l.startswith('+-'):\n pass\n elif l.startswith('|'):\n columns = [c.strip() for c in l.split('|')[1:-1]]\n if header is None:\n header = columns\n else:\n row = {}\n for i, c in enumerate(columns):\n if len(header)-1 <= i:\n row[i] = c\n else:\n row[header[i]] = c\n rows.append(row)\n return rows", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = getattr(self, name)\n\n return table", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = getattr(self, name)\n\n return table", "def table(self):\n return self.generator.table", "def build_table(self):\n if len(self._abslines) == 0:\n return\n comp_tbl = QTable()\n comp_tbl.add_column(Column([iline.wrest.to(u.AA).value for iline in self._abslines]*u.AA,name='wrest'))\n for attrib in ['z', 'flagN', 'N', 'Nsig']:\n comp_tbl.add_column(Column([iline.attrib[attrib] for iline in self._abslines], name=attrib))\n # Return\n return comp_tbl", "def parse_html_tables(html_filename):\n\n with open(html_filename) as f:\n p = SimpleHTMLTableParser()\n p.feed(f.read())\n return p.tables", "def parse_biom(filename, keep_otus=True, internal_levels=False):\n from biom import load_table # avoid to ask for the BIOM library if there is no biom file\n\n biom_table = load_table(filename)\n strs = biom_table.delimited_self(header_value='TAXA', header_key='taxonomy')\n lst1 = [str(s) for s in strs.split('\\n')[1:]] # skip the \"# 
Constructed from biom file\" entry\n biom_file = []\n out = [lst1[0]] # save the header\n pre_taxa = compile(\".__\")\n classs = compile(\"\\(class\\)\")\n\n # consistency check\n i = 0\n while i < (len(lst1)-1):\n if len([s for s in lst1[i].split('\\t')]) != len([s for s in lst1[i+1].split('\\t')]):\n raise Exception('[parse_biom()] It seems that taxonomic metadata are missing, maybe is the wrong biom file?')\n\n i += 1\n\n for l in lst1[1:]:\n otu = None\n lst = [float(s.strip()) for s in l.split('\\t')[1:-1]]\n\n if keep_otus:\n otu = l.split('\\t')[0]\n\n # Clean and move taxa in first place\n taxa = '.'.join([s.strip().replace('[', '').replace('u\\'', '').replace(']', '').replace(' ', '').replace('\\'', '')\n for s in l.split('\\t')[-1].split(',')])\n taxa = pre_taxa.sub('', taxa) # remove '{k|p|o|g|s}__'\n taxa = classs.sub('', taxa) # remove '(class)'\n taxa = taxa.rstrip('.') # remove trailing dots\n\n if otu:\n taxa = taxa + '.' + otu\n\n biom_file.append([taxa] + lst)\n\n # merge such rows that have the same taxa\n i = 1\n dic = {}\n\n for l in biom_file[i:]:\n if l[0] not in dic:\n dic[l[0]] = l[1:]\n\n for k in biom_file[i+1:]:\n if l[0] == k[0]:\n lst = []\n lstdic = dic[l[0]]\n j = 1\n while j < len(lstdic):\n lst.append(float(lstdic[j]) + float(k[j]))\n j += 1\n\n dic[l[0]] = lst\n i += 1\n\n feats = dict(dic)\n\n if internal_levels:\n feats = add_missing_levels(feats)\n\n for k in feats:\n out.append('\\t'.join([str(s) for s in [k] + feats[k]]))\n\n return '\\n'.join(out)", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab", "def read_table(lines):\n # header line 1: (100*Z+A), mass in [m_neutron]\n # [MAT, 3, MT/ ZA, AWR, 0, 0, 0, 0] HEAD\n\n # header line 2: Q-value and some counts\n # [MAT, 3, MT/ QM, QI, 0, LR, NR, NP/ EINT/ S(E)] TAB1\n f = read_line(lines[1])\n nS = int(f[4]) # number of interpolation sections\n nP = int(f[5]) # number of data points\n\n # header line 3: interpolation information\n # [MAT, 3, 0/ 0.0, 0.0, 0, 0, 0, 0] SEND\n # 1 y is constant in x (constant, histogram)\n # 2 y is linear in x (linear-linear)\n # 3 y is linear in ln(x) (linear-log)\n # 4 ln(y) is linear in x (log-linear)\n # 5 ln(y) is linear in ln(x) (log-log)\n # 6 y obeys a Gamow charged-particle penetrability law\n\n # data lines\n x = []\n y = []\n for l in lines[3:]:\n f = read_line(l)\n x.append(f[0])\n y.append(f[1])\n x.append(f[2])\n y.append(f[3])\n x.append(f[4])\n y.append(f[5])\n return np.array(x[:nP]), np.array(y[:nP])", "def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df", "def parse_bid_table(table):\r\n columns = table.find_all('td')\r\n player_id = int(re.findall('\\d+', columns[0].a['href'])[0])\r\n player = columns[0].text\r\n owner = columns[1].text\r\n team_id = int(re.findall('\\d+', columns[2].img['src'])[0])\r\n team = table.img['alt']\r\n price = int(columns[3].text.replace(\".\", \"\"))\r\n bid_date = columns[4].text\r\n trans_date = columns[5].text\r\n status = columns[6].text\r\n return player_id, player, owner, team_id, team, price, 
bid_date, trans_date, status", "def parse_emission_matrix(emissions_matrix_path):\r\n f = open(emissions_matrix_path)\r\n f.readline() # remove first line\r\n lines = f.readlines()\r\n k_counter = len(lines)\r\n emissions_mat = np.zeros([k_counter + NOT_MOTIF_STATES, ALPHABET_LEN])\r\n # B start\r\n emissions_mat[0, 0] = 1\r\n # B end\r\n emissions_mat[-1, -1] = 1\r\n # B_1\r\n emissions_mat[1, 1:-1] = UNIFORM_PROB\r\n # B_2\r\n emissions_mat[-2, 1:-1] = UNIFORM_PROB\r\n for k, line in enumerate(lines, 2): # go over every line\r\n emissions = line.split('\t')\r\n for letter in range(len(alphabet)): # create emissions for every S_i\r\n emissions_mat[k, letter + 1] = float(emissions[letter])\r\n return wrap_log(emissions_mat), k_counter", "def tables(cls):\n if not hasattr(cls, '_tables'):\n cls.parse_attributes()\n return cls._tables", "def get_table(new_arr, types, titles):\n try:\n table = agate.Table(new_arr, titles, types)\n return table\n except Exception as e:\n print e", "def read_table_data(self, table):\n data = []\n index = 0\n for row in table.rows:\n data.append([])\n for cell in row.cells:\n text_data = ''\n for para in cell.paragraphs:\n text_data += para.text.strip(' ')\n data[index].append(text_data)\n index += 1\n\n # trim unneeded rows in old & new reports\n if all('CAPA' in x for x in data[0]):\n self.table_data = data[2:]\n else:\n self.table_data = data[1:]\n # trim end of list\n self.table_data = [row[:5] for row in self.table_data]", "def read_bc_table(fname=datapath+\"/bolometric_corrections/bc_p04_ugriz.data\"):\n with open(fname,'r') as fp:\n lines = fp.readlines()\n s = lines[1].split()\n NTeff, Nlogg, NMH, Nfilt = int(s[0]), int(s[2]), int(s[5]), int(s[7])\n allBCs = {}\n\n Teffs = list(map(float, \"\".join(lines[2:5]).replace(\"\\n\",\" \").split()))\n loggs = list(map(float, lines[5].split()))\n Nlist = list(map(int, lines[6].split()))\n iline = 7\n allBCs = {}\n for ifilt in range(Nfilt):\n BCtable = np.zeros((np.sum(Nlist)*NMH,4))\n itable = 0\n for iMH in range(NMH):\n s = lines[iline].split()\n FeH = float(s[2]); aFe = float(s[5]); filter = s[9]\n iline += 1\n for ilogg,logg in enumerate(loggs):\n BCrow = []\n while len(BCrow) < Nlist[ilogg]:\n line = lines[iline]\n iline += 1\n BCrow += list(map(float, line.split()))\n for iTeff,Teff in enumerate(Teffs[0:Nlist[ilogg]]):\n BCtable[itable,0] = Teff\n BCtable[itable,1] = logg\n BCtable[itable,2] = FeH\n BCtable[itable,3] = BCrow[iTeff]\n itable += 1\n allBCs[filter] = BCtable\n return allBCs", "def parse_table(s, allow_wrap=False):\n r = []\n columns = []\n for l in s.splitlines():\n if not l.strip():\n columns = []\n continue\n if rx_header_start.match(l):\n # Column delimiters found. 
try to determine column's width\n columns = []\n x = 0\n while l:\n match = rx_col.match(l)\n if not match:\n break\n columns.append((x + len(match.group(1)),\n x + len(match.group(1)) + len(\n match.group(2))))\n x += match.end()\n l = l[match.end():]\n elif columns: # Fetch cells\n if allow_wrap:\n row = [l[f:t] for f, t in columns]\n if row[0].startswith(\" \") and r:\n for i, x in enumerate(row):\n r[-1][i] += x\n else:\n r += [row]\n else:\n r += [[l[f:t].strip() for f, t in columns]]\n if allow_wrap:\n return [[x.strip() for x in row] for row in r]\n else:\n return r", "def test_format_biom_table(self):\r\n generated_by = \"QIIME \" + get_qiime_library_version()\r\n self.assertTrue(generated_by in format_biom_table(self.biom1))", "def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,\r\n # goto: exp -> 1\r\n #1 EI: exp . $ ,\r\n # transitions: $ -> 3\r\n #2 exp: S . R,\r\n # transitions: R -> 4\r\n #3 EI: exp $ .\r\n #4 exp: S R .\r\n # reduce\r\n\r\n parsetable = _slr_build_parser_table(productionset0)\r\n self.assertEqual(len(parsetable), 4)", "def test_parse_classic_otu_table_file(self):\r\n data = \"\"\"#Full OTU Counts\r\n#OTU ID\tFing\tKey\tNA\tConsensus Lineage\r\n0\t19111\t44536\t42\tBacteria; Actinobacteria; Actinobacteridae; Propionibacterineae; Propionibacterium\r\n1\t1216\t3500\t6\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Lactobacillales; Lactobacillales; Streptococcaceae; Streptococcus\r\n2\t1803\t1184\t2\tBacteria; Actinobacteria; Actinobacteridae; Gordoniaceae; Corynebacteriaceae\r\n3\t1722\t4903\t17\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Staphylococcaceae\r\n4\t589\t2074\t34\tBacteria; Cyanobacteria; Chloroplasts; vectors\"\"\"\r\n data_f = StringIO(data)\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n [['Bacteria', 'Actinobacteria', 'Actinobacteridae', 'Propionibacterineae', 'Propionibacterium'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Lactobacillales',\r\n 'Lactobacillales',\r\n 'Streptococcaceae',\r\n 'Streptococcus'],\r\n ['Bacteria',\r\n 'Actinobacteria',\r\n 'Actinobacteridae',\r\n 'Gordoniaceae',\r\n 'Corynebacteriaceae'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Staphylococcaceae'],\r\n ['Bacteria', 'Cyanobacteria', 'Chloroplasts', 'vectors']])\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])", "def parse(self):\n mp = {}\n cells = self.row.find_all(\"td\")\n\n for cell in cells:\n if \"visible-mobile\" in cell.attrs[\"class\"]:\n continue\n title = self._get_cell_title(cell)\n content = cell.find(\"span\", class_=\"table-responsive__inner\")\n\n if title == \"name\":\n mp.update(self._parse_name(cell, content))\n elif title == \"fraktion\":\n fraktion, klub = self._parse_abbreviation(content)\n mp[\"political_affiliation\"] = klub + \" (\" + fraktion + \")\"\n elif title == \"wahlkreis\":\n mp[\"wahlkreis\"] = content.text.strip()\n elif title == \"bundesland\":\n mp[\"state\"] = self._parse_abbreviation(content)[1]\n\n return mp", "def construct_table(self):\n table_str = 
self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def mono_table():\n return Table(\n {\n \"obs_id\": [1, 1, 1, 1, 1, 2],\n \"event_id\": [1, 1, 1, 2, 2, 1],\n \"tel_id\": [1, 2, 3, 5, 7, 1],\n \"hillas_intensity\": [1, 2, 0, 1, 5, 9],\n \"hillas_width\": [0.1, 0.2, 0.1, 0.1, 0.2, 0.1] * u.deg,\n \"hillas_length\": 3 * ([0.1, 0.2, 0.1, 0.1, 0.2, 0.1] * u.deg),\n \"dummy_tel_energy\": [1, 10, 4, 0.5, 0.7, 1] * u.TeV,\n \"dummy_tel_is_valid\": [\n True,\n True,\n True,\n True,\n False,\n False,\n ],\n \"classifier_tel_prediction\": [1, 0, 0.5, 0, 0.6, 1],\n \"classifier_tel_is_valid\": [\n True,\n True,\n False,\n True,\n True,\n True,\n ],\n \"disp_tel_alt\": [58.5, 58, 62.5, 72, 74.5, 81] * u.deg,\n \"disp_tel_az\": [12.5, 15, 13, 21, 20, 14.5] * u.deg,\n \"disp_tel_is_valid\": [\n True,\n False,\n True,\n True,\n True,\n True,\n ],\n }\n )", "def _parse_biography(self):\n data = {}\n self.right_column = self.content.find(\"div\", class_=\"rechteSpalte60\")\n heading = self.right_column.find(\"h3\")\n # The page of the second president hides the details information\n # and displays a biography instead. By selecting the second div,\n # we get the hidden div containing the MPs details.\n if not heading:\n self.right_column = self.content.find_all(\"div\", class_=\"rechteSpalte60\")[1]\n data.update(self._parse_dob_job())\n data.update(self._parse_political_mandates())\n data.update(self._parse_political_posts())\n data.update(self._parse_work_history())\n data.update(self._parse_education())\n return data", "def parseTable(chart):\n rowelems = chart.find_all('tr')\n rows = [rowelem.find_all('td') for rowelem in rowelems]\n data = [[elem.get_text() for elem in row] for row in rows]\n return(data)", "def test_getBiomData(self):\r\n bt_string = '''{\r\n \"id\":null,\r\n \"format\": \"Biological Observation Matrix 0.9.1-dev\",\r\n \"format_url\": \"http://biom-format.org\",\r\n \"type\": \"OTU table\",\r\n \"generated_by\": \"QIIME revision XYZ\",\r\n \"date\": \"2011-12-19T19:00:00\",\r\n \"rows\":[\r\n {\"id\":\"GG_OTU_1\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_2\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_3\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_4\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_5\", \"metadata\":null}\r\n ],\r\n \"columns\": [\r\n {\"id\":\"Sample1\", \"metadata\":null},\r\n {\"id\":\"Sample2\", \"metadata\":null},\r\n {\"id\":\"Sample3\", \"metadata\":null},\r\n {\"id\":\"Sample4\", \"metadata\":null},\r\n {\"id\":\"Sample5\", \"metadata\":null},\r\n {\"id\":\"Sample6\", \"metadata\":null}\r\n ],\r\n \"matrix_type\": \"dense\",\r\n \"matrix_element_type\": \"int\",\r\n \"shape\": [5,6],\r\n \"data\": [[0,0,1,0,0,0],\r\n [5,1,0,2,3,1],\r\n [0,0,1,4,2,0],\r\n [2,1,1,0,0,1],\r\n [0,1,1,0,0,0]]\r\n }'''\r\n biom_data = parse_biom_table_str(bt_string)\r\n F = FunctionWithParams('')\r\n\r\n self.assertEqual(biom_data, F.getBiomData(biom_data))\r\n\r\n # write biom_data to temp location\r\n fd, bt_path = mkstemp(suffix='.biom')\r\n close(fd)\r\n biom_file = open(bt_path, 'w')\r\n biom_file.writelines(bt_string)\r\n biom_file.close()\r\n self.assertEqual(biom_data, F.getBiomData(bt_path))\r\n\r\n # cleanup\r\n remove(bt_path)", "def _extract_raw_table(self, expr):\n str_start = \"<table\"\n str_end = \"/table>\"\n\n ind_start = expr.find(str_start)\n assert ind_start >= 0\n\n ind_end = expr.find(str_end)\n 
assert ind_end >= 0\n\n return expr[ind_start: ind_end + len(str_end)]", "def getTableHead():\n return [\"Reporter\", \"Reportee\", \"aln. DKIM\", \"aln. SPF\", \"Disposition\",\n \"DKIM result\", \"SPF result\", \"msg#\", \"IP\", \"Country\",\n \"Report Begin\", \"Report End\", \"Report ID\"]", "def parse_table_from_output(output, policy_name):\n\n headers = []\n row = []\n\n separator = \"|\"\n\n for line in output.split(\"\\n\"):\n if not headers and line.startswith(\"Name\"):\n headers = [header.strip() for header in line.split(separator)]\n elif line.startswith(policy_name):\n if row:\n raise Exception(\"Multiple license policies detected - expected 1.\")\n row = [val.strip() for val in line.split(separator)]\n\n if not headers:\n raise Exception(\"Table not found in output!\")\n\n if not row:\n raise Exception(\"No license policies found - expected 1.\")\n\n return dict(zip(headers, row))", "def generate_table(self):\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! 
State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table", "def get_structure(self):\n if len(self.headers) == 0:\n self.restore_structure()\n columns = self.parse_declaration()\n result = \"<tablestructure>\\n\"\n result += \"\\t<table id=\\\"%s\\\" name=\\\"%s\\\">\\n\" % (self.id, self.name)\n result += \"\\t<header>\\n\"\n for header in self.headers:\n result += \"\\t\\t%s\\n\" % columns[header].to_xml()\n result += \"\\t</header>\\n\"\n result += \"\\t</table>\\n\"\n result += \"</tablestructure>\"\n return result", "def extract_data(self):\r\n self.parse()\r\n lst = []\r\n for i in self.table.text.split(\"\\n\")[3:]:\r\n if i != \"\" and bool(re.search(r'\\d', i)):\r\n lst.append(i.replace(u'\\xa0', ''))\r\n single = lst.pop(-3)\r\n lst = [i + \" \" + j for i, j in zip(lst[::2], lst[1::2])]\r\n lst.append(single)\r\n return lst[0:28]", "def get_table(html) -> None:\n\tre_table_class = re.compile('.*2iSP.*') # familiar regex template (str w/ '2iSP')\n\ttable_class = html.find('div', {'class': re_table_class})\n\ttable_lst = re.findall('[А-Я|A-Z][^А-Я|A-Z]*', table_class.text) # regex for capitals\n\n\tfor param in table_lst:\n\t\tif 'Осадки' in param:\n\t\t\tweather_dict['precipation'] = re.search(r'\\d+', param).group()\n\t\telif 'Ветер' in param:\n\t\t\tweather_dict['wind'] = re.search(r'\\d+', param).group()\n\t\telif 'Давление' in param:\n\t\t\tweather_dict['pressure'] = re.search(r'\\d+', param).group()\n\t\telif 'Восход' in param:\n\t\t\tweather_dict['sunrise'] = ':'.join(re.findall(r'\\d+', param))\n\t\telif 'Закат' in param:\n\t\t\tweather_dict['sunset'] = ':'.join(re.findall(r'\\d+', param))", "def scrapeTable():\n\tfrom bs4 import BeautifulSoup\n\tfrom urllib2 import urlopen\n\n\turl = \"https://en.wikipedia.org/wiki/List_of_the_largest_libraries_in_the_United_States\"\n\n\t# read the html content\n\thtml = urlopen(url).read()\n\n\t# create BeautifulSoup from html\n\ttable = BeautifulSoup(html)\n\n\t# find all table row elements\n\trows = table.findAll('tr')\n\n\tarr = []\n\tfor tr in rows:\n\n\t\t# find all columns\n\t\tcols = tr.findAll('td')\n\n\t\t# column text\n\t\tx = [c.text for c in cols]\n\n\t\t# filter the content\n\t\tif len(x)!=0:\n\t\t\ttry:\n\t\t\t\tint(x[0])\n\t\t\texcept Exception, e:\n\t\t\t\tbreak\n\n\t\t\tarr.append(x)\n\n\treturn arr", "def parse(self):\n for index in range(len(self.columns)):\n if index in self.columns:\n self.parsed_table[self.columns[index].col_name_str] = []\n if not self.table.linked_pages:\n return self.create_empty_table()\n for data_chunk in self.table.linked_pages:\n original_data = data_chunk\n parsed_data = parse_data_page_header(original_data, version=self.version)\n\n last_offset = None\n for rec_offset in parsed_data.record_offsets:\n # Deleted row - Just skip it\n if rec_offset & 0x8000:\n last_offset = rec_offset & 0xfff\n continue\n # Overflow page\n if rec_offset & 0x4000:\n # overflow ptr is 4 bits flags, 12 bits ptr\n rec_ptr_offset = rec_offset & 0xfff\n # update last pointer to pointer without flags\n last_offset = rec_ptr_offset\n # The ptr is the offset in the current data page. 
we get a 4 byte record_pointer from that\n overflow_rec_ptr = original_data[rec_ptr_offset:rec_ptr_offset + 4]\n overflow_rec_ptr = struct.unpack(\"<I\", overflow_rec_ptr)[0]\n record = self._get_overflow_record(overflow_rec_ptr)\n if record:\n self._parse_row(record)\n continue\n # First record is actually the last one - from offset until the end of the data\n if not last_offset:\n record = original_data[rec_offset:]\n else:\n record = original_data[rec_offset:last_offset]\n last_offset = rec_offset\n if record:\n self._parse_row(record)\n return self.parsed_table", "def _populate(self):\n\n # Assume the first word is what we want, and we can find well formed years\n # This sucks, but will work for these ones.\n # Roll on bibtex for citations in the CIM.\n\n citation_detail = self.doc.citation_detail\n author = citation_detail.split(',')[0]\n match = '([^\\w])19|20\\d\\d([^\\w])*?'\n m = re.search(match, citation_detail)\n if m:\n year = m.group(0)\n else:\n year = None\n\n # one error in existing es-doc content to be fixed:\n if 'van Vuuren DP' in author:\n author = 'van Vuuren'\n print 'applying vv fix'\n\n self.year = int(year)\n\n # We assume that this table will have entries which ne\n\n # I use the first three letters of a an authors name, and for\n # three or more authors, EA, and then the year for my bibtex citation string\n self.citeguess = author[0:3] + 'EA' + year[2:]\n # This is what will appear in the table:\n self.citestring = '%s et al. (%s)' % (author, year)\n # Keep this for a reference list for checking against the eventual bibtex reference list.\n self.text = citation_detail", "def _process(self, tables=None):\n\n if self._tables:\n return self._tables\n\n tables = tables or {}\n\n for row in self.url.generator.iter_rp:\n\n table_id_key = row['Table ID'].strip().lower()\n\n if not row['Line Number'].strip():\n if 'Universe' not in row['Table Title']:\n if table_id_key not in tables:\n tables[table_id_key] = Table(row['Table ID'], row['Table Title'].strip().title(),\n seq=row['Sequence Number'],\n startpos=int(row['Start Position']))\n else:\n tables[table_id_key].seq = row['Sequence Number']\n tables[table_id_key].startpos = row['Start Position']\n tables[table_id_key].subject = row['Subject Area']\n\n else:\n tables[table_id_key].universe = row['Table Title'].replace('Universe: ', '').strip()\n\n else: # column row\n try:\n\n line_no = int(row['Line Number'])\n\n if not line_no in tables[table_id_key].columns:\n tables[table_id_key].columns[line_no] = Column(row['Table ID'],\n f\"{row['Table ID']}_{line_no:03}\",\n line_no,\n description=row['Table Title'])\n else:\n tables[table_id_key].columns[line_no].description = row['Table Title']\n\n\n except ValueError as e:\n # Headings, which have fractional line numebrs\n # print(row)\n pass\n\n self._tables = tables\n\n return self._tables", "def parse_tables_from_html(html, md_file):\n soup = BeautifulSoup(html, features=\"lxml\")\n table_contents = \"\"\n for table in soup.select('table'):\n try:\n table_content = process_table(table)\n table_contents += table_content\n except:\n continue\n\n if not table_contents:\n print(\"NO VALID TABLE\")\n return\n\n # write to the file\n with codecs.open(md_file, mode='w', encoding='utf-8') as file:\n file.write(table_contents)\n print(\"The Table is saved in\" + md_file)", "def _html_table(self):\n return '</i>'.join(APtable._repr_html_(self).split('</i>')[1:])", "def parse_mbp_data(self, url):\n body = self.get_content(url)\n if body is None:\n return None\n\n doc_body = 
pq(body)\n content = doc_body('#main-container').html()\n lines = content.splitlines()\n\n record = {\n 'url': url,\n 'price': self.get_price(lines),\n 'year': self.get_year(lines),\n 'bat_count': self.search_int(\n r'循環\\D*(\\d+)',\n content,\n 1\n ),\n 'screen': self.search_int(\n r'(1\\d{1})[吋\"\\']',\n content,\n 1\n ),\n 'cpu': self.search_string(\n r'i5|i7',\n content\n ),\n 'ram': self.search_int(\n r'(8|16)G',\n content,\n 1\n ),\n 'hdd': self.search_int(\n r'(128|256|512)G',\n content,\n 1\n ),\n }\n return record", "def test_parse_classic_otu_table(self):\r\n data = self.otu_table1\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n self.expected_lineages1)\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])\r\n\r\n\r\n # test that the modified parse_classic performs correctly on OTU tables\r\n # without leading comments\r\n data = self.otu_table_without_leading_comment\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n sams = ['let-7i', 'miR-7', 'miR-17n', 'miR-18a', 'miR-19a', 'miR-22',\r\n 'miR-25', 'miR-26a']\r\n otus = ['A2M', 'AAAS', 'AACS', 'AADACL1']\r\n vals = array([\r\n [-0.2, 0.03680505, 0.205, 0.23, 0.66, 0.08, -0.373, 0.26],\r\n [-0.09, -0.25, 0.274, 0.15, 0.12, 0.29, 0.029, -0.1148452],\r\n [0.33, 0.19, 0.27, 0.28, 0.19, 0.25, 0.089, 0.14],\r\n [0.49, -0.92, -0.723, -0.23, 0.08, 0.49, -0.386, -0.64]])\r\n exp = (sams, otus, vals, []) # no lineages\r\n # because float comps in arrays always errors\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n self.assertEqual(obs[3], exp[3])\r\n self.assertTrue(all((obs[2] == exp[2]).tolist()))", "def create_table_from_table_metric(table_metric: TableMetric) -> Text:\n\n supported_metric = {\n TableMetric.CONTINGENCY_TABLE,\n TableMetric.TABLE_DESCRIPTIVE\n }\n\n assert table_metric.name in supported_metric\n\n table_template = template.TABLE_TEMPLATE\n\n headers = ['&#x200B;'] + list(table_metric.column_indexes)\n header_string = \"|\".join(headers)\n header_separator = \"|\".join([\":-----:\" for i in range(len(headers))])\n\n table_content = []\n\n for row in table_metric.rows:\n # row header is in BOLD\n row_header = template.BOLD.format(\n content=str(row.row_index).strip())\n row_values = [row_header] + [formatting.numeric_formatting(item.value)\n for item in row.cells]\n table_content.append(\"|\".join(row_values))\n\n table_content_string = \"\\n\".join(table_content)\n\n return table_template.format(\n header=header_string,\n header_separator=header_separator,\n table_content=table_content_string\n )", "def read_table(self):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"SELECT title FROM film;\"\n cur.execute(sql)\n return cur.fetchall()\n except:\n print(\"Cannot read from table!\")\n\n db.close()", "def read_mumax3_table(filename):\n \n table = pd.read_csv(filename, sep='\\t')\n table.columns = ' '.join(table.columns).split()[1::2]\n \n return table", "def load_market_matrix():\n\tfnm = \"../datasets/bbc/bbc.mtx\"\n \tconverters = {\"termid\": toInt, \"docid\": toInt, \"freq\":toFloat}\n \tX = pandas.read_table(fnm, header=None, sep=\" \", skiprows=2, names= [\"termid\", \"docid\", \"freq\"], converters=converters)\n \tX[\"termid\"] = 
X[\"termid\"] - 1\n\tX[\"docid\"] = X[\"docid\"] - 1\n\treturn X", "def read_MCC_results(table):\n f = open(table)\n lines = f.readlines()\n lines = lines[1:] # discard header line\n f.close()\n t = np.zeros(len(lines))\n predicts = np.zeros((len(lines), 3))\n actuals = np.zeros((len(lines), 3))\n line_num = 0\n for line in lines:\n fields = line.split()\n t[line_num] = Time.DateTime(fields[0]).secs\n predicts[line_num, :] = [float(field) for field in fields[1:4]]\n actuals[line_num, :] = [float(field) for field in fields[5:8]]\n line_num = line_num + 1\n return t, predicts, actuals", "def message_table(message):\r\n table = Table(['property', 'value'])\r\n table.align['property'] = 'r'\r\n table.align['value'] = 'l'\r\n\r\n table.add_row(['id', message['id']])\r\n table.add_row(['initial_entry_time', message['initial_entry_time']])\r\n table.add_row(['visibility_delay', message['visibility_delay']])\r\n table.add_row(['visibility_interval', message['visibility_interval']])\r\n table.add_row(['fields', message['fields']])\r\n return [table, message['body']]", "def parse_populations(\n source, strict=True, encoding=\"utf8\", base64_metadata=True, table=None\n):\n sep = None\n if strict:\n sep = \"\\t\"\n if table is None:\n table = tables.PopulationTable()\n # Read the header and find the indexes of the required fields.\n header = source.readline().rstrip(\"\\n\").split(sep)\n metadata_index = header.index(\"metadata\")\n for line in source:\n tokens = line.rstrip(\"\\n\").split(sep)\n if len(tokens) >= 1:\n metadata = tokens[metadata_index].encode(encoding)\n if base64_metadata:\n metadata = base64.b64decode(metadata)\n table.add_row(metadata=metadata)\n return table", "def read_table(self, table):\n return READ_TABLE(table, db=self.db)", "def tables():\n return {\n \"MAT24_STD_OCTAD\" : STD_OCTAD,\n }", "def parse_table(self, table_name):\n table_offset = self.catalog.get(table_name)\n if not table_offset:\n logging.error(f\"Could not find table {table_name} in DataBase\")\n return\n table_offset = table_offset * self.page_size\n table = self._tables_with_data.get(table_offset)\n if not table:\n table_def = self._table_defs.get(table_offset)\n if table_def:\n table = TableObj(offset=table_offset, val=table_def)\n logging.info(f\"Table {table_name} has no data\")\n else:\n logging.error(f\"Could not find table {table_name} offset {table_offset}\")\n return\n access_table = AccessTable(table, self.version, self.page_size, self._data_pages, self._table_defs)\n return access_table.parse()", "def _parse_table(self): # type: (Optional[str]) -> Tuple[Key, Item]\n indent = self.extract()\n self.inc() # Skip opening bracket\n\n is_aot = False\n if self._current == \"[\":\n if not self.inc():\n raise self.parse_error(UnexpectedEofError)\n\n is_aot = True\n\n # Key\n self.mark()\n while self._current != \"]\" and self.inc():\n pass\n\n name = self.extract()\n key = Key(name, sep=\"\")\n\n self.inc() # Skip closing bracket\n if is_aot:\n # TODO: Verify close bracket\n self.inc()\n\n cws, comment, trail = self._parse_comment_trail()\n\n result = Null()\n values = Container()\n\n while not self.end():\n item = self._parse_item()\n if item:\n _key, item = item\n if not self._merge_ws(item, values):\n values.append(_key, item)\n else:\n if self._current == \"[\":\n _, name_next = self._peek_table()\n\n if self._is_child(name, name_next):\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n\n # Picking up any sibling\n 
while not self.end():\n _, name_next = self._peek_table()\n\n if not self._is_child(name, name_next):\n break\n\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n else:\n table = Table(\n values, Trivia(indent, cws, comment, trail), is_aot\n )\n\n result = table\n if is_aot and (\n not self._aot_stack or name != self._aot_stack[-1]\n ):\n result = self._parse_aot(table, name)\n\n break\n else:\n raise self.parse_error(\n InternalParserError,\n (\"_parse_item() returned None on a non-bracket character.\"),\n )\n\n if isinstance(result, Null):\n result = Table(values, Trivia(indent, cws, comment, trail), is_aot)\n\n return key, result", "def _get_table(self):\n\t\treturn self._table", "def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df", "def build(f, font, tableTag=None):\n lines = Tokenizer(f)\n return parseTable(lines, font, tableTag=tableTag)", "def isccp4table(self, text):\n #\n # See e.g. http://www.ccp4.ac.uk/dist/html/loggraphformat.html\n # for format of TABLES\n #\n # Note that this regular expression accommodates slight deviations\n # by making the \"closing\" \":\" of the $TABLE line optional.\n # This is done for consistency with loggraph's behaviour.\n #\n # Set up regular expression for entire table\n # This is the \"strict\" form of the table\n table = self.compile(\n \"isccp4table\",\n r\" *\\$TABLE ?:([^:]*):?[ \\n]+\\$(GRAPHS|SCATTER)[^:]*(:[^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$\",\n ).search(text)\n result = dict()\n if table:\n result[\"rawtable\"] = table.group(0)\n result[\"title\"] = table.group(1).strip()\n result[\"type\"] = table.group(2).strip()\n result[\"graphs\"] = table.group(3)\n result[\"columns\"] = table.group(4)\n result[\"text\"] = table.group(5)\n result[\"data\"] = table.group(6)\n result[\"nlines\"] = table.group(0).count(\"\\n\")\n return result\n # If there wasn't a match then try a simpler match\n # This relaxes some of the rules in the format definintion\n table = self.compile(\n \"isccp4simplertable\",\n r\" *\\$TABLE ?:([^\\n]*)\\n+\\$(GRAPHS|SCATTER)[^:]*(:[^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$\",\n ).search(text)\n if table:\n result[\"rawtable\"] = table.group(0)\n result[\"title\"] = table.group(1).strip()\n result[\"type\"] = table.group(2).strip()\n result[\"graphs\"] = table.group(3)\n result[\"columns\"] = table.group(4)\n result[\"text\"] = table.group(5)\n result[\"data\"] = table.group(6)\n result[\"nlines\"] = table.group(0).count(\"\\n\")\n return result\n return result", "def __init__(self, tabletext=\"\"):\n\n # Table attributes\n self.__title = \"\"\n self.__type = \"GRAPHS\" # Default to GRAPHS\n self.__graphs = \"\"\n self.__columns = \"\"\n self.__text = \"\"\n self.__data = \"\"\n # Derived data\n self.__graph_list = []\n self.__column_list = []\n # Indicate the the object has been populated\n self.__table_parse_error = False\n self.__nonzero = False\n # The \"raw\" table data from the log file\n self.__rawtable = \"\"\n # Attempt to populate the table\n if tabletext:\n self.__rawtable = tabletext\n if not 
self.__buildtable(tabletext):\n # Failed to extract table\n # If it could be a title then use this\n # instead\n if str(tabletext).count(\"\\n\") == 0:\n self.settitle(tabletext)", "def convert_table(mkd):\n\t\n\tmd_table_codes = re.findall(r\".*\\|.*\\n.*\\-.*(?:\\n.*\\|.*)*\", mkd, re.M)\n\tfor md_code in md_table_codes:\n\t\t\n\t\tmd_rows = re.findall(r\"(.*\\|.*)\", md_code, re.M)\n\t\theader = md_rows.pop(0)\n\t\tcolumn_count = md_rows.pop(0).count(\"-\")\n\n\t\ttex_code = \"\\\\begin{tabular}{|\"+\"l|\"*column_count+\"}\\n\\hline\\n\"\n\t\ttex_code += header.strip(\" |\").replace(\"|\", \"&\")+\" \\\\\\\\\\n\\hline\\n\"\n\t\tfor row in md_rows:\n\t\t\ttex_code += row.strip(\" |\").replace(\"|\", \"&\")+\" \\\\\\\\\\n\"\n\t\ttex_code += \"\\hline\\n\\end{tabular}\"\n\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\treturn mkd", "def WriteBody( self ):\n\n self.ParseHeader( sys.stdin.readline() )\n\n self.CreateTable()\n\n num_lines = 0\n\n total_lines = 0\n sys.stderr.write(\"parsing..\")\n \n while 1:\n line = sys.stdin.readline()\n \n if not line: break\n if line[0] == \"#\": continue\n if line[0:2] == \"//\": break\n \n if not (total_lines % 100) and total_lines > 0:\n sys.stderr.write( \".%i..\" % total_lines)\n print self.table \n self.table.body = []\n\n total_lines = total_lines + 1\n\n (columns) = string.split( line[:-1], \"\\t\" )\n \n if not columns:\n break\n\n if self.mUseIndex:\n col_string = [str(total_lines),] + self.ParseColumns( columns )\n else:\n col_string = self.ParseColumns( columns )\n \n self.table.body.append( col_string ) \n \n print self.table\n \n sys.stderr.write(\"done\\n\")", "def make_table(self, content):\n html = '<table class=\"table table-condensed\">'\n\n # Check for list or tuple\n if type(content) is list or type(content) is tuple:\n if len(content) > 0:\n # If first item in list is dictionary continue\n if type(content[0]) is dict:\n # Make table header for every key\n html += '<thead><tr>'\n for key in content[0].keys():\n html += '<th>' + key + '</th>'\n html += '</tr></thead>'\n\n # Make table body\n html += '<tbody>'\n for dictonary in content:\n # New table row for every dict item in list\n html += '<tr>'\n # New column for every value in dictionary\n for value in dictonary.values():\n html += '<td>' + str(value) + '</td>'\n html += '</tr>'\n html += '</tbody>'\n else:\n html += 'No content available'\n\n html += '</table>'\n\n self.table = html\n\n return html", "def parse_conll_eval_table(self, fp):\n with open(fp, 'r') as tbl:\n tbl.readline()\n for row in tbl:\n clean_row = re.sub('([\\\\\\\\%]|hline)', '', row)\n cells = [x.strip() for x in clean_row.split('&')]\n self[cells[0]] = {\n 'precision': float(cells[1]),\n 'recall': float(cells[2]),\n 'fscore': float(cells[3])\n }\n assert self.keys(), 'Probably an empty/non-existant file.'\n self[self._total_name] = self['Overall']\n del self['Overall']", "def converttable(tablecode):\n table = etree.XML(tablecode)\n rows = iter(table)\n headers = [col.text for col in next(rows)]\n data = []\n for row in rows:\n values = [col.text for col in row]\n debugprint(dict(zip(headers, values)), \"RAW JSON\")\n data.append(dict(zip(headers, values)))\n return data", "def get_TABLE_info():\n defalt_width = 300\n defalt_height = 500\n defalt_thickness = 10\n\n message = 'Put width of table. (mm : int) (width >= 210)'\n width = rs.GetInteger(message, defalt_width, None, None)\n\n message = 'Put height of table. 
(mm : int) (height >= 250)'\n height = rs.GetInteger(message, defalt_height, None, None)\n\n message = 'Put thickness of material (1layer). (mm : int)'\n t_m = rs.GetReal(message, defalt_thickness, None, None)\n\n TABLE_info = [width, height, t_m]\n\n info = [\"width : %s\" % width, \"height : %s\" % height, \"thickness of material : %s\" % t_m]\n print (info)\n\n return TABLE_info", "def get_tables():\n page_html = requests.get(conf.PAGE_URL).text\n soup = BeautifulSoup(page_html, 'html.parser')\n tables = soup.find_all(\"table\", {\"class\": conf.TABLE_CLASS_NAME})\n if not tables:\n raise ValueError(\"Table class not found\")\n return tables", "def read_table(self, table_type):\n\n if table_type == 'hash':\n entry_class = MPQHashTableEntry\n elif table_type == 'block':\n entry_class = MPQBlockTableEntry\n else:\n raise ValueError(\"Invalid table type.\")\n\n table_offset = self.header['%s_table_offset' % table_type]\n table_entries = self.header['%s_table_entries' % table_type]\n key = self._hash('(%s table)' % table_type, 'TABLE')\n\n self.file.seek(table_offset + self.header['offset'])\n data = self.file.read(table_entries * 16)\n data = self._decrypt(data, key)\n\n def unpack_entry(position):\n entry_data = data[position*16:position*16+16]\n return entry_class._make(\n struct.unpack(entry_class.struct_format, entry_data))\n\n return [unpack_entry(i) for i in range(table_entries)]", "def _get_table_info(self):\n highestbet = self.highestBetNotFold(),\n bigb =self.bigBlind() if self._game_state == GAME_STATE_PRE_FLOP and not self.inSmallBlindPosition() else 0\n return [\"blinds: small:%r big:%r\" % (self.small_blind, self.big_blind),\n \"buy_ins: min:%r max:%r\" % (self.min_buy_in, self.max_buy_in),\n \"bs: %r\" % self.betting_structure,\n \"highestbet = %r\" % highestbet,\n \"bigb = %r\" % bigb,]", "def split_md_table(string: str) -> Tuple[str, Tuple[str, ...], Tuple[str, ...], str, str]:\n err = 'tabulate returned GFM pipe table with invalid first two lines: {}'\n split = string.split('\\n', 2)\n line_sep = '\\r\\n' if split[0][-1] == '\\r' else '\\n'\n def table_body(pos: int) -> str: return '\\n'.join(split[pos:])\n lines = list(map(lambda s: s.rstrip('\\r'), split[:2]))\n\n md_headers, headers, formats = '', (), None\n for line in reversed(lines):\n if formats:\n match = re.match(r'^\\|.*[^\\\\]\\|$', line)\n headers = tuple(map(\n lambda s: s.strip(' '),\n re.split(r'(?<=[^\\\\])\\|', line[1:-1])\n ))\n if match and len(headers) == len(formats):\n md_headers = line\n else:\n raise TabulateHelperError(err.format(lines))\n elif re.match(r'^\\|:?-+:?(\\|:?-+:?)*\\|$', line):\n formats = tuple(line[1:-1].split('|'))\n if formats:\n return md_headers, headers, formats, table_body(pos=2 if headers else 1), line_sep\n else:\n raise TabulateHelperError(err.format(lines))", "def get_table_data(table):\n pattern_body = re.compile(r'(?ims)\\<tbody\\>(.*?)\\</tbody\\>')\n pattern_rows = re.compile(r'(?ims)\\<tr\\>(.*?)\\</tr\\>')\n pattern_cols = re.compile(r'(?ims)\\<td.*?\\>([^<]+?)\\<.*?/td\\>')\n\n body = pattern_body.findall(table)[0]\n return [\n list(map(lambda x: html.unescape(x), pattern_cols.findall(row)[:3]))\n for row in pattern_rows.findall(body)]", "def table(self):\r\n return self._table", "def parseInput(toParse):\n splitified = toParse.split('--------')\n statesPath = splitified[0].rstrip().strip()\n availableStates = len(splitified[1].rstrip().strip().split())\n probMatrix = splitified[2].rstrip().strip().splitlines()\n\n return(probMatrix, statesPath, availableStates)", 
"def test_parse_taxa_summary_table(self):\r\n actual = parse_taxa_summary_table(self.taxa_summary1.split('\\n'))\r\n self.assertItemsEqual(actual[0], self.taxa_summary1_expected[0])\r\n self.assertItemsEqual(actual[1], self.taxa_summary1_expected[1])\r\n assert_almost_equal(actual[2], self.taxa_summary1_expected[2])", "def table(self):\n return self.t", "def test_format_otu_table(self):\r\n a = array([[1, 2, 3],\r\n [4, 5, 2718281828459045]])\r\n samples = ['a', 'b', 'c']\r\n otus = [1, 2]\r\n taxa = ['Bacteria', 'Archaea']\r\n res = format_otu_table(samples, otus, a)\r\n # confirm that parsing the res gives us a valid biom file with\r\n # expected observation and sample ids\r\n t = parse_biom_table(res.split('\\n'))\r\n self.assertEqual(t.ObservationIds, ('1', '2'))\r\n self.assertEqual(t.SampleIds, ('a', 'b', 'c'))", "def get_table(self, percent=False):\n TABLE_SEPARATOR = '=>'\n\n d = self.data\n headers = self.headers\n group_numbers = self.totals\n\n logging.debug('print_table:')\n logging.debug(d)\n # print header\n csv_header = ['group', 'students'] + [h.label for h in headers] + [h.label+'%' for h in headers]\n\n # print rows and calc totals\n logging.debug(d)\n logging.debug([h.fullname for h in headers])\n\n csv_body = []\n for gr, prep in self.cfg.preps.items():\n csv_body.append( [f'{gr} - {prep}'] + self.data_group(gr, get_student_numbers=True, add_percentes=True))\n\n csv_footer = ['all'] + self.data_group_all(get_student_numbers=True, add_percentes=True)\n\n print(csv_header, csv_body, csv_footer)\n return (csv_header, csv_body, csv_footer)", "def get_table(self):\n result_table = [row[:] for row in self.table] # Clone the result table\n\n # Htmlise all columns containing images\n for col_num in self.image_column_nums():\n for row_num in range(1, len(result_table)):\n result_table[row_num][col_num] = self.htmlise(result_table[row_num][col_num])\n\n # Append images\n for ((col,row), image_list) in self.images.items():\n for image in image_list:\n try:\n result_table[row][col] += \"<br>\" + image\n except IndexError:\n pass # Testing must have aborted so discard image\n\n return result_table", "def _parse_qstat_tabular(qstat_output):\n def parse_qstat_record(record):\n name, state_code = map(str.strip, record.split('|'))\n return name, Torque._job_states[state_code]\n\n jobs = qstat_output.splitlines()\n parsed = {}\n # @TODO: think of catch-and-log parsing exceptions\n if jobs and (len(jobs) > 1 or jobs[0] != ''):\n parsed = dict(map(parse_qstat_record, jobs))\n\n return parsed", "def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table", "def parse_ipac_table(table_file):\n file_lines = table_file.readlines()\n if len(file_lines) < 5:\n raise ValueError(\"No images found!\")\n \n columns = file_lines[0].replace(\"|\",\" \").split()\n \n # Each row in the table starting at index 4 is metadata for a new image / observation\n metadatas = []\n for image_data in file_lines[4:]:\n line = image_data.replace(\"|\",\" \").split()\n obsdate_idx = columns.index(\"obsdate\")\n tmp = line[obsdate_idx] + \" \" + line[obsdate_idx+1]\n del line[obsdate_idx+1]\n line[obsdate_idx] = tmp\n metadatas.append(dict(zip(columns, line)))\n \n return metadatas", "def 
rawtable(self):\n return self.__rawtable", "def _parse_qstat_tabular(qstat_output):\n def parse_qstat_record(record):\n name, state_code = map(str.strip, record.split('|'))\n return name, Torque._job_states[state_code]\n\n jobs = qstat_output.splitlines()\n parsed = {}\n # @TODO: think of catch-and-log parsing exceptions\n if jobs and (len(jobs) > 1 or jobs[0] is not ''):\n parsed = dict(map(parse_qstat_record, jobs))\n\n return parsed", "def _pdread2astrotable(csvgzdir):\n df = pd.read_csv(csvgzdir)\n tb = Table.from_pandas(df)\n return tb", "def read_biology(url, header):\n req = requests.get(url)\n values = BeautifulSoup(req.content, 'html.parser').body.text\n\n df = pd.read_csv(StringIO(values), sep=';', header=None,\n index_col=False, names=header)\n res_dic = {}\n for i, row in df.iterrows():\n key = row['Analyse'].replace(' ', '_') # ou analyse?\n value = row['Resultat']\n\n res_dic[key] = value\n\n return res_dic", "def setUp(self):\r\n self.bt_str = '{\"id\": \"None\",\"format\": \"Biological Observation Matrix 1.0.0\",\"format_url\": \"http://biom-format.org\",\"type\": \"OTU table\",\"generated_by\": \"BIOM-Format 1.2.0-dev\",\"date\": \"2013-11-28T16:50:27.438635\",\"matrix_type\": \"sparse\",\"matrix_element_type\": \"float\",\"shape\": [10, 10],\"data\": [[0,0,22.0],[0,4,15.0],[0,5,74.0],[0,6,34.0],[0,7,76.0],[0,9,48.0],[1,0,70.0],[1,2,30.0],[1,3,37.0],[1,4,24.0],[1,5,77.0],[1,6,71.0],[1,7,58.0],[1,8,43.0],[2,1,2.0],[2,2,90.0],[2,4,48.0],[2,5,54.0],[2,7,22.0],[2,8,91.0],[3,0,80.0],[3,1,86.0],[3,3,78.0],[3,7,12.0],[4,4,68.0],[4,7,76.0],[4,8,57.0],[5,2,23.0],[5,5,66.0],[5,7,51.0],[5,9,77.0],[6,0,31.0],[6,1,47.0],[6,2,16.0],[6,4,96.0],[6,5,9.0],[7,0,17.0],[7,2,52.0],[7,5,11.0],[7,7,22.0],[8,1,74.0],[8,2,7.0],[8,4,80.0],[8,7,59.0],[9,0,6.0],[9,2,34.0],[9,3,63.0],[9,4,77.0],[9,5,8.0],[9,6,38.0],[9,7,73.0],[9,8,98.0],[9,9,45.0]],\"rows\": [{\"id\": \"o1 \", \"metadata\": {\"taxonomy\": [\"bug1\"]}},{\"id\": \"o2\", \"metadata\": {\"taxonomy\": [\"bug2\"]}},{\"id\": \"o3\", \"metadata\": {\"taxonomy\": [\"bug3\"]}},{\"id\": \"o4\", \"metadata\": {\"taxonomy\": [\"bug4\"]}},{\"id\": \"o5\", \"metadata\": {\"taxonomy\": [\"bug5\"]}},{\"id\": \"o6\", \"metadata\": {\"taxonomy\": [\"bug6\"]}},{\"id\": \"o7\", \"metadata\": {\"taxonomy\": [\"bug7\"]}},{\"id\": \"o8\", \"metadata\": {\"taxonomy\": [\"bug8\"]}},{\"id\": \"o9\", \"metadata\": {\"taxonomy\": [\"bug9\"]}},{\"id\": \"o10\", \"metadata\": {\"taxonomy\": [\"bug10\"]}}],\"columns\": [{\"id\": \"s1\", \"metadata\": null},{\"id\": \"s2\", \"metadata\": null},{\"id\": \"s6\", \"metadata\": null},{\"id\": \"s4\", \"metadata\": null},{\"id\": \"s5\", \"metadata\": null},{\"id\": \"s10\", \"metadata\": null},{\"id\": \"s7\", \"metadata\": null},{\"id\": \"s8\", \"metadata\": null},{\"id\": \"s9\", \"metadata\": null},{\"id\": \"s3\", \"metadata\": null}]}'\r\n self.mf_ordered = ['#SampleIDt\\thsid\\tfield\\tval',\r\n 's1\\t1\\tf1\\t6.1',\r\n 's3\\t1\\tf1\\t0.0',\r\n 's7\\t1\\tf1\\t14.2',\r\n 's9\\t1\\tf2\\t6.5',\r\n 's2\\t1\\tf2\\t21',\r\n 's6\\t2\\tf3\\t0.3',\r\n 's5\\t2\\tf2\\t9.1',\r\n 's4\\t2\\tf3\\t0.8',\r\n 's8\\t2\\tf2\\t5.0',\r\n 's10\\t2\\tf2\\t11.']\r\n self.mf_non_ordered = ['#SampleIDt\\thsid\\tval',\r\n 'Sample9\\t1\\t6.5',\r\n 'Sample8\\t2\\t5.0',\r\n 'Sample1\\t1\\t6.1',\r\n 'Sample2\\t1\\t21',\r\n 'Sample6\\t2\\t0.3',\r\n 'Sample5\\t2\\t9.1',\r\n 'Sample7\\t1\\t14.2',\r\n 'Sample4\\t2\\t0.8',\r\n 'Sample10\\t2\\t11.',\r\n 'Sample3\\t1\\t0.0']\r\n self.cvs1 = ['1', '2']\r\n self.mds1 = [[6.1, 0.0, 14.2, 6.5, 21], [.3, 
9.1, .8, 5.0, 11.]]\r\n self.otus1 = [\r\n array([[22., 48., 34., 0., 0.],\r\n [70., 0., 71., 43., 0.],\r\n [0., 0., 0., 91., 2.],\r\n [80., 0., 0., 0., 86.],\r\n [0., 0., 0., 57., 0.],\r\n [0., 77., 0., 0., 0.],\r\n [31., 0., 0., 0., 47.],\r\n [17., 0., 0., 0., 0.],\r\n [0., 0., 0., 0., 74.],\r\n [6., 45., 38., 98., 0.]]),\r\n array([[0., 15., 0., 76., 74.],\r\n [30., 24., 37., 58., 77.],\r\n [90., 48., 0., 22., 54.],\r\n [0., 0., 78., 12., 0.],\r\n [0., 68., 0., 76., 0.],\r\n [23., 0., 0., 51., 66.],\r\n [16., 96., 0., 0., 9.],\r\n [52., 0., 0., 22., 11.],\r\n [7., 80., 0., 59., 0.],\r\n [34., 77., 63., 73., 8.]])]" ]
[ "0.5795739", "0.56989133", "0.5626127", "0.5613763", "0.55284244", "0.55272263", "0.55172074", "0.5440883", "0.5421252", "0.53791916", "0.53787607", "0.53451467", "0.53356135", "0.5328347", "0.5308771", "0.5279552", "0.52757406", "0.52719027", "0.5256095", "0.5256095", "0.52544194", "0.5246638", "0.5244476", "0.5236445", "0.52326626", "0.52254623", "0.52086556", "0.51702523", "0.5165593", "0.5159902", "0.5155478", "0.5137353", "0.5125742", "0.51252955", "0.51235604", "0.51215005", "0.51065093", "0.5102397", "0.50777656", "0.5073955", "0.5069654", "0.50662863", "0.5061748", "0.5058527", "0.5055843", "0.5054431", "0.50486267", "0.50406474", "0.50349665", "0.5023999", "0.5012887", "0.50027573", "0.50008255", "0.49992484", "0.49958986", "0.49907944", "0.4987184", "0.49863422", "0.49560782", "0.49503097", "0.4949693", "0.49490762", "0.49470997", "0.49382365", "0.49261802", "0.49179325", "0.4914772", "0.49126565", "0.49114335", "0.49087894", "0.49083355", "0.48874763", "0.48837888", "0.4882316", "0.48786104", "0.4875795", "0.48733112", "0.48672438", "0.48630866", "0.4857618", "0.48554888", "0.48401788", "0.48384422", "0.4836429", "0.4834497", "0.48333594", "0.4831966", "0.4831111", "0.4820188", "0.4816742", "0.48111683", "0.48087716", "0.4805057", "0.48042473", "0.48005772", "0.47975966", "0.4795735", "0.47944355", "0.47943327", "0.47938794" ]
0.6301872
0
Return a BIOM table and the original open filehandle as a tuple. Useful when additional computation needs to be performed on the file contents, such as an MD5 sum.
def load_biom_table_with_file_contents(biom_f): table = parse_biom_table(biom_f) if hasattr(biom_f, 'seek'): biom_f.seek(0) return table, biom_f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file(self) -> tuple:\r\n hash_md5 = hashlib.md5()\r\n with open(self.yara_base_file, \"rb\") as f:\r\n file_map = f.read()\r\n get_file_dict = get_matches(self, file_map)\r\n hash_md5.update(file_map)\r\n return hash_md5.hexdigest(), get_file_dict", "def __enter__(self) -> Tuple[FileObj]:\n file_obj: FileObj = []\n for obj in self.__dict__.values():\n if hasattr(obj, 'close'):\n file_obj.append(obj)\n return tuple(file_obj)", "def md5_tuple(file_path):\n return file_path, md5_hash(file_path)", "def _read_file(self) -> Tuple[np.ndarray, h5py.File]:\n assert os.path.exists(self.datafile)\n LOGGER.info(f\"Found datafile: {self.datafile}\")\n\n # get ELM indices from datafile\n hf = h5py.File(self.datafile, \"r\")\n LOGGER.info(f\"Number of ELM events in the datafile: {len(hf)}\")\n elm_index = np.array([int(key) for key in hf], dtype=np.int32)\n return elm_index, hf", "def readFileTable(f, header):\n version, dataOffset, archiveFileCount, fileTableLength, endianness, fileCount = header\n \n def readFileRecords(f):\n for i in range(fileCount):\n recordBytes = f.read(FILE_RECORD_LENGTH)\n path, offset, size, endianness, archiveFileIndex = struct.unpack(\"<256sLLLL\", recordBytes)\n path, _ = path.decode('ascii').split(\"\\0\", 1)\n yield (path, offset, size, endianness, archiveFileIndex)\n \n return list(readFileRecords(f))", "def read(filename):\n with open(filename, 'rb') as filehandle:\n header = read_header(filehandle)\n data = read_data(header, filehandle, filename)\n return (data, header)", "def get_file_contents(db_cursor):\n\n db_cursor.execute(\"\"\"SELECT * FROM data\"\"\")\n db_rows = db_cursor.fetchall()\n return {row[0]: row[1] for row in db_rows if row != []}", "def open_db(self):\n # if it is not open, open it.\n if self.h5file.isopen is True:\n return self.h5file\n else:\n self.h5file = tb.open_file(filename=self.filepath, mode='a')\n assert self.h5file.isopen\n return self.h5file", "def c_open(file):\n data = cassy.CassyDaten(file)\n t = data.messung(1).datenreihe(\"t\").werte\n I = data.messung(1).datenreihe(\"I_A2\").werte\n U = data.messung(1).datenreihe(\"U_B2\").werte\n return t, U, I", "def getBiomData(self, data):\r\n try:\r\n if isfile(data):\r\n otu_table = parse_biom_table(qiime_open(data, 'U'))\r\n return otu_table\r\n except TypeError:\r\n if any([type(data) in\r\n [DenseFunctionTable,\r\n DenseGeneTable,\r\n DenseMetaboliteTable,\r\n DenseOTUTable,\r\n DenseOrthologTable,\r\n DensePathwayTable,\r\n DenseTable,\r\n DenseTaxonTable,\r\n FunctionTable,\r\n GeneTable,\r\n MetaboliteTable,\r\n OTUTable,\r\n OrthologTable,\r\n PathwayTable,\r\n SparseFunctionTable,\r\n SparseGeneTable,\r\n SparseMetaboliteTable,\r\n SparseOTUTable,\r\n SparseOrthologTable,\r\n SparsePathwayTable,\r\n SparseTable,\r\n SparseTaxonTable]]):\r\n otu_table = data\r\n return otu_table\r\n else:\r\n raise TypeError('Data is neither a path to a biom table or a' +\r\n ' biom table object.')", "def read(self) -> Tuple[Level, Any]:\n with DFReader(open(self.fullpath, \"rb\")) as reader:\n dflevel, region_offsets = reader.read_level_ex()\n region_data = reader.read_bytes(region_offsets[-1])\n return dflevel, (region_offsets, region_data)", "def _get_file(self, path: str) -> Tuple[str, bytes]:\n self._trace(\"fetching: %s\" % path)\n meta, resp = self._connection.files_download(path)\n return (meta.rev, resp.content)", "def content_md5(self):\n with self.open() as fileobj:\n return get_content_md5(fileobj)", "def file_info(self, f):\n ld8 = self.ld8_extract(f) # get luna_date\n sid = 
self.sesid(ld8) # make luna_visitnum\n age = self.age_lookup.get(sid)\n return (sid, age)", "def open_db(filename: str) -> Tuple[sqlite3.Connection, sqlite3.Cursor]:\n db_connection = sqlite3.connect(filename) # Create or connect to DB\n cursor = db_connection.cursor() # Read/write data\n return db_connection, cursor", "def read(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_read(self, *args)", "def get_info(curf, begin, read, param_of_unpack):\n curf.seek(begin)\n info = curf.read(read)\n info = struct.unpack(param_of_unpack, info)\n return str(info[0])", "def read_relations(db, openfile):\n pass", "def get_contents_hash(self):\n md5 = hashlib.md5()\n with closing(self.open()) as handle:\n for chunk in handle.chunks():\n md5.update(chunk)\n return md5.hexdigest()", "def get_fp(self, *args) -> \"FILE *\":\n return _ida_fpro.qfile_t_get_fp(self, *args)", "def get_aligned(database):\n conn = sqlite3.connect(database, check_same_thread=False)\n cur = conn.cursor()\n data = cur.execute('SELECT `header`, `aligned` FROM SEQUENCES;').fetchall()\n for h, s in data:\n yield h, s", "def totuple(self):\n return (self.pageid, self.filename, self.uploader, self.width,\n self.height, self.size, self.quality_image,\n self.featured_picture, self.valued_image, self.timestamp)", "def get_data(self, path, owner='*'):\n sql = sa.select([history.c.data, history.c.content_type]).select_from(sa.join(history, active)).where(active.c.path == path)\n result = self.engine.execute(sql).first()\n if result:\n data, ctype = result\n if ctype == 'application/msgpack':\n import msgpack\n return msgpack.unpackb(data, encoding='utf8')\n else:\n return data, ctype", "def read_stock(db, openfile):\n pass", "def file_entry(file_path: str) -> Tuple[str, Any, str]:\n file_name = os.path.split(file_path)[1]\n file_handler = open(file_path, \"rb\")\n content_type = \"application/octet-stream\"\n\n return (file_name, file_handler, content_type)", "def getTuple(n,type=\"R\",thing = \"T\"):\r\n if type == \"R\":\r\n n=n+\".root\"\r\n print \"getting file \"+n\r\n \r\n file=TFile(n)\r\n t=file.Get(thing)\r\n if type==\"X\":\r\n translate(n)\r\n t,file=getTuple(n,\"R\")\r\n return t,file", "def get_content(self):\n filestream = StringIO()\n\n tableName, primKey = self.provider._split_path(self.path)\n if primKey is not None:\n conn = self.provider._init_connection()\n listFields = self.provider._get_field_list(conn, tableName)\n csvwriter = csv.DictWriter(filestream, listFields, extrasaction=\"ignore\")\n dictFields = {}\n for field_name in listFields:\n dictFields[field_name] = field_name\n csvwriter.writerow(dictFields)\n\n if primKey == \"_ENTIRE_CONTENTS\":\n cursor = conn.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT * from \" + self.provider._db + \".\" + tableName)\n result_set = cursor.fetchall()\n for row in result_set:\n csvwriter.writerow(row)\n cursor.close()\n else:\n row = self.provider._get_record_by_primary_key(conn, tableName, primKey)\n if row is not None:\n csvwriter.writerow(row)\n conn.close()\n\n # this suffices for small dbs, but\n # for a production big database, I imagine you would have a FileMixin that\n # does the retrieving and population even as the file object is being read\n filestream.seek(0)\n return filestream", "def open_input(self, fn):\n\treturn (None, None)", "def enumerate_files(self, table):\n for i in range(self.nrofrecords()):\n data = self.bank.readrec(i + 1)\n if data and data[0] == table.tableid:\n yield i + 1, data[1:]", "def data(self) -> Tuple[List[str], 
List[List[str]]]:\n format = self.format\n # Check if the file contains header information. Initialize the header\n # with the optional names of columns in the format descriptor.\n has_header = format.get('header', True)\n columns = format.get('columns')\n rows = list()\n # Delimiter depends on the file format.\n delim = '\\t' if format['type'] == 'tsv' else ','\n f = codecs.iterdecode(self.load().open(), 'utf-8')\n for row in csv.reader(f, delimiter=delim):\n if has_header:\n # Set the has_header flag to False so that all following records\n # are added to the list of rows.\n has_header = False\n columns = row if columns is None else columns\n else:\n rows.append(row)\n columns = [None] * len(rows[0]) if not columns and rows else columns\n return (columns, rows)", "def _get_file_helper(self):\n page = self.course.moodle.fetch(\n self._download_url % self.id,\n None\n )\n # The resource URL should magically 303 across to the actual file\n if page.history and page.history[0].status_code == 303:\n return page, page.content\n\n # If it doesn't 303 to the actual file then there might be a download\n # link to try\n bs = bs4.BeautifulSoup(page.text, 'lxml')\n\n div = bs.find('div', class_='resourceworkaround')\n\n if div: # it's a link to the resource\n link = div.find('a').href\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n # Perhaps it's an embedded object\n obj = bs.find('object', id='resourceobject')\n if obj:\n link = obj['data']\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n raise ValueError(\"No idea how to get that resource\")", "def _open_read(self, existing_file_obj=None):\n # Use explicit file handle, if given.\n if existing_file_obj is not None:\n yield existing_file_obj\n\n # Use a file handle being actively used for writes, if available.\n # There is some danger to doing this because reads will seek the\n # file. 
However, revlog._writeentry performs a SEEK_END before all\n # writes, so we should be safe.\n elif self.writing_handle:\n yield self.writing_handle\n\n elif self.reading_handle:\n yield self.reading_handle\n\n # Otherwise open a new file handle.\n else:\n with self._open() as fp:\n yield fp", "def get_cursor(file_name):\n con = lite.connect(file_name)\n con.row_factory = lite.Row\n return con.cursor()", "def iterRegularFileContents(self):\n unpack = {}\n for (oldFileId, newFileId), stream in self.files.iteritems():\n if not files.frozenFileHasContents(stream):\n continue\n if files.frozenFileFlags(stream).isEncapsulatedContent():\n continue\n cont = files.frozenFileContentInfo(stream)\n unpack[newFileId] = cont.sha1()\n\n want_tag = '0 ' + ChangedFileTypes.file[4:]\n while True:\n f = self._nextFile()\n if not f:\n break\n name, tag, fobj, csf = f\n if len(name) != 36 or tag != want_tag:\n continue\n fileId = name[16:]\n sha1 = unpack.get(fileId)\n if not sha1:\n continue\n yield sha1, fobj", "def get_content(self, file_path: str) -> Tuple[defaultdict, str]:\n if not os.path.exists(file_path): # If file doesn't exist\n raise FileNotFoundError(f\"{fg(1)} Could not find file: {file_path}{fg(15)}\\n\")\n html = self.__html__(file_path)\n metadata = self.__metadata__()\n return metadata, html", "def to_workbook(self) -> tuple:\n\n # Initialize the bytestream\n f = io.BytesIO()\n wb = xw.Workbook(f, {\"in_memory\": True})\n\n # Open a workbook\n self._book = wb\n self._book.set_properties({\"category\": \"atomica:databook\"})\n self._formats = standard_formats(self._book)\n self._references = {} # Reset the references dict\n\n # Write the contents\n self._write_pops()\n self._write_tdve()\n self._write_interpops()\n self._write_transfers()\n\n # Clean internal variables related to writing the worbkook\n self._book = None\n self._formats = None\n self._references = None\n\n return f, wb", "async def __getinfo(self) -> tuple:\n\n async with self.session.get(\n self.url,\n allow_redirects=True\n\n ) as response:\n # print(response)\n\n # Use redirected URL\n self.url = str(response.url)\n try:\n content_disposition = cgi.parse_header(\n response.headers['Content-Disposition'])\n filename = content_disposition[1]['filename']\n filename = urllib.parse.unquote_plus(filename)\n except KeyError:\n filename = response._real_url.name\n\n if not filename:\n guessed_extension = mimetypes.guess_extension(\n response.headers[\"Content-Type\"].split(\";\")[0])\n filename = f\"{gen_uuid(size=5)}{guessed_extension}\"\n\n try:\n size = int(response.headers['Content-Length'])\n except KeyError:\n size = 0\n return (\n filename,\n size,\n response.headers['Content-Type'],\n response._real_url\n )", "def _filehandle(self):\n if not self._fh or self._is_closed():\n filename = self._rotated_logfile or self.filename\n if filename.endswith('.gz'):\n self._fh = gzip.open(filename, 'r')\n else:\n self._fh = open(filename, \"r\", 1)\n self._fh.seek(self._offset)\n\n return self._fh", "def _get_db_connection():\n conn = sqlite3.connect(str(DB_FILE_PATH))\n c = conn.cursor()\n\n return conn, c", "def get_tpm_blobs(\n self, path: Union[bytes, str]\n ) -> Tuple[TPM2B_PUBLIC, TPM2B_PRIVATE, str]:\n path = _to_bytes_or_null(path)\n tpm_2b_public = ffi.new(\"uint8_t **\")\n tpm_2b_public_size = ffi.new(\"size_t *\")\n tpm_2b_private = ffi.new(\"uint8_t **\")\n tpm_2b_private_size = ffi.new(\"size_t *\")\n policy = ffi.new(\"char **\")\n ret = lib.Fapi_GetTpmBlobs(\n self._ctx,\n path,\n tpm_2b_public,\n tpm_2b_public_size,\n 
tpm_2b_private,\n tpm_2b_private_size,\n policy,\n )\n _chkrc(ret)\n\n policy_str = ffi.string(policy[0]).decode(self.encoding)\n\n tpm_2b_public_buffer = bytes(\n ffi.buffer(tpm_2b_public[0], tpm_2b_public_size[0])\n )\n tpm_2b_public_unmarsh, _ = TPM2B_PUBLIC.unmarshal(tpm_2b_public_buffer)\n\n tpm_2b_private_buffer = bytes(\n ffi.buffer(tpm_2b_private[0], tpm_2b_private_size[0])\n )\n tpm_2b_private_unmarsh, _ = TPM2B_PRIVATE.unmarshal(tpm_2b_private_buffer)\n\n return (\n tpm_2b_public_unmarsh,\n tpm_2b_private_unmarsh,\n policy_str,\n )", "def _hash_file(self, file_entry):\n if file_entry is None:\n return None\n\n if file_entry.IsDevice() or file_entry.IsPipe() or file_entry.IsSocket():\n # Ignore devices, FIFOs/pipes and sockets.\n return None\n\n hash_context = hashlib.sha256()\n\n try:\n file_object = file_entry.GetFileObject()\n except IOError as exception:\n logging.warning((\n 'Unable to open path specification:\\n{0:s}'\n 'with error: {1!s}').format(file_entry.path_spec.location, exception))\n return None\n\n if not file_object:\n return None\n\n try:\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hash_context.update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n except IOError as exception:\n logging.warning((\n 'Unable to read from path specification:\\n{0:s}'\n 'with error: {1!s}').format(file_entry.path_spec.location, exception))\n return None\n\n return hash_context.hexdigest()", "def load_biom_table(table_f):\n return parse_biom_table(table_f)", "def open_image_and_meta(image_bytes):\n with MemoryFile(image_bytes) as memfile:\n with memfile.open() as src:\n meta = src.meta\n arr = reshape_as_image(src.read())\n return arr, meta", "def _open_sql_file(dbname):\n try:\n dbpath = pathlib.Path(dbname).resolve()\n conn = sqlite3.connect(f\"{dbpath.as_uri()}?mode=ro\", timeout=1, uri=True)\n c = conn.cursor()\n except sqlite3.Error as e:\n sys.exit(f\"An error occurred opening sqlite file: {e.args[0]} {dbname}\")\n return (conn, c)", "def _read_lengths(self):\n\n stream = self.stream\n stream.seek(0)\n\n ###########\n # Read and set table lengths\n self.table_lengths = [None]*len(tables)\n\n (self.entire_file_length,\n header_length,\n self.smallest_character_code,\n self.largest_character_code) = repeat_call(stream.read_unsigned_byte2, 4)\n\n header_data_length_min = 18 # words\n self.table_lengths[tables.header] = max(\n header_data_length_min, header_length)\n\n self.number_of_chars = self.largest_character_code - \\\n self.smallest_character_code + 1\n self.table_lengths[tables.character_info] = self.number_of_chars\n\n # read the last lengths\n for i in range(tables.width, len(tables)):\n self.table_lengths[i] = stream.read_unsigned_byte2()\n\n ###########\n # Compute table pointers\n self.table_pointers = [None]*len(tables)\n\n # The header starts at 24 bytes\n self.table_pointers[tables.header] = 24\n\n for table in range(tables.header, tables.font_parameter):\n self.table_pointers[\n table+1] = self._position_in_table(table, self.table_lengths[table])\n\n ###########\n # Sanity check\n length = self._position_in_table(\n tables.font_parameter, self.table_lengths[tables.font_parameter])\n if length != self.word_ptr(0, self.entire_file_length):\n raise NameError('Bad TFM file')", "def read_opsim_db(opsim_db_path='/global/projecta/projectdirs/lsst/groups/SSim/DC2/minion_1016_desc_dithered_v4.db'):\n conn = sqlite3.connect(opsim_db_path)\n # Check the table names\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE 
type='table';\")\n table_names = cursor.fetchall()\n print(table_names)\n # Turn table into Pandas df\n obs_history = pd.read_sql(sql=\"SELECT * from ObsHistory\", con=conn, index_col=None)\n field = pd.read_sql(sql=\"SELECT * from Field\", con=conn, index_col=None)\n return obs_history, field", "def info(self, fp):\n keys = (\n (\"cas.meta.compression\", CAS._convert_meta),\n (\"cas.meta.lib\", CAS._convert_meta),\n (\"cas.meta.fp_algo\", CAS._convert_meta),\n (\"cas.meta.orig_size\", CAS._convert_meta),\n (\"cas.refcount\", CAS._convert_refcount),\n )\n\n return {key: conv(self.ioctx.get_xattr(fp, key))\n for key, conv in keys}", "def read(self):\n self.connect()\n get_books = f\"select * from {self.book_table}\"\n try:\n self.cur.execute(get_books)\n self.con.commit()\n for i in self.cur:\n yield i\n except MySQLError as err:\n messagebox.showinfo(\"Failed to fetch files from database\")\n print(err)", "def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None", "def get_two_stock_data():\n ticker1, ticker2 = 'INTC', 'AAPL'\n\n file1 = cbook.get_sample_data('INTC.dat.gz')\n file2 = cbook.get_sample_data('AAPL.dat.gz')\n M1 = fromstring(file1.read(), '<d')\n\n M1 = resize(M1, (M1.shape[0]//2, 2))\n\n M2 = fromstring(file2.read(), '<d')\n M2 = resize(M2, (M2.shape[0]//2, 2))\n\n d1, p1 = M1[:, 0], M1[:, 1]\n d2, p2 = M2[:, 0], M2[:, 1]\n return (d1, p1, d2, p2)", "def read(self):\n return self.frame, self.boundary_lines", "def getTiffAsMatrix(kv):\n file_name = kv[0].split('/')[-1]\n fbinary = kv[1]\n tiffmat = getOrthoTif(fbinary)\n return (file_name,tiffmat)", "def _getConnection():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n return cursor, db", "def get_connection_info(otu_table_fp, num_meta, meta_dict):\r\n con_by_sample = defaultdict(set)\r\n node_file = []\r\n edge_file = []\r\n red_nodes = defaultdict(int)\r\n red_node_file = []\r\n red_edge_file = []\r\n multi = defaultdict(list)\r\n edge_from = []\r\n to = []\r\n otu_dc = defaultdict(int)\r\n degree_counts = defaultdict(int)\r\n sample_dc = defaultdict(int)\r\n sample_num_seq = defaultdict(int)\r\n con_list = []\r\n\r\n otu_table = 
parse_biom_table(open(otu_table_fp, 'U'))\r\n\r\n # if lineages == []:\r\n # is_con = False\r\n # else:\r\n # is_con = True\r\n\r\n is_con = False\r\n # This could be moved to OTU table sub-class\r\n if (otu_table.ObservationMetadata is not None and\r\n 'taxonomy' in otu_table.ObservationMetadata[0]):\r\n is_con = True\r\n\r\n for (otu_values, otu_id, otu_metadata) in otu_table.iterObservations():\r\n # for idx,l in enumerate(otu_table):\r\n # data = l\r\n\r\n #to_otu = otu_ids[idx]\r\n con = ''\r\n if is_con:\r\n #con = ':'.join(lineages[idx][:6])\r\n con = ':'.join(otu_metadata['taxonomy'][:6])\r\n con = con.replace(\" \", \"_\")\r\n con = con.replace(\"\\t\", \"_\")\r\n # Not required: otu_values (data) is always numpy vector\r\n #counts = map(float,data)\r\n if con not in con_list:\r\n con_list.append(con)\r\n #non_zero_counts = nonzero(counts)[0]\r\n non_zero_counts = otu_values.nonzero()[0]\r\n degree = len(non_zero_counts)\r\n weighted_degree = sum(otu_values)\r\n# node_file_line = [to_otu,'','otu_node',str(degree),\\\r\n# str(weighted_degree),con]\r\n node_file_line = [otu_id, '', 'otu_node', str(degree),\r\n str(weighted_degree), con]\r\n node_file_line.extend(['otu'] * num_meta)\r\n node_file.append('\\t'.join(node_file_line))\r\n\r\n if len(non_zero_counts) != 1:\r\n red_node_file.append('\\t'.join(node_file_line))\r\n\r\n otu_dc[degree] += 1\r\n degree_counts[degree] += 1\r\n #samples = [sample_ids[i] for i in non_zero_counts]\r\n samples = [otu_table.SampleIds[i] for i in non_zero_counts]\r\n for i, s in enumerate(samples):\r\n if s not in meta_dict.keys():\r\n continue\r\n con_by_sample[s].update(samples[0:i])\r\n con_by_sample[s].update(samples[i + 1:])\r\n #sample_num_seq[s] += float(data[non_zero_counts[i]])\r\n sample_num_seq[s] += float(otu_values[non_zero_counts[i]])\r\n\r\n edge_from.append(s)\r\n # to.append(to_otu)\r\n to.append(otu_id)\r\n meta = meta_dict[s]\r\n meta[1] += 1\r\n #data_num = str(data[non_zero_counts[i]])\r\n data_num = str(otu_values[non_zero_counts[i]])\r\n # edge_file.append('\\t'.join([s, to_otu, \\\r\n # data_num, con, meta[0]]))\r\n edge_file.append('\\t'.join([s, otu_id,\r\n data_num, con, meta[0]]))\r\n #multi[to_otu].append((s,float(data[non_zero_counts[i]]), meta[0]))\r\n multi[otu_id].append(\r\n (s, float(otu_values[non_zero_counts[i]]), meta[0]))\r\n if len(non_zero_counts) == 1:\r\n #red_nodes[(sample_ids[non_zero_counts[0]],meta[0])] += degree\r\n red_nodes[(\r\n otu_table.SampleIds[non_zero_counts[0]],\r\n meta[0])] += degree\r\n else:\r\n # red_edge_file.append('\\t'.join([s, to_otu, \\\r\n # data_num, con, meta[0]]))\r\n red_edge_file.append('\\t'.join([s, otu_id,\r\n data_num, con, meta[0]]))\r\n\r\n num_otu_nodes = len(node_file)\r\n for s in meta_dict:\r\n meta = meta_dict[s]\r\n degree = meta[1]\r\n sample_dc[degree] += 1\r\n degree_counts[degree] += 1\r\n weighted_degree = sample_num_seq[s]\r\n node_file_line = '\\t'.join([s, s, 'user_node', str(meta[1]),\r\n str(weighted_degree), 'other', meta[0]])\r\n node_file.append(node_file_line)\r\n red_node_file.append(node_file_line)\r\n\r\n for n, d in red_nodes.items():\r\n red_node_file_line = ['@' + n[0], '',\r\n 'otu_collapsed', str(d), str(float(d)), 'other']\r\n red_node_file_line.extend(['otu'] * num_meta)\r\n red_node_file.append('\\t'.join(red_node_file_line))\r\n red_edge_file.append(\r\n '\\t'.join([n[0], '@' + n[0], \"1.0\", \"missed\", n[1]]))\r\n\r\n return con_by_sample, node_file, edge_file, red_node_file,\\\r\n red_edge_file, otu_dc, degree_counts, sample_dc", "def 
check_file(f, open_close_pairs):\n\n opens = {}\n closes = {}\n open_for = {}\n for open_symbol, close_symbol in open_close_pairs:\n opens[open_symbol] = []\n open_for[close_symbol] = opens[open_symbol]\n closes[close_symbol] = []\n\n line_number = 0\n for line in f:\n line_number += 1\n column = 0\n for sym in line:\n column += 1\n if sym in opens and open_for.get(sym, None) == opens[sym]:\n if len(opens[sym]) == 0:\n opens[sym].append((line_number, column))\n else:\n opens[sym].pop()\n elif sym in opens:\n opens[sym].append((line_number, column))\n elif sym in closes:\n if len(open_for[sym]) > 0:\n open_for[sym].pop()\n else:\n closes[sym].append((line_number, column))\n\n return { (o, c) : (opens[o], closes[c]) for o, c in open_close_pairs }", "def parse(self):\n # type: () -> Tuple[LineNo, int, LineNo, int]\n metadata = self.safely_parse_metadata()\n if len(metadata) > 2:\n raise UnsupportedCombinedDiff(self.text)\n assert len(metadata) == 2\n return tuple(flatten(metadata)) # type: ignore[return-value]", "def to_tuple(self) -> tuple:\n return (self.transaction_number, self.date, self.description, self.memo, self.amount_debit,\n self.amount_credit, self.balance, self.check_number, self.fees, self.card_type,\n self.is_payment, self.is_transaction, self.user_id)", "def get_reader(fname):\n\n if fname == \"-\":\n fh = sys.stdin\n else:\n fh = open(fname, \"r\")\n \n rdr = csv.reader(fh, dialect=\"psv\")\n return (rdr, fh)", "def _read(self, item):\n return read_hdf5(self.file_name, title=self._get_h5_path(item))", "def snapshot(self):\n return (self.block_header.state_root, self.chaindb.snapshot())", "def read_file(self, filename, force_decompress=False):\n\n def decompress(data):\n \"\"\"Read the compression type and decompress file data.\"\"\"\n compression_type = ord(data[0:1])\n if compression_type == 0:\n return data\n elif compression_type == 2:\n return zlib.decompress(data[1:], 15)\n elif compression_type == 16:\n return bz2.decompress(data[1:])\n else:\n msg = \"Unsupported compression type: {}\".format(compression_type)\n raise RuntimeError(msg)\n\n hash_entry = self.get_hash_table_entry(filename)\n if hash_entry is None:\n return None\n block_entry = self.block_table[hash_entry.block_table_index]\n\n # Read the block.\n if block_entry.flags & MPQ_FILE_EXISTS:\n if block_entry.archived_size == 0:\n return None\n\n offset = block_entry.offset + self.header['offset']\n self.file.seek(offset)\n file_data = self.file.read(block_entry.archived_size)\n\n if block_entry.flags & MPQ_FILE_ENCRYPTED:\n raise NotImplementedError(\"Encryption is not supported yet.\")\n\n if not block_entry.flags & MPQ_FILE_SINGLE_UNIT:\n # File consists of many sectors. 
They all need to be\n # decompressed separately and united.\n sector_size = 512 << self.header['sector_size_shift']\n sectors = block_entry.size // sector_size + 1\n if block_entry.flags & MPQ_FILE_SECTOR_CRC:\n crc = True\n sectors += 1\n else:\n crc = False\n positions = struct.unpack('<%dI' % (sectors + 1),\n file_data[:4*(sectors+1)])\n result = BytesIO()\n sector_bytes_left = block_entry.size\n for i in range(len(positions) - (2 if crc else 1)):\n sector = file_data[positions[i]:positions[i+1]]\n if (block_entry.flags & MPQ_FILE_COMPRESS and\n (force_decompress or sector_bytes_left > len(sector))):\n sector = decompress(sector)\n\n sector_bytes_left -= len(sector)\n result.write(sector)\n file_data = result.getvalue()\n else:\n # Single unit files only need to be decompressed, but\n # compression only happens when at least one byte is gained.\n if (block_entry.flags & MPQ_FILE_COMPRESS and\n (force_decompress or block_entry.size > block_entry.archived_size)):\n file_data = decompress(file_data)\n\n return file_data", "def file_loc(self):\n\t\treturn self.__dbfile", "def openmat(filename, field = None):\n tmp = []\n \n try:\n \n data = loadmat(filename)\n \n if isinstance(field,(str, np.str)):\n tmp = data[field]\n elif isinstance(field, (tuple, list)):\n \n for key in data.keys():\n if '__header__' in key:\n continue\n if '__version__' in key:\n continue\n if '__globals__' in key:\n continue\n \n if key in field: \n tmp.append(data[key])\n else:\n \n for key in data.keys():\n if '__header__' in key:\n continue\n if '__version__' in key:\n continue\n if '__globals__' in key:\n continue\n \n tmp.append(data[key]) \n except:\n \n read_file = openmat_h5_(filename, field)\n \n tmp = read_file.parse()\n \n return tmp", "def close_file_pair(pair):\n try:\n yield pair\n finally:\n pair[0].close()\n pair[1].close()", "def DRI(fp):\n info = {\n 'Lr' : unpack('>H', fp.read(2))[0],\n 'Ri' : unpack('>H', fp.read(2))[0]\n }\n\n return info", "def hdf5_file_opened(self):\n should_close = self._hdf5_file is None\n yield self.hdf5_file\n if should_close:\n self.close_and_delete_hdf5_handle()", "def readbytes(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_readbytes(self, *args)", "def get_file_object(self):\n try:\n # FieldFile.open() and File.open() don't return file objects, so\n # accessing it directly\n return self.datafile.file.file # FileStoreItem.FieldFile.File.file\n except ValueError as exc:\n logger.error(\"Error opening %s: %s\", self.datafile, exc)\n return None", "def get_pickle(get_file='simbad_mag_errors.pkl'):\n print \"Opening %s and unpickling the DataFrame..\" % get_file\n with open(get_file, 'r') as opened_file:\n df_unpickled = cPickle.load(opened_file)\n print \"..Done\"\n return df_unpickled", "def get_content(file1, file2):\n with open(file1, 'r') as f1, open(file2, 'r') as f2:\n return [line for line in f1], [line for line in f2]", "def open_and_read_file(file_path1, file_path2):\n\n # your code goes here\n file_object1 = open(file_path1)\n file_object2 = open(file_path2)\n contents = file_object1.read() + \" \" + file_object2.read()\n file_object1.close()\n file_object2.close()\n\n return contents", "def _open_hdf5(self, file_path):\n\n if (file_path not in self._file_handles or\n not self._file_handles[file_path].is_open):\n self._file_handles[file_path] = pd.HDFStore(file_path, 'r')\n\n return self._file_handles[file_path]", "def read_reduced():\n ### read in the reduced data\n reduced_data = parse(join(FILE_PATH, \"lm_sm_aggz.gctx\"))\n\n ### read in the 
signature info and set the index to the signature id for easy indexing in the next step\n sig_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_sig_info.txt\"), sep=\"\\t\")\n sig_info.index = sig_info['sig_id']\n\n ### map the columns to the pert_id that generated the signature to allow for comparison in spark\n reduced_data.data_df.columns = sig_info.loc[pd.Index(reduced_data.data_df.columns)]['pert_id']\n ### return data_frame with pert_ids in row_major form ready for scala\n return reduced_data.data_df.transpose()", "def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed", "def open_with_size(filename, *args):\n\n f = open_(filename, *args)\n if isinstance(f, gzip.GzipFile):\n fo = open(f.name, 'rb')\n fo.seek(-4, 2)\n r = fo.read()\n fo.close()\n return f, struct.unpack('<I', r)[0]\n else:\n f.seek(0, os.SEEK_END)\n buflen = f.tell()\n f.seek(4, os.SEEK_SET)\n return f, buflen", "def connect(sqlite_file):\n conn = sqlite3.connect(sqlite_file)\n c = conn.cursor()\n return conn, c", "def __openRow(fd):\n\n print(\" <tr>\", file=fd)", "def tabdes(filename, body):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n data = []\n with open(filename, \"rb\") as f:\n buffer = f.read()\n _, _, count, length, _ = head.unpack_from(buffer, 0)\n offset = head.size\n for i in range(count):\n row = body.unpack_from(buffer, offset)\n data.append(row)\n offset += body.size\n else:\n print(\"read %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # _, foot.unpack_from(buffer, offset))\n return data", "def mem_get(bytestring, compressed=True):\n ff = mem_open(bytestring, compressed)\n if ff is None:\n return (None, None)\n hdr = ff[0].header\n data = ff[0].data\n ff.close()\n return (hdr, data)", "def loadDb(self,dbContent,base):\n \n db = utilities.db2bitarray(dbContent,self.dbSize,1,1,base)\n db = np.array(db).reshape(self.dbSize,-1)\n # track the size of each file\n fileSize = db.shape[-1]\n \n return (db,fileSize)", "def open_file_in_dir(path: str) -> Tuple[io.FileIO, int]:\n directory = os.path.dirname(path)\n if not os.path.isdir(directory):\n raise ValueError('No directory {}'.format(directory))\n\n if not os.path.exists(path):\n file_fd = open(path, mode='x+b', buffering=0)\n else:\n file_fd = open(path, mode='r+b', buffering=0)\n\n dir_fd = os.open(directory, os.O_RDONLY)\n\n return file_fd, dir_fd", "def ffi_pickle_contents(self) -> Tuple[int, float, float, float,\n np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n tic_id = 231663901\n ra = 62.2\n dec = -71.4\n tess_magnitude = 10\n time = np.arange(0, 100, 10)\n raw_flux = np.arange(10)\n corrected_flux = np.arange(10, 20)\n pca_flux = np.arange(20, 30)\n flux_error = np.arange(0, 1, 0.1)\n return tic_id, ra, dec, tess_magnitude, time, raw_flux, corrected_flux, pca_flux, flux_error", "def read_tuple(self):\n cmd = self.read_command()\n return StormTuple(\n cmd['id'], cmd['comp'], cmd['stream'], cmd['task'], cmd['tuple'])", "def read_file(self):\n self.write({\"datas\": self.choose_file})\n self._cr.commit()\n import_file = BytesIO(base64.decodestring(self.datas))\n file_read = StringIO(import_file.read().decode())\n reader = csv.DictReader(file_read, delimiter=\",\")\n return reader", "def _getH5File(db):\n if isinstance(db, Database3):\n return db.h5db\n else:\n raise TypeError(\"Unsupported Database type 
({})!\".format(type(db)))", "def getRef(self):\n rnam = self.rnam\n if not rnam or rnam.data == chr(255)*4: return None\n if rnam.size != 4: raise Tes3Error(self.inName,_('SCPT.RNAM'),rnam.size,4,True)\n iMod = struct.unpack('3xB',rnam.data)[0]\n iObj = struct.unpack('i',rnam.data[:3]+'\\x00')[0]\n return (iMod,iObj)", "def parseIntoDB(self, filehandle, cursor, alignTab, sequenceTab=None,\n update=None):\n c = filehandle.tell()\n filehandle.seek(0, 2)\n filesize = filehandle.tell()\n filehandle.seek(c)\n l = filehandle.readline()\n rc = 0\n count = 0\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n count+=1\n self.readalign(la[1:], filehandle)\n self._dump(alignTab, sequenceTab)\n if(update and not count % 1000):\n cursor.execute(update % (int(filehandle.tell() * 100.\n / filesize)))\n else:\n## print \"end of records\"\n return\n l=filehandle.readline()", "def info(dump_alloc_table: bytes, /) -> None:", "def readfile(file, sub_im, cr):\n\n root, ext = os.path.splitext(file)\n\n if ext == '.tif':\n print('Reading tiff image:', file)\n par = readpar(root + '.mli.par')\n data = readtiff(file, sub_im, cr)\n\n else: # must be GAMMA flat binary float format\n print('Reading flat binary image', file)\n par = readpar(root + ext + '.par')\n data = readmli(file, par, sub_im, cr)\n\n # extract relevant metadata\n rho_r = float(par['range_pixel_spacing'].split()[0])\n rho_a = float(par['azimuth_pixel_spacing'].split()[0])\n theta = float(par['incidence_angle'].split()[0])\n\n return data, rho_r, rho_a, theta", "def _get_table_info(self):\n highestbet = self.highestBetNotFold(),\n bigb =self.bigBlind() if self._game_state == GAME_STATE_PRE_FLOP and not self.inSmallBlindPosition() else 0\n return [\"blinds: small:%r big:%r\" % (self.small_blind, self.big_blind),\n \"buy_ins: min:%r max:%r\" % (self.min_buy_in, self.max_buy_in),\n \"bs: %r\" % self.betting_structure,\n \"highestbet = %r\" % highestbet,\n \"bigb = %r\" % bigb,]", "def get_mediafile_blob_data(self, old):\n if old[\"is_directory\"]:\n return None\n\n try:\n db_mediafile = Mediafile.objects.get(pk=old[\"id\"])\n except Mediafile.DoesNotExist:\n return None\n filename = db_mediafile.original_filename\n\n if use_mediafile_database:\n with connections[\"mediafiles\"].cursor() as cursor:\n cursor.execute(\n f\"SELECT data FROM {mediafile_database_tablename} WHERE id = %s\",\n [old[\"id\"]],\n )\n row = cursor.fetchone()\n if row is None:\n return None\n data = row[0]\n else:\n data = db_mediafile.mediafile.open().read()\n\n blob = base64.b64encode(data).decode(\"utf-8\")\n return filename, len(data), blob", "def read_syn_data():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'syn_test')\n t1_name = pjoin(folder, 't1.nii.gz')\n b0_name = pjoin(folder, 'b0.nii.gz')\n\n md5_dict = {'t1': '701bda02bb769655c7d4a9b1df2b73a6',\n 'b0': 'e4b741f0c77b6039e67abb2885c97a78'}\n\n check_md5(t1_name, md5_dict['t1'])\n check_md5(b0_name, md5_dict['b0'])\n\n t1 = nib.load(t1_name)\n b0 = nib.load(b0_name)\n return t1, b0", "def retrieve_secondary_file(self, src: str) -> tuple:\n if src.find(\"http://\") != -1:\n # location is on other server\n uri, file = HttpClient.get_remote_uri_and_filename(src)\n print(\"[CONNECTION] starting new client to retrieve an image\")\n new_client = HttpClient()\n new_client.main()\n\n 
http_command = new_client.create_secondary_http_command(file)\n loc = HttpClient.create_file_location(file)\n new_client.send(http_command)\n recv_raw = new_client.recv_all_data()\n new_client.disconnect()\n else:\n # location is on same server\n\n http_command = self.create_secondary_http_command(src)\n loc = HttpClient.create_file_location(src)\n self.send(http_command)\n recv_raw = self.recv_all_data()\n\n return loc, recv_raw", "def read(self, with_holdout_split: bool = False) -> Union['DataFrame', Tuple['DataFrame', 'DataFrame']]:\n pass", "def __get_file_chunk(self, buf=1000):\n data = self.file.read(buf)\n return data, len(data)", "def get_block(handle, offset=0):\n\n if isinstance(handle, bamnostic.core.AlignmentFile):\n handle = handle._handle\n with open(handle.name, 'rb') as header_handle:\n header_handle.seek(offset) # get to the start of the BGZF block\n\n # Capture raw bytes of metadata header\n _, meta_raw = _bgzf_metaheader(header_handle)\n\n BSIZE_raw = header_handle.read(2)\n BSIZE = struct.unpack('<H', BSIZE_raw)[0]\n\n # capture the CRC32 and ISIZE fields in addition to compressed data\n # 6 = XLEN, 19 = spec offset, 8 = CRC32 & ISIZE -> -5\n block_tail = header_handle.read(BSIZE - 5)\n return meta_raw + BSIZE_raw + block_tail", "def _genome_info_tuple(self, name, size=False):\n accession = self.assembly_accession(name)\n taxid = self.genome_taxid(name)\n annotations = bool(self.annotation_links(name))\n species = self.genomes[name].get(\"scientific_name\")\n other = self.genomes[name].get(\"genebuild\")\n if size:\n length = self.genomes[name][\"base_count\"]\n return name, accession, taxid, annotations, species, length, other\n return name, accession, taxid, annotations, species, other", "def request_file(self, path: str, token: str) -> Tuple[IO[bytes], dict]:\n response = self.request('get', path, token, stream=True)\n stream = ReadWrapper(response.iter_content,\n int(response.headers['Content-Length']))\n return stream, response.headers", "def read_table(self, table_type):\n\n if table_type == 'hash':\n entry_class = MPQHashTableEntry\n elif table_type == 'block':\n entry_class = MPQBlockTableEntry\n else:\n raise ValueError(\"Invalid table type.\")\n\n table_offset = self.header['%s_table_offset' % table_type]\n table_entries = self.header['%s_table_entries' % table_type]\n key = self._hash('(%s table)' % table_type, 'TABLE')\n\n self.file.seek(table_offset + self.header['offset'])\n data = self.file.read(table_entries * 16)\n data = self._decrypt(data, key)\n\n def unpack_entry(position):\n entry_data = data[position*16:position*16+16]\n return entry_class._make(\n struct.unpack(entry_class.struct_format, entry_data))\n\n return [unpack_entry(i) for i in range(table_entries)]", "def readSeq(seqFile):\n line = seqFile.readline()\n seq1 = line.rstrip()\n line = seqFile.readline()\n seq2 = line.rstrip()\n return (seq1, seq2)" ]
[ "0.5903592", "0.5517948", "0.5293327", "0.5092299", "0.5019564", "0.5009954", "0.49709004", "0.49704278", "0.49154168", "0.49107695", "0.48602694", "0.4815139", "0.47660074", "0.4726421", "0.4725368", "0.4721951", "0.47156245", "0.47033814", "0.46824378", "0.46797317", "0.4666152", "0.4654891", "0.46443024", "0.4623548", "0.46179524", "0.46070755", "0.46045774", "0.45729685", "0.4558212", "0.455664", "0.45498285", "0.45189953", "0.45088553", "0.44890502", "0.44885445", "0.44670472", "0.4463631", "0.44449207", "0.4441873", "0.44362852", "0.44299856", "0.44278124", "0.44230026", "0.4421514", "0.4420262", "0.4405153", "0.440437", "0.44023165", "0.43986464", "0.43872502", "0.43829325", "0.43740833", "0.43679652", "0.43662122", "0.4365856", "0.43517035", "0.43491328", "0.43488583", "0.43415296", "0.4341529", "0.43399897", "0.43352124", "0.43344167", "0.43329003", "0.43309438", "0.43290874", "0.43244934", "0.43214545", "0.431996", "0.43178132", "0.43149066", "0.43007424", "0.42992342", "0.429146", "0.42901868", "0.42895573", "0.4284795", "0.4284672", "0.42829847", "0.42817834", "0.4278182", "0.42731228", "0.42727736", "0.4266202", "0.42643234", "0.42590126", "0.4257576", "0.42553267", "0.42494896", "0.42486024", "0.42466667", "0.4246387", "0.4243771", "0.42418", "0.42394865", "0.4238013", "0.42355093", "0.4235441", "0.42349046", "0.423329" ]
0.6167393
0
Return a parsed JSON object.
def load_json_document(f):
    return json.load(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json(self):\n return self._parsejson(self.raw)", "def parse_json(data):\n return json.loads(data)", "def json(self):\n return json.loads(self.text)", "def openJson(self):\n json_file = open(self.file, 'r')\n json_data = json_file.read()\n result = json.loads(json_data)\n return result", "def json(self, **kwargs):\n return json.loads(self.content, **kwargs)", "def _get_json(self, url):\n try:\n resp = urllib.urlopen(url)\n except:\n resp = urllib.request.urlopen(url)\n json_string = resp.read()\n parsed_json = json.loads(json_string.decode('utf-8'))\n return parsed_json", "def json(self):\n try:\n return self._json_provider.load(self._body())\n except ValueError:\n raise HTTPError(400, 'Misformated JSON object')", "def parse(content):\n return json.loads(content)", "def parse_json(response):\r\n return json.loads(response.content)", "def json(self):\n\n return json.loads(self.text)", "def _parsejson(x):\n return json.loads(x.read().decode('utf-8'))", "def json_loads(self, string: str) -> object:\n return json.loads(string)", "def json(self) -> Any:\n return json.loads(self)", "def read_json(self):\n self._fopen.seek(self._json_start, 0)\n return json.loads(self._fopen.read().decode('utf-8'))", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_json(response):\n return json.loads(response.data.decode('utf8'))", "def extract_and_parse_json(response):\n return json.loads(response.text, object_hook=OrderedDict)", "def read_json():\n return json.loads(web.data())", "def get_json(self, script, depth=2):\n result = self.run_script(\n \"{} | ConvertTo-Json -Compress -Depth {}\".format(script, depth))\n if not result:\n return None\n try:\n return json.loads(result)\n except ValueError:\n self.logger.error(\"Returned data was not json. 
Data:\\n\\n%s\", result)\n raise ValueError(\"Returned data was not json\")", "def get_json(self, *args, **kwargs):\r\n resp = self.request_with_auth(\"get\", *args, **kwargs)\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp[\"Content-Type\"].startswith(\"application/json\"))\r\n return json.loads(resp.content)", "def _get_json(self, url: str) -> dict:\n r = self._req_get(url)\n return r.json() if r else None", "def parse_json(json_string):\n\n return _convert_to_jsonc(simplejson.loads(json_string))", "def from_json(self, content):\r\n return simplejson.loads(content)", "def json_of_response(response):\n return json.loads(response.text)", "def read_json(self, key):\n return json.loads(self.get_object(key))", "def convert_json_to_object(file_content):\n object = json.loads(file_content)\n print(object)\n return object", "def JSONtoObject(fileName):\n # TODO: ensure file exists first!!\n \n with open(fileName) as json_data:\n d = json.load(json_data)\n \n return d\n #return json.loads(d, object_hook=_json_object_hook)", "def dump_to_json (self):\n return json.loads(self.dump())", "def parse(self, stream, media_type=None, parser_context=None):\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n\n try:\n data = stream.read().decode(encoding)\n return json.loads(data)\n except ValueError as exc:\n raise ParseError('JSON parse error - %s' % six.text_type(exc))", "def _parse_json(self, file_location: str) -> Dict:\n with open(file_location, mode=\"r\") as json_file:\n raw_json = json.loads(json_file.read())\n return raw_json", "def get_json():\n try:\n return request.get_json(force=True)\n except Exception:\n raise AXApiInvalidParam(\"Invalid json supplied\")", "def get_as_json(self, use_cache_if_available=True):\n obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available)\n return json.loads(obj_bytes.decode(\"utf-8\"))", "def _from_json(return_obj):\n\n return json.loads(return_obj.decode())", "def json_loads(value):\n return json.loads(value)", "def json(self):\n if self._json is None:\n if self.content_type is None:\n return None\n mime_type = self.content_type.split(';')[0]\n if mime_type != 'application/json':\n return None\n self._json = json.loads(self.body.decode())\n return self._json", "def get_json_from_url(url):\r\n content = get_url(url)\r\n js = json.loads(content)\r\n return js", "def load_json(response):\n return json.loads(response.data.decode('utf8'))", "def json(self):\n return self._json", "def parse_json(json_path):\n with open(json_path, 'r') as f:\n out = json.load(f)\n return out", "def getJson(self,url):\n r = req.get(str(url),\"GET\")\n jsonResponse = json.loads(r.text)\n return jsonResponse", "def getJsonObject(self):\n jsonObject = {\n 'id': self.id,\n 'name': self.name,\n 'cards': [self.cards[card].getJsonObject() for card in self.cards] if self.cards else [\"\"]\n }\n return jsonObject", "def _getJason(self, url, use_session = False):\n print ('Retrieving Jason for %s' % url)\n if use_session:\n r = session.get(url)\n else:\n r = requests.get(url)\n data = json.loads(r.text)\n return data", "def get_json():\n response = requests.get(JSON_URL)\n if response.status_code != 200:\n raise Exception(\"Could not not load json file!\")\n return response.json()", "def parse_json(self, json_cfg: Dict) -> Any:\n raise NotImplementedError", "def dict(self):\n\t\treturn self.json", "def read_json(self, *args, **kwargs):\n with self.open('rb') as f:\n return json.load(f, *args, **kwargs)", "def 
json(self) -> Dict[str, Union[List, Dict, str, int, float]]:", "def parse_response(response):\n return json.loads(response.text)", "def get_json(response):\n\n json_field_or_function = getattr(response, 'json', None)\n if callable(json_field_or_function):\n return response.json()\n else:\n return json.loads(response.content)", "def get_json(self, url):\n json_response = self.testapp.get(url)\n self.assertEqual(json_response.status_int, 200)\n return self._parse_json_response(json_response, expect_errors=False)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def load_json(content):\n from ujson import loads\n return loads(content)", "def get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js", "def json(self) -> Any:\n return self.body.json()", "def json(self):\n return self.__json", "def get_json(self) -> dict:\n\n try:\n html = get(self.url, sessionid=self.sessionid)\n except HTTPError:\n raise HashTagNotFound(self.url.split(\"/\")[-2])\n parser = Parser()\n parser.feed(html)\n return parser.Data", "def parse_response(self, r):\n data = (\"\".join(r.split(\"\\n\")[1:])).replace(\",]\",\"]\")\n obj = simplejson.loads(data)\n return obj", "def from_json_string(my_obj):\n\n return(json.loads(my_obj))", "def get_json(url):\n r = requests.get(url)\n return r.json()", "def get_json(self) -> dict:\n\n try:\n html = get(self.url, sessionid=self.sessionid)\n except HTTPError:\n raise PostIdNotFound(self.post_id)\n parser = Parser()\n parser.feed(html)\n info = parser.Data\n return info", "def getJson(self, soup):\n script = soup.find('script', type='application/ld+json')\n json_text = BeautifulSoup.get_text(script)\n return json.loads(json_text)", "def load_json(request, json):\n return json", "def get_json(response):\n\tif requests.__version__ >= \"1.0.0\":\n\t\treturn response.json()\n\telif requests.__version__ == \"0.14.2\":\n\t\treturn response.json\n\telse:\n\t\treturn json.loads(response.content)", "def json_of_response(response):\n return json.loads(response.data.decode('utf8'))", "def json_of_response(response):\n return json.loads(response.data.decode('utf8'))", "def parse_response(self, response):\n\n return json.loads(response.text)", "def _get_json(self, url, file=None):\n r = requests.get(url)\n # If status is not OK, raise error.\n if not r.ok:\n r.raise_for_status()\n # Otherwise load JSON.\n data = json.loads(r.text)\n # Optionally save JSON to disk.\n if file is not None:\n with open(file, 'w') as f:\n json.dump(data, f)\n return data", "def parse_json_string(json_string):\n json_object = None\n\n if not isinstance(json_string, str):\n return json_string\n\n try:\n json_object = json.loads(json_string)\n except (ValueError, TypeError):\n pass\n return json_object", "def _parse_json_file(json_path):\n app.app.logger.info('Reading JSON file %s' % json_path)\n with open(json_path, 'r') as f:\n # Remove comments using jsmin, as recommended by JSON creator:\n # https://plus.google.com/+DouglasCrockfordEsq/posts/RK8qyGVaGSr\n jsonDict = json.loads(jsmin.jsmin(f.read()))\n return jsonDict", "def getjson(url, **kwargs):\n json = fetch_resource(url, **kwargs)\n return simplejson.loads(json)", "def get_json(url):\n f = 
urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n # pprint(response_data)\n return response_data", "def get_json(filename) :\n result = json.load(open(filename,'r'))\n return result", "def json(self, **kwargs):\n\t\ttry:\n\t\t\treturn self.response.json(**kwargs)\n\t\texcept ValueError:\n\t\t\t# No valid JSON encoding\n\t\t\treturn None", "def get_json(url):\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n\n return response_data", "def parse(response):\n if isinstance(response, dict):\n json = response\n else:\n json = response.json()\n\n if json.get('Error'):\n raise Exception('Error in retrieval: ' + self.json['error'])\n\n return json", "def get_json(input):\n\n if os.path.isfile(input):\n try:\n with open(input, 'r') as f:\n obj = json.load(f)\n f.close()\n except Exception as e:\n raise ValueError(\"Unable to read the JSON file: %s\" % input)\n else:\n try:\n obj = json.loads(input)\n except Exception as e:\n raise ValueError(\"Unable to read the JSON: %s\" % input)\n\n return obj", "async def json(self, encoding=\"utf-8\", content_type=None, loads=json_loads):\n return loads(self.response.decode(encoding))", "def load_json(json_string):\n return json.loads(json_string)", "def getDataParsed():\r\n serialConsole.flush()\r\n rawData = serialConsole.readline().decode(\"utf-8\").rstrip()\r\n parsedJson = json.loads(rawData)\r\n return parsedJson", "def _parse_json(dump):\n # return yaml.safe_load(dump) # slightly improved in python3?\n return json.loads(dump)", "def parse(json_string: str) -> object:\n tokens = tokenize(json_string)\n\n value = _parse(tokens)\n if len(tokens) != 0:\n raise ParseError(\n f\"Invalid JSON at {tokens[0].value} \"\n f\"(line {tokens[0].line} column {tokens[0].column})\")\n\n return value", "def get_json_data():\n return None", "def test_to_json(self):\n\n self.parser.parse()\n json_string = self.parser.to_json()\n \n self.assertTrue(isinstance(json_string, str))", "def parse(self, stream, media_type=None, parser_context=None):\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n\n try:\n decoded_stream = codecs.getreader(encoding)(stream)\n parse_constant = strict_constant if self.strict else None\n return ujson.load(decoded_stream, parse_constant=parse_constant)\n except ValueError as exc:\n raise ParseError('JSON parse error - %s' % str(exc))", "def _remoteloadjson(path: str) -> JSONType:\n return json.loads(request.urlopen(path).read())", "def recvjson(self):\n\n import json\n\n data = self.recvraw()\n return json.loads(data)", "def get_json(url):\n f = urlopen(url)\n response_text = f.read()\n response_data = json.loads(str(response_text, \"utf-8\"))\n #pprint(response_data)\n return response_data", "def get_page_contents_as_json(url, headers):\n json_string = get_page_contents(url, headers)\n json_object = json.loads(json_string)\n return json_object", "def json_loads(s):\n return json.loads(s, cls=DataDecoder)", "def __GetJson(self, url, auth, responseProcessor = None):\n\n conn = self.__GetConnection()\n conn.request(\"GET\", url, \"\", self.__MakeHeaders(auth))\n response = conn.getresponse()\n if (responseProcessor != None):\n if (responseProcessor(response) == False):\n return None\n\n self.__CheckResponse(response)\n data = response.read()\n return cjson.decode(data)", "def parse_json(raw):\n return escape.recursive_unicode(escape.json_decode(raw)) if 
raw != None else None", "def get_json(filename):\n with open(filename) as f:\n file_content = f.read()\n data = json.loads(file_content)\n return data", "def json(self, pretty=True, full_dump=True):\n return json_dump(self, pretty, full_dump)", "def read_json_to_object(fn):\n\n with open(fn, \"r\") as fid:\n obj = json.load(fid, object_hook=lambda d: SimpleNamespace(**d))\n return obj", "async def json(self, *, loads=json.loads):\n body = await self.text()\n return loads(body)" ]
[ "0.73147", "0.7164479", "0.7052984", "0.70459557", "0.69945663", "0.69648427", "0.69155174", "0.6909238", "0.6883624", "0.68454266", "0.67840165", "0.6775863", "0.67642134", "0.67337215", "0.6694092", "0.6694092", "0.6694092", "0.6694092", "0.66517884", "0.6608782", "0.65929335", "0.6556722", "0.65177184", "0.6502583", "0.64878607", "0.6482714", "0.64587164", "0.6447555", "0.64439017", "0.64193124", "0.6391434", "0.6389344", "0.63695204", "0.6353953", "0.63394606", "0.63393855", "0.62897635", "0.6286033", "0.6280681", "0.62804", "0.627933", "0.6277912", "0.6272756", "0.62681204", "0.62642866", "0.62597746", "0.62579423", "0.6257125", "0.6255825", "0.6248963", "0.6219606", "0.62145686", "0.61984533", "0.61973876", "0.61973876", "0.61969626", "0.61969626", "0.6193145", "0.61741906", "0.6164568", "0.6164536", "0.6163894", "0.6162112", "0.61467534", "0.613816", "0.6137672", "0.6134756", "0.6132258", "0.612734", "0.61254287", "0.61254287", "0.61084956", "0.61040163", "0.61004645", "0.6096719", "0.60961807", "0.6095321", "0.6093403", "0.60859495", "0.60832095", "0.6080717", "0.6069305", "0.6067498", "0.6063849", "0.6051663", "0.60503", "0.60501295", "0.6049806", "0.6048331", "0.60428184", "0.6036853", "0.60365856", "0.60283643", "0.6024698", "0.60151917", "0.6012839", "0.59971696", "0.5990541", "0.5985968", "0.5976619", "0.5974485" ]
0.0
-1
Parse a sample/observation metadata file, return a ``MetadataMap``. If ``lines`` is ``None``, this function will return ``None``.
def load_metadata(lines):
    if lines is not None:
        return MetadataMap.from_file(lines)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseMetadataMap(lines):\r\n return MetadataMap(*parse_mapping_file_to_dict(lines))", "def get_sample_info(lines):\r\n mapping_data, header, comments = parse_mapping_file(lines)\r\n labels = [\"from\", \"to\", \"eweight\", \"consensus_lin\"]\r\n node_labels = [\"node_name\", \"node_disp_name\", \"ntype\", \"degree\",\r\n \"weighted_degree\", \"consensus_lin\"]\r\n cat_by_sample = {}\r\n sample_by_cat = defaultdict(list)\r\n meta_dict = {}\r\n category_labels = header[1:-1]\r\n labels.extend(category_labels)\r\n node_labels.extend(category_labels)\r\n label_list = [[] for c in category_labels]\r\n for r in mapping_data:\r\n categories = r[0:len(category_labels) + 1]\r\n sample = categories[0]\r\n meta_dict[sample] = ['\\t'.join(categories[1:]), 0]\r\n\r\n cat_by_sample[sample] = [(l.strip(), c.strip())\r\n for l, c in zip(category_labels, categories[1:])]\r\n\r\n cat_list = []\r\n for i, (l, c) in enumerate(zip(category_labels, categories[1:])):\r\n if c not in label_list[i]:\r\n label_list[i].append(c)\r\n l = l.strip()\r\n c = c.strip()\r\n cat_list.append((l, c))\r\n sample_by_cat[(l, c)].append(sample)\r\n\r\n cat_by_sample[sample] = cat_list\r\n\r\n return cat_by_sample, sample_by_cat, len(category_labels), meta_dict,\\\r\n labels, node_labels, label_list", "def process_lines(self, lines):\n line_index = 0\n n_lines = len(lines)\n while line_index < n_lines:\n if lines[line_index].startswith(\"HIERARCHY\"):\n line_index = self._read_skeleton(lines, line_index, n_lines)\n if lines[line_index].startswith(\"MOTION\"):\n self._read_frametime(lines, line_index+2)\n line_index = self._read_frames(lines, line_index+3, n_lines)\n else:\n line_index += 1", "def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. 
This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata", "def parse_mapping_file(lines, strip_quotes=True, suppress_stripping=False):\r\n if hasattr(lines, \"upper\"):\r\n # Try opening if a string was passed\r\n try:\r\n lines = open(lines, 'U')\r\n except IOError:\r\n raise QiimeParseError(\"A string was passed that doesn't refer \"\r\n \"to an accessible filepath.\")\r\n\r\n if strip_quotes:\r\n if suppress_stripping:\r\n # remove quotes but not spaces\r\n strip_f = lambda x: x.replace('\"', '')\r\n else:\r\n # remove quotes and spaces\r\n strip_f = lambda x: x.replace('\"', '').strip()\r\n else:\r\n if suppress_stripping:\r\n # don't remove quotes or spaces\r\n strip_f = lambda x: x\r\n else:\r\n # remove spaces but not quotes\r\n strip_f = lambda x: x.strip()\r\n\r\n # Create lists to store the results\r\n mapping_data = []\r\n header = []\r\n comments = []\r\n\r\n # Begin iterating over lines\r\n for line in lines:\r\n line = strip_f(line)\r\n if not line or (suppress_stripping and not line.strip()):\r\n # skip blank lines when not stripping lines\r\n continue\r\n\r\n if line.startswith('#'):\r\n line = line[1:]\r\n if not header:\r\n header = line.strip().split('\\t')\r\n else:\r\n comments.append(line)\r\n else:\r\n # Will add empty string to empty fields\r\n tmp_line = map(strip_f, line.split('\\t'))\r\n if len(tmp_line) < len(header):\r\n tmp_line.extend([''] * (len(header) - len(tmp_line)))\r\n mapping_data.append(tmp_line)\r\n if not header:\r\n raise QiimeParseError(\"No header line was found in mapping file.\")\r\n if not mapping_data:\r\n raise QiimeParseError(\"No data found in mapping file.\")\r\n\r\n return mapping_data, header, comments", "def parse_metadata_file(self, file):\n\n file_keys = list(file.keys())\n\n if 'labelAnnotations' in file_keys:\n file_annots = file['labelAnnotations']\n file_top_score = np.asarray(\n [x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n\n file_colors = file['imagePropertiesAnnotation']['dominantColors'][\n 'colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray(\n [x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray(\n [x['confidence'] for x in file_crops]).mean()\n\n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray(\n [x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n\n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n\n return df_metadata", "def test_getSampleMetadata(self):\r\n exp = {'BarcodeSequence': 'AGCACGAGCCTA', 'Treatment': 'Control',\r\n 'DOB': '20061218', 'Description': '354'}\r\n obs = self.overview_map.getSampleMetadata('PC.354')\r\n self.assertEqual(obs, exp)\r\n\r\n exp = 
{'BarcodeSequence': 'ACCAGCGACTAG', 'Treatment': 'Control',\r\n 'DOB': '20070314', 'Description': '481'}\r\n obs = self.map_with_comments.getSampleMetadata('PC.481')\r\n self.assertEqual(obs, exp)\r\n\r\n exp = {'BarcodeSequence': 'ACGGTGAGTGTC', 'Treatment': 'Fast',\r\n 'DOB': '20080116', 'Description': '636'}\r\n obs = self.map_with_comments.getSampleMetadata('PC.636')\r\n self.assertEqual(obs, exp)\r\n\r\n exp = {}\r\n obs = self.no_metadata.getSampleMetadata('PC.636')\r\n self.assertEqual(obs, exp)", "def parse_metadata_file(self, file):\n \n file_keys = list(file.keys())\n \n if 'labelAnnotations' in file_keys:\n #file_annots = file['labelAnnotations'][:int(len(file['labelAnnotations']) * 0.5)]\n file_annots = file['labelAnnotations'][:]\n file_top_score = np.asarray([x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n \n file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray([x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray([x['confidence'] for x in file_crops]).mean()\n \n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray([x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n \n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n \n return df_metadata", "def get_metadata(sessions):\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n # if database is not None and session_name in database.index:\n # print(colored('Session is already in database','yellow'))\n # continue\n\n # Create the metadata\n session_metadata = {}\n session_metadata['session_id'] = session_id\n session_metadata['experiment'] = line['Experiment']\n session_metadata['date'] = line['Date']\n session_metadata['mouse_id'] = line['MouseID']\n session_metadata['software'] = line['Software']\n session_metadata['number'] = line['Number']\n\n # initialize video data\n session_metadata['video_file_paths'] = []\n session_metadata['tdms_file_paths'] = []\n session_metadata['videodata'] = []\n\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n videopaths = []\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n\n # add file paths to metadata\n session_metadata['video_file_paths'].append(videopaths)\n session_metadata['tdms_file_paths'].append(tdmspath)\n\n # Loop over each video and get the relevant data [e.g., number of frames, fps...]\n session_metadata['videodata'].append(get_session_videodata(videopaths))\n\n # Add to dictionary (or update entry)\n 
metadata_dict[session_name] = session_metadata\n return metadata_dict", "def test_getSampleMetadata(self):\n exp = {'BarcodeSequence': 'AGCACGAGCCTA', 'Treatment': 'Control',\n 'DOB': '20061218', 'Description': '354'}\n obs = self.overview_map.getSampleMetadata('PC.354')\n self.assertEqual(obs, exp)\n\n exp = {'BarcodeSequence': 'ACCAGCGACTAG', 'Treatment': 'Control',\n 'DOB': '20070314', 'Description': '481'}\n obs = self.map_with_comments.getSampleMetadata('PC.481')\n self.assertEqual(obs, exp)\n\n exp = {'BarcodeSequence': 'ACGGTGAGTGTC', 'Treatment': 'Fast',\n 'DOB': '20080116', 'Description': '636'}\n obs = self.map_with_comments.getSampleMetadata('PC.636')\n self.assertEqual(obs, exp)\n\n exp = {}\n obs = self.no_metadata.getSampleMetadata('PC.636')\n self.assertEqual(obs, exp)", "def parse_metadata(self):\n import csv\n f = open(self.seq_id_list)\n self.names = f.readlines()\n f.close()\n num_samples = len(self.names)\n for i in range(len(self.names)):\n self.names[i] = self.names[i].replace(\"\\n\", \"\")\n # Go through the combined metadata file - it has most of the data we need.\n metadata = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/combinedMetadata.csv\"))\n metadata_count = 0\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n # Need to look in external WGS spades as well.\n metadata = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/combinedMetadata.csv\"))\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n\n\n\n # Also need to go through the rMLST file to make sure that all rMLST genes are covered.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/rmlst.csv\"))\n metadata_count = 0\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n metadata_count += 1\n # Check external runs.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/rmlst.csv\"))\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n\n\n\n # Finally, need to get info on the MLST sequence type.\n metadata_count = 0\n mlst_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Also from External.\n mlst_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/mlst.csv\"))\n 
for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Go through the ROGA Summary file from the access DB to get strain/textual IDs, and 1' and 2' enzymes.\n try: # Assume we're using ROGA summary OLF. If it isn't there, assume ROGA summary OLC\n df = pd.read_excel('ROGA_summary_OLF.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['Isolate ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Textual ID'][i]\n self.metadata[seqid][\"1Enzyme\"] = df[\"1' Enzyme\"][i]\n self.metadata[seqid][\"2Enzyme\"] = df[\"2' Enzyme\"][i]\n self.metadata[seqid][\"Source\"] = df['Source'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n\n\n except FileNotFoundError: # Should be a file not found error - look it up.\n metadata_count = 0\n df = pd.read_excel('ROGA_summary_OLC.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['OLN ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Lab ID'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n # print(self.metadata)\n self.check_for_empty_data()", "def test_parseMetadataMap(self):\n obs = MetadataMap.parseMetadataMap(self.overview_map_str)\n self.assertEqual(obs, self.overview_map)", "def get_Metadata(metafile):\n\n mslist_file = open(metafile, 'r')\n LINES = mslist_file.readlines()\n mslist_file.close()\n\n nBlocks = 6 # these are the number of correlator cards (PILOT survey value)\n \n obs_date = 'Observed from'\n code = 'Code'\n duration = 'Total elapsed time'\n antenna = 'antennas'\n frame = 'Frame'\n \n for i in range(len(LINES)):\n line = LINES[i]\n if line.find(antenna) >=0:\n TOKS = line.split()\n n_ant = TOKS[5][-2:]\n if line.find(obs_date) >=0:\n TOKS = line.split()\n start_obs_date = TOKS[6]\n end_obs_date = TOKS[8]\n if line.find(duration) >=0:\n TOKS = line.split()\n tobs = float(TOKS[10]) # in second\n if line.find(code) >= 0:\n next_line = LINES[i+1]\n TOKS = next_line.split()\n field = TOKS[5]\n ra = TOKS[6][:-5]\n dec = TOKS[7][:-4]\n if line.find(frame) >= 0:\n next_line = LINES[i+1]\n TOKS = next_line.split()\n total_obs_bw = float(TOKS[10])*nBlocks/1000.0 # kHz to MHz \n \n return n_ant, start_obs_date, end_obs_date, tobs, field, ra, dec, total_obs_bw", "def test_parseMetadataMap(self):\r\n obs = MetadataMap.parseMetadataMap(self.overview_map_str)\r\n self.assertEqual(obs, self.overview_map)", "def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata", "def parse_metadata(filename):\n with open(filename, 'r') as f:\n reader = csv.DictReader(f)\n metadata = {}\n for row in reader:\n key = row.pop('sample_filename')\n if key in metadata:\n print('WARNING: duplicate sample_id \"' + key + '\" found in \"' + filename + '\". 
The previous value(s) will be overwritten.')\n metadata[key] = row\n return metadata", "def MinimalBpseqParser(lines):\n result = {'HEADER':[], 'SEQ_STRUCT':[]}\n \n for line in lines:\n if line.startswith('Filename') or line.startswith('Organism') or\\\n line.startswith('Accession') or line.startswith('Citation') or\\\n \":\" in line:\n result['HEADER'].append(line.strip())\n elif len(line.split()) == 3:\n result['SEQ_STRUCT'].append(line.strip())\n else:\n continue #unknown\n return result", "def _parse_metadata(histline: str) -> Optional[Tuple[datetime, int, str]]:\n matches = PATTERN.match(histline)\n if matches:\n g = matches.groups()\n return (parse_datetime_sec(g[0]), int(g[1]), g[2])\n return None", "def get_metadata(diagnostics_dir, verbose=False):\n metafile = find_metadata_file(diagnostics_dir, 'mslist-2*txt', verbose=False)\n\n with open(metafile, 'r') as mslist_file:\n lines = mslist_file.readlines()\n\n nBlocks = 6 # these are the number of correlator cards (PILOT survey value)\n \n obs_metadata = ObservationMetadata()\n\n obs_date = 'Observed from'\n fields = 'Fields'\n code = 'Code'\n duration = 'Total elapsed time'\n antenna = 'antennas'\n frame = 'Frame'\n \n field_list = []\n\n for i in range(len(lines)):\n line = lines[i]\n if line.find(antenna) >=0:\n toks = line.split()\n obs_metadata.n_ant = toks[5][-2:]\n if line.find(obs_date) >=0:\n toks = line.split()\n obs_metadata.start_obs_date = toks[6]\n obs_metadata.end_obs_date = toks[8]\n if line.find(duration) >=0:\n toks = line.split()\n obs_metadata.tobs = float(toks[10]) # in second\n\n # Field details\n if line.find(fields) >=0:\n toks = line.split()\n obs_metadata.num_fields = int(toks[-1])\n\n if line.find(code) >= 0:\n for j in range(obs_metadata.num_fields):\n field_metadata = FieldMetadata()\n field_line = lines[i+j+1]\n toks = field_line.split()\n field_metadata.name = toks[5]\n field_metadata.ra = toks[6][:-5]\n field_metadata.dec = toks[7][:-4]\n field_metadata.num_rows = int(toks[9])\n obs_metadata.fields.append(field_metadata)\n\n if line.find(frame) >= 0:\n next_line = lines[i+1]\n toks = next_line.split()\n obs_metadata.total_obs_bw = float(toks[10])*nBlocks/1000.0 # kHz to MHz \n \n return obs_metadata #n_ant, start_obs_date, end_obs_date, tobs, field, ra, dec, total_obs_bw", "def parse_metadata_file(filename,\n logger,\n study_id=None,\n genome_name=None,\n case_list=False):\n \n logger.debug('Starting validation of meta file', extra={'filename_': filename})\n \n metaDictionary = {}\n with open(filename, 'rU') as metafile:\n for line_index, line in enumerate(metafile):\n # skip empty lines:\n if line.strip() == '':\n continue\n if ':' not in line:\n logger.error(\n \"Invalid %s file entry, no ':' found\",\n {True: 'case list', False: 'meta'}[case_list],\n extra={'filename_': filename,\n 'line_number': line_index + 1})\n meta_file_type = None\n return metaDictionary, meta_file_type\n key_value = line.split(':', 1)\n if len(key_value) == 2:\n metaDictionary[key_value[0]] = key_value[1].strip()\n\n if case_list:\n meta_file_type = MetaFileTypes.CASE_LIST\n else:\n meta_file_type = get_meta_file_type(metaDictionary, logger, filename)\n # if type could not be inferred, no further validations are possible\n if meta_file_type is None:\n return metaDictionary, meta_file_type\n\n missing_fields = []\n for field in META_FIELD_MAP[meta_file_type]:\n mandatory = META_FIELD_MAP[meta_file_type][field]\n if field not in metaDictionary and mandatory:\n logger.error(\"Missing field '%s' in %s file\",\n field,\n {True: 
'case list', False: 'meta'}[case_list],\n extra={'filename_': filename})\n missing_fields.append(field)\n\n if missing_fields:\n meta_file_type = None\n # all further checks would depend on these fields being present\n return metaDictionary, meta_file_type\n\n # validate genetic_alteration_type, datatype, stable_id\n stable_id_mandatory = META_FIELD_MAP[meta_file_type].get('stable_id',\n False)\n if stable_id_mandatory:\n valid_types_and_id = validate_types_and_id(metaDictionary, logger, filename)\n if not valid_types_and_id:\n # invalid meta file type\n meta_file_type = None\n return metaDictionary, meta_file_type\n\n for field in metaDictionary:\n if field not in META_FIELD_MAP[meta_file_type]:\n logger.warning(\n 'Unrecognized field in %s file',\n {True: 'case list', False: 'meta'}[case_list],\n extra={'filename_': filename,\n 'cause': field})\n\n # check that cancer study identifiers across files so far are consistent.\n if (\n study_id is not None and\n 'cancer_study_identifier' in metaDictionary and\n study_id != metaDictionary['cancer_study_identifier']):\n logger.error(\n \"Cancer study identifier is not consistent across \"\n \"files, expected '%s'\",\n study_id,\n extra={'filename_': filename,\n 'cause': metaDictionary['cancer_study_identifier']})\n # not a valid meta file in this study\n meta_file_type = None\n return metaDictionary, meta_file_type\n\n # type-specific validations\n if meta_file_type in (MetaFileTypes.SEG, MetaFileTypes.GISTIC_GENES):\n if genome_name is not None and metaDictionary['reference_genome_id'] != genome_name:\n logger.error(\n 'Reference_genome_id is not %s',\n genome_name,\n extra={'filename_': filename,\n 'cause': metaDictionary['reference_genome_id']})\n meta_file_type = None\n if meta_file_type == MetaFileTypes.MUTATION:\n if ('swissprot_identifier' in metaDictionary and\n metaDictionary['swissprot_identifier'] not in ('name',\n 'accession')):\n logger.error(\n \"Invalid swissprot_identifier specification, must be either \"\n \"'name' or 'accession'\",\n extra={'filename_': filename,\n 'cause': metaDictionary['swissprot_identifier']})\n meta_file_type = None\n\n logger.info('Validation of meta file complete', extra={'filename_': filename})\n return metaDictionary, meta_file_type", "def getTracksFromMitiFile(lines):\n\n tracks = {}\n\n for line in lines:\n items = line.strip().split()\n if len(items) < 9 or len(items) > 10:\n raise Exception(\"are input data file lines in MITI format? 
Found one had %i items\" % len(items))\n curTrackId = int(items[0])\n\n if not tracks.has_key(curTrackId):\n tracks[curTrackId] = Track([])\n\n newDet = Detection(line)\n tracks[curTrackId].addDetection(newDet)\n \n\n return tracks", "def Parse(filename):\n\n f = open(filename, 'r')\n\n metadata = Metadata()\n data = [] # array of dataset\n dataset = None\n\n for num, line in enumerate(f):\n try:\n line = line.strip()\n if not line: continue\n\n if not metadata.complete:\n metadata.Parse(line)\n continue\n\n if re.match('[a-z_]', line):\n continue\n\n if line.startswith('# StopWatch'): # Start of a new dataset\n if dataset:\n if dataset.summary:\n metadata.UpdateWith(dataset)\n else:\n data.append(dataset)\n\n dataset = DataSet(line)\n continue\n\n if line.startswith('#'):\n continue\n\n # must be data at this stage\n try:\n (time, value) = line.split(None, 1)\n except ValueError:\n print 'skipping line %d: %s' % (num, line)\n continue\n\n if dataset and not dataset.summary:\n dataset.Add(float(time), float(value))\n\n except Exception:\n print 'Error parsing line %d' % num, sys.exc_info()[0]\n raise\n data.append(dataset)\n if not metadata.complete:\n print \"\"\"Error missing metadata. Did you mount debugfs?\n [adb shell mount -t debugfs none /sys/kernel/debug]\"\"\"\n sys.exit(1)\n return (metadata, data)", "def parse_lines(lines):\n for line in lines:\n yield Record(line)", "def ExtractTrackInformation(lines):\n\n # The starting line should be something like ' TRACK 01 AUDIO'\n # and we want to create ``data = {'track': '1'}``\n # NB: Cue format has a 99 track limit\n data = {\"track\": CueMetadata.ExtractProperty(lines[0], \"TRACK\")[0:2].lstrip(\"0\")}\n\n # Parse the remaining lines for this track to find the track starting time\n # which is typically, but not necessarily, a line starting with ' INDEX 01'\n # Also want to pick up any extra tags in the block and store it in ``data``,\n # eg, the 'TITLE' field. Since not all fields are valid but remarks are\n # it's necessary to \"un-remark\" the lines starting with 'REM '\n times = {}\n for line in lines[1:]:\n if not line.startswith(' ' * 4):\n break\n line = line.strip()\n # Don't consider multi-artist albums\n if line.startswith(\"PERFORMER\"):\n continue\n line = line.replace(\"INDEX \", \"INDEX\") # Turn 'INDEX 01' into 'INDEX01', etc.\n line = line.replace(\"REM \", \"\") # Make remarks appear as valid tags\n name = line.split(\" \")[0]\n info = CueMetadata.ExtractProperty(line, name)\n if not info:\n continue\n name = name.lower()\n if \"INDEX\" in line:\n # Handle these time codes separately since there may be more than one\n times[name] = time.CueTimeToMKATime(info)\n else:\n data[name] = info\n # In CUE files, 'INDEX 00' is (typically) used for pre-gap and 'INDEX 01' denotes\n # the start of the actual track. Higher indices are possible, but rarely used,\n # typically for access to portions of songs. 
Here we want to prefer 'INDEX 01'\n # and use 'INDEX 00' if there is no 'INDEX 01' while ignoring higher indices.\n for idx in [\"index01\", \"index00\"]:\n if idx in times:\n time_code = idx\n break\n else:\n raise CueFormatError(f\"No valid time codes found for track {data['track']}\")\n data[\"start_time\"] = times[time_code]\n return data", "def parse_metadata(metadata):\n id_to_classes_recount = {}\n with open(metadata, \"r\") as file:\n header = next(file)\n for line in file:\n try:\n splitted_line = line.split(\"\\n\")[0].split(\"\\t\")\n file_id = splitted_line[22]\n project = splitted_line[77]\n sample_type = splitted_line[107]\n if project == \"TCGA-LIHC\":\n if sample_type == 'Primary Tumor':\n id_to_classes_recount[file_id] = 1\n elif sample_type == 'Solid Tissue Normal':\n id_to_classes_recount[file_id] = 0\n elif sample_type == 'Recurrent Tumor':\n id_to_classes_recount[file_id] = 1\n else:\n print(sample_type)\n except:\n pass\n return id_to_classes_recount", "def sample_ids_from_metadata_description(mapping_f, valid_states_str):\r\n map_data, map_header, map_comments = parse_mapping_file(mapping_f)\r\n valid_states = parse_metadata_state_descriptions(valid_states_str)\r\n sample_ids = get_sample_ids(map_data, map_header, valid_states)\r\n\r\n if len(sample_ids) < 1:\r\n raise ValueError(\"All samples have been filtered out for the criteria\" +\r\n \" described in the valid states\")\r\n\r\n return sample_ids", "def test_parse_metadata_0(self):\n data = ET.parse(\"data/metadata_0.xml\")\n data_str = ET.tostring(data.getroot())\n\n dict = tesse_ros_bridge.utils.parse_metadata(data_str)\n self.assertEqual(dict['position'], [-5.692576, 2.499105, 10.63836])\n self.assertEqual(dict['quaternion'], [0, 0.5372996, 0, 0.8433914])\n self.assertEqual(dict['velocity'], [0, -0.0004944276, 0])\n self.assertEqual(dict['ang_vel'], [0, 0, 0])\n self.assertEqual(dict['acceleration'], [0, 0.001516496, 0])\n self.assertEqual(dict['ang_accel'], [0, 0, 0])\n self.assertEqual(dict['time'], 7.935)\n self.assertEqual(dict['collision_status'], False)", "def extract_metadata(rawfile,codeversions={}):\r\n import datetime\r\n add_standard_metadata(rawfile)\r\n # get monochromator-related information\r\n mom = average_metadata(rawfile['$entry/instrument/crystal/omega'])\r\n tk_angle = average_metadata(rawfile['$entry/instrument/crystal/takeoff_angle'])\r\n # get the date\r\n date_form = datetime.datetime.strptime(str(rawfile['$entry/start_time']),\"%Y-%m-%d %H:%M:%S\")\r\n mono_change = datetime.datetime(2009,04,01)\r\n if date_form < mono_change:\r\n monotype = \"115\"\r\n else:\r\n monotype = \"335\"\r\n hklval = pick_hkl(mom - tk_angle/2.0,monotype)\r\n if len(hklval)==3: # i.e. 
h,k,l found\r\n rawfile.add_metadata(\"_pd_instr_monochr_pre_spec\",\r\n hklval + \" reflection from Ge crystal, \"+monotype+\" cut\",tag=\"CIF\")\r\n wavelength = calc_wavelength(hklval,tk_angle)\r\n rawfile.add_metadata(\"_diffrn_radiation_wavelength\",\"%.3f\" % wavelength,tag=\"CIF\")\r\n rawfile.add_metadata(\"_[local]_diffrn_radiation_wavelength_determination\",\r\n \"Wavelength is calculated from monochromator hkl and takeoff angle and is therefore approximate\",\r\n tag=\"CIF\")\r\n # The following is changed later if the primary collimator is found to be inserted\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"%.3f\" % (0.099*2.0*wavelength),tag=\"CIF\")\r\n # Do some logic to obtain collimator positions\r\n pcr = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_rotation\"])\r\n pcx = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_translation\"])\r\n if pcx > 120:\r\n if abs(pcr-360.0)<5 or abs(pcr) < 5: # 5' collimator\r\n coll_string = \"A 5' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.0833\",tag=\"CIF\")\r\n else:\r\n coll_string = \"A 10' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.1667\",tag=\"CIF\")\r\n else: coll_string = \"No primary monochromator \"\r\n try:\r\n scr = average_metadata(rawfile['$entry/sample/secondary_collimator'])\r\n if scr>0.5:\r\n coll_string += \" and a 10' secondary collimator post-monochromator.\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_mono/spec\",\"0.1667\",tag=\"CIF\")\r\n else:\r\n coll_string += \" and no secondary collimator.\"\r\n rawfile.add_metadata(\"_diffrn_radiation_collimation\",coll_string,tag=\"CIF\")\r\n except AttributeError: #some early files are missing secondary collimator\r\n pass\r\n # These values were in the CIF writing area of the Java routines, best put here\r\n try:\r\n program_release = str(rawfile[\"$entry/program_revision\"])\r\n except AttributeError:\r\n program_release = str(rawfile[\"$entry/sics_release\"])\r\n rawfile.add_metadata(\"_computing_data_collection\",str(rawfile[\"$entry/program_name\"]) + \" \" + \\\r\n program_release,\"CIF\")\r\n # List the code versions used for data reduction\r\n codelist = \"\"\r\n for key in codeversions.keys():\r\n codelist += \"%-20s: %s\\n\" % (key,codeversions[key])\r\n rawfile.add_metadata(\"_computing_data_reduction\", str(\"Gumtree Echidna/Python routines, Git versions:\\n\" + codelist),\"CIF\")\r\n rawfile.add_metadata(\"_pd_spec_special_details\",sanitize(str(rawfile[\"$entry/sample/name\"])),\"CIF\")\r\n rawfile.add_metadata(\"_[local]_data_collection_description\",str(rawfile[\"$entry/sample/description\"]),\"CIF\")\r\n start_time = str(rawfile[\"$entry/start_time\"]).replace(\" \",\"T\")\r\n end_time = str(rawfile[\"$entry/end_time\"]).replace(\" \",\"T\")\r\n rawfile.add_metadata(\"_pd_meas_datetime_initiated\", start_time,\"CIF\")\r\n rawfile.add_metadata(\"_[local]_datetime_completed\", end_time,\"CIF\")\r\n try:\r\n username = str(rawfile[\"user_name\"])\r\n except:\r\n username = \"?\"\r\n rawfile.add_metadata(\"_pd_meas_info_author_name\", sanitize(username),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_email\", str(rawfile[ \"$entry/user/email\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_phone\", str(rawfile[ \"$entry/user/phone\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_2theta_monochr_pre\",\"%.3f\" % tk_angle,\"CIF\")\r\n 
rawfile.add_metadata(\"_pd_instr_dist_mono/spec\", \"%.1f\" % average_metadata(rawfile[ \"$entry/sample/mono_sample_mm\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_dist_spec/detc\",\"%.1f\" % average_metadata(rawfile[\"$entry/instrument/detector/radius\"]),\"CIF\")\r\n try:\r\n rawfile.add_metadata(\"_diffrn_source_power\", \"%.2f\" % (average_metadata(rawfile[\"$entry/instrument/source/power\"])*1000),\"CIF\")\r\n except AttributeError: #sometimes source power is missing\r\n pass\r\n # imgCIF information about geometry\r\n # axis loop\r\n names = (('_axis.id','_axis.type','_axis.equipment','_axis.depends_on'),)\r\n values = [['source','gravity','stth','horizontal','vertical'],\r\n ['.','.','rotation','rotation','translation'],\r\n ['source','gravity','detector','detector','detector'],\r\n ['.','.','.','stth','stth']]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n radius = rawfile.__dict__['ms'][\"_pd_instr_dist_spec/detc\"]\r\n # add the vectors:\r\n \"\"\"\r\n source 0 0 1 . . .\r\n gravity -1 0 0 . . .\r\n stth 1 0 0 . . .\r\n horizontal 1 0 0 . . .\r\n vertical 1 0 0 0 0 -728\r\n \"\"\"\r\n vector_dict = {\"_axis.vector[1]\":['0','-1','1','1','1'],\r\n \"_axis.vector[2]\":['0','0','0','0','0'],\r\n \"_axis.vector[3]\":['1','0','0','0','0'],\r\n \"_axis.offset[1]\":['.','.','.','.','.'],\r\n \"_axis.offset[2]\":['.','.','.','.','.'],\r\n \"_axis.offset[3]\":['1','0','0','0',\"-\"+radius]}\r\n rawfile.__dict__['ms'].AddToLoop('_axis.id',vector_dict)\r\n # Add information about the stth positions for later use\r\n rawfile.add_metadata(\"_diffrn_scan.id\",\"1\",\"CIF\")\r\n rawfile.add_metadata(\"_diffrn_scan.frames\",rawfile.shape[0],\"CIF\")\r\n frame_ids = map(lambda a:\"%d\" % a,range(rawfile.shape[0]))\r\n stths = rawfile.stth[:]\r\n names = ((\"_diffrn_scan_frame.frame_id\",\"_diffrn_scan_frame.frame_number\"),)\r\n values = [frame_ids,range(1,rawfile.shape[0]+1)] #Spec says start from 1\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n names = ((\"_diffrn_scan_frame_axis.frame_id\",\"_diffrn_scan_frame_axis.axis_id\",\r\n \"_diffrn_scan_frame_axis.angle\"),)\r\n values = [frame_ids,['stth']*rawfile.shape[0],map(float,stths)]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n return rawfile", "def parse_meta_file(fname):\n flds = {}\n basename = re.match('(^.+?)\\..+', os.path.basename(fname)).groups()[0]\n flds['basename'] = basename\n with open(fname) as f:\n text = f.read()\n # split into items\n for item in re.split(';', text):\n # remove whitespace at beginning\n item = re.sub('^\\s+', '', item)\n match = re.match('(\\w+) = (\\[|\\{)(.*)(\\]|\\})', item, re.DOTALL)\n if match:\n key, _, value, _ = match.groups()\n # remove more whitespace\n value = re.sub('^\\s+', '', value)\n value = re.sub('\\s+$', '', value)\n # print key,':', value\n flds[key] = value\n # now check the needed things are there\n needed_keys = ['dimList', 'nDims', 'nrecords', 'dataprec']\n for k in needed_keys:\n assert k in flds\n # transform datatypes\n flds['nDims'] = int(flds['nDims'])\n flds['nrecords'] = int(flds['nrecords'])\n # endianness is set by _read_mds\n flds['dataprec'] = np.dtype(re.sub(\"'\", '', flds['dataprec']))\n flds['dimList'] = [[int(h) for h in\n re.split(',', g)] for g in\n re.split(',\\n', flds['dimList'])]\n if 'fldList' in flds:\n flds['fldList'] = [re.match(\"'*(\\w+)\", g).groups()[0] for g in\n re.split(\"'\\s+'\", flds['fldList'])]\n assert flds['nrecords'] == len(flds['fldList'])\n return flds", "def 
test_filter_mapping_file_by_metadata_states(self):\r\n actual = filter_mapping_file_by_metadata_states(\r\n self.tutorial_mapping_f,\r\n \"Treatment:Control\")\r\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\r\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\r\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\r\nPC.356\tACAGACCACTCA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061126\tControl_mouse_I.D._356\r\nPC.481\tACCAGCGACTAG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20070314\tControl_mouse_I.D._481\r\nPC.593\tAGCAGCACTTGT\tYATGCTGCCTCCCGTAGGAGT\tControl\t20071210\tControl_mouse_I.D._593\"\"\"\r\n self.assertEqual(actual, expected)", "def metadata(self, tokens):\n\n return self.process_value_pairs(tokens, \"metadata\")", "def read_metadata(\n filename: Union[Path, str], marker: str = \"---\", **kwargs: Any\n) -> Dict[str, Any]:\n return read_header(filename, marker, **kwargs)[0]", "def get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)", "def load_metadata(self, path):\n self.paths = []\n self.annotations = []\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip().split(\" \")\n \n rgb_path = line[0]\n\n if len(line) > 1:\n bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])\n else:\n bounding_boxes = []\n \n self.annotations.append({\n \"rgb_path\": rgb_path, \n \"bounding_boxes\": bounding_boxes,\n })", "def get_taxa(taxa_fname, sample_ids):\n try:\n lines = open(taxa_fname, 'U').readlines()\n except (TypeError, IOError):\n raise MissingFileError, 'Taxa summary file required for this analysis'\n map = parse_mapping_file(lines)\n return map", "def parse_lines(filename):\n line_counter = 0\n with open(filename, 'r') as rf:\n for line_txt in rf:\n try:\n d = json.loads(line_txt)\n tup = (\n d['attributed_to'],\n int(d['date_time'][8:10]),\n d.get('used_first_time_today', False),\n d.get('first_utm_source', 'unknown') \n )\n except:\n print('Error parsing line_txt:', line_txt)\n line_counter += 1\n if line_counter % 10 ** 6 == 0:\n print('read %dM lines' % (line_counter // 10 ** 6))\n yield tup # yield: https://stackoverflow.com/a/231855", "def test_filter_mapping_file_by_metadata_states(self):\n actual = filter_mapping_file_by_metadata_states(self.tutorial_mapping_f,\"Treatment:Control\")\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\nPC.356\tACAGACCACTCA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061126\tControl_mouse_I.D._356\nPC.481\tACCAGCGACTAG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20070314\tControl_mouse_I.D._481\nPC.593\tAGCAGCACTTGT\tYATGCTGCCTCCCGTAGGAGT\tControl\t20071210\tControl_mouse_I.D._593\"\"\"\n self.assertEqual(actual,expected)", "def parsemeta(metalines):\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))", "def 
parse_metadata_line(self, line, get_set=False):\n self.E_str = \"parse_metadata_line\"\n\n metadata_name_regex = \"[A-Za-z0-9_-]+\"\n metadata_regex = f\"{VAR_REGEX}\\['{metadata_name_regex}'\\]\"\n metadata_regex += f\"|{VAR_REGEX}\\['{metadata_name_regex}'\\]\"\n\n corr_syn = \"\\n\\tTo Set: <var_name>['<metadata_name>'] = <value>\\n\"\n corr_syn = \"\\n\\tTo Get: <var_name>['<metadata_name>']\"\n\n # First get the all the parts of the line that contain metadata references\n value = False\n str_part, non_str = gen_parse.get_str_between_delims(line, '\"')\n metadata_parts = re.findall(metadata_regex, line)\n\n # Now get the metadata name and variable name for each reference\n new_line = line\n all_var_names, all_metadata_names = [], []\n for var_i, var in enumerate(metadata_parts):\n new_line = new_line.replace(var, f\"METADATA_{var_i}\")\n\n # First get the variable name\n var = var.strip(' $')\n words = var.split('[')\n if len(words) != 2:\n self.print_error(f\"Syntax Error: Correct metadata syntax {corr_syn}\")\n var_name = words[0]\n\n # Is the metadata setting data or getting it?\n if get_set is not False:\n get_set = \"get\"\n if var_name in non_str.split('=')[0]:\n get_set = \"set\"\n\n # Next get the metadata name\n metadata_name, _ = gen_parse.get_str_between_delims(words[1], \"'\")\n if not metadata_name:\n metadata_name, _ = gen_parse.get_str_between_delims(words[1], '\"')\n if not metadata_name:\n self.print_error(f\"Correct syntax for metadata is: {corr_syn}\")\n\n # Now get the metadata value\n if var_name not in self.variables:\n self.print_error(f\"Undeclared variable '{var_name}'\")\n Var = getattr(self, var_name)\n\n err = metadata_name not in Var.metadata and get_set is not False\n err *= get_set == \"get\"\n if err:\n err_msg = f\"Can't find metadata '{metadata_name}' in variable '{var_name}'\"\n err_msg += \"\\n\\n\"\n err_msg += f\"{var_name} metadata: {Var.metadata}\"\n self.print_error(err_msg)\n\n all_var_names.append(var_name)\n all_metadata_names.append(metadata_name)\n\n return all_var_names, all_metadata_names, new_line", "def parse_file(self):\n # the header was already read in the init, start at the first sample line\n\n for line in self._stream_handle:\n\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n # ex: data_dict = {'sci_bsipar_temp':10.67, n1, n2, nn}\n data_dict = self._read_data(line)\n\n if GliderParser._has_science_data(data_dict, self._particle_class):\n # create the timestamp\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n # create the particle\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))", "def parse(self, lines):\n # convert file to string deleting end of line charcters\n citations_string = self.prepare_file(lines)\n # extract the entries from the string\n entries = list(self.find_entries(citations_string))\n entries.append(len(citations_string))\n # parse each entry to generate a citation\n for idx, jdx in zip(entries[:-1], entries[1:]):\n self.parse_entry(citations_string[idx:jdx])\n return self.force_field.citations", "def parse(lines):\n blob_info = defaultdict(list, {})\n\n i = None\n for i, line in enumerate(lines):\n ld = line.split('%')\n\n # parse the first block with the generic stats\n lda = ld[0].split()\n blob_info['frame'].append(int(lda[0]))\n blob_info['time'].append(float(lda[1]))\n blob_info['centroid'].append((float(lda[2]), 
float(lda[3])))\n blob_info['area'].append(int(lda[4]))\n blob_info['std_vector'].append((float(lda[5]), float(lda[6])))\n blob_info['std_ortho'].append(float(lda[7]))\n blob_info['size'].append((float(lda[8]), float(lda[9])))\n\n # if there are geometry sections, parse them too.\n if len(ld) == 4:\n blob_info['midline'].append(tuple(zip(*alternate([int(x) for x in ld[1].split()]))))\n\n # contour data\n ldc = ld[3].split()\n blob_info['contour_start'].append((int(ldc[0]), int(ldc[1])))\n blob_info['contour_encode_len'].append(int(ldc[2]))\n blob_info['contour_encoded'].append(ldc[3])\n else:\n blob_info['midline'].append(None)\n blob_info['contour_start'].append((0, 0))\n blob_info['contour_encode_len'].append(None)\n blob_info['contour_encoded'].append(None)\n\n # check if blob was empty\n if i is None:\n return None\n\n # prevent referencing non-existant fields\n blob_info.default_factory = None\n\n # verify everything is the same length\n frames = len(blob_info['frame'])\n assert all(len(v) == frames for v in blob_info.values())\n\n return blob_info", "def parse(lines, descriptions):\n # TODO does startswith with an empty string always return true?\n result = {}\n for description in descriptions: # Fill dict with empty arrays for all entries\n result[description[0]] = []\n\n for line in lines:\n words = line.split()\n for description in descriptions:\n try:\n result[description[0]].append(parse_line(words, description[0], description[1]))\n break\n except ValueError:\n pass\n return result", "def parse_mapping_file_to_dict(*args, **kwargs):\r\n mapping_data, header, comments = parse_mapping_file(*args, **kwargs)\r\n return mapping_file_to_dict(mapping_data, header), comments", "def _parse_metadata(config):\n if not config.active or config.device_metadata is None:\n return None\n\n width, height = config.width, config.height\n points = []\n for point in config.device_metadata.split(\"|\"):\n try:\n x, y = point.split(\";\")\n points.append([float(x) * width, float(y) * height])\n except:\n return None\n\n if len(points) != 4:\n return None\n\n return points", "def get_metadata(hf_patients_file, metadata_file, output_file):\n\n # Use 'dicom_id' as names for row indices\n hf_patients = pd.read_csv(hf_patients_file, sep=',', index_col=\"dicom_id\")\n\n # Use 'dicom' as name\n metadata = pd.read_csv(metadata_file, index_col=\"dicom\", dtype={\"StudyDate\": str, \"StudyTime\": str})\n\n # Disregard all columns except 'subject_id' and 'study_id'\n hf_patients = pd.concat([hf_patients['study_id'], hf_patients['subject_id']], axis=1)\n\n # Find study date/time for heart failure patients\n study_date = metadata[\"StudyDate\"][hf_patients.index]\n study_time = metadata[\"StudyTime\"][hf_patients.index]\n\n result = pd.concat([hf_patients, study_date, study_time], axis=1)\n result = result.rename(columns={\"StudyDate\": \"study_date\", \"StudyTime\": \"study_time\"})\n\n result.to_csv(output_file)", "def build_metadata(self, meta):\n if meta is None:\n try:\n self.parser = GenericParser(\n fn_re='{}/(e\\d+s\\d+)_.*/Production.nc'.format(self.data_folder),\n group_names=['sim'],\n group_transforms=[lambda x: x],\n top_fn='',\n step_ps=self.timestep\n )\n meta = gather_metadata('{}/e*/*nc'.format(self.data_folder), parser)\n except:\n logger.warning(\"Could not automatically build metadata\")\n return None\n else:\n if not isinstance(meta, pd.DataFrame):\n meta = load_meta(meta)\n return meta", "def parse_metadata(source_text):\n meta = {}\n key = None\n lines = source_text.split('\\n')\n META_RE = 
re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\\s*(?P<value>.*)')\n META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')\n while 1:\n line = lines.pop(0)\n if line.strip() == '':\n break # blank line - done\n m1 = META_RE.match(line)\n if m1:\n key = m1.group('key').lower().strip()\n value = m1.group('value').strip()\n try:\n meta[key].append(value)\n except KeyError:\n meta[key] = [value]\n else:\n m2 = META_MORE_RE.match(line)\n if m2 and key:\n # Add another line to existing key\n meta[key].append(m2.group('value').strip())\n else:\n lines.insert(0, line)\n break # no meta data - done\n return (meta, '\\n'.join(lines))", "def read_metadata_record(raw_features_string):\n all_data = json.loads(raw_features_string)\n metadata_keys = {\"sha256\", \"appeared\", \"label\", \"avclass\"}\n return {k: all_data[k] for k in all_data.keys() & metadata_keys}", "def _parse_metadata ( self ):\n self.date = []\n self.atcorr_refl = []\n self.saa = []\n self.sza = []\n self.vaa = []\n self.vza = []\n self.res = []\n self._mask = []\n\n for md_file in self.metadata:\n tree = xml.etree.ElementTree.ElementTree ( file=md_file ).getroot()\n dirname = os.path.dirname ( md_file )\n try:\n self.date.append(\n datetime.datetime.strptime(tree[0][1].text, \"%Y-%m-%d %H:%M:%S\") )\n except:\n self.date.append(\n datetime.datetime.strptime(tree[0][1].text, \"%Y-%m-%d %H:%M:%S.%f\") )\n self.atcorr_refl.append(\n os.path.join ( dirname, tree[1][2].text ) )\n self.saa.append( float ( tree[4][10][0].text ) )\n self.sza.append( float ( tree[4][10][1].text ) )\n self.vaa.append( float ( tree[4][10][2].text ) )\n self.vza.append( float ( tree[4][10][3].text ) )\n self.res.append( float ( tree[2][1].text ) )\n self._mask.append( os.path.join ( dirname, tree[1][5].text ) )", "def extract_metadata(source,\n is_metadata=is_metadata_line,\n parser=get_metadata,\n container=MetaData.from_raw_fields):\n class in_metadata(object):\n def __init__(self):\n self.in_metadata = True\n def __call__(self, line):\n if not is_metadata(line):\n self.in_metadata = False\n return self.in_metadata\n\n # Split the source, assuming metadata is at the beginning\n # (Keep only the group data, i)\n grouping = itertools.groupby(source, key=in_metadata())\n #grouping = itertools.imap(operator.itemgetter(1), grouping)\n has_metadata, first_group = next(grouping)\n if has_metadata:\n # We need to strictly consume all the group members here or\n # they will be lost to the groupby iterator process\n metadata_lines = list(first_group)\n _, body_iterator = next(grouping)\n else:\n # Use the first group if no metadata was seen\n metadata_lines = []\n body_iterator = first_group\n\n parsed = load(metadata_lines,\n is_metadata=is_metadata,\n container=container,\n parser=parser)\n return parsed, body_iterator", "def extract_metadata(self):\n metadata_file_path = self.create_metadata_file(\".metadata.txt\")\n mt = self.mimetype\n metadata_processing_method = self.metadata_mimetype_methods.get(mt)\n if metadata_processing_method:\n # TODO: should we return metadata and write it here instead of in processing method?\n metadata_processing_method(metadata_file_path)", "def LoadMetadata(filename):\r\n## print filename\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.zvi'))\r\n if globbed:\r\n return LoadZVIMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.xml'))\r\n if globbed:\r\n return LoadAxioVisionXMLMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'metadata.txt'))\r\n if 
globbed:\r\n return LoadMMMetaData(globbed[0])\r\n return None\r\n #no further valid options, crash horribly\r", "def __init__(self, src_lines):\n self.study_id = None\n self.citation = None\n self.abstract = None\n self.authors = []\n self.study_matrices = {}\n self.history_date = None\n self.history_time = None\n self.history_person = None\n self.history_event = None\n self.analyses = []\n \n self.parse_src_lines(src_lines)", "def _parse_metadata ( self ):\n self.date = []\n self.atcorr_refl = []\n self.saa = []\n self.sza = []\n self.vaa = []\n self.vza = []\n self.res = []\n self._mask = []\n for md_file in self.metadata:\n # This is required to get rid of the namespace cruft\n it = xml.etree.ElementTree.iterparse ( md_file )\n for _, el in it:\n el.tag = el.tag.split('}', 1)[1] # strip all namespaces\n tree = it.root\n\n dirname = os.path.dirname ( md_file )\n\n self.date.append( datetime.datetime.strptime(\n tree.find(\"global_metadata/acquisition_date\").text,\n \"%Y-%m-%d\") )\n\n for c in tree.findall (\"global_metadata/corner\"):\n if c.attrib['location'] == \"UL\":\n ulx = float ( c.attrib['longitude'] )\n uly = float ( c.attrib['latitude'] )\n else:\n lrx = float ( c.attrib['longitude'] )\n lry = float ( c.attrib['latitude'] )\n\n self.vaa.append ( get_vaa ( lrx, lry, ulx, uly ) )\n\n #self.atcorr_refl.append( os.path.join ( dirname, tree[1][2].text ) )\n self.saa.append(\n float ( tree.find(\"global_metadata/solar_angles\").attrib['azimuth'] ) )\n self.sza.append(\n float ( tree.find(\"global_metadata/solar_angles\").attrib['zenith'] ) )\n self.vza.append( 0.0 ) # Note that LDCM can look sideways a bit!\n self.res.append( 30. ) # 30m\n\n images = []\n mask = []\n for b in tree.findall(\"bands/band\"):\n if b.attrib['product'] == \"toa_refl\":\n fname = b.find(\"file_name\").text\n if fname.find ( \"qa.tif\" ) < 0:\n images.append ( os.path.join ( dirname, fname ) )\n elif b.attrib['product'] == \"cfmask\":\n mask = os.path.join ( dirname, fname )\n # Create VRT?\n subprocess.call ([\"gdalbuildvrt\", \"-overwrite\", \"-separate\",\n os.path.join ( dirname, md_file.replace(\".xml\", \"_crop.vrt\" )) ] + images )\n self.atcorr_refl.append ( os.path.join ( dirname,\n md_file.replace(\".xml\", \"_crop.vrt\" )) )\n self._mask.append( mask )", "def parse_mca(f):\n for line in f:\n yield RECORD_TYPES[RecordIdentity(line[:2])].from_string(line)", "def parse_metadata(path, site):\n headers = ['name', 'lat', 'lon', 'altitude', 'depth', 'prefecture', 'otherlat', 'otherlon', 'instrument']\n site_info = pd.read_csv(path, index_col=0, header=None).loc[site].values[0:9]\n return {header: site_info[i] for i, header in enumerate(headers)}", "def parse_file(self, infile, chardict, labeldict):\n examples = []\n fin = io.open(infile, 'r')\n # idx is for the index of the row in the \n # original file before shuffling and randomization\n idx = 0\n for line in fin: \n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n # print entity\n ent = map(lambda c:chardict[c], list(entity))\n lab = map(lambda l:labeldict[l] if l in labeldict else 0, label.split(','))\n examples.append((idx, ent, lab))\n idx += 1\n fin.close()\n print \"num_rows:\", len(examples), \" index\", idx\n return examples", "def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] 
= r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p", "def test_metadata(self):\n cr = CaseReader(self.filename)\n self.assertEqual(cr.format_version, format_version,\n msg='incorrect format version')\n self.assertIsNone(cr.parameters,\n msg='parameter metadata should be None')\n self.assertIsNone(cr.unknowns, msg='unknown metadata should be None')", "def test_parse_sample_file(self):\n parser = wv2_xml.Parser(\n 'imars_etl/drivers_metadata/wv2_xml/test_files/'\n 'WV02_20140218163417_103001002E0EB600_14FEB18163417-M1BS-'\n '500534956040_01_P004.xml'\n )\n metadata_read = parser.get_metadata()\n print('md: {}'.format(metadata_read))\n expected_subset = {\n \"time\": \"2014-02-18T16:34:17.926650\"\n }\n\n assert expected_subset.items() <= metadata_read.items()", "def test_sample_ids_from_metadata_description(self):\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"Treatment:Foo\")\n self.tutorial_mapping_f.seek(0)\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"DOB:!20061218,!20070314,!20071112,\"\n \"!20080116\")", "def parse(self):\n try:\n self.open_file()\n lines = list(self._file)\n\n if len(lines) > 0:\n text = ''.join(lines)\n regex = 'Song \\d+\\nStart (\\d+:\\d+:\\d+)\\nEnd (\\d+:\\d+:\\d+)\\nLength (\\d+.\\d+)'\n match = re.findall(regex, text)\n if len(match):\n starts = []\n ends = []\n lengths = []\n\n for i in range(len(match)):\n starts.append(match[i][0])\n ends.append(match[i][1])\n lengths.append(float(match[i][2]))\n\n for i in range(len(match)):\n self.debug_data.append({\n 'start':starts[i],'end':ends[i],'length':lengths[i]})\n\n match = re.search('T\\d_S(\\d{4})_.*.txt', self._filepath)\n if match:\n self._experiment_metadata['session_id'] = int(match.groups()[0])\n else:\n raise EIMParsingError(\"No valid session id found in filename %s\" % self._filepath)\n\n finally:\n if self._file and not self._file.closed:\n self.close_file()", "def read_metadata(self, file=None):\n if file is None:\n file = self.meta_data_file\n\n try:\n self.meta_data = self.input_dataframe(file, index_col=None)\n except IOError:\n self.meta_data = self.create_default_meta_data(self.expression_matrix)", "def test_parse_mapping_file(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n obs = parse_mapping_file(s1)\r\n self.assertEqual(obs, exp)\r\n\r\n # We don't currently support this, but we should soon...\r\n # check that first non-comment, non-blank line is used as\r\n # header\r\n # s1 = ['sample\\ta\\tb', '#comment line to skip',\\\r\n # 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n # exp = ([['x','y','z'],['i','j','k']],\\\r\n # ['sample','a','b'],\\\r\n # ['comment line to skip','more skip'])\r\n # obs = parse_mapping_file(s1)\r\n # self.assertEqual(obs, exp)\r\n\r\n # check that we strip double quotes by default\r\n s2 = ['#sample\\ta\\tb', '#comment line to skip',\r\n '\"x \"\\t\" y \"\\t z ', ' ', '\"#more skip\"', 'i\\t\"j\"\\tk']\r\n obs = parse_mapping_file(s2)\r\n self.assertEqual(obs, exp)", "def parse_map(file):\n with open(FILE) as f:\n return Map([line[:-1] for line in 
f])", "def extract_metadata(parser_config, snippet):\n return parser_config['implementation'](snippet)", "def extract_meta_data(video_file_name, output_file=meta.txt, *args, **kwargs):", "def extract_points(fp: T.BinaryIO) -> T.Optional[T.List[geo.Point]]:\n\n points = None\n movie_timescale = None\n media_timescale = None\n elst_entries = None\n\n for h, s in parser.parse_path(fp, [b\"moov\", [b\"mvhd\", b\"trak\"]]):\n if h.type == b\"trak\":\n trak_start_offset = s.tell()\n\n descriptions = sample_parser.parse_descriptions_from_trak(\n s, maxsize=h.maxsize\n )\n camm_descriptions = [d for d in descriptions if d[\"format\"] == b\"camm\"]\n if camm_descriptions:\n s.seek(trak_start_offset, io.SEEK_SET)\n camm_samples = _extract_camm_samples(s, h.maxsize)\n\n points_with_nones = (\n _parse_point_from_sample(fp, sample)\n for sample in camm_samples\n if sample.description[\"format\"] == b\"camm\"\n )\n\n points = [p for p in points_with_nones if p is not None]\n if points:\n s.seek(trak_start_offset)\n elst_data = parser.parse_box_data_first(\n s, [b\"edts\", b\"elst\"], maxsize=h.maxsize\n )\n if elst_data is not None:\n elst_entries = cparser.EditBox.parse(elst_data)[\"entries\"]\n\n s.seek(trak_start_offset)\n mdhd_data = parser.parse_box_data_firstx(\n s, [b\"mdia\", b\"mdhd\"], maxsize=h.maxsize\n )\n mdhd = cparser.MediaHeaderBox.parse(mdhd_data)\n media_timescale = mdhd[\"timescale\"]\n else:\n assert h.type == b\"mvhd\"\n if not movie_timescale:\n mvhd = cparser.MovieHeaderBox.parse(s.read(h.maxsize))\n movie_timescale = mvhd[\"timescale\"]\n\n # exit when both found\n if movie_timescale is not None and points:\n break\n\n if points and movie_timescale and media_timescale and elst_entries:\n segments = [\n elst_entry_to_seconds(entry, movie_timescale, media_timescale)\n for entry in elst_entries\n ]\n points = list(filter_points_by_elst(points, segments))\n\n return points", "def read_metadata_record(raw_features_string):\n full_metadata = json.loads(raw_features_string)\n return {\"sha256\": full_metadata[\"sha256\"], \"appeared\": full_metadata[\"appeared\"], \"label\": full_metadata[\"label\"]}", "def read_file(inp_fn):\n lines = [line.strip().split(\",\")\n for line in open(inp_fn)\n if not (line.startswith(\"#\"))]\n return [(int(line[0]), year_record({\"male\": int(line[-3]),\n \"female\": int(line[-2]),\n \"unknown\": int(line[-1])},\n None, None))\n for line in lines[1:]]", "def get_lineid_content():\n lineid_content = {}\n lines_file_path = os.path.join(DATA_PATH + MOVIE_LINES_FILE)\n\n with open(lines_file_path, 'r', errors='ignore') as f:\n # +++$+++ is used to split the section in a single line\n # A correct formed line includes five sections\n # The first section is lineID\n # The last section is line content\n # Here we only need lineID and content\n\n for line in f:\n line_sections = line.split(' +++$+++ ')\n assert len(line_sections) == 5\n if line_sections[4][-1] == '\\n':\n line_sections[4] = line_sections[4][:-1]\n lineid_content[line_sections[0]] = line_sections[4]\n\n return lineid_content", "def medline_parser(filename):\n pmid_abstract_dict = {}\n with open(filename) as handle:\n for record in Medline.parse(handle):\n if 'AB' in record.keys():\n pmid, abstract = record['PMID'], record['AB']\n pmid_abstract_dict[pmid] = abstract\n return pmid_abstract_dict", "def get_metadata_from_path(path):\n try:\n import yaml\n # assumes index card is in the top-level of path\n index_card = os.path.join(path, \"M_index.yaml\")\n with open(index_card, \"r\") as stream:\n 
file_info = yaml.safe_load(stream)\n\n metadata_dict = {}\n metadata_dict[\"book_id\"] = file_info[\"book_id\"]\n metadata_dict[\"timestamp_start\"] = file_info[\"start_time\"]\n metadata_dict[\"type\"] = file_info[\"type\"]\n metadata_dict[\"obsid\"] = _convert_book_id_to_obsid(file_info[\"book_id\"])\n # get optional bits\n if \"stop_time\" in file_info:\n metadata_dict[\"timestamp_end\"] = file_info[\"stop_time\"]\n if \"observatory\" in file_info:\n metadata_dict[\"observatory\"] = file_info[\"observatory\"]\n if \"telescope\" in file_info:\n metadata_dict[\"telescope\"] = file_info[\"telescope\"]\n if \"stream_ids\" in file_info:\n metadata_dict[\"stream_ids\"] = file_info[\"stream_ids\"]\n if \"subtype\" in file_info:\n metadata_dict[\"subtype\"] = file_info[\"subtype\"]\n if \"tags\" in file_info:\n metadata_dict[\"tags\"] = file_info[\"tags\"]\n if \"scanification\" in file_info:\n metadata_dict[\"scanification\"] = file_info[\"scanification\"]\n if \"hwp_rate_hz\" in file_info:\n metadata_dict[\"hwp_rate_hz\"] = file_info[\"hwp_rate_hz\"]\n if \"sequencer_ref\" in file_info:\n metadata_dict[\"sequencer_ref\"] = file_info[\"sequencer_ref\"]\n return metadata_dict\n except (ImportError, FileNotFoundError, KeyError):\n pass\n\n return None", "def test_parse_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapdict, comments = parse_mapping_file_to_dict(s1)\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)\r\n self.assertEqual(comments, ['comment line to skip', 'more skip'])", "def parse_metadata(self):\n micro_metadata = {}\n ext_meta = None\n self.content_type = 'text/html'\n if self.source_metadata:\n #print(self.source_metadata)\n if len(self.source_metadata)>1:\n try:\n for sm in self.source_metadata:\n if str(sm.get('type').split('/')[-1]).lower() in self.SCHEMA_ORG_CREATIVEWORKS:\n ext_meta = sm\n except:\n pass\n self.source_name = self.getEnumSourceNames().MICRODATA_EMBEDDED\n if not ext_meta:\n ext_meta = self.source_metadata[0]\n\n if ext_meta is not None:\n self.logger.info('FsF-F2-01M : Trying to extract Microdata metadata from -: {}'.format(self.source_name))\n # TODO check syntax - not ending with /, type and @type\n # TODO (important) extend mapping to detect other pids (link to related entities)?\n # TODO replace check_context_type list context comparison by regex\n check_context_type = ['Dataset', 'Collection']\n try:\n #if ext_meta['@context'] in check_context_type['@context'] and ext_meta['@type'] in check_context_type[\"@type\"]:\n if str(ext_meta.get('type')).find('schema.org') > -1:\n micro_metadata = jmespath.search(self.metadata_mapping.value, ext_meta)\n self.namespaces.append('http://schema.org/')\n else:\n self.logger.info('FsF-F2-01M : Failed to parse non schema.org type Microdata')\n except Exception as err:\n #print(err.with_traceback())\n self.logger.info('FsF-F2-01M : Failed to parse Microdata -: {}'.format(err))\n else:\n self.logger.info('FsF-F2-01M : Could not identify Microdata metadata')\n\n return self.source_name, micro_metadata", "def loadContentSamplesFile(self, lines):\n refgenome = set()\n \n for line in lines:\n if line.startswith(\"#\"):\n continue\n \n tokens = line.rstrip(\"\\n\").split(\"\\t\")\n \n # create and fill a \"GbsSample\" object\n for samplesCol in [\"genotype\", 
\"flowcell\", \"lane\"]:\n if \"_\" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"underscore in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n if \" \" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"space in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n if \".\" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"dot in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n geno = tokens[self.samplesCol2idx[\"genotype\"]]\n flowcell = tokens[self.samplesCol2idx[\"flowcell\"]]\n laneNum = int(tokens[self.samplesCol2idx[\"lane\"]])\n barcode = tokens[self.samplesCol2idx[\"barcode\"]]\n if self.fclnToKeep is not None and \\\n \"%s_%i\" % (flowcell, laneNum) != self.fclnToKeep:\n continue\n iSample = GbsSample(geno, flowcell, laneNum, barcode,\n \"before\" if int(self.lSteps[0]) < 3 \\\n else \"after\")\n iSample.refGenome = tokens[self.samplesCol2idx[\"ref_genome\"]]\n iSample.library = tokens[self.samplesCol2idx[\"library\"]]\n iSample.seqCenter = tokens[self.samplesCol2idx[\"seq_center\"]]\n iSample.seqPlatform = tokens[self.samplesCol2idx[\"seq_platform\"]]\n iSample.seqPlatformModel = tokens[self.samplesCol2idx[\"seq_platform_model\"]]\n iSample.date = tokens[self.samplesCol2idx[\"date\"]]\n iSample.initFastqFile1 \\\n = \"%s/%s\" % (self.pathToInReadsDir,\n tokens[self.samplesCol2idx[\"fastq_file_R1\"]])\n if tokens[self.samplesCol2idx[\"fastq_file_R2\"]] != \"\":\n iSample.initFastqFile2 \\\n = \"%s/%s\" % (self.pathToInReadsDir,\n tokens[self.samplesCol2idx[\"fastq_file_R2\"]])\n if iSample.id not in self.dSamples:\n self.dSamples[iSample.id] = iSample\n refgenome.add(iSample.refGenome)\n\n if flowcell not in self.dFlowcells:\n self.dFlowcells[flowcell] = []\n if laneNum not in self.dFlowcells[flowcell]:\n self.dFlowcells[flowcell].append(laneNum)\n\n # create and fill a \"GbsLane\" object\n laneId = \"%s_%i\" % (flowcell, laneNum)\n if laneId not in self.dLanes:\n self.dLanes[laneId] = GbsLane(laneId, flowcell, laneNum)\n self.dLanes[laneId].insert(iSample)\n\n # create and fill a \"GbsGeno\" object\n if geno not in self.dGenos:\n self.dGenos[geno] = GbsGeno(geno)\n self.dGenos[geno].insert(iSample)\n \n if (\"4\" in self.lSteps or \"5\" in self.lSteps or \"6\" in self.lSteps \\\n or \"7\" in self.lSteps or \"8\" in self.lSteps) \\\n and len(refgenome) > 1:\n print(refgenome)\n msg = \"samples file contains more than one reference genome\"\n raise ValueError(msg)", "def parse_lineage(tsv_filename, sample_names, allow_missing=True):\n\n samples = {}\n\n if file_is_missing(tsv_filename, allow_missing):\n for name in sample_names:\n samples[name] = { 'lineage' : None,\n 'clade': None,\n 'pangolin_ver': None,\n 'pangodata_ver': None,\n 'nextclade_ver': None }\n return { 'samples': samples }\n\n lineages = pd.read_table(tsv_filename, sep='\\t')\n try:\n df = lineages[['isolate',\n 'pango_lineage',\n 'nextstrain_clade',\n 'pangolin_version',\n 'pangoLEARN_version',\n 'nextclade_version'\n ]]\n except KeyError:\n df = lineages[['isolate',\n 'pango_lineage',\n 'nextstrain_clade',\n 'pangolin_version',\n 'version',\n 'nextclade_version'\n ]]\n\n # Pull each row, identify sid \n for row in df.itertuples():\n if row.isolate.startswith(\"Consensus\"):\n sid = re.findall(\"_(.*?)\\.\", row.isolate)[0]\n else:\n sid = str(row.isolate)\n\n assert sid in sample_names\n\n # Pull 
Pangolin lineage\n lineage = str(row.pango_lineage)\n clade = str(row.nextstrain_clade)\n pangolin = str(row.pangolin_version)\n try:\n pangodata = str(row.pangoLEARN_version)\n except AttributeError:\n pangodata = str(row.version)\n nextclade = str(row.nextclade_version)\n samples[sid] = { 'lineage' : lineage,\n 'clade': clade,\n 'pangolin_ver': pangolin,\n 'pangodata_ver': pangodata,\n 'nextclade_ver': nextclade }\n\n assert len(samples) == len(sample_names)\n return { 'samples': samples }", "def parse_line(line, patterns=None):\n if patterns is None:\n patterns = LINE_PATTERNS\n\n for line_re in patterns:\n match = line_re.match(line)\n if match:\n data = match.groupdict()\n av_pairs = data['av_pairs']\n data['av_pairs'] = cleanup_av_pairs(av_pairs)\n return data\n\n return None", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def parse_record(self, in_rec):\n \n geo_util = geo.Geo()\n \n self.metadata = {}\n for k, v in in_rec.items():\n if k == 'metadata2': continue\n elif k == 'geometry':\n self.metadata['geometry'] = v\n coords = v['coordinates']\n self.metadata['wkt'] = geo_util.convert_imageGeom(\\\n coords, 'wkt')\n elif k == 'metadata':\n for m in v:\n key = to_camelCase(m[0])\n self.metadata[key] = m[1]\n else:\n self.metadata[k] = v", "def load_metadata(self):\n handler = open('meta.xml').read()\n soup = BeautifulSoup(handler, 'xml')\n\n for meta in soup.find_all(re.compile(\"^title\")):\n if meta.string:\n self.meta_title = unicode(meta.string)\n\n for meta in soup.find_all(re.compile(\"^initial.creator\")):\n if meta.string:\n self.meta_initial_creator = unicode(meta.string)\n\n for meta in soup.find_all(re.compile(\"^date\")):\n if meta.string:\n self.meta_date = unicode(meta.string)", "def load_metainfo(filename, dependencyLoader=None, extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS, uri=None):\n path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../../../nomad-meta-info/meta_info/nomad_meta_info/{}\".format(filename)))\n return loadJsonFile(path, dependencyLoader, extraArgsHandling, uri)", "def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. 
The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata", "def create_meta_dict_L1(adcp_meta):\n meta_dict = {}\n with open(adcp_meta) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n next(csv_reader, None) # Skip header row\n for row in csv_reader:\n # extract all metadata from csv file into dictionary -- some items not passed to netCDF file but are extracted anyway\n if row[0] == '' and row[1] == '':\n print('Metadata file contains a blank row; skipping this row !')\n elif row[0] != '' and row[1] == '':\n print('Metadata item in csv file has blank value; skipping this row '\n 'in metadata file !')\n else:\n meta_dict[row[0]] = row[1]\n\n # Add conventions metadata to meta_dict\n meta_dict['deployment_type'] = 'Sub Surface'\n meta_dict['flag_meaning'] = 'no_quality_control, good_value, probably_good_value, probably_bad_value, ' \\\n 'bad_value, changed_value, value_below_detection, value_in_excess, ' \\\n 'interpolated_value, missing_value'\n meta_dict['flag_references'] = 'BODC SeaDataNet'\n meta_dict['flag_values'] = '0, 1, 2, 3, 4, 5, 6, 7, 8, 9'\n meta_dict['keywords'] = 'Oceans > Ocean Circulation > Ocean Currents'\n meta_dict['keywords_vocabulary'] = 'GCMD Science Keywords'\n meta_dict['naming_authority'] = 'BODC, MEDS, CF v72'\n meta_dict['variable_code_reference'] = 'BODC P01'\n meta_dict['Conventions'] = \"CF-1.8\"\n\n return meta_dict", "def parse_feature(self, feature_key, lines):\n ...", "def get_metadata(self, t: Optional[int] = None) -> Dict:\n real_t = None if t is None else int(self._resolve_index(t))\n if self._is_tiff:\n ret = self._reader.metadata(index=..., page=real_t)\n else:\n ret = self._reader.metadata(index=real_t)\n self._parse_yaml_description(ret)\n if real_t is not None:\n ret[\"frame_no\"] = real_t\n return ret", "def test_metadata():\n # Note: The json file should have been created with previous tests\n with open(file_struct.features_file) as f:\n data = json.load(f)\n assert(\"metadata\" in data.keys())\n metadata = data[\"metadata\"]\n assert(\"timestamp\" in metadata.keys())\n assert(metadata[\"versions\"][\"numpy\"] == np.__version__)\n assert(metadata[\"versions\"][\"msaf\"] == msaf.__version__)\n assert(metadata[\"versions\"][\"librosa\"] == librosa.__version__)", "def get_metadata_from_json(sample_metadata_path):\n\n try:\n return normalize_metadata(json.loads(open(sample_metadata_path).read()))\n except IOError:\n logging.exception('get_metadata')\n return {}", "def load_metadata_from_file(filename):\n try:\n extension = os.path.splitext(filename)[1]\n return _metadata_loader[extension](filename)\n except KeyError:\n raise TypeError('Cannot read metadata from file %s, extension %s not ' \n 'supported at this time' % (filename, extension))", "def get_metadata(self,\n params: typing.Optional[typing.Mapping[str, str]] = None):\n raise NotImplementedError('This data connector does not provide metadata')", "def _parse_metadata(self, md):\n md = ast.literal_eval(md)\n dd = defaultdict(list)\n\n for entry in md:\n try:\n for k, v in entry.items():\n dd[k].append(v)\n except AttributeError:\n continue\n return dd", "def read(self, file_info, **kwargs):\n\n # We need to import at least the standard fields\n user_fields = kwargs.pop(\"fields\", {})\n fields = self.standard_fields | set(user_fields)\n\n # We catch the user mapping here, since we do not want to deal with\n # user-defined names in the further processing. 
Instead, we use our own\n # mapping\n user_mapping = kwargs.pop(\"mapping\", None)\n\n # Load the dataset from the file:\n dataset = super().read(\n file_info, fields=fields, mapping=self.mapping, **kwargs\n )\n\n dataset[\"time\"] = self._get_time_field(dataset, file_info)\n\n # Remove fields that we do not need any longer (expect the user asked\n # for them explicitly)\n dataset = dataset.drop_vars(\n {\"UTC_start\", \"Profile_time\"} - set(user_fields),\n )\n\n if user_mapping is not None:\n dataset = dataset.rename(user_mapping)\n\n return dataset", "def test_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapres = parse_mapping_file(s1) # map_data, header, comments\r\n mapdict = mapping_file_to_dict(*mapres[:2])\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def identify_contents_metadata(cube, filename):\n metadata = {}\n\n try:\n # This could be None if cube.var_name isn't defined\n metadata['var_name'] = cube.var_name\n metadata['units'] = str(cube.units)\n metadata['long_name'] = cube.long_name\n metadata['standard_name'] = cube.standard_name\n metadata['time_units'] = cube.coord('time').units.origin\n metadata['calendar'] = cube.coord('time').units.calendar\n # CMIP5 doesn't have an activity id and so supply a default\n metadata['activity_id'] = cube.attributes.get('activity_id',\n 'HighResMIP')\n try:\n metadata['institute'] = cube.attributes['institution_id']\n except KeyError:\n # CMIP5 uses institute_id but we should not be processing CMIP5\n # data but handle it just in case\n metadata['institute'] = cube.attributes['institute_id']\n except Exception as exc:\n msg = ('Unable to extract metadata from the contents of file {}\\n{}'.\n format(filename, exc.__str__()))\n raise FileValidationError(msg)\n\n return metadata", "def loadMetaChunkToServerMap (fileName):\n if not os.path.exists(fileName):\n print \"File \", fileName, \" does not exists\"\n sys.exit(1)\n\n infile = open (fileName, \"r\")\n count = 0\n while infile:\n count = count + 1\n line = infile.readline()\n if not line:\n break\n print \"DEBUGME : processing line %s, %d\" % (line, count)\n lineParts = line.split(' ')\n gChunkMap[lineParts[0]] = ChunkInfo(lineParts[0], lineParts[1], lineParts[2])\n # Add a ChunkHostInfo\n numServers = int(lineParts[2])\n for i in range(numServers):\n i = i * 3\n gChunkMap[lineParts[0]].addChunkHostInfo(ChunkHostInfo(lineParts[i+3], lineParts[i+4], lineParts[i+5]))", "def metadata(sceneid, pmin=2, pmax=98, **kwargs):\n scene_params = _sentinel_parse_scene_id(sceneid)\n path_prefix = os.path.join(scene_params[\"aws_bucket\"], scene_params[\"aws_prefix\"])\n preview_file = os.path.join(path_prefix, 
scene_params[\"preview_file\"])\n\n dst_crs = CRS({\"init\": \"EPSG:4326\"})\n with rasterio.open(preview_file) as src:\n bounds = transform_bounds(\n *[src.crs, dst_crs] + list(src.bounds), densify_pts=21\n )\n\n info = {\"sceneid\": sceneid}\n info[\"bounds\"] = {\"value\": bounds, \"crs\": dst_crs.to_string()}\n\n addresses = [\n \"{}/{}/B{}.jp2\".format(path_prefix, scene_params[\"preview_prefix\"], band)\n for band in scene_params[\"bands\"]\n ]\n _stats_worker = partial(_sentinel_stats, percentiles=(pmin, pmax), **kwargs)\n with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:\n responses = executor.map(_stats_worker, addresses)\n\n info[\"statistics\"] = {\n b: v for b, d in zip(scene_params[\"bands\"], responses) for k, v in d.items()\n }\n return info", "def createMetadata(request, datafile):\n samples = []\n datafile = datafile.split(',')\n for f in datafile:\n filename = f.replace('[', '').replace(']', '').replace('\"', '').replace(' ', '')\n cont = subprocess.Popen(\n [\"curl -u \" + request.session.get('username') + \":\" + request.session.get('password') + \" -k -s \" + filename[1:]],\n stdout=subprocess.PIPE, shell=True).communicate()[0]\n with open(request.session.get('username') + \"/data.txt\", \"w\") as datafile:\n datafile.write(cont)\n with open(datafile.name, \"r\") as tfile:\n for line in tfile:\n if \"!Sample_geo_accession\" in line:\n line = line.split('\\t')\n for x in range(0, len(line)):\n samples.append(line[x].replace('\\n', ''))\n samples = filter(None, samples)\n tfile.seek(0)\n with open(request.session.get('username') + \"/meta.txt\", \"w\") as meta:\n for i in range(0, len(samples)):\n for line in tfile:\n if \"!Sample\" in line:\n line = line.split('\\t')\n line[i] = line[i].replace(\"!Sample_\", \"\").replace(\"\\n\", \"\").replace(\"'\", \"\").replace(\",\", \"\").replace(\"\\\"\", \"\")\n if line[i] == \"geo_accession\":\n line[i] = \"sample_id\"\n elif line[1] == \"\\\"female\\\"\" or line[1] == \"\\\"male\\\"\":\n line[0] = \"sex\"\n if \"title\" not in line[0]:\n meta.write(re.sub(r'[^\\x00-\\x7F]+', ' ', line[i]) + '\\t')\n meta.write('\\n')\n tfile.seek(0)\n meta.close()\n datafile.close()\n call([\"rm\", request.session.get('username') + \"/data.txt\"])\n return meta", "def extract_metadata(file_name, token_type, create_labels, remove_stop_words):\n metadata = {'file': file_name}\n\n with open(file_name, 'r') as file:\n rows = file.readlines()\n\n rules = [\n ['Message-ID:', 'id'],\n ['Subject:', 'subject'],\n ]\n\n for (index, row) in enumerate(rows):\n row = row.lstrip('> \\t')\n for (pattern, prop) in rules:\n if row.startswith(pattern):\n metadata[prop] = row.replace(pattern,'')\n\n if 'body' not in metadata:\n if row.startswith('\\n'):\n metadata['body'] = '\\n'.join(rows[index:])\n\n elif '-----Original Message-----' in row:\n del metadata['body']\n\n if 'body' in metadata:\n if create_labels:\n metadata['label'] = create_label(metadata['body'])\n metadata['original_body'] = metadata['body']\n metadata['body'] = clean_text(metadata['body'], token_type, remove_stop_words)\n clean_subject = cleanse(metadata['subject'])\n metadata['subject'] = '' if clean_subject.count(' ') == len(clean_subject) else clean_subject\n\n return metadata" ]
[ "0.8028162", "0.5969158", "0.58202237", "0.57127655", "0.56976295", "0.56358564", "0.5572141", "0.55685896", "0.556603", "0.5544489", "0.55414313", "0.5443912", "0.5436588", "0.5433023", "0.5427803", "0.5423232", "0.53913337", "0.5390329", "0.5361068", "0.53360075", "0.52867633", "0.52782154", "0.5271059", "0.5260443", "0.525197", "0.5208082", "0.52003556", "0.5194747", "0.51939666", "0.51579696", "0.5109985", "0.50747025", "0.50597095", "0.50483984", "0.5035302", "0.50304526", "0.50262046", "0.5026118", "0.50249547", "0.50161594", "0.50130296", "0.5006077", "0.49837285", "0.49829236", "0.4980243", "0.49681595", "0.49657747", "0.4963939", "0.4952166", "0.4948751", "0.49470976", "0.49453062", "0.49350342", "0.49292752", "0.49252602", "0.49231008", "0.49106947", "0.49098626", "0.49079737", "0.4899399", "0.48993298", "0.4896533", "0.4890665", "0.48883104", "0.48873743", "0.4879336", "0.48765928", "0.48580912", "0.48510376", "0.48449358", "0.48275778", "0.4821151", "0.48186034", "0.48148435", "0.4812345", "0.4808401", "0.48083013", "0.48071858", "0.48068842", "0.48055384", "0.4804821", "0.48022348", "0.47998857", "0.4798136", "0.47826743", "0.47676834", "0.47649446", "0.47601375", "0.47595653", "0.47558925", "0.47544202", "0.47497126", "0.47455254", "0.47453317", "0.4743488", "0.47354332", "0.47341618", "0.47202504", "0.4714753", "0.4711243" ]
0.8175402
0
Initialize the contact tag.
def __init__( self, tag: typing.Optional[int], contact: typing.Optional[int], **kwargs: typing.Dict, ) -> None: super().__init__(**kwargs) self.tag = tag self.contact = contact
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, contact):\n\n\t\tself._id = UserClassroom.__ID\n\t\tself._contact = contact\n\t\tself.size = 1\n\n\t\tUserClassroom.__ID += 1", "def __init__(self, uid=\"\", first_name=\"\", last_name=\"\", email=\"\", phone=\"\", description=\"\",\n company_uid=None, testing=False):\n\n # Initialization of the parent class\n super(Contact, self).__init__(uid, first_name, last_name, email, phone, description, company_uid,\n testing=testing)", "def __init__(self, contact = None):\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger, \"The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object\")\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e :\n FRegulatoryLogger.ERROR(logger, str(e))", "def __init__(self, contact, phone_number, first_name, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.contact = contact\n self.phone_number = phone_number\n self.first_name = first_name", "def __init__(self):\n self.tag = None", "def init_cid(self) -> None:\n self.position = 0", "def __init__(__self__, *,\n contact_id: pulumi.Input[str],\n is_essential: pulumi.Input[bool]):\n pulumi.set(__self__, \"contact_id\", contact_id)\n pulumi.set(__self__, \"is_essential\", is_essential)", "def __init__(__self__, *,\n contact_id: pulumi.Input[str],\n is_essential: pulumi.Input[bool]):\n pulumi.set(__self__, \"contact_id\", contact_id)\n pulumi.set(__self__, \"is_essential\", is_essential)", "def setUp(self):\n self.new_contact = Contact(\"zoo\", \"vier\", 254719702373, \"[email protected]\")", "def __init__(self):\n self.swagger_types = {\n 'source_contact': 'AddressableEntityRef',\n 'target_contact': 'AddressableEntityRef',\n 'resulting_contact': 'AddressableEntityRef'\n }\n\n self.attribute_map = {\n 'source_contact': 'sourceContact',\n 'target_contact': 'targetContact',\n 'resulting_contact': 'resultingContact'\n }\n\n self._source_contact = None\n self._target_contact = None\n self._resulting_contact = None", "def __init__(self, tag):\n self.tag = tag", "def contact(self, contact):\n\n self._contact = contact", "def contact(self, contact):\n\n self._contact = contact", "def __init__(__self__, *,\n channel_target_info: Optional[pulumi.Input['ContactChannelTargetInfoArgs']] = None,\n contact_target_info: Optional[pulumi.Input['ContactTargetInfoArgs']] = None):\n if channel_target_info is not None:\n pulumi.set(__self__, \"channel_target_info\", channel_target_info)\n if contact_target_info is not None:\n pulumi.set(__self__, \"contact_target_info\", contact_target_info)", "def __init__(self, contact_loader):\n self.contacts_by_group_list = contact_loader.contacts_by_group_list\n self.contact_list = None", "def __init__(self):\n try:\n self.contact_api_key = helpers.config_section_map(\"Full Contact\")[\"api_key\"]\n except Exception:\n self.contact_api_key = None\n click.secho(\"[!] 
Did not find a Full Contact API key.\",fg=\"yellow\")", "def __init__(self, tag):\n self.tag = tag.lower()\n self.attrs = {}\n self.contents = ()", "def __init__(self, contact_detail):\n\t\tself.first_name = contact_detail['First Name'].strip()\n\t\tself.last_name = contact_detail['Last Name'].strip()\n\t\tself.mobile = contact_detail['Mobile Phone'].strip()\n\t\tself.email = contact_detail['E-mail Address'].strip()", "def contact(self, contact):\n\n self.logger.debug(\"In 'contact' setter.\")\n\n self._contact = contact", "def __init__(self, contacts_client):\n self.contacts_client = contacts_client", "def setUp(self):\n # Below creating the new contact object to test.\n self.new_contact = Contact(\n \"James\", \"Muriuki\", \"0712345678\", \"[email protected]\")", "def initDocTagText(self):\n self.doc, self.tag, self.text = Doc().tagtext()", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def __init__(self, tag=None, sender=None, template_name=None, html_content=None, html_url=None, subject=None, reply_to=None, to_field=None, attachment_url=None, is_active=None): # noqa: E501 # noqa: E501\n\n self._tag = None\n self._sender = None\n self._template_name = None\n self._html_content = None\n self._html_url = None\n self._subject = None\n self._reply_to = None\n self._to_field = None\n self._attachment_url = None\n self._is_active = None\n self.discriminator = None\n\n if tag is not None:\n self.tag = tag\n if sender is not None:\n self.sender = sender\n if template_name is not None:\n self.template_name = template_name\n if html_content is not None:\n self.html_content = html_content\n if html_url is not None:\n self.html_url = html_url\n if subject is not None:\n self.subject = subject\n if reply_to is not None:\n self.reply_to = reply_to\n if to_field is not None:\n self.to_field = to_field\n if attachment_url is not None:\n self.attachment_url = attachment_url\n if is_active is not None:\n self.is_active = is_active", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def initialize(self):\n\t\tpass", "def contact_info(self, contact_info):\n\n self._contact_info = contact_info", "def __init__(self, tagged_sents, default_tag='nc0s000'):\n self._default_tag = default_tag", "def __init__(self):\n\n\t\tself.__contacts = None\n\t\tself.__accounts = None\n\t\tself.__deals = None\n\t\tself.__key_modified = dict()", "def init(self):\n logger.info(mm_cnofs.ackn_str)\n self.acknowledgements = mm_cnofs.ackn_str\n self.references = '\\n'.join((mm_cnofs.refs['mission'],\n mm_cnofs.refs['vefi']))\n\n return", "def initialize(self) -> None:\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def __init__(self, first_name=\" \", last_name=\" \", phone_number=0, phone_number_type=\" \", contact_list=[]):\n self.first_name = first_name\n self.last_name = last_name\n self.phone_number = phone_number\n self.phone_number_type = phone_number_type\n self.valid_phone_number_types = [\"home\", \"office\", \"cell\"]\n self.contact_list = contact_list", "def __init__(self, buf=None, *args, **kwargs):\n super(Message, self).__init__(buf, *args, **kwargs)\n self.__initialized = True", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", 
"def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(self, context):\r\n pass", "def initialize(self, context):\n pass", "def initialize(self, context):\n pass", "def initialize(self, context):\n pass", "def initialize(self):\n self.ros.enable()\n self.phone_link.enable()", "def initialize(self):\n self.ID = uuid.uuid4()\n self.TMDB_ID = 0\n self.title = \"\"\n self.release_date = \"\"\n self.popularity = \"\"\n self.overview = \"\"", "def __init__(self, bstream):\r\n # Tag itself doesn't do this. Must be overridden.\r\n self.name = \"\"\r\n ## named tags..? Are named tags only named when in a tag_compound that defines their names? And tag_compounds are always named?\r\n #self.value = \"\" needed?\r\n #payload... varies by subclass.\r\n self._parseContent(bstream)", "def __init__(self, char_name, char_description):\r\n self.name = char_name\r\n self.description = char_description\r\n self.conversation = None\r\n self.bribe = None", "def init(self):\n\n logger.info(mm_chain.ackn_str)\n self.acknowledgements = mm_chain.ackn_str\n self.references = mm_chain.refs['chain']\n\n return", "def __init__(self, caption, tag):\n self._caption = caption\n self._tag = tag.replace(\" \", \"\")", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self, tags=''):\n self.tags = tags", "def on_initialize(self) -> None:\n pass", "def __init__(self, _from, _to, _cc=None, _bcc=None, _subject='', _body='', _attach=None):\n self._from = _from\n self._to = _to\n self._cc = _cc\n self._bcc = _bcc\n self._subject = _subject\n self._body = _body\n self._attach = _attach", "def onload(self):\n\t\tload_address_and_contact(self)", "def initialize(self, cwrap):\n pass", "def init(self, domain, category, methodname, params={},login=\"\",passwd=\"\",tags=\"\"):\n if tags<>\"\":\n self.tags = tags\n self.domain=domain\n self.category=category\n self.methodname=methodname\n self.params=params\n self.login=login\n self.passwd=passwd \n self.tagBodyEncode()", "def initialize(self):\n return", "def initialize(self):\n self.event = self.handler.event\n self.what = self.event\n self.on_what = self.event.object\n self.when = datetime.now()\n self.who = self.handler.mtool.getAuthenticatedMember().getId()\n self.where = self.event.object", "def __init__(self, contacts=None, sent=None, received=None): # noqa: E501 # noqa: E501\n\n self._contacts = None\n self._sent = None\n self._received = None\n self.discriminator = None\n\n self.contacts = contacts\n self.sent = sent\n self.received = received", "def init(self) -> None:\n ...", "def initialize(self):\n pass # pragma: no cover", "def init():\n return _nfc.init()", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def __init__(self, tag):\r\n self.tag = tag.lower()", "def __init__ (self):\n 
self.tag = 0\n self.mDict = {}", "def _init(self, position):\n\t\tself._position = position" ]
[ "0.6380634", "0.63572264", "0.62336594", "0.61620253", "0.60732037", "0.6036036", "0.60072947", "0.60072947", "0.60046685", "0.6001961", "0.5892605", "0.58892137", "0.58892137", "0.58549345", "0.58439875", "0.57779026", "0.57670283", "0.5691324", "0.56651187", "0.5649181", "0.5645289", "0.5642232", "0.5589417", "0.5589417", "0.5589417", "0.5589417", "0.5589417", "0.55779946", "0.5576385", "0.5576385", "0.55624014", "0.55591416", "0.5553269", "0.5538311", "0.5530503", "0.55295527", "0.552804", "0.552804", "0.552804", "0.55234724", "0.551216", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.5505954", "0.547451", "0.54655224", "0.54655224", "0.54655224", "0.54455817", "0.5428948", "0.5419821", "0.5406856", "0.539788", "0.53917", "0.53683686", "0.53683686", "0.53683686", "0.53683686", "0.53683686", "0.53683686", "0.53683686", "0.53683686", "0.5348318", "0.53248614", "0.53034204", "0.528949", "0.5282309", "0.5277172", "0.52767485", "0.52749455", "0.5269021", "0.5258745", "0.52557075", "0.52524424", "0.52522665", "0.52522665", "0.52522665", "0.52522665", "0.52522665", "0.52440155", "0.52440155", "0.52427435", "0.5233813", "0.5229799" ]
0.73358494
0
Get the name of the API resource.
def resource_name() -> str: return "contactTags"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n return self.raw_resource[\"name\"]", "def name(self):\n\n return self.resource[\"metadata\"][\"name\"]", "def get_resource_name(self):\n return self._resource_name", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def API_NAME(): # noqa\n raise NotImplementedError(\"API_NAME must be implemented\")", "def resource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_name\")", "def _get_api_resource_type_name(self):\n return \"AWS::Serverless::HttpApi\"", "def get_name(self) -> Text:\n return self._get_value(KubernetesResource.Keys.NAME)", "def subresource_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"subresource_name\")", "def get_objectName(self):\n return self.resource.get_name()", "def get_name_from_resource_id(resource_id):\n return resource_id.rstrip(\"/\").split(\"/\")[-1]", "def get_name() -> str:\n pass", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def get_name(self) -> str:\n pass", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, 
\"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")" ]
[ "0.7821574", "0.7735648", "0.7670678", "0.7447657", "0.7447657", "0.74427205", "0.7221747", "0.7221747", "0.7221747", "0.7221747", "0.7221747", "0.7201344", "0.7172618", "0.69053936", "0.68893594", "0.68083745", "0.68061125", "0.6781027", "0.6781027", "0.6777424", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043", "0.67408043" ]
0.0
-1
Map field names to attributes.
def map_field_name_to_attribute() -> typing.Dict: return { "tag": "tag", "contact": "contact", }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def map_to_db_fields(field_attrs):\n attr_keys = field_attrs.keys()\n field_name = field_attrs[attr_keys.pop(attr_keys.index('name'))]\n field_type_raw = field_attrs[attr_keys.pop(attr_keys.index('type'))]\n\n # field_type - constructor for a django.db.models.fields objects\n try:\n field_type = getattr(fields, field_type_raw)\n except:\n raise Exception(\n \"Can not create field with type {0}\".format(field_type_raw))\n\n field_attributes = {}\n\n for key in attr_keys:\n if key in TO_INT_ATTRS:\n value = int(field_attrs[key])\n elif key in TO_BOOL_ATTRS:\n value = True if field_attrs[key] == 'true' else False\n else:\n value = field_attrs[key]\n\n field_attributes[key] = value\n\n return {field_name: field_type(**field_attributes)}", "def field_mapping(self):\n fields = self.fields\n if self.target_field is not None:\n del fields[self.target_field.get('name')]\n field_labels = list(self.fields.keys())\n\n field_mapping = {\n name: (\n field_labels.index(name),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DataField'\n }\n\n field_mapping.update({\n name: (\n field_labels.index(self.find(e, 'FieldRef').get('field')),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DerivedField'\n })\n\n return field_mapping", "def read_field_attributes(self, fieldname):\n return self.read_field(fieldname).attributes", "def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' % field.name)\n field_map[field.name] = field\n return field_map", "def _map_field_names(self, members):\n result = []\n for member in members:\n mapped_info = {}\n for entry_key, entry_value in member.iteritems():\n if not entry_key in self.DATA_MAPPING: # skip the entry if there is no mapping\n continue\n mapped_info[self.DATA_MAPPING[entry_key]] = entry_value\n result.append(mapped_info)\n return result", "def add_fields(self, fields):\n for label, data in fields.items():\n self[label] = data", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def _getAttributeNames(self):\n return sorted(self._field_map.keys())", "def convert_fields(fields, _fields):\n mapper = {\n \"id\": \"local_id\",\n \"local_id\": \"id\"\n }\n fields = deepcopy(fields)\n for field in fields:\n if field['name'] in _fields:\n field['name'] = mapper[field['name']]\n return fields", "def model_fields(self):\n converter = connections[self.db].introspection.identifier_converter\n model_fields = {}\n for field in self.model._meta.fields:\n name, column = field.get_attname_column()\n model_fields[converter(column)] = field\n return model_fields", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def get_fields(self):\n fields = {}\n allowed_types = (\n SerializerMethodField,\n Field,\n Serializer,\n )\n for attr in 
dir(self):\n if attr == 'data':\n continue\n\n if isinstance(getattr(self, attr), allowed_types):\n fields[attr] = getattr(self, attr)\n\n return fields", "def get_attributes(self, shape):\n attributes = {}\n identifier_names = [i.name for i in self.identifiers]\n\n for name, member in shape.members.items():\n snake_cased = xform_name(name)\n if snake_cased in identifier_names:\n # Skip identifiers, these are set through other means\n continue\n snake_cased = self._get_name(\n 'attribute', snake_cased, snake_case=False\n )\n attributes[snake_cased] = (name, member)\n\n return attributes", "def _set_attr_name_map(self):\n self.attr_name_map = {}\n for object_query in self.query:\n object_name = object_query[\"object_name\"]\n object_class = self.object_map[object_name]\n aliases = AttributeInfo.gather_aliases(object_class)\n self.attr_name_map[object_class] = {}\n for key, value in aliases.items():\n filter_by = None\n if isinstance(value, dict):\n filter_name = value.get(\"filter_by\", None)\n if filter_name is not None:\n filter_by = getattr(object_class, filter_name, None)\n value = value[\"display_name\"]\n if value:\n self.attr_name_map[object_class][value.lower()] = (key.lower(),\n filter_by)\n custom_attrs = AttributeInfo.get_custom_attr_definitions(\n object_class)\n for key, definition in custom_attrs.items():\n if not key.startswith(\"__custom__:\") or \\\n \"display_name\" not in definition:\n continue\n try:\n # Global custom attribute definition can only have a single id on\n # their name, so it is safe for that. Currently the filters do not\n # work with object level custom attributes.\n attr_id = definition[\"definition_ids\"][0]\n except KeyError:\n continue\n filter_by = CustomAttributeValue.mk_filter_by_custom(object_class,\n attr_id)\n name = definition[\"display_name\"].lower()\n self.attr_name_map[object_class][name] = (name, filter_by)", "def _attrs(self):\n for field in self.model._meta.get_fields():\n if isinstance(field, (models.OneToOneField, models.ManyToOneRel)):\n # Skip non-field attributes\n continue\n if field is self._geom_field:\n # Skip the geometry field, which is not an attribute\n continue\n yield field", "def fields(cls):\n if not hasattr(cls, '_fields'):\n cls.parse_attributes()\n return cls._fields", "def get_attributes(self) -> Dict[str, str]:\n pass", "def add_lowercase_fields(attributes, data):\n for attrib in attributes:\n if attrib['similarity'] == 'EqualIgnoreCase':\n value = data.get(attrib['name'])\n if value is not None:\n data[attrib['name']] = value.lower()\n return data", "def _update_all_fields(self, name, value):\n for field in self._field_map.values():\n setattr(field, name, value)", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def attributes(self):\n return { k: getattr(self, k) for k in self.__class__.columns().keys() }", "def get_field_attr(name):\n # de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.\n # is dat omdat er twee entiteiten in 1 scherm staan?\n fields = []\n opts = my.rectypes[name]._meta\n for x in opts.get_fields(): # fields:\n fldname = x.name\n fldtype = x.get_internal_type()\n if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):\n # if fldname == 'id' or 
any((x.many2one, x.many2many, x.one2many))\n continue\n try:\n length = x.max_length\n except AttributeError:\n length = -1\n fields.append((fldname, fldtype[:-5], length))\n return fields", "def rename_fields(r, rename_map):\n new_record = {}\n for f in r.keys():\n if f in rename_map.keys():\n new_record[rename_map[f]] = r[f]\n else:\n new_key = f.lower().replace(' ', '_')\n new_record[new_key] = r[f]\n return new_record", "def field_names(self):\n ...", "def modelfields(entity) -> Dict[str, Field]:\n return entity.__modelfields__", "def _to_user_model_attrs(self, data, attrs_map):\n user_attrs = {}\n for k, v in attrs_map.iteritems():\n attr = (v, data.get(k)) if isinstance(v, str) else v(data.get(k))\n user_attrs.setdefault(*attr)\n\n user_attrs.setdefault('server', 'All')\n user_attrs.setdefault('faction', 0)\n user_attrs.setdefault('admin', True)\n user_attrs.setdefault('uploader', True)\n user_attrs.setdefault('upload_server', None)\n\n return user_attrs", "def fields(self) -> Mapping[str, str]:\n return pulumi.get(self, \"fields\")", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def __init__(cls, name, bases, attrs):\r\n if sys.version_info[1] < 6: # Backport of __set_name__ from 3.6 :)\r\n for k, v in attrs.items():\r\n if isinstance(v, (Field, Store, Section)):\r\n v.__set_name__(cls, k)\r\n\r\n fields = attrs['__fields__']\r\n stores = attrs['__store_attrs__']\r\n\r\n for attr_name in dir(cls):\r\n attr = getattr(cls, attr_name)\r\n if isinstance(attr, Field):\r\n fields.add(attr)\r\n stores.add(attr.store_attr)\r\n\r\n super().__init__(name, bases, attrs)", "def fields(self):\n _fields = {\n i: attrgetter(i) for i in ('pf_type', 'label',)\n }\n _fields['host'] = self.get_safely_instance_partial(Host, 'host')\n return _fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n fields: Dict[str, Callable[[Any], None]] = {\n \"assignedDateTime\": lambda n : setattr(self, 'assigned_date_time', n.get_datetime_value()),\n \"capabilityStatus\": lambda n : setattr(self, 'capability_status', n.get_str_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"service\": lambda n : setattr(self, 'service', n.get_str_value()),\n \"servicePlanId\": lambda n : setattr(self, 'service_plan_id', n.get_uuid_value()),\n }\n return fields", "def record_dict(self):\n return 
{p.key: getattr(self, p.key) for p in self.__mapper__.attrs}", "def parse_attributes(cls):\n cls._fields = []\n cls._tables = []\n for attr_k in dir(cls):\n try:\n attr = object.__getattribute__(cls, attr_k)\n except AttributeError:\n continue\n if issubclass(attr.__class__, ReferenceManyField):\n cls._tables.append(attr_k)\n elif issubclass(attr.__class__, Field):\n cls._fields.append(attr_k)", "def get_dataclass_attributes(cls) -> Dict[str, Tuple[Any, str]]:\n fields = cls.__dataclass_fields__.values()\n attrs = {}\n for field in fields:\n if field.type != InitVar:\n attrs[field.name] = field.type, \"\"\n return attrs", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def update_attributes_map(klass):\n\n return {\n 'name': '',\n 'default_locale': ''\n }", "def _getAttrMap(self):\r\n if not getattr(self, 'attrMap'):\r\n self.attrMap = {}\r\n for (key, value) in self.attrs:\r\n self.attrMap[key] = value\r\n return self.attrMap", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def fields(cls):\n return cls._nameToValue", "def attributes(self):\n _attrs = []\n if self.name:\n _attrs.append(\"name\")\n if self.label:\n _attrs.append(\"label\")\n if self.confidence:\n _attrs.append(\"confidence\")\n if self.index:\n _attrs.append(\"index\")\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs + [\"points\"]", "def map_csv_fields(self):\n etod_csv_fields = {\n 'ctry_id': None,\n 'obst_identifier': None,\n 'obst_name': None,\n 'lon_src': None,\n 'lat_src': None,\n 'agl': None,\n 'amsl': None,\n 'vert_uom': None,\n 'hor_acc': None,\n 'hor_acc_uom': None,\n 'vert_acc': None,\n 'vert_acc_uom': None,\n 'obst_type': None,\n 'lighting': None,\n 'marking': None,\n 'is_group': None,\n }\n\n for field in etod_csv_fields:\n try:\n etod_csv_fields[field] = etod_map[self.ctry_short_name]['fields'][field]\n except KeyError:\n etod_csv_fields[field] = None\n\n self.field_map = etod_csv_fields", "def meta(cls):\n if getattr(cls, '__from_class__', None) is not None:\n cls = cls.__from_class__\n attribute_info = {}\n for name, value in cls.__table__.columns.items():\n attribute_info[name] = str(value.type).lower()\n\n return {cls.__name__: attribute_info}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def make_record_properties(fieldnames,\n default='float',\n overrides={},\n aliases={}):\n return [(aliases.get(f, f), overrides.get(f, default))\n for f in fieldnames]", "def get_field_names(self):\n return {rv[0] for rv in self.iter_fields()}", "def get_attributes(cls):\r\n return [Attribute('allowed_files', '[]', transform=cls.parse_files),\r\n Attribute('label', ''),\r\n Attribute('required_files', '[]', transform=cls.parse_files), ]", "def _fields(self):\n fields = [(\"serial\", self.serial), (\"active\", str(self.active)),\n (\"name\", self.name), (\"version\", self.version),\n (\"auto_update\", str(self.auto_update)),\n (\"new_version_available\", 
str(self.new_version_available)),\n (\"product_type\", self.product_type),\n (\"network_device\", str(self.network_device))]\n return fields", "def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict", "def _fields_names(cls) -> List:\n return list(field.name for field in dataclasses.fields(cls))", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}", "def _field_names(self):\n return [self._sanitize_field_name(field_name)\n for field_name in self._all_fields]", "def fields(self) -> Dict[str, Field]:\n return self._fields", "def add_attributes(self, x):\n for k, v in x.items():\n setattr(self, k, v)", "def get_fields(self):\n\n return {\n attr: field['serializer']\n for attr, field in self._fields.items()\n }", "def _replace_fields(self):\n for name, value in self._cleaned_data.items():\n setattr(self, name, value)", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]", "def fields() -> Dict[str, models.Field]:\n return dict(\n (field.name, field)\n for field in AccountTier._meta.get_fields()\n if field.name not in [\"id\"]\n )", "def items(self):\n for name in self.fields:\n yield name, getattr(self, name)", "def get_attrs_field_names(env, arch, model, editable):\n VIEW_TYPES = {item[0] for item in type(env['ir.ui.view']).type.selection}\n symbols = _get_attrs_symbols() | {None}\n result = []\n\n def get_name(node):\n \"\"\" return the name from an AST node, or None \"\"\"\n if isinstance(node, ast.Name):\n return node.id\n\n def get_subname(get, node):\n \"\"\" return the subfield name from an AST node, or None \"\"\"\n if isinstance(node, ast.Attribute) and get(node.value) == 'parent':\n return node.attr\n\n def process_expr(expr, get, key, val):\n \"\"\" parse `expr` and collect triples \"\"\"\n for node in ast.walk(ast.parse(expr.strip(), mode='eval')):\n name = get(node)\n if name not in symbols:\n result.append((name, key, val))\n\n def process_attrs(expr, get, key, val):\n \"\"\" parse `expr` and collect field names in lhs of conditions. 
\"\"\"\n for domain in safe_eval(expr).values():\n if not isinstance(domain, list):\n continue\n for arg in domain:\n if isinstance(arg, (tuple, list)):\n process_expr(str(arg[0]), get, key, expr)\n\n def process(node, model, editable, get=get_name):\n \"\"\" traverse `node` and collect triples \"\"\"\n if node.tag in VIEW_TYPES:\n # determine whether this view is editable\n editable = editable and _view_is_editable(node)\n elif node.tag in ('field', 'groupby'):\n # determine whether the field is editable\n field = model._fields.get(node.get('name'))\n if field:\n editable = editable and field_is_editable(field, node)\n\n for key, val in node.items():\n if not val:\n continue\n if key in ATTRS_WITH_FIELD_NAMES:\n process_expr(val, get, key, val)\n elif key == 'attrs':\n process_attrs(val, get, key, val)\n\n if node.tag in ('field', 'groupby') and field and field.relational:\n if editable and not node.get('domain'):\n domain = field._description_domain(env)\n # process the field's domain as if it was in the view\n if isinstance(domain, str):\n process_expr(domain, get, 'domain', domain)\n # retrieve subfields of 'parent'\n model = env[field.comodel_name]\n get = partial(get_subname, get)\n\n for child in node:\n if node.tag == 'search' and child.tag == 'searchpanel':\n # searchpanel part has to be validated independently\n continue\n process(child, model, editable, get)\n\n process(arch, model, editable)\n return result", "def extra_from_record(self, record):\n return {\n attr_name: record.__dict__[attr_name]\n for attr_name in record.__dict__\n if attr_name not in BUILTIN_ATTRS\n }", "def field_names(cls) -> tuple:\n return tuple((field.name for field in fields(cls)))", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def fields(self):\r\n return self._by_name.iteritems()", "def attributes(self, *args):\n kwargs = {}\n if args:\n kwargs[\"attributenames\"] = args\n\n r = self._token_id_request(urljoin(self._url, Client._attribute_resource), **kwargs)\n\n # parse contennt looking for all attributes\n attributes = []\n for line in r.text.splitlines():\n r = re.match(\"(userdetails\\.attribute\\.name)=(.*)\", line)\n if r:\n name = r.groups()[1]\n attributes.append([name, None])\n continue # next line\n\n r = re.match(\"(userdetails\\.attribute\\.value)=(.*)\", line)\n if r:\n value = r.groups()[1]\n # last name parsed is where it has to\n # be stacked\n if attributes[-1][1] == None:\n attributes[-1][1] = value\n if isinstance(attributes[-1][1], list):\n attributes[-1][1].append(value)\n else:\n # cast to list\n attributes[-1].append([attributes[-1][1], value])\n\n return dict([(item[0], item[1]) for item in attributes])", "def normalize_field_names(cls, torrent_info: dict):\n for old_name, new_name in cls.torrent_fields_map.items():\n if old_name in torrent_info:\n torrent_info[new_name] = torrent_info[old_name]", "def _iter_attrs_for_field_type(cls, field_type):\n return six.iterkeys(cls._get_defaults_for_field_type(field_type))", "def _iter_attrs_for_field_type(cls, field_type):\n return six.iterkeys(cls._get_defaults_for_field_type(field_type))", "def _get_field_details(self, data, fields):\n 
fields_metadata = dict()\n for field in fields:\n dtype = data[field].dtype\n field_template = self._FIELD_TEMPLATES.get(dtype.kind)\n if not field_template:\n raise ValueError('Unsupported dtype {} in column {}'.format(dtype, field))\n\n field_details = copy.deepcopy(field_template)\n fields_metadata[field] = field_details\n\n return fields_metadata", "def fields(self):", "def __new__(metacls, name, bases, attributes, **kwargs):\n # Use both a namespaced mapping and a standard dict\n # as class-based records of our field attributes:\n field_index = Flat()\n field_names = {}\n \n # Stow both the Python name and the namespaced name\n # for each field attribute defined on the schema,\n # additionally manually calling __set_name__(…) if\n # we’re on a pre-3.6 version of Python:\n for attribute, value in attributes.items():\n if isinstance(value, FieldBase):\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n # This is the same as the above, but for the base\n # ancestor class – this enables field inheritance:\n for base in bases:\n parent = base.__mro__[0]\n for attribute, value in vars(parent).items():\n if isinstance(value, FieldBase) and attribute not in attributes:\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n for namespace in field_index.namespaces():\n nsfield = Namespace(field_index, namespace=namespace)\n if NEED_NAME:\n nsfield.__set_name__(None, namespace)\n attributes[namespace] = nsfield\n field_names[namespace] = nsfield\n \n # Add both the field-index and the field-names mappings\n # to the class dictionary for the new type:\n attributes['__field_index__'] = field_index\n attributes['__field_names__'] = field_names\n \n # Create and return the schema type:\n return super(MetaSchema, metacls).__new__(metacls, name,\n bases,\n attributes,\n **kwargs)", "def field_names(self):\n return self.base_field_names() + list(self.data.keys())", "def field_names(self):\n if not self._field_names:\n self._field_names.update(self.properties.keys())\n\n self._field_names = [attr for attr in self._field_names if not attr.startswith(\"_\")]\n\n return self._field_names", "def map_conf_field_names(self, data, **kwargs):\n # TODO: it's dangerous to depend on an alphabetical order, we'd better move related logic out of Schema.\n conf = data[\"conf\"] if \"conf\" in data else None\n if conf is not None:\n for field_key, dict_key in CONF_KEY_MAP.items():\n value = conf.get(dict_key, None)\n if dict_key in conf and value is not None:\n del conf[dict_key]\n conf[field_key] = value\n data[\"conf\"] = conf\n return data", "def _build_attributes(self):\n\n # We might rebuild the program because of snippets but we must\n # keep already bound attributes\n\n dtype = []\n for (name,gtype) in self.all_attributes:\n if name not in self._attributes.keys():\n attribute = Attribute(self, name, gtype)\n else:\n attribute = self._attributes[name]\n\n self._attributes[name] = attribute\n dtype.append(attribute.dtype)", "def attribute_to_params_map(self):\n return self._param_names_map", "def get_fields(self):\n fields = super(GeoModelSerializer, self).get_fields()\n # Set the geometry field name when it's undeclared.\n if not self.Meta.geom_field:\n for name, field in fields.items():\n if isinstance(field, GeometryField):\n 
self.Meta.geom_field = name\n break\n return fields", "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "def get_fields(self) -> Iterable[fields.Field]:\n for attr_name in dir(self):\n attr = getattr(self, attr_name)\n if isinstance(attr, fields.Field):\n yield attr", "def get_ask_mapping(cls):\n mapping = {}\n for field in cls._meta.fields:\n if isinstance(field, AskForField):\n mapping[field.allows_field] = field.name\n return mapping", "def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)", "def set_up_fields(self, fields):\n self.fields = {\n 'name': self.name,\n 'email': self.email\n }\n for key in fields.keys():\n # special keys first, not to be used in the template\n if key.upper() == 'CC':\n self.is_cc = fields[key]\n elif key.upper() == 'BCC':\n self.is_bcc = fields[key]\n else:\n self.fields[key] = fields[key]", "def fill_by_name(self, fields, prefix=\"\"):\n self.fill({'[name=\"%s%s\"]' % (prefix, k): v for k, v in fields.items()})", "def retype(self, dictionary):\r\n\r\n for name, retype in dictionary.items():\r\n field = self._field_dict[name]\r\n for key, value in retype.items():\r\n if key in _valid_retype_attributes:\r\n field.__setattr__(key, value)\r\n else:\r\n raise Exception(\"Should not use retype to change field attribute '%s'\", key)", "def generate_attributes(self):\n for group in self.dict:\n for param in self.dict[group]:\n if group in self.group_douplicate and param in self.name_douplicate:\n setattr(self, group+'_'+param, self(group, param))\n else:\n setattr(self, param, self(group, param))", "def map_attributes(order: dict) -> dict:\n map_dict = {\n \"has_batteries\": FactoryMapping.cast_str_to_bool,\n \"has_glow\": FactoryMapping.cast_str_to_bool,\n \"has_lactose\": FactoryMapping.cast_str_to_bool,\n \"has_nuts\": FactoryMapping.cast_str_to_bool,\n \"min_age\": int,\n \"num_rooms\": int,\n \"num_sound\": int,\n \"pack_size\": int,\n \"dimensions\": lambda x: float(x.replace(\",\", '.')),\n \"spider_type\": SpiderType.map_str_to_enum,\n \"colour\": Colours.map_str_to_enum,\n \"variety\": ToffeeVariety.map_str_to_enum,\n \"stuffing\": Stuffing.map_str_to_enum,\n \"size\": Size.map_str_to_enum,\n \"fabric\": Fabric.map_str_to_enum\n }\n for key, value in map_dict.items():\n if key in order:\n order[key] = value(order[key])\n return order", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, 
Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def _process_attrs(attrs):\n new_attrs = OrderedDict()\n for attr in attrs:\n col = attr\n if isinstance(attr, tuple):\n col, attr = attr\n # special cases\n if attr == 'class_name':\n attr = '__class__.__name__'\n if attr == 'repr':\n attr = repr\n new_attrs[col] = attr\n\n return new_attrs", "def iterate_over_fields(cls):\n for attr in dir(cls):\n clsattr = getattr(cls, attr)\n if isinstance(clsattr, BaseField):\n yield attr, clsattr", "def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n self.mapping.append(label)\n return DatasetMapping(fields=fields)", "def create_attributes(klass, attributes, previous_object=None):\n\n if previous_object is not None:\n return {'name': attributes.get('name', previous_object.name)}\n return {\n 'name': attributes.get('name', ''),\n 'defaultLocale': attributes['default_locale']\n }", "def createFieldMapping(sgidPoints):\n # Create field mappings\n sgidFMs = arcpy.FieldMappings()\n\n # Perform some field renaming\n mapPairs = [\n ('State', 'State'),\n ('City', 'Inc_Muni'),\n ('CountyID', 'County'),\n ('ZipCode', 'Zip_Code'),\n ('PrefixDir', 'StN_PreDir'),\n ('StreetName', 'StreetName'),\n ('StreetType', 'StN_PosTyp'),\n ('SuffixDir', 'StN_PosDir'),\n ('AddNum', 'Add_Number'),\n ('LandmarkName', 'landmkName'),\n ('Building', 'Building'),\n ('UnitType', 'Unit'),\n ('AddSource', 'AddAuth'),\n ('AddSystem', 'UniqWithin'),\n ('LoadDate', 'LastUpdate')]\n\n for p in mapPairs:\n print p\n sgidFMs.addFieldMap(getRenameFieldMap(sgidPoints, p[0], p[1]))\n\n return sgidFMs", "def insertable_dict(self):\n # .strip('_') is for type_\n return {\n 'f_' +\n p.key.strip('_'): getattr(\n self,\n p.key) for p in self.__mapper__.attrs}", "def audit_fields(elem, fields):\r\n errs = []\r\n parsed = {}\r\n for field, field_type, dict_field in fields:\r\n if field not in elem.attrib:\r\n errs.append(('missing value', field))\r\n else:\r\n value = ensure_type(elem.get(field), field_type)\r\n if not value:\r\n errs.append(('wrong type', field))\r\n else:\r\n parsed[dict_field] = value\r\n \r\n if errs:\r\n parsed = None\r\n return parsed, errs" ]
[ "0.6646376", "0.65862083", "0.65552986", "0.65168107", "0.6496606", "0.63440627", "0.6253964", "0.6224082", "0.61496115", "0.61344844", "0.60731304", "0.60435295", "0.60270566", "0.5998152", "0.5984378", "0.59624", "0.59521395", "0.59237885", "0.5913278", "0.59089035", "0.58935136", "0.5889567", "0.5886418", "0.58837163", "0.5882489", "0.586821", "0.5844601", "0.57894444", "0.5788735", "0.5788207", "0.57765007", "0.577326", "0.5768034", "0.5741848", "0.5704149", "0.5698268", "0.5685755", "0.56689656", "0.56487036", "0.5645976", "0.5637582", "0.55970937", "0.5587819", "0.55820286", "0.557871", "0.55745214", "0.5556045", "0.55559254", "0.55429053", "0.5531964", "0.5528027", "0.55157113", "0.55125535", "0.5504066", "0.5496073", "0.54950047", "0.54942036", "0.54852515", "0.54837424", "0.54782367", "0.54775023", "0.54748887", "0.5462258", "0.54608315", "0.5456623", "0.54565156", "0.5454708", "0.5447678", "0.5447429", "0.54429644", "0.54421574", "0.54421574", "0.5424822", "0.54154396", "0.53954804", "0.53939646", "0.53908944", "0.53895026", "0.5380292", "0.5378784", "0.5372736", "0.5368681", "0.5368681", "0.5368519", "0.53621787", "0.53604156", "0.53586674", "0.53520733", "0.5344714", "0.53401035", "0.53355265", "0.5334642", "0.5329467", "0.5323729", "0.53182125", "0.5316282", "0.53137594", "0.53022665", "0.52959454", "0.5294544" ]
0.81743294
0
Get all ContactTags associated to contact with that id
def all_in_contact(cls, contact_id: int): for contact_tag in cls.get_all_in("contacts", contact_id): yield contact_tag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_photo_tags(self, photo_id):\n\n query_string = '''\n select photo_tag.tag_name from photo\n join photo_tag on(photo_tag.photo_id=photo.photo_id)\n where photo.photo_id={}\n '''.format(photo_id)\n\n # so an array of tags would be ok\n tag_data = self.db.get_query_as_list(query_string)\n for tag in tag_data:\n # print(self.decode_tag(tag['tag_name']))\n\n tag['human_readable_tag'] = self.decode_tag(tag['tag_name'])\n\n # print(tag_data)\n\n return tag_data", "def get_tag_to_post(id_post):\n try:\n tags = Posts.objects.get(id=id_post)\n list_tags = [t.tag for t in tags.tags.all()]\n except:\n list_tags = []\n return list_tags", "def subject_tag_get_all(context, subject_id, session=None):\n _check_subject_id(subject_id)\n session = session or get_session()\n tags = session.query(models.SubjectTag.value).filter_by(\n subject_id=subject_id).filter_by(deleted=False).all()\n return [tag[0] for tag in tags]", "def get_all_id_and_tags(self):\n return self.database.select(self.tname,\n [self.primary_key, 'tags'])", "def get_taggable_contacts(state, user):\n\n def get_state_users(state):\n if state is None:\n criteria = {'location__slug': 'nigeria'}\n else:\n criteria = {'location__type__slug': 'state', 'location__slug': state}\n\n users = Contact.objects.filter(**criteria).select_related()\n for u in users:\n if user.id != u.user.id:\n yield {\n 'user_id': u.id,\n 'username': u.user.username,\n 'first_name': u.first_name,\n 'last_name': u.last_name,\n 'state': state or 'national'\n }\n\n taggables = list(get_state_users(None))\n if state:\n taggables.extend(get_state_users(state))\n\n by_state = map_reduce(taggables, lambda u: [(u['state'], u)], lambda v, k: sorted(v, key=lambda u: (u['last_name'], u['first_name'])))\n by_state = [{'state': k, 'users': v} for k, v in by_state.iteritems()]\n by_state.sort(key=lambda e: 'zzzzz' if e['state'] == 'national' else e['state'])\n return by_state", "def get(self, currency, address):\n check_inputs(address=address, currency=currency) # abort if fails\n address_tags = commonDAO.list_address_tags(currency, address)\n return address_tags # can be empty list", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def get_tags(self, obj):\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data", "def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n tags = entitiesDAO.list_entity_tags(currency, entity)\n return tags", "def list_contacts(self):\n return self.contacts", "def get_contact_interactions(request, pk):\n try:\n contact = Contact.objects.get(pk=pk)\n except Contact.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n ans = []\n interactions = contact.interaction_set.all()\n for interaction in interactions:\n ans.append(InteractionSerializer(interaction).data)\n return Response(ans)", "async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from contact_group_contacts, contacts\n where contact_group_contacts.contact_group_id = %s\n and contact_group_contacts.contact_id = contacts.id\"\"\"\n return [object_models.Contact(*row) for row in await 
dbcon.fetch_all(q, (contact_group_id,))]", "def getTagsUsingId(self,resourceId):\n response = requests.get('https://api.imagga.com/v1/tagging?content=%s' % resourceId,\n auth=(self.apikey, self.secret))\n #print ('printing response')\n #print (response.json())", "def get_queryset(self):\n contact_data = Contact.objects.filter(contact_groups__in=Member.objects.filter(\n user=self.request.user).values('group_id').distinct())\n\n return contact_data", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def get_contacts_by_company(self, company_id):\n\n contacts = self._request('getContactsByCompany', {'company_id': company_id})\n for contact in contacts:\n yield contact", "def tags(self, uuid):\n return self._backend.tags(uuid)", "def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]", "def get_all_tags():\n try:\n tags = g.projects.distinct('tags')\n return jsonify(sorted(tags, key=str.lower))\n except Exception as err:\n raise ApiException(str(err), 500)", "def get_tags(self):\n resp = self.get(_u.build_uri(\"tags\", domain=self.domain))\n return utils.handle_response(resp)", "def get_tag_ids(self, cr, uid, model, code=None, name=None, context=None):\n assert bool(code) or bool(name), \"code or name must not be None! (code=%s;name=%s)\" % (code, name)\n tag_domain = [('model_id.model', '=', model)]\n if code is not None:\n tag_domain.append(('code', '=', code))\n if name is not None:\n tag_domain.append(('name', '=', name))\n return self.search(cr, uid, tag_domain, context=context)", "def get_all_tagged(self,tag_name):\n return self.tag2elements[tag_name]", "def resource_name() -> str:\n return \"contactTags\"", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def list(self):\n return self._post(\n request='list',\n uri=ApiUri.TAGS.value,\n ).get('tags')", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def contact_list(self):\n return self._contact_list", "def get_all_tags_list(cls):\n all_tags_list = []\n # obj_list = cls.objects.filter(status=0).order_by('-update_time')\n obj_list = Article.objects.all()\n for obj in obj_list:\n all_tags_list = all_tags_list + obj.tags_list()\n # for tag in obj.tags.split(','):\n # all_tags_list.append(tag)\n return all_tags_list", "def get_tags(self, tag_name: str):\n return self.soup.find_all(tag_name)", "def read(self, request, tag=None):\n tags = Tag.objects\n if tag:\n t = tags.get(slug=tag)\n return t.entry_set.all()\n else:\n return tags.all()", "def get_tag_interactions(request, pk):\n try:\n tag = InteractionTag.objects.get(pk=pk)\n except InteractionTag.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n ans = []\n interactions = tag.interaction_set.all()\n for interaction in interactions:\n if request.user == interaction.owner:\n ans.append(InteractionSerializer(interaction).data)\n return Response(ans)", "def getTags(self,):\n\t\treturn self.tags;", "def contacts(self):\r\n return contacts.Contacts(self)", "def tag_ids(self, convthread_id=None):\n if None == 
convthread_id:\n return [tag[0] for tag in self.dfs[\"tags\"][[\"tag_id\"]].values]\n else :\n df = self.dfs[\"convthreads_with_tags\"]\n tag_records = df[df.convthread_id == convthread_id]\n return tag_records[\"tag_id\"].values", "def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')", "def contacts(self):\n return ContactCollection(self.request)", "def address_tags(self):\n return self._address_tags", "def get_tags_for_instance(self, instance_id):\n try:\n response = self.ec2.describe_instances(InstanceIds=[instance_id])\n except Exception as e:\n logger.info(e)\n return []\n for reservation in response['Reservations']:\n for instance in reservation['Instances']:\n if instance['InstanceId'] == instance_id:\n return instance['Tags']\n return []", "def get_tags(self) -> Set[Text]:\r\n return {tag for tag in self.tags}", "def get_tags(self):\n return self.tags", "def get_mailing_tagnames(self, mailing_obj):\n tag_refs = mailing_obj.get('mailing', mailing_obj).get('tags', [])\n tags = []\n for t in tag_refs:\n tdata = self.client.get('%s%s' % (self.base_url, t))\n tags.append(tdata.json().get('name'))\n return tags", "def get_tags(request):\n as_list = request.params.get('as_list')\n if as_list:\n return [\n tag.name\n for tag in Tag.query.all()\n ]\n else:\n return [\n {\n 'name': tag.name,\n 'id': tag.id\n }\n for tag in Tag.query.all()\n ]", "def prepare_tags(self, obj):\n return [tag.name for tag in obj.tags.all()]", "def find_all(self, params={}, **options):\n return self.client.get_collection(\"/tags\", params, **options)", "def get_by_tag(cls, tag):\n out = []\n \n tags = Tag.expand_implied_by([tag])\n \n for t in tags:\n results = cls.objects.filter(owner=tag.owner, tags=t)\n \n for b in results:\n if b not in out:\n out.append(b)\n \n return out", "def getTags(number=None):", "def get_queryset(self):\n return self.request.user.contacts.all()", "def get_document_tags(self, docid):\n return [(key, json.loads(value))\n for key, value\n in self.sql_session.query(Feature)\n .filter(Feature.document == docid)\n .values(Feature.key, Feature.value)]", "def tags(self):\n return self.get(\"tags\")", "def list_all_tags(self,obs):", "def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts", "async def test_list_address_tags_by_entity(self):\n await test_service.list_address_tags_by_entity(self)", "def get_tags(self):\n\n return self.tags", "def list_tags(self, entry_name):\n return self.__datacatalog.list_tags(parent=entry_name)", "def first_level_tags(self) -> Any:\n return self.client.get_instances_id_content_tags_path(self.id_, '')", "def tags(self) -> \"IterableList[TagReference]\":\n return TagReference.list_items(self)", "def get_tags(self) -> List:\n LOGGER.info('Get all the tags')\n\n with self.client.create_session() as session:\n tag_count = (func.count(RDSTableTag.table_rk)\n + func.count(RDSDashboardTag.dashboard_rk)).label('tag_count')\n\n records = session.query(\n RDSTag.rk.label('tag_name'),\n tag_count\n )\\\n .outerjoin(RDSTableTag)\\\n .outerjoin(RDSDashboardTag)\\\n .filter(RDSTag.tag_type == 'default')\\\n .group_by(RDSTag.rk)\\\n .having(tag_count > 0)\\\n .all()\n\n results = []\n for record in records:\n 
results.append(TagDetail(tag_name=record.tag_name,\n tag_count=record.tag_count))\n\n return results", "def tags(request):\n return Tag.objects.filter(user=request.user)", "def tags_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(repository_id, \"tags\", access_token)", "def create_a_tag(self, tag_id, contact_id):\n data = {\"contactTag\":{\"contact\":str(contact_id),\"tag\":str(tag_id)}}\n\n return self.client._post(\"/contactTags\", json=data)", "def dataset_tags(connection):\n assert connection\n query = \"\"\"select * from tags()\"\"\"\n result = sqlio.read_sql_query(query, connection)\n return [item.strip() for item in result['name']], [tag_id.strip() for tag_id in result['tag_id']]", "def get_tags(self):\n\n base_url = self.get_parent().url\n tags = self.tags.all()\n\n for tag in tags:\n tag.url = f\"{base_url}tags/{tag.slug}/\"\n\n return tags", "def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def get_tags(self, tags):\n tag_list = []\n for tag in tags:\n tag_list.append(tag[\"name\"])\n return tag_list", "def getTags(owner_id=None, photo_id=None, access_key=None):\n params = {\n 'owner_id': owner_id,\n 'photo_id': photo_id,\n 'access_key': access_key\n }\n result = call('photos.getTags', **params)\n return parse_response(result)", "def get_tags(self, *args, **kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]", "def queryset(self, request, queryset):\n for tag in get_resource_tags():\n if self.value() == tag[0]:\n return queryset.filter(tags__slug__iexact=tag[0])", "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def findTaggedServiceIds(self, name):\n pass;", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def get_tags(self) -> Dict:\n return self.orthanc.get_instance_tags(self.identifier)", "def get_post_tags(postid, posttags, tags):\n _tags = []\n _nametags = []\n for item in posttags:\n if item['post_id'] == postid:\n _tags.append(item['tag_id'])\n for tag in _tags:\n nametag = get_tagname(tags, tag)\n _nametags.append(nametag)\n return _nametags", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def obj_categories(self):\r\n return self._tags", "def get_asset_tags(user_id):\n result = db.session.query(Tag).filter(\n NotificationAssetTag.user_id == user_id,\n NotificationAssetTag.tag_id == Tag.id\n ).all()\n return result if result else []", "def tags(self):\r\n return resources.Tags(self)", "def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])", "def collect_tags(self):\n tags = []\n for document in self.documents:\n for tag_token in document.tags:\n tags.append(tag_token)\n return tags", "def find_asset_set(self, uid):\n doc_tags = self._collection_asset_tags.find_one({'uid': uid})\n return doc_tags", "def get_translated_ids(id):", "def entity_tags(self):\n return 
self._entity_tags", "def api_get_tags(request):\n\n # TODO Get favorite tags for the given user ID\n\n tags = Tag.objects.get_not_empty_tags()\n tag_names = []\n for tag in tags:\n tag_names.append(tag.name)\n\n return HttpResponse(content=json.dumps(tag_names))", "def test_get_device_tags_by_id(self):\n pass", "def list_tags(self, session):\n result = self._tag(session.get, session=session)\n return result['tags']", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def get_contacts():\n return jsonify(g.driver.get_contacts())", "def tagPosts(db, tags):\n c=db.cursor()\n print >>sys.stderr, \"Finding tagged posts from\", tags\n idents = []\n for tag in tags:\n c.execute(\"\"\"SELECT post_id FROM tags WHERE tag='%s'\"\"\" % tag)\n idents += [int(ident[0]) for ident in c.fetchall()]\n c.close()\n return list(set(idents))", "def tags(self):\n return self._item.get(\"tags\")" ]
[ "0.59750104", "0.59688324", "0.5902836", "0.58065534", "0.5737934", "0.56792", "0.5666696", "0.56300694", "0.56000763", "0.5579044", "0.55602413", "0.5539786", "0.5474715", "0.54744804", "0.54674107", "0.546423", "0.546423", "0.5387576", "0.5375769", "0.5366009", "0.53620964", "0.53493714", "0.5338336", "0.53381556", "0.53158957", "0.5290614", "0.52836025", "0.5279322", "0.5257186", "0.5247879", "0.5241851", "0.5231902", "0.52288723", "0.5218876", "0.5215778", "0.52127415", "0.51939964", "0.51857746", "0.51835656", "0.5172548", "0.5170723", "0.51678497", "0.5164406", "0.51544976", "0.5149598", "0.513339", "0.51224047", "0.5120788", "0.51200116", "0.5118745", "0.5111579", "0.5111258", "0.50984275", "0.50906295", "0.5079039", "0.5073982", "0.50712925", "0.5068463", "0.50548905", "0.50343686", "0.50287896", "0.502789", "0.50270516", "0.5026636", "0.5024029", "0.5022212", "0.5019983", "0.5019373", "0.5015886", "0.5014633", "0.50083417", "0.50023526", "0.49917528", "0.49874666", "0.49835265", "0.4981862", "0.49788502", "0.49785203", "0.49720296", "0.4969989", "0.49623483", "0.496217", "0.49522674", "0.4947717", "0.49467754", "0.49448442", "0.49413246", "0.49375886", "0.4928411", "0.4926625", "0.4926625", "0.4926625", "0.4926625", "0.4926625", "0.4926625", "0.4926625", "0.4926625", "0.49233073", "0.4922935", "0.49193725" ]
0.7933677
0
Is true when ``argument`` is an iterable collection with integer-equivalent items.
def all_are_integer_equivalent_numbers(argument): from abjad import mathtools try: return all(mathtools.is_integer_equivalent_number(_) for _ in argument) except TypeError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def is_sequence_of_int(items):\n return all(isinstance(item, int) for item in items)", "def is_iterable(arg):\n return (\n isinstance(arg, collections.Iterable)\n and not isinstance(arg, str)\n )", "def is_iterable(element):\n return isinstance(element, (set, list, tuple))", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def is_int(*args): \n try:\n for i in args:\n int(i)\n return True\n except Exception:\n return False", "def is_numlike(x):\r\n if iterable(x):\r\n for thisx in x:\r\n return is_numlike(thisx)\r\n else:\r\n return is_numlike(x)", "def is_iterable(var, iterable_types=ITERABLE_TYPES):\n return isinstance(var, iterable_types)", "def is_sequence_of_iterable(items):\n return all(is_item_iterable(item) for item in items)", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def is_iterable(self):\n return all(set.is_iterable for set in self.sets)", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def is_iterable(var):\n return any(isinstance(var, cls) for cls in [list, tuple, types.GeneratorType])", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def is_iterable(x: Any) -> bool:\r\n return isinstance(x, collections.abc.Iterable) and not isinstance(x, (str, bytes))", "def has_args(iterable, args):\n\n try:\n return all(x in iterable for x in args)\n\n except TypeError:\n return False", "def is_sequence(arg):\n\n # np.float{16,32,64} and np.int types have __getitem__ defined\n # this is a long-standing bug in NumPy and unlikely to be fixed\n # todo: backport to qmmlpack, write tests\n if isinstance(arg, (str, bytes, np.number, dict, set)):\n return False\n\n return hasattr(arg, \"__getitem__\") or hasattr(arg, \"__iter__\")", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def _is_positive_int_tuple(item):\n if not isinstance(item, tuple):\n return False\n for i in item:\n if not _is_positive_int(i):\n return False\n return True", "def is_sequence_of_uint(items):\n return all(isinstance(item, int) and item >= 0 for item in items)", "def is_integer(value: Union[float, np.ndarray]) -> bool:\n if type(value) == np.ndarray:\n for entry in value:\n result = Comparator.is_integer(entry)\n if not result:\n return False\n return True\n else:\n value = abs(value)\n value -= int(value)\n if value > 0.5:\n return Comparator.is_close_to_zero(1 - value)\n return Comparator.is_close_to_zero(value)", "def is_intscalar(x: Any) -> bool:\r\n return isinstance(x, (\r\n int,\r\n np.int8,\r\n np.int16,\r\n np.int32,\r\n np.int64,\r\n np.uint8,\r\n np.uint16,\r\n np.uint32,\r\n np.uint64,\r\n ))", "def is_iterable_container(value):\n # strings are iterable too so we have to treat them as a special case\n return not isinstance(value, str) and isinstance(value, collections.Iterable)", "def is_iterable(value):\n # noinspection PyUnresolvedReferences\n return hasattr(value, '__iter__') and hasattr(value, '__getitem__')", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def _is_integer(x):\n return (not isinstance(x, (bool, 
np.bool))) and \\\n isinstance(x, (numbers.Integral, int, np.int, np.long, long)) # no long type in python 3", "def is_iterable(x):\n if isinstance(x, six.string_types):\n return False\n return hasattr(x, '__iter__')", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def _is_iterable_non_string(arg):\n return (hasattr(arg, \"__iter__\") or hasattr(arg, \"__getattr__\")) and not isinstance(arg, str)", "def test_defined_in_iter():\n\n @type_checked\n def _run_test(thing:[(int, str, str)]):\n for group in thing:\n assert isinstance(group[0], int)\n assert isinstance(group[1], str)\n assert isinstance(group[2], str)\n assert len(thing) == 4\n\n _run_test(thing=[\n (12.3, None, False),\n (\"12.1\", True, 1),\n (False, 10, 12.1),\n (True, 14.9, None),\n ])", "def isInteger(self):", "def isInteger(self):", "def are_sequential_integers(values: List[Union[str, int, float]]):\n int_list = []\n for value in values:\n if not is_integer(value):\n return False\n int_list.append(int(float(value)))\n return (max(int_list) - min(int_list) + 1) == len(int_list)", "def acceptsArgument(self):\n range = self.validateRange(self.range)\n return not(not(range[1]))", "def test_in_operator_on_non_iterable(self):\n\n class User(Document):\n name = StringField()\n\n class BlogPost(Document):\n content = StringField()\n authors = ListField(ReferenceField(User))\n\n User.drop_collection()\n BlogPost.drop_collection()\n\n author = User.objects.create(name=\"Test User\")\n post = BlogPost.objects.create(\n content=\"Had a good coffee today...\", authors=[author]\n )\n\n # Make sure using `__in` with a list works\n blog_posts = BlogPost.objects(authors__in=[author])\n assert list(blog_posts) == [post]\n\n # Using `__in` with a non-iterable should raise a TypeError\n with pytest.raises(TypeError):\n BlogPost.objects(authors__in=author.pk).count()\n\n # Using `__in` with a `Document` (which is seemingly iterable but not\n # in a way we'd expect) should raise a TypeError, too\n with pytest.raises(TypeError):\n BlogPost.objects(authors__in=author).count()", "def any_user(iterable):\n for element in iterable:\n if element:\n return True\n return False", "def is_sequence(arg):\n return (not hasattr(arg, \"strip\") and\n hasattr(arg, \"__getitem__\") or\n hasattr(arg, \"__iter__\"))", "def isIterable(obj):\n return isinstance(obj, ListType)", "def isInteger(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.int32 or col.dtype == np.int64", "def _all_equal(arg):\n return arg.count(arg[0]) == len(arg)", "def isInteger(self):\n return isinstance(self.value, int)", "def is_iterable(obj):\n return isinstance(obj, (list, tuple, types.GeneratorType)) or \\\n (not isinstance(obj, (int, str, dict)) and\n bool(getattr(obj, \"next\", False)))", "def is_convertible_to_int(v: Any) -> bool:\n\n try:\n test = int(v)\n return True\n except:\n return False", "def isInteger(self):\n pass", "def is_iterable(obj):\n if isinstance(obj, (str, bytes, bytearray)):\n return False\n return isinstance(obj, Iterable)", "def __contains__(self, item: OidValue) -> bool:\n item = to_int_tuple(item)\n return self.value == item[0 : len(self.value)]", "def is_int3(items):\n return len(items) == 3 and all(isinstance(item, int) for item in items)", "def same(seq: typing.Iterable[typing.Any]) -> bool:\n seq = iter(seq)\n first = type(next(seq))\n return all(isinstance(i, first) for i in seq)", "def 
is_valid_integer_list(any_list):\n list_object = json.loads(any_list)\n return not any(not is_valid_integer(str(listing_id)) for listing_id in\n list_object)", "def isiterable(obj, strings=False, isinstance=isinstance, Iterable=Iterable):\n return (isinstance(obj, Iterable) and\n not (isinstance(obj, str) and not strings))", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def _is_integer_like(input):\n if _is_boolean_like(input):\n return True\n if type(input) is int:\n return True\n if isinstance(input, _ScalarConstant):\n if input.dtype in _int_like_types:\n return True\n return False", "def is_int(x):\n return int(x) == x", "def isIterable(obj):\n # type: (Any) -> bool\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True", "def __eq__(self, *args):\n return _ida_hexrays.user_numforms_iterator_t___eq__(self, *args)", "def contains(self, *args):\n return _libsbml.IdList_contains(self, *args)", "def allIn(listA: Union[int, List[int]], listB: Union[int, List[int]]) -> bool:\n if isinstance(listA, int):\n listA = [listA]\n if isinstance(listB, int):\n return listB in listA\n else:\n for item in listB:\n if item not in listA:\n return False\n return True", "def is_int(x):\n # boolean are subclasses of integers in Python, so explicitly exclude them\n return isinstance(x, (int, np.integer)) and not isinstance(x, bool)", "def is_int(q):\n if isinstance(q, (Integer, int)):\n return True\n if isinstance(q, Rational):\n if q.denominator() == 1:\n return True\n if isinstance(q, tuple):\n return False\n try:\n if floor(q) == ceil(q):\n return True\n except TypeError:\n pass\n return False", "def __contains__(self, a):\n try:\n self.convert(a)\n except CoercionFailed:\n return False\n\n return True", "def _is_good_iterable(obj):\n return _is_iterable(obj) and _has_str_elems(obj)", "def is_collection(var):\n return isinstance(var, Iterable) and not isinstance(var, str)", "def any(self) -> int:", "def any(self) -> int:", "def hasNextInt(self) -> bool:\n raise NotImplementedError", "def any(iterable):\n for item in iterable:\n if item:\n return True\n return False", "def is_int(x):\n # From sktime: BSD 3-Clause\n # boolean are subclasses of integers in Python, so explicitly exclude them\n return isinstance(x, (int, np.integer)) and not isinstance(x, bool)", "def is_int(self):\n return self.value_type in (int, arrow.JuArrow)", "def _is_positive_int(item):\n if not isinstance(item, int):\n return False\n return item > 0", "def is_integer(i):\n import numpy as np\n if isinstance(i, (int, long)):\n return True\n if isinstance(i, float):\n return (i).is_integer()\n if issubclass(type(i), np.integer):\n return i\n else:\n return False", "def test_args_count_equal(args: list, target: int) -> bool:\n\n\treturn (args_count(args) == target)", "def operator_in(item: Result, container: Result) -> Result:\n result: Result = celpy.celtypes.BoolType(False)\n for c in cast(Iterable[Result], container):\n try:\n if c == item:\n return celpy.celtypes.BoolType(True)\n except TypeError as ex:\n logger.debug(f\"operator_in({item}, {container}) --> {ex}\")\n result = CELEvalError(\"no such overload\", ex.__class__, ex.args)\n logger.debug(f\"operator_in({item!r}, {container!r}) = {result!r}\")\n return result", "def is_int(value):\n return isinstance(value, int)", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise 
AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def is_int(self, size=None):\n return False", "def isInteger(data):\n\ttry:\n\t\tfrom types import LongType, IntType\n\t\tif type(data) == LongType or type(data) == IntType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(int(0)):\n\t\t\treturn True\n\treturn False", "def _is_all_int(df_list: List[Union[dd.DataFrame, pd.DataFrame]], col: str) -> bool:\n for df in df_list:\n if col in df.columns:\n srs = df[col]\n if isinstance(srs, (dd.DataFrame, pd.DataFrame)):\n for dtype in srs.dtypes:\n if not is_integer_dtype(dtype):\n return False\n elif isinstance(srs, (dd.Series, pd.Series)):\n if not is_integer_dtype(srs.dtype):\n return False\n else:\n raise ValueError(f\"unprocessed type of data:{type(srs)}\")\n return True", "def is_integral(self):\n return all(x in ZZ for x in self._representation_vector)", "def in_list(value, arg):\r\n return value in arg", "def isInteger(self):\n return _libsbml.ASTNode_isInteger(self)", "def check_if_input_is_int(self):\n try:\n int(self.input)\n except ValueError:\n return False\n else:\n return True", "def equal(self, *args):\n return _libsbml.SwigPyIterator_equal(self, *args)", "def isiterable(obj, classinfo=None, of_type=None):\n if classinfo is not None:\n if not isinstance(obj, classinfo):\n return False\n elif not hasattr(obj, '__iter__') and not hasattr(obj, '__getitem__'):\n return False\n if of_type is not None:\n return all(isinstance(ele, of_type) for ele in obj)\n return True", "def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception as e:\n pass\n\n return False", "def is_iterable_object(maybe_iterable: Any) -> TypeGuard[Iterable[Any]]:\n\n return isinstance(maybe_iterable, Iterable)", "def all_user(iterable):\n for element in iterable:\n if not element:\n return False\n return True", "def _is_single_range(r):\n return (isinstance(r, numbers.Integral) or\n (isinstance(r, collections.Sequence) and (len(r) == 2) and\n _is_range_boundary(r[0]) and _is_range_boundary(r[1])))", "def check_all_iterable_values_equal(iterable):\n return all(second_value_onwards == iterable[0] for second_value_onwards in iterable[1:])", "def any(self) -> int:\n ...", "def is_sequence(value):\n return (hasattr(value, \"__iter__\") and not\n isinstance(value, (six.string_types, six.binary_type)))", "def _contains(self, element):\n if not isinstance(element, Tuple) or len(element) != 2:\n return S.false\n\n if not element[1].is_Integer:\n return S.false\n\n if element[1] >= len(self.sets) or element[1] < 0:\n return S.false\n\n return self.sets[element[1]]._contains(element[0])", "def is_in(elt, seq):\n\treturn any(x is elt for x in seq)", "def isInteger(self):\n return self._is_int", "def indexists(list, *args): # Technically doesn't have to do with the screen, but it is very useful. \n return all([int(arg) < len(list) for arg in args])", "def _is_sequence_like(self, data):\n return hasattr(data, \"__iter__\") and hasattr(data, \"__getitem__\")", "def is_iterable(refobject):\n is_iter = False\n try:\n for e in refobject: \n break\n is_iter = True\n except:\n pass\n\n return is_iter", "def is_int(self):\n return self.v & 1 != 0", "def hasCorrectNumberArguments(self, *args):\n return _libsbml.ASTBasePlugin_hasCorrectNumberArguments(self, *args)" ]
[ "0.6820437", "0.68176305", "0.6641058", "0.6636341", "0.6526343", "0.6496134", "0.6355871", "0.6260515", "0.62311673", "0.62234855", "0.6213004", "0.61957854", "0.6160951", "0.6089377", "0.6064915", "0.6020444", "0.60009134", "0.5981801", "0.5974256", "0.596914", "0.5965121", "0.59427434", "0.5904345", "0.5840324", "0.58266085", "0.5820261", "0.5808173", "0.5774553", "0.5761449", "0.57570106", "0.5746608", "0.5746167", "0.57446164", "0.57446164", "0.57210964", "0.57145286", "0.57141286", "0.57012105", "0.5699501", "0.56880933", "0.56764215", "0.56741446", "0.5641429", "0.56090033", "0.5605464", "0.5601156", "0.5582899", "0.55643064", "0.55611444", "0.5559913", "0.55354965", "0.5510966", "0.5501193", "0.5492886", "0.545869", "0.54577905", "0.5446587", "0.5446272", "0.5444816", "0.54329205", "0.54222965", "0.5420525", "0.5420262", "0.5419984", "0.5419324", "0.5419324", "0.5417846", "0.54144216", "0.54007024", "0.5389604", "0.5388609", "0.5385773", "0.5380712", "0.53793514", "0.5368442", "0.5367622", "0.53583646", "0.5351963", "0.53512424", "0.53398633", "0.53365177", "0.5330039", "0.532099", "0.53095216", "0.5305583", "0.5303254", "0.52941775", "0.5290233", "0.52789056", "0.52772385", "0.5274677", "0.52528936", "0.52525574", "0.5250229", "0.5248068", "0.5245593", "0.52437884", "0.5238501", "0.5222876", "0.5210599" ]
0.75479543
0
Initialize this dataset class.
def __init__(self, opt): BaseDataset.__init__(self, opt) self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size)) input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc self.transform = get_transform(opt, grayscale=(input_nc == 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.__dataset = None", "def __init__(self, dataset):\n self._dataset = dataset", "def __init__(self, dataset: Dataset):\n self.dataset = dataset", "def __init__(self, config):\n logger.info(f\"{self.__class__.__name__}: Dataset initializing ...\")\n super().__init__(config)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def __init__(self):\n self.model = None\n self.joined_datasets = None\n self.id_col = None\n self.val_col = None\n self.pop_col = None\n self.total_population_per_unit = None\n self.centroids_of_areal_data = None\n self.prepared_data = None\n self.unknown_area_id = None\n\n # Parameters\n self.lags = None\n self.step = None\n self.min_no_of_observations = None\n self.max_search_radius = None", "def __init__(self):\n self.data_file = ''\n self.data = pd.DataFrame()\n self.labels = pd.DataFrame()\n self.labels_onehot = pd.DataFrame()\n self.df = pd.DataFrame()\n self.df_perm = pd.DataFrame() # for debug purpose\n self.n_samples = 0\n self.n_features = 0\n self.label_dict = {}\n self.inv_label_dict = {}\n self.n_classes = 0\n self.batch_size = 0\n self.n_batch = 0\n self.current_batch_idx = 0\n self.true_distribution = []", "def __init__(self, num_locations):\n self.dataset = {}\n self.num_locations = num_locations\n self.add_locations()", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def __init__(self, data_set):\r\n self.name = data_set\r\n\r\n # The training and test labels\r\n self.labels = {'train': None, 'test': None}\r\n\r\n # The training and test examples\r\n self.examples = {'train': None, 'test': None}\r\n\r\n # Load all the data for this data set\r\n for data in ['train', 'test']:\r\n self.load_file(data)\r\n\r\n # The shape of the training and test data matrices\r\n self.num_train = self.examples['train'].shape[0]\r\n self.num_test = self.examples['test'].shape[0]\r\n self.dim = self.examples['train'].shape[1]", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n 
self.train_position = 0\n print ('Initialized!')", "def __init__(self):\n\n self.check_nans = False\n self.debug_force_memmap = False\n\n # Implementations must initialise the dtype so that feature arrays can be created with correct type:\n self.dtype = None", "def __init__(self, *, dataset=None, aliases=None):\n self._datasets = [] if dataset is None else [dataset]\n self._aliases = {} if aliases is None else {a: 0 for a in aliases}\n self._default_index = 0", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def __init__(self, data):\n self.data = data\n self.columns = Columns(data)\n self.rows = Rows(data)", "def init(*args):\n global dataset\n dataset = args[0]", "def _init_from_DataArrays(self, data, validate=True):\n self._data_vars = self._DataArrays_as_mapping(data)\n\n if (len(self) > 1) and validate:\n first = self[0]\n for i in range(1, len(self)):\n da = self[i]\n first._is_compatible(da, raise_error=True)\n\n self._check_all_different_ids(self._data_vars.values())\n\n self.__itemattr = []\n for key, value in self._data_vars.items():\n self._set_name_attr(key, value)\n\n self.plot = _DatasetPlotter(self)\n\n if len(self) > 0:\n self._set_spectral_attributes(self.geometry)\n\n # since Dataset is MutableMapping it has values and keys by default\n # but we delete those to avoid confusion\n # self.values = None\n self.keys = None", "def __init__(self, path: str = './') -> None:\n super(DataHandler, self).__init__()\n self.path = path\n self.dataset = None # type: str\n self._file_format = None # type: str\n self._max_file_count = 0", "def setUp(self):\n self.dataset = self.dataset_cls()", "def __init__(self, data_dict, mode):\n self.data_dict = data_dict\n self.mode = mode\n\n #Get the data\n self.input_data = data_dict[\"input\"]\n self.output_data = data_dict[\"output\"]\n self.num_samples = self.input_data.shape[0]", "def __init__(self, dataset):\n\n if isinstance(dataset, DataConfig):\n self.__data_config = dataset\n self.dataset = dataset.dataset\n elif isinstance(dataset, str):\n logger.debug(\"Dataset argument {} is string, looking up known dataset.\".format(dataset))\n self.__data_config = DataConfig.known_dataset(dataset)\n self.dataset = dataset\n else:\n raise ValueError(\"Argument 'dataset' must be of type DataConfig or str.\")", "def __init__(\n self,\n path,\n tier,\n embeddings=None,\n preprocessor=None,\n transform=True):\n super().__init__()\n assert os.path.isdir(path)\n assert os.path.exists(os.path.join(path, f'{tier}.csv'))\n assert tier in self.TIERS\n\n self.path = path\n self.tier = tier\n self.data = None\n self.embeddings = embeddings\n self.preprocessor = preprocessor\n self.is_transform = transform\n self._init_dataset()", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def __init__(self):\n\n data_extract=DataExtracter()\n 
self.data = tuple()", "def initialize(self):\n self.data = None\n self.errors = []", "def __init__(self):\n self.data_set = []\n self.finalized_data = LogData()", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n\n self.data_folder = os.path.join(opt.dataroot, opt.phase)\n self.image_file_names = sorted(os.listdir(self.data_folder))\n self.batch_size = opt.batch_size\n self.z_dim = opt.z_dim\n self.imsize = opt.crop_size\n\n self.transform = self.get_transform(True, True, True, opt.center_crop)", "def __init__(self, dat):\n self.data = dat", "def __init__(self, datasets=[], summaries=[], ordinal_columns=[]):\r\n if not datasets:\r\n raise ValueError(\"Parameter datasets is empty. A list of Datasets is required.\")\r\n self.datasets = datasets\r\n if not summaries:\r\n raise ValueError(\"Parameter summaries[] is empty. A list of Summary objects is required.\")\r\n self.summaries = summaries\r\n # future- add checking here that each dataset has the column names\r\n # required by each summary,\r\n # and same for ordinal_columns when they are implemented. \r\n # Or do checking in Dataset().\r", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n\n self.img_paths = glob.glob(os.path.join(opt.dataroot, '{}_image'.format(opt.phase), '*.png'))\n self.segment_dir = os.path.join(opt.dataroot, '{}_segment'.format(opt.phase))\n self.edge_dir = os.path.join(opt.dataroot, '{}_edge'.format(opt.phase))\n self.centerline_dir = os.path.join(opt.dataroot, '{}_centerline'.format(opt.phase))\n\n self.img_transforms = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))])\n self.lab_transform = MaskToTensor()", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.SR_factor = opt.SR_factor\n\n assert util_dataset.check_whether_last_dir(opt.dataroot), 'when SingleDataset, opt.dataroot:{} should be dir and contains only image files'.format(opt.dataroot)\n self.dir_A = opt.dataroot\n self.A_paths = sorted(make_images_dataset(self.dir_A, opt.max_dataset_size)) # get image paths\n\n self.input_nc = self.opt.input_nc\n self.output_nc = self.opt.output_nc", "def __init__(self,dataset=scripts,group=\"\"):\n self.dataset = dataset\n self.group=group", "def __init__(self, dataset_path):\n super(TorchData, self).__init__()\n self.dataset = h5py.File(dataset_path, 'r')\n self.bg = self.dataset['bg']\n self.vocal = self.dataset['vocal']\n self.mix = self.dataset['mix']\n self.len = self.bg.shape[0]", "def initialize(self) -> None:\n pass", "def __init__(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n NexusReaderBase.__init__(self, -1)\n self.taxa = None\n self._data_matrices = None", "def __init__(self, dir_path, window_size,\n user_map_path, computer_map_path, auth_type_map_path, logon_type_map_path):\n logging.info(f\"Initiating Dataset instance for directory {dir_path}\")\n self.directory = dir_path\n self.filenames = [filename for filename in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, filename))]\n assert len(self.filenames) > 0\n random.shuffle(self.filenames)\n self.window_size = window_size\n self.len = self.count_len()\n self.user_map, self.user_count = util.load_mapping(user_map_path)\n self.computer_map, self.computer_count = util.load_mapping(computer_map_path)\n self.auth_type_map, self.auth_type_count = util.load_mapping(auth_type_map_path)\n self.logon_type_map, self.logon_type_count 
= util.load_mapping(logon_type_map_path)", "def __init__(self):\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)", "def __init__(self, datafiles, plotter):\n self.datafiles = datafiles\n self.datasets = dict()\n self.plotter = plotter", "def __init__(self, dataset: ds.Dataset, settings):\r\n self.dataset = dataset\r\n self.settings = settings\r\n\r\n self.visualizer = visualizer.Visualizer()", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def __init__(self, df):\n self.data = df", "def __init__(self, name_dataset, reduce=False):\n dataset = TUDataset(root='data/TUDataset', name=name_dataset)\n if reduce:\n new_dataset = []\n for i in tqdm(range(len(dataset))):\n aux_graph = copy.deepcopy(dataset[i])\n aux_graph.edge_index = TUDData.reduce_edges(aux_graph.edge_index)\n new_dataset.append(copy.deepcopy(aux_graph))\n dataset = WrapperSynthetic(new_dataset, dataset.num_node_features,\n dataset.num_classes, None)\n super(TUDData, self).__init__(dataset)", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def __init__(self, dataset_name, split, num_shards, data_dir,\n filetype_suffix=None):\n self.dataset_name = dataset_name\n self.split = split\n self.num_shards = num_shards\n self.data_dir = data_dir\n self.filetype_suffix = filetype_suffix", "def __init__(\n self,\n embedder_path: Optional[str] = None,\n dataset: Optional[np.ndarray] = None,\n metadata: Optional[List[AnyStr]] = None,\n ) -> None:\n self._embedder_path = embedder_path\n\n # Cache dataset list which can be concatenated to the single dataset. This\n # is used since users may load the data several times, if each time the data\n # are directly concatenated together, memory copy will be costed each time.\n self._cache_dataset_list = []\n\n if dataset is None:\n # Sets dataset and metadata as empty. 
Will load them from raw input data\n # later.\n self._dataset = np.array([])\n self._metadata = []\n else:\n # Directly sets dataset and metadata.\n self._dataset = dataset\n self._metadata = metadata", "def __init__(self) -> None:\n\n self.train_env = None # Training environment\n self.agent = None # The algorithm used to solve an RL problem is represented by a TF-Agent\n self.replay_buffer = None # The replay buffer keeps track of data collected from the environment\n self.dataset = None # The agent needs access to the replay buffer via an iterable tf.data.Dataset\n self.iterator = None # The iterator of self.dataset", "def __init__(self, subset):\n if subset not in ('background', 'evaluation'):\n raise(ValueError, 'subset must be one of (background, evaluation)')\n self.subset = subset\n\n self.df = pd.DataFrame(self.index_subset(self.subset))\n\n # Index of dataframe has direct correspondence to item in dataset\n self.df = self.df.assign(id=self.df.index.values)\n\n # Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers\n self.unique_characters = sorted(self.df['class_name'].unique())\n self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}\n self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))\n\n # Create dicts\n self.datasetid_to_filepath = self.df.to_dict()['data']\n self.datasetid_to_class_id = self.df.to_dict()['class_id']", "def __init__(self, subset):\n if subset not in ('background', 'evaluation'):\n raise(ValueError, 'subset must be one of (background, evaluation)')\n self.subset = subset\n\n self.df = pd.DataFrame(self.index_subset(self.subset))\n\n # Index of dataframe has direct correspondence to item in dataset\n self.df = self.df.assign(id=self.df.index.values)\n\n # Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers\n self.unique_characters = sorted(self.df['class_name'].unique())\n self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}\n self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))\n\n # Create dicts\n self.datasetid_to_filepath = self.df.to_dict()['data']\n self.datasetid_to_class_id = self.df.to_dict()['class_id']", "def __init__(self, subset):\n if subset not in ('background', 'evaluation'):\n raise(ValueError, 'subset must be one of (background, evaluation)')\n self.subset = subset\n\n self.df = pd.DataFrame(self.index_subset(self.subset))\n\n # Index of dataframe has direct correspondence to item in dataset\n self.df = self.df.assign(id=self.df.index.values)\n\n # Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers\n self.unique_characters = sorted(self.df['class_name'].unique())\n self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}\n self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))\n\n # Create dicts\n self.datasetid_to_filepath = self.df.to_dict()['data']\n self.datasetid_to_class_id = self.df.to_dict()['class_id']", "def __init__(self, subset):\n if subset not in ('background', 'evaluation'):\n raise(ValueError, 'subset must be one of (background, evaluation)')\n self.subset = subset\n\n self.df = pd.DataFrame(self.index_subset(self.subset))\n\n # Index of dataframe has direct correspondence to item in dataset\n self.df = self.df.assign(id=self.df.index.values)\n\n # Convert arbitrary class names of dataset to ordered 
0-(num_speakers - 1) integers\n self.unique_characters = sorted(self.df['class_name'].unique())\n self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}\n self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))\n\n # Create dicts\n self.datasetid_to_filepath = self.df.to_dict()['filepath']\n self.datasetid_to_class_id = self.df.to_dict()['class_id']", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_images = os.path.join(opt.dataroot, opt.phase) # get the image directory\n self.image_paths = sorted(make_dataset(self.dir_images, opt.max_dataset_size)) # get image paths\n assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image\n self.input_nc = self.opt.output_nc\n self.output_nc = self.opt.input_nc", "def initialize(self):\n\t\tpass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self, parameters: jnp.ndarray):\n\n # Note that this method is implicitly overriden by the dataclass decorator and\n # should _not_ be marked abstract.\n raise NotImplementedError()", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def _create_init(self):\n\n assert self._topology is not None, \\\n \"Topology must be given for a creation constructor\"\n\n # initialize the runs group\n runs_grp = self._h5.create_group(RUNS)\n\n # initialize the settings group\n settings_grp = self._h5.create_group(SETTINGS)\n\n # create the topology dataset\n self._h5.create_dataset(TOPOLOGY, data=self._topology)\n\n # sparse fields\n if self._sparse_fields is not None:\n\n # make a dataset for the sparse fields allowed. this requires\n # a 'special' datatype for variable length strings. 
This is\n # supported by HDF5 but not numpy.\n vlen_str_dt = h5py.special_dtype(vlen=str)\n\n # create the dataset with empty values for the length of the\n # sparse fields given\n sparse_fields_ds = settings_grp.create_dataset(SPARSE_FIELDS,\n (len(self._sparse_fields),),\n dtype=vlen_str_dt,\n maxshape=(None,))\n\n # set the flags\n for i, sparse_field in enumerate(self._sparse_fields):\n sparse_fields_ds[i] = sparse_field\n\n\n # field feature shapes and dtypes\n\n # initialize to the defaults, this gives values to\n # self._n_coords, and self.field_feature_dtypes, and\n # self.field_feature_shapes\n self._set_default_init_field_attributes(n_dims=self._n_dims)\n\n # save the number of dimensions and number of atoms in settings\n settings_grp.create_dataset(N_DIMS_STR, data=np.array(self._n_dims))\n settings_grp.create_dataset(N_ATOMS, data=np.array(self._n_coords))\n\n # the main rep atom idxs\n settings_grp.create_dataset(MAIN_REP_IDXS, data=self._main_rep_idxs, dtype=int)\n\n # alt_reps settings\n alt_reps_idxs_grp = settings_grp.create_group(ALT_REPS_IDXS)\n for alt_rep_name, idxs in self._alt_reps.items():\n alt_reps_idxs_grp.create_dataset(alt_rep_name, data=idxs, dtype=int)\n\n # if both feature shapes and dtypes were specified overwrite\n # (or initialize if not set by defaults) the defaults\n if (self._field_feature_shapes_kwarg is not None) and\\\n (self._field_feature_dtypes_kwarg is not None):\n\n self._field_feature_shapes.update(self._field_feature_shapes_kwarg)\n self._field_feature_dtypes.update(self._field_feature_dtypes_kwarg)\n\n # any sparse field with unspecified shape and dtype must be\n # set to None so that it will be set at runtime\n for sparse_field in self.sparse_fields:\n if (not sparse_field in self._field_feature_shapes) or \\\n (not sparse_field in self._field_feature_dtypes):\n self._field_feature_shapes[sparse_field] = None\n self._field_feature_dtypes[sparse_field] = None\n\n\n # save the field feature shapes and dtypes in the settings group\n shapes_grp = settings_grp.create_group(FIELD_FEATURE_SHAPES_STR)\n for field_path, field_shape in self._field_feature_shapes.items():\n if field_shape is None:\n # set it as a dimensionless array of NaN\n field_shape = np.array(np.nan)\n\n shapes_grp.create_dataset(field_path, data=field_shape)\n\n dtypes_grp = settings_grp.create_group(FIELD_FEATURE_DTYPES_STR)\n for field_path, field_dtype in self._field_feature_dtypes.items():\n if field_dtype is None:\n dt_str = NONE_STR\n else:\n # make a json string of the datatype that can be read\n # in again, we call np.dtype again because there is no\n # np.float.descr attribute\n dt_str = json.dumps(np.dtype(field_dtype).descr)\n\n dtypes_grp.create_dataset(field_path, data=dt_str)\n\n # initialize the units group\n unit_grp = self._h5.create_group(UNITS)\n\n # if units were not given set them all to None\n if self._units is None:\n self._units = {}\n for field_path in self._field_feature_shapes.keys():\n self._units[field_path] = None\n\n # set the units\n for field_path, unit_value in self._units.items():\n\n # ignore the field if not given\n if unit_value is None:\n continue\n\n unit_path = '{}/{}'.format(UNITS, field_path)\n\n unit_grp.create_dataset(unit_path, data=unit_value)\n\n\n # create the group for the run data records\n records_grp = settings_grp.create_group(RECORD_FIELDS)\n\n # create a dataset for the continuation run tuples\n # (continuation_run, base_run), where the first element\n # of the new run that is continuing the run in the second\n # position\n 
self._init_continuations()", "def __init__(self, **kwargs):\n is_training = kwargs.get('is_training', True)\n rootfolder = kwargs['rootfolder']\n dtype = kwargs.get('dtype', np.float64)\n self._load_mnist(rootfolder, is_training, dtype)\n # normalize data.\n self._data /= 255.\n ndarraydata.NdarrayDataLayer.__init__(\n self, sources=[self._data, self._label], **kwargs)", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_A = opt.dataroot\n self.classes_A, self.class_to_idx_A = self._find_classes(self.dir_A) # find classes in '/path/to/data/trainA'\n samples_A = make_dataset(self.dir_A, self.class_to_idx_A, extensions=self.img_extension, is_valid_file=None) # samples (list): List of (sample path, class_index) tuples\n self.A_paths = [s[0] for s in samples_A]\n self.A_targets = [s[1] for s in samples_A]\n #self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))\n input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc\n self.transform = get_transform(opt, grayscale=(input_nc == 1))", "def __init__(self, directory):\n self._path = os.path.join(\"../../datasets\", directory)\n self.airlines = pd.read_csv(os.path.join(self._path, 'airlines.csv'))\n self.airports = pd.read_csv(os.path.join(self._path, 'airports.csv'))\n self.planes = pd.read_csv(os.path.join(self._path, 'planes.csv'))\n self.countries = pd.read_csv(os.path.join(self._path, 'countries.csv'))\n self.routes = pd.read_csv(os.path.join(self._path, 'routes.csv'))\n self._CreateGraph()", "def __init__(self, annotation_file=None):\n # load dataset\n self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def __init__(self, subset):\n if subset not in ('background', 'evaluation'):\n raise(ValueError, 'subset must be one of (background, evaluation)')\n self.subset = subset\n\n self.df = pd.DataFrame(self.index_subset(self.subset))\n\n # Index of dataframe has direct correspondence to item in dataset\n self.df = self.df.assign(id=self.df.index.values)\n\n # Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers\n self.unique_characters = sorted(self.df['class_name'].unique())\n self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}\n self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))\n\n # Create dicts\n self.datasetid_to_filepath = self.df.to_dict()['filepath']\n self.datasetid_to_class_id = self.df.to_dict()['class_id']\n\n # Setup transforms\n self.transform = transforms.Compose([\n transforms.CenterCrop(224),\n transforms.Resize(84),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])", "def __init__(self, *args, **kwargs):\n # Training/prediction data volume\n self.train_volume: np.ndarray = None\n # Training label volume\n self.train_label_volume: Optional[np.ndarray] = None\n 
# Per-voxel training error weighting volume\n self.train_weight_volume: Optional[np.ndarray] = None\n # Validation data volume\n self.eval_volume: Optional[np.ndarray] = None\n # Validation label volume\n self.eval_label_volume: Optional[np.ndarray] = None\n\n # Number of label classes\n self.n_classes: int = None\n\n # RandomState used to control random number generation\n self.random_state = np.random.RandomState()\n\n # Image augmentation instance_settings for use during training\n self.augmentation_settings: Dict[str, Any] = None\n\n # Load data if arguments are supplied\n if len(args) > 0 or len(kwargs) > 0:\n self.load(*args, **kwargs)\n\n pass", "def __init__(self, num_detectors):\n self.dataset = []\n self.num_detectors = num_detectors\n self.add_detectors()", "def __init__(self):\n self.X = None\n self.Y = None\n self.features = None\n self.max = self.min = None\n self._look_up = None\n self.attr_weight = None", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def __init__(self, *args, **kwargs):\n ignore_version = kwargs.pop('ignore_version', False)\n\n super(Hdf5, self).__init__(*args, **kwargs)\n\n # If True, always translate __getitem__ requests according to the\n # schema, even if __getitem__ requests a dataset that exists\n self.always_translate = False\n\n self._version = self.attrs.get('version')\n if isinstance(self._version, bytes):\n self._version = self._version.decode()\n self._timesteps = {}\n\n # Connect the schema map to this object\n if self._version in SCHEMA:\n self.schema = SCHEMA[self._version]\n elif self._version is None:\n self.schema = {}\n elif not ignore_version:\n raise KeyError(\"Unknown schema version %s\" % self._version)\n\n # Connect the schema dataset providers to this object\n if self._version in SCHEMA_DATASET_PROVIDERS:\n self.dataset_providers = SCHEMA_DATASET_PROVIDERS[self._version]\n else:\n self.dataset_providers = {}", "def __init__(self, path='data'):\r\n self.nb_data = 3\r\n self.path = path\r\n self.data_train_name = 'Xtr'\r\n self.data_test_name = 'Xte'\r\n self.features_name = '_mat100'\r\n self.label_train_name = 'Ytr'\r\n self.label_test_name = 'Ytr'\r\n # load raw data\r\n self.raw_data = {'train': self.load_data(self.data_train_name),\r\n 'test': self.load_data(self.data_test_name)}\r\n # load data features\r\n self.data_features = {'train': self.load_data(self.data_train_name, self.features_name, type_='features'),\r\n 'test': self.load_data(self.data_test_name, self.features_name, type_='features')}\r\n # load labels\r\n self.labels = {'train': self.load_data(self.label_train_name),\r\n 'test': self.load_data(self.label_test_name)}\r\n\r\n # toy data\r\n self.toy_data_functions = {\r\n 'blobs': blobs,\r\n 'two_moons': two_moons\r\n }\r\n self.toy_data = dict()", "def __init__(self,data):\n\t\tself.data = tuple([tuple(x) if isiterable(x) else (x,) for x in data])\n\t\tself.rows = len(self.data)\n\t\tself.cols = len(self.data[0]) if len(self.data)>0 else 0", "def _init(self):\n raise NotImplementedError", "def __init__(self, data=None, filename=None, schema=None):\n self.data = None\n self.schema = None\n self.filename = None\n if schema:\n self.load_schema(schema)\n if filename:\n self.load_file(filename)\n if data:\n self.load_data(data)", "def __initialize(self):\n\t\tself.matrix = [None] * self.size\n\t\tself.__get_log_values()\n\t\tfor row in range(self.size):\n\t\t\tself.matrix[row] = [None] * self.size\n\t\tmax_len = 
self.__get_max_length()\n\t\tdata = self.__get_data(self.text,max_len)\n\t\tmpoly = self.__get_mpoly(data)\n\t\tgpoly = self.__get_gploy()\n\t\tself.final_data = self.__get_final_data(mpoly,gpoly)\n\t\tself.__set_FIP(FP_num = 1)\n\t\tself.__set_FIP(FP_num = 2)\n\t\tself.__set_FIP(FP_num = 3)\n\t\tself.__set_AP()\n\t\tself.__fill_format_info_area()\n\t\tself.__set_TP()", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'\n self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'\n\n self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'\n self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.A_size = len(self.A_paths) # get the size of dataset A\n self.B_size = len(self.B_paths) # get the size of dataset B\n btoA = self.opt.direction == 'BtoA'\n input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image\n output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image\n self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))\n self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))", "def __init__(self):\n # Calls super constructor:\n super().__init__()\n\n # This list holds all the information for computing each 'group' of features:\n self.features_group_list = []", "def __init__(self, file, sdef, name, path, attrs, parent, value, dtype, compress, link_info=None):\n super(Dataset, self).__init__(file, sdef, name, path, attrs, parent, link_info)\n # print \"Creating Dataset, sdef=\"\n # pp.pprint(sdef)\n if 'attributes' in self.sdef['df']:\n self.attributes = copy.deepcopy(self.sdef['df']['attributes'])\n # del self.sdef['df']['attributes'] # if do this, no need to check for attributes in mk_dsinfo\n # print \"found attributes:\"\n # else:\n # print \"did not find attributes:\"\n # pp.pprint(self.attributes)\n # if self.sdef['df']:\n self.dsinfo = self.mk_dsinfo(value)\n self.merge_attribute_defs(self.attributes, self.dsinfo['atags'])\n # else:\n # definition empty, must be custom dataset\n # self.dsinfo = {}\n self.merge_attrs()\n if self.link_info:\n # this dataset set to link to another. Already done in Node. Nothing to do here\n pass\n else:\n # creating new dataset (normally done)\n self.link_node = None\n # compress = \"gzip\" if compress else None\n # self.h5node = self.h5parent.create_dataset(self.name, data=value,\n # dtype=dtype, compression=compress)\n #- self.file.file_pointer.create_dataset(self.full_path, data=value,\n #- dtype=dtype, compression=compress)\n self.file.create_dataset(self.full_path, data=value, dtype=dtype,\n compress=compress)\n # self.file.h5commands.append(\"create_dataset(%s, %s)\" % (self.full_path, value))\n # if dtype:\n # self.h5node = self.h5parent.create_dataset(self.name, data=value, dtype=dtype)\n # else: # should find out what default value for dtype used in h5py and use that, combine these\n # self.h5node = self.h5parent.create_dataset(self.name, data=value)\n self.set_attr_values()", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()" ]
[ "0.816631", "0.81641185", "0.8013762", "0.76137954", "0.7422038", "0.7393049", "0.7352237", "0.73437655", "0.7111143", "0.71019876", "0.7099074", "0.7096382", "0.70223516", "0.696418", "0.6961817", "0.6889772", "0.6879775", "0.68740934", "0.68599087", "0.68520594", "0.6842619", "0.6836507", "0.6828825", "0.6809566", "0.68037564", "0.6800808", "0.67909133", "0.6782312", "0.6782312", "0.6782312", "0.67627436", "0.6756033", "0.6751991", "0.67498195", "0.67498195", "0.6730716", "0.6715901", "0.6707858", "0.67010975", "0.66988236", "0.668923", "0.66852677", "0.66807044", "0.66806763", "0.6678971", "0.66789216", "0.665757", "0.6655296", "0.66539544", "0.66539544", "0.66539544", "0.66539544", "0.66539544", "0.6644616", "0.6638509", "0.6629376", "0.66151416", "0.66142374", "0.66007245", "0.65974915", "0.6596506", "0.6596506", "0.6596506", "0.65858185", "0.6573607", "0.6573607", "0.65658706", "0.6564532", "0.65609944", "0.65609944", "0.65609944", "0.65609944", "0.65609944", "0.65609944", "0.65609944", "0.65609944", "0.654954", "0.6544916", "0.6543092", "0.65408367", "0.6540651", "0.6540625", "0.65384096", "0.6535255", "0.65305084", "0.65303224", "0.65216005", "0.6519088", "0.6515111", "0.651333", "0.65091664", "0.6499544", "0.6497439", "0.64938307", "0.64920795", "0.6488813", "0.6484341", "0.6480846", "0.64773333", "0.6466663" ]
document_score: 0.6548279
document_rank: 77